Merge tag 'drm-intel-next-2019-01-10' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Unwind failure on pinning the gen7 PPGTT (Chris)
- Fastset updates to make sure DRRS and PSR are properly enabled (Hans)
- Header include clean-up (Brajeswar, Jani)
- Improvements and clean-up on debugfs (Chris, Jani)
- Avoid division by zero on CNL clocks setup (Xiao)
- Restrict PSMI context load w/a to Haswell GT1 (Chris)
- Remove HW semaphores for gen7 inter-engine sync (Chris)
- Pull the render flush into breadcrumb emission (Chris)
- i915_params copy and free helpers and other reorgs and docs (Jani)
- Remove has_pooled_eu static initializer (Tvrtko)
- Updates on kerneldoc (Chris)
- Remove redundant trailing request flush (Chris)
- Ringbuffer irq seqno fixes and clean-up (Chris)
- Splitting off runtime device info and other clean-up around it (Jani)
- Selftests improvements (Chris, Daniele)
- Flush RING_IMR changes before changing the global GT IMR on gen6 and HSW (Chris)
- Some improvements and fixes around GPU reset and GPU hang report (Chris)
- Remove partial attempt to swizzle on pread/pwrite (Chris)
- Return immediately if trylock fails for direct-reclaim (Chris)
- Downgrade scare message for unknown HuC firmware (Jani)
- ACPI / PMIC support for MIPI / DSI sequences (Hans)
- Reduce i915_request_alloc retirement to local context (Chris)
- Init per-engine WAs for all engines (Daniele)
- Drop DPF code for gen8+ (Daniele)
- Guard error capture against unpinned vma (Chris)
- Use mutex_lock_killable from inside the shrinker (Chris)
- Remove struct_mutex pooling from the vmap shrinker (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Fri 11 Jan 2019 09:58:18 AEST
# gpg:                using RSA key FA625F640EEB13CA
# gpg: Good signature from "Rodrigo Vivi <rodrigo.vivi@intel.com>"
# gpg:                 aka "Rodrigo Vivi <rodrigo.vivi@gmail.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6D20 7068 EEDD 6509 1C2C E2A3 FA62 5F64 0EEB 13CA
# Conflicts:
#	drivers/gpu/drm/i915/intel_dp.c
#	drivers/gpu/drm/i915/intel_drv.h
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114183820.GA2855@intel.com
commit 8ca4fd0406
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
@@ -15,6 +15,7 @@
 #include <linux/export.h>
 #include <linux/acpi.h>
+#include <linux/mfd/intel_soc_pmic.h>
 #include <linux/regmap.h>
 #include <acpi/acpi_lpat.h>
 #include "intel_pmic.h"
@@ -36,6 +37,8 @@ struct intel_pmic_opregion {
 	struct intel_pmic_regs_handler_ctx ctx;
 };
 
+static struct intel_pmic_opregion *intel_pmic_opregion;
+
 static int pmic_get_reg_bit(int address, struct pmic_table *table,
 			    int count, int *reg, int *bit)
 {
@@ -304,6 +307,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 	}
 
 	opregion->data = d;
+	intel_pmic_opregion = opregion;
 	return 0;
 
 out_remove_thermal_handler:
@@ -319,3 +323,60 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
+
+/**
+ * intel_soc_pmic_exec_mipi_pmic_seq_element - Execute PMIC MIPI sequence
+ * @i2c_address:  I2C client address for the PMIC
+ * @reg_address:  PMIC register address
+ * @value:        New value for the register bits to change
+ * @mask:         Mask indicating which register bits to change
+ *
+ * DSI LCD panels describe an initialization sequence in the i915 VBT (Video
+ * BIOS Tables) using so called MIPI sequences. One possible element in these
+ * sequences is a PMIC specific element of 15 bytes.
+ *
+ * This function executes these PMIC specific elements sending the embedded
+ * commands to the PMIC.
+ *
+ * Return 0 on success, < 0 on failure.
+ */
+int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
+					      u32 value, u32 mask)
+{
+	struct intel_pmic_opregion_data *d;
+	int ret;
+
+	if (!intel_pmic_opregion) {
+		pr_warn("%s: No PMIC registered\n", __func__);
+		return -ENXIO;
+	}
+
+	d = intel_pmic_opregion->data;
+
+	mutex_lock(&intel_pmic_opregion->lock);
+
+	if (d->exec_mipi_pmic_seq_element) {
+		ret = d->exec_mipi_pmic_seq_element(intel_pmic_opregion->regmap,
+						    i2c_address, reg_address,
+						    value, mask);
+	} else if (d->pmic_i2c_address) {
+		if (i2c_address == d->pmic_i2c_address) {
+			ret = regmap_update_bits(intel_pmic_opregion->regmap,
+						 reg_address, mask, value);
+		} else {
+			pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
+			       __func__, i2c_address, reg_address, value, mask);
+			ret = -ENXIO;
+		}
+	} else {
+		pr_warn("%s: Not implemented\n", __func__);
+		pr_warn("%s: i2c-addr: 0x%x reg-addr 0x%x value 0x%x mask 0x%x\n",
+			__func__, i2c_address, reg_address, value, mask);
+		ret = -EOPNOTSUPP;
+	}
+
+	mutex_unlock(&intel_pmic_opregion->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(intel_soc_pmic_exec_mipi_pmic_seq_element);
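For context, the new export is intended to be called from the i915 VBT MIPI sequence executor. A minimal sketch of a caller, assuming the 15-byte PMIC sequence element has already been unpacked into its address/value/mask fields (the wrapper function name here is illustrative, not part of this commit):

    /* Hypothetical caller: forward one VBT PMIC element to the PMIC. */
    static int example_exec_pmic_element(u16 i2c_address, u32 reg_address,
                                         u32 value, u32 mask)
    {
            int ret;

            ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
                                                            reg_address,
                                                            value, mask);
            if (ret)
                    pr_err("PMIC MIPI element failed: %d\n", ret);

            return ret;
    }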
diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h
@@ -15,10 +15,14 @@ struct intel_pmic_opregion_data {
 	int (*update_aux)(struct regmap *r, int reg, int raw_temp);
 	int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value);
 	int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
+	int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address,
+					  u32 reg_address, u32 value, u32 mask);
 	struct pmic_table *power_table;
 	int power_table_count;
 	struct pmic_table *thermal_table;
 	int thermal_table_count;
+	/* For generic exec_mipi_pmic_seq_element handling */
+	int pmic_i2c_address;
 };
 
 int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d);
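A PMIC opregion driver can opt into the new path in one of two ways: supply an exec_mipi_pmic_seq_element callback when the PMIC needs special register addressing, or, for a simple single-address PMIC, set pmic_i2c_address and rely on the generic regmap_update_bits() fallback in intel_pmic.c. A sketch, with the driver name and elided fields as placeholders:

    static struct intel_pmic_opregion_data example_pmic_opregion_data = {
            /* ... power/thermal tables and ops ... */
            /* Either: a custom handler for non-trivial addressing ... */
            .exec_mipi_pmic_seq_element = example_exec_mipi_pmic_seq_element,
            /* ... or: let the generic fallback match on the I2C address. */
            .pmic_i2c_address = 0x34,
    };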
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -231,6 +231,24 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
 	return regmap_update_bits(regmap, reg, bitmask, on ? 1 : 0);
 }
 
+static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
+						   u16 i2c_client_address,
+						   u32 reg_address,
+						   u32 value, u32 mask)
+{
+	u32 address;
+
+	if (i2c_client_address > 0xff || reg_address > 0xff) {
+		pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n",
+			__func__, i2c_client_address, reg_address);
+		return -ERANGE;
+	}
+
+	address = (i2c_client_address << 8) | reg_address;
+
+	return regmap_update_bits(regmap, address, mask, value);
+}
+
 /*
  * The thermal table and ops are empty, we do not support the Thermal opregion
  * (DPTF) due to lacking documentation.
@@ -238,6 +256,7 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
 static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
 	.get_power = intel_cht_wc_pmic_get_power,
 	.update_power = intel_cht_wc_pmic_update_power,
+	.exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element,
 	.power_table = power_table,
 	.power_table_count = ARRAY_SIZE(power_table),
 };
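The CHT Whiskey Cove regmap exposes a 16-bit register space in which the high byte selects the I2C client and the low byte the register, which is why both inputs are range-checked against 0xff before being packed. For example (values illustrative):

    u32 address = (0x6e << 8) | 0x57;   /* client 0x6e, reg 0x57 -> 0x6e57 */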
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -240,6 +240,7 @@ static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
 	.power_table_count = ARRAY_SIZE(power_table),
 	.thermal_table = thermal_table,
 	.thermal_table_count = ARRAY_SIZE(thermal_table),
+	.pmic_i2c_address = 0x34,
 };
 
 static acpi_status intel_xpower_pmic_gpio_handler(u32 function,
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
@@ -1276,6 +1276,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
 	{ OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+	/* LG LP140WF6-SPM1 eDP panel */
+	{ OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
 	/* Apple panels need some additional handling to support PSR */
 	{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
 };
 
 #undef OUI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
@@ -40,7 +40,7 @@ i915-y := i915_drv.o \
 	  i915_mm.o \
 	  i915_params.o \
 	  i915_pci.o \
-          i915_suspend.o \
+	  i915_suspend.o \
 	  i915_syncmap.o \
 	  i915_sw_fence.o \
 	  i915_sysfs.o \

(The i915_suspend.o change is whitespace-only: space indentation replaced with a tab.)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
@@ -24,7 +24,6 @@
 #define _INTEL_DVO_H
 
 #include <linux/i2c.h>
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include "intel_drv.h"
 
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
 						   high_avail / vgpu_types[i].high_mm);
 
-		if (IS_GEN8(gvt->dev_priv))
+		if (IS_GEN(gvt->dev_priv, 8))
 			sprintf(gvt->types[i].name, "GVTg_V4_%s",
 				vgpu_types[i].name);
-		else if (IS_GEN9(gvt->dev_priv))
+		else if (IS_GEN(gvt->dev_priv, 9))
 			sprintf(gvt->types[i].name, "GVTg_V5_%s",
 				vgpu_types[i].name);
 
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -865,7 +865,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(engine->i915))
+	if (!IS_GEN(engine->i915, 7))
 		return;
 
 	switch (engine->id) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -48,7 +48,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 
 	intel_device_info_dump_flags(info, &p);
-	intel_device_info_dump_runtime(info, &p);
+	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
 	intel_driver_caps_print(&dev_priv->caps, &p);
 
 	kernel_param_lock(THIS_MODULE);
@@ -297,11 +297,12 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 }
 
 struct file_stats {
-	struct drm_i915_file_private *file_priv;
+	struct i915_address_space *vm;
 	unsigned long count;
 	u64 total, unbound;
 	u64 global, shared;
 	u64 active, inactive;
+	u64 closed;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
@@ -326,9 +327,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		if (i915_vma_is_ggtt(vma)) {
 			stats->global += vma->node.size;
 		} else {
-			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
-
-			if (ppgtt->vm.file != stats->file_priv)
+			if (vma->vm != stats->vm)
 				continue;
 		}
 
@@ -336,6 +335,9 @@ static int per_file_stats(int id, void *ptr, void *data)
 			stats->active += vma->node.size;
 		else
 			stats->inactive += vma->node.size;
+
+		if (i915_vma_is_closed(vma))
+			stats->closed += vma->node.size;
 	}
 
 	return 0;
@@ -343,7 +345,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 #define print_file_stats(m, name, stats) do { \
 	if (stats.count) \
-		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
 			   name, \
 			   stats.count, \
 			   stats.total, \
@@ -351,20 +353,19 @@ static int per_file_stats(int id, void *ptr, void *data)
 			   stats.inactive, \
 			   stats.global, \
 			   stats.shared, \
-			   stats.unbound); \
+			   stats.unbound, \
+			   stats.closed); \
 } while (0)
 
 static void print_batch_pool_stats(struct seq_file *m,
 				   struct drm_i915_private *dev_priv)
 {
 	struct drm_i915_gem_object *obj;
-	struct file_stats stats;
 	struct intel_engine_cs *engine;
+	struct file_stats stats = {};
 	enum intel_engine_id id;
 	int j;
 
-	memset(&stats, 0, sizeof(stats));
-
 	for_each_engine(engine, dev_priv, id) {
 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			list_for_each_entry(obj,
@@ -377,44 +378,47 @@ static void print_batch_pool_stats(struct seq_file *m,
 	print_file_stats(m, "[k]batch pool", stats);
 }
 
-static int per_file_ctx_stats(int idx, void *ptr, void *data)
+static void print_context_stats(struct seq_file *m,
+				struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx = ptr;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, ctx->i915, id) {
-		struct intel_context *ce = to_intel_context(ctx, engine);
-
-		if (ce->state)
-			per_file_stats(0, ce->state->obj, data);
-		if (ce->ring)
-			per_file_stats(0, ce->ring->vma->obj, data);
-	}
+	struct file_stats kstats = {};
+	struct i915_gem_context *ctx;
 
-	return 0;
-}
+	list_for_each_entry(ctx, &i915->contexts.list, link) {
+		struct intel_engine_cs *engine;
+		enum intel_engine_id id;
 
-static void print_context_stats(struct seq_file *m,
-				struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = &dev_priv->drm;
-	struct file_stats stats;
-	struct drm_file *file;
+		for_each_engine(engine, i915, id) {
+			struct intel_context *ce = to_intel_context(ctx, engine);
 
-	memset(&stats, 0, sizeof(stats));
+			if (ce->state)
+				per_file_stats(0, ce->state->obj, &kstats);
+			if (ce->ring)
+				per_file_stats(0, ce->ring->vma->obj, &kstats);
+		}
 
-	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->kernel_context)
-		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
+			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
+			struct drm_file *file = ctx->file_priv->file;
+			struct task_struct *task;
+			char name[80];
 
-	list_for_each_entry(file, &dev->filelist, lhead) {
-		struct drm_i915_file_private *fpriv = file->driver_priv;
-		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
-	}
-	mutex_unlock(&dev->struct_mutex);
+			spin_lock(&file->table_lock);
+			idr_for_each(&file->object_idr, per_file_stats, &stats);
+			spin_unlock(&file->table_lock);
+
+			rcu_read_lock();
+			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
+			snprintf(name, sizeof(name), "%s/%d",
+				 task ? task->comm : "<unknown>",
+				 ctx->user_handle);
+			rcu_read_unlock();
+
+			print_file_stats(m, name, stats);
+		}
+	}
 
-	print_file_stats(m, "[k]contexts", stats);
+	print_file_stats(m, "[k]contexts", kstats);
 }
 
 static int i915_gem_object_info(struct seq_file *m, void *data)
@@ -426,14 +430,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
 	struct drm_i915_gem_object *obj;
 	unsigned int page_sizes = 0;
-	struct drm_file *file;
 	char buf[80];
 	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	seq_printf(m, "%u objects, %llu bytes\n",
 		   dev_priv->mm.object_count,
 		   dev_priv->mm.object_memory);
@@ -514,43 +513,14 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 			   buf, sizeof(buf)));
 
 	seq_putc(m, '\n');
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	print_batch_pool_stats(m, dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	mutex_lock(&dev->filelist_mutex);
 	print_context_stats(m, dev_priv);
-	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-		struct file_stats stats;
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-		struct i915_request *request;
-		struct task_struct *task;
-
-		mutex_lock(&dev->struct_mutex);
-
-		memset(&stats, 0, sizeof(stats));
-		stats.file_priv = file->driver_priv;
-		spin_lock(&file->table_lock);
-		idr_for_each(&file->object_idr, per_file_stats, &stats);
-		spin_unlock(&file->table_lock);
-		/*
-		 * Although we have a valid reference on file->pid, that does
-		 * not guarantee that the task_struct who called get_pid() is
-		 * still alive (e.g. get_pid(current) => fork() => exit()).
-		 * Therefore, we need to protect this ->comm access using RCU.
-		 */
-		request = list_first_entry_or_null(&file_priv->mm.request_list,
-						   struct i915_request,
-						   client_link);
-		rcu_read_lock();
-		task = pid_task(request && request->gem_context->pid ?
-				request->gem_context->pid : file->pid,
-				PIDTYPE_PID);
-		print_file_stats(m, task ? task->comm : "<unknown>", stats);
-		rcu_read_unlock();
-
-		mutex_unlock(&dev->struct_mutex);
-	}
-	mutex_unlock(&dev->filelist_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -984,8 +954,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
 	intel_runtime_pm_get(i915);
 	gpu = i915_capture_gpu_state(i915);
 	intel_runtime_pm_put(i915);
-	if (!gpu)
-		return -ENOMEM;
+	if (IS_ERR(gpu))
+		return PTR_ERR(gpu);
 
 	file->private_data = gpu;
 	return 0;
@@ -1018,7 +988,13 @@ i915_error_state_write(struct file *filp,
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-	file->private_data = i915_first_error_state(inode->i_private);
+	struct i915_gpu_state *error;
+
+	error = i915_first_error_state(inode->i_private);
+	if (IS_ERR(error))
+		return PTR_ERR(error);
+
+	file->private_data = error;
 	return 0;
 }
 
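Both open handlers above switch from NULL checks to the kernel's error-pointer idiom, so callers can propagate a specific errno (e.g. allocation failure) instead of hard-coding -ENOMEM. The pattern, sketched:

    struct i915_gpu_state *gpu;

    gpu = i915_capture_gpu_state(i915);
    if (IS_ERR(gpu))            /* e.g. ERR_PTR(-ENOMEM) */
            return PTR_ERR(gpu);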
@@ -1032,30 +1008,6 @@ static const struct file_operations i915_error_state_fops = {
 };
 #endif
 
-static int
-i915_next_seqno_set(void *data, u64 val)
-{
-	struct drm_i915_private *dev_priv = data;
-	struct drm_device *dev = &dev_priv->drm;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	intel_runtime_pm_get(dev_priv);
-	ret = i915_gem_set_global_seqno(dev, val);
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
-			NULL, i915_next_seqno_set,
-			"0x%llx\n");
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1064,7 +1016,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_GEN5(dev_priv)) {
+	if (IS_GEN(dev_priv, 5)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
@@ -1785,7 +1737,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	unsigned long temp, chipset, gfx;
 	int ret;
 
-	if (!IS_GEN5(dev_priv))
+	if (!IS_GEN(dev_priv, 5))
 		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
@@ -2034,7 +1986,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
 
-	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
+	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
 		seq_printf(m, "DDC = 0x%08x\n",
 			   I915_READ(DCC));
 		seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2070,124 +2022,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int per_file_ctx(int id, void *ptr, void *data)
-{
-	struct i915_gem_context *ctx = ptr;
-	struct seq_file *m = data;
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-
-	if (!ppgtt) {
-		seq_printf(m, "  no ppgtt for context %d\n",
-			   ctx->user_handle);
-		return 0;
-	}
-
-	if (i915_gem_context_is_default(ctx))
-		seq_puts(m, "  default context:\n");
-	else
-		seq_printf(m, "  context %d:\n", ctx->user_handle);
-	ppgtt->debug_dump(ppgtt, m);
-
-	return 0;
-}
-
-static void gen8_ppgtt_info(struct seq_file *m,
-			    struct drm_i915_private *dev_priv)
-{
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int i;
-
-	if (!ppgtt)
-		return;
-
-	for_each_engine(engine, dev_priv, id) {
-		seq_printf(m, "%s\n", engine->name);
-		for (i = 0; i < 4; i++) {
-			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
-			pdp <<= 32;
-			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
-			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
-		}
-	}
-}
-
-static void gen6_ppgtt_info(struct seq_file *m,
-			    struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if (IS_GEN6(dev_priv))
-		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
-
-	for_each_engine(engine, dev_priv, id) {
-		seq_printf(m, "%s\n", engine->name);
-		if (IS_GEN7(dev_priv))
-			seq_printf(m, "GFX_MODE: 0x%08x\n",
-				   I915_READ(RING_MODE_GEN7(engine)));
-		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
-			   I915_READ(RING_PP_DIR_BASE(engine)));
-		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
-			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
-		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
-			   I915_READ(RING_PP_DIR_DCLV(engine)));
-	}
-	if (dev_priv->mm.aliasing_ppgtt) {
-		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-		seq_puts(m, "aliasing PPGTT:\n");
-		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
-
-		ppgtt->debug_dump(ppgtt, m);
-	}
-
-	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
-}
-
-static int i915_ppgtt_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_file *file;
-	int ret;
-
-	mutex_lock(&dev->filelist_mutex);
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		goto out_unlock;
-
-	intel_runtime_pm_get(dev_priv);
-
-	if (INTEL_GEN(dev_priv) >= 8)
-		gen8_ppgtt_info(m, dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_ppgtt_info(m, dev_priv);
-
-	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-		struct task_struct *task;
-
-		task = get_pid_task(file->pid, PIDTYPE_PID);
-		if (!task) {
-			ret = -ESRCH;
-			goto out_rpm;
-		}
-		seq_printf(m, "\nproc: %s\n", task->comm);
-		put_task_struct(task);
-		idr_for_each(&file_priv->context_idr, per_file_ctx,
-			     (void *)(unsigned long)m);
-	}
-
-out_rpm:
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-out_unlock:
-	mutex_unlock(&dev->filelist_mutex);
-	return ret;
-}
-
 static int count_irq_waiters(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
@@ -3120,14 +2954,13 @@ static const char *plane_type(enum drm_plane_type type)
 	return "unknown";
 }
 
-static const char *plane_rotation(unsigned int rotation)
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
 {
-	static char buf[48];
 	/*
 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
 	 * will print them all to visualize if the values are misused
 	 */
-	snprintf(buf, sizeof(buf),
+	snprintf(buf, bufsize,
 		 "%s%s%s%s%s%s(0x%08x)",
 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
@@ -3136,8 +2969,6 @@ static const char *plane_rotation(unsigned int rotation)
 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
 		 rotation);
-
-	return buf;
 }
 
 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
@@ -3150,6 +2981,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 		struct drm_plane_state *state;
 		struct drm_plane *plane = &intel_plane->base;
 		struct drm_format_name_buf format_name;
+		char rot_str[48];
 
 		if (!plane->state) {
 			seq_puts(m, "plane->state is NULL!\n");
@@ -3165,6 +2997,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 			sprintf(format_name.str, "N/A");
 		}
 
+		plane_rotation(rot_str, sizeof(rot_str), state->rotation);
+
 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
 			   plane->base.id,
 			   plane_type(intel_plane->base.type),
@@ -3179,7 +3013,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 			   (state->src_h >> 16),
 			   ((state->src_h & 0xffff) * 15625) >> 10,
 			   format_name.str,
-			   plane_rotation(state->rotation));
+			   rot_str);
 	}
 }
 
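The plane_rotation() change above drops a function-local static buffer, which made the helper non-reentrant (two concurrent debugfs readers could clobber each other's string), in favour of storage owned by the caller:

    char rot_str[48];

    plane_rotation(rot_str, sizeof(rot_str), state->rotation);
    seq_printf(m, "rotation=%s\n", rot_str);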
@@ -3286,7 +3120,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	seq_printf(m, "Global active requests: %d\n",
 		   dev_priv->gt.active_requests);
 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
-		   dev_priv->info.cs_timestamp_frequency_khz);
+		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 
 	p = drm_seq_file_printer(m);
 	for_each_engine(engine, dev_priv, id)
@@ -3302,7 +3136,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
+	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
 
 	return 0;
 }
@@ -4206,9 +4040,6 @@ i915_drop_caches_set(void *data, u64 val)
 					     I915_WAIT_LOCKED,
 					     MAX_SCHEDULE_TIMEOUT);
 
-	if (ret == 0 && val & DROP_RESET_SEQNO)
-		ret = i915_gem_set_global_seqno(&i915->drm, 1);
-
 	if (val & DROP_RETIRE)
 		i915_retire_requests(i915);
 
@@ -4261,7 +4092,7 @@ i915_cache_sharing_get(void *data, u64 *val)
 	struct drm_i915_private *dev_priv = data;
 	u32 snpcr;
 
-	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
 		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
@@ -4281,7 +4112,7 @@ i915_cache_sharing_set(void *data, u64 val)
 	struct drm_i915_private *dev_priv = data;
 	u32 snpcr;
 
-	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
 		return -ENODEV;
 
 	if (val > 3)
@@ -4341,7 +4172,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
 				     struct sseu_dev_info *sseu)
 {
 #define SS_MAX 6
-	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
 	int s, ss;
 
@@ -4397,7 +4228,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 				    struct sseu_dev_info *sseu)
 {
 #define SS_MAX 3
-	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
 	int s, ss;
 
@@ -4425,7 +4256,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 
 		if (IS_GEN9_BC(dev_priv))
 			sseu->subslice_mask[s] =
-				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
 
 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
 			unsigned int eu_cnt;
@@ -4459,10 +4290,10 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
 
 	if (sseu->slice_mask) {
 		sseu->eu_per_subslice =
-				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+				RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
 		for (s = 0; s < fls(sseu->slice_mask); s++) {
 			sseu->subslice_mask[s] =
-				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
 		}
 		sseu->eu_total = sseu->eu_per_subslice *
 				 sseu_subslice_total(sseu);
@@ -4470,7 +4301,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
 		/* subtract fused off EU(s) from enabled slice(s) */
 		for (s = 0; s < fls(sseu->slice_mask); s++) {
 			u8 subslice_7eu =
-				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
+				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
 
 			sseu->eu_total -= hweight8(subslice_7eu);
 		}
@@ -4523,14 +4354,14 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 		return -ENODEV;
 
 	seq_puts(m, "SSEU Device Info\n");
-	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
+	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
 
 	seq_puts(m, "SSEU Device Status\n");
 	memset(&sseu, 0, sizeof(sseu));
-	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
-	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
+	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
+	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
 	sseu.max_eus_per_subslice =
-		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
+		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -4538,7 +4369,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 		cherryview_sseu_device_status(dev_priv, &sseu);
 	} else if (IS_BROADWELL(dev_priv)) {
 		broadwell_sseu_device_status(dev_priv, &sseu);
-	} else if (IS_GEN9(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 9)) {
 		gen9_sseu_device_status(dev_priv, &sseu);
 	} else if (INTEL_GEN(dev_priv) >= 10) {
 		gen10_sseu_device_status(dev_priv, &sseu);
@@ -4899,7 +4730,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_context_status", i915_context_status, 0},
 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
 	{"i915_swizzle_info", i915_swizzle_info, 0},
-	{"i915_ppgtt_info", i915_ppgtt_info, 0},
 	{"i915_llc", i915_llc, 0},
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
@@ -4934,7 +4764,6 @@ static const struct i915_debugfs_files {
 	{"i915_gpu_info", &i915_gpu_info_fops},
 #endif
 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
-	{"i915_next_seqno", &i915_next_seqno_fops},
 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -5081,6 +4910,106 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct drm_device *dev = connector->dev;
+	struct drm_crtc *crtc;
+	struct intel_dp *intel_dp;
+	struct drm_modeset_acquire_ctx ctx;
+	struct intel_crtc_state *crtc_state = NULL;
+	int ret = 0;
+	bool try_again = false;
+
+	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+	do {
+		try_again = false;
+		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+				       &ctx);
+		if (ret) {
+			ret = -EINTR;
+			break;
+		}
+		crtc = connector->state->crtc;
+		if (connector->status != connector_status_connected || !crtc) {
+			ret = -ENODEV;
+			break;
+		}
+		ret = drm_modeset_lock(&crtc->mutex, &ctx);
+		if (ret == -EDEADLK) {
+			ret = drm_modeset_backoff(&ctx);
+			if (!ret) {
+				try_again = true;
+				continue;
+			}
+			break;
+		} else if (ret) {
+			break;
+		}
+		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+		crtc_state = to_intel_crtc_state(crtc->state);
+		seq_printf(m, "DSC_Enabled: %s\n",
+			   yesno(crtc_state->dsc_params.compression_enable));
+		if (intel_dp->dsc_dpcd)
+			seq_printf(m, "DSC_Sink_Support: %s\n",
+				   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+		if (!intel_dp_is_edp(intel_dp))
+			seq_printf(m, "FEC_Sink_Support: %s\n",
+				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+	} while (try_again);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	return ret;
+}
+
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t len, loff_t *offp)
+{
+	bool dsc_enable = false;
+	int ret;
+	struct drm_connector *connector =
+		((struct seq_file *)file->private_data)->private;
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	if (len == 0)
+		return 0;
+
+	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
+			 len);
+
+	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+	if (ret < 0)
+		return ret;
+
+	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
+			 (dsc_enable) ? "true" : "false");
+	intel_dp->force_dsc_en = dsc_enable;
+
+	*offp += len;
+	return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+				     struct file *file)
+{
+	return single_open(file, i915_dsc_fec_support_show,
+			   inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_dsc_fec_support_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_dsc_fec_support_write
+};
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
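The new write handler accepts anything kstrtobool_from_user() understands ("1"/"0", "y"/"n", ...), so DSC can be force-enabled from userspace for testing. A minimal userspace sketch; the debugfs path is illustrative and depends on the card and connector:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support",
                          O_WRONLY);

            if (fd < 0)
                    return 1;
            write(fd, "1", 1);      /* parsed by kstrtobool_from_user() */
            close(fd);
            return 0;
    }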
@@ -5093,6 +5022,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 int i915_debugfs_connector_add(struct drm_connector *connector)
 {
 	struct dentry *root = connector->debugfs_entry;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 
 	/* The connector must have been registered beforehands. */
 	if (!root)
@@ -5117,5 +5047,11 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
 					    connector, &i915_hdcp_sink_capability_fops);
 	}
 
+	if (INTEL_GEN(dev_priv) >= 10 &&
+	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+				    connector, &i915_dsc_fec_support_fops);
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
@@ -41,7 +41,6 @@
 #include <linux/vt.h>
 #include <acpi/video.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/i915_drm.h>
@@ -132,15 +131,15 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
 	switch (id) {
 	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
 		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-		WARN_ON(!IS_GEN5(dev_priv));
+		WARN_ON(!IS_GEN(dev_priv, 5));
 		return PCH_IBX;
 	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
 		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
 		return PCH_CPT;
 	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
 		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
 		/* PantherPoint is CPT compatible */
 		return PCH_CPT;
 	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
@@ -217,9 +216,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
 	 * make an educated guess as to which PCH is really there.
 	 */
 
-	if (IS_GEN5(dev_priv))
+	if (IS_GEN(dev_priv, 5))
 		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-	else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
 		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
 	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
 		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
@@ -349,7 +348,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = HAS_LEGACY_SEMAPHORES(dev_priv);
+		value = 0;
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
@@ -358,12 +357,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = i915_cmd_parser_get_version(dev_priv);
 		break;
 	case I915_PARAM_SUBSLICE_TOTAL:
-		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
+		value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
 		if (!value)
 			return -ENODEV;
 		break;
 	case I915_PARAM_EU_TOTAL:
-		value = INTEL_INFO(dev_priv)->sseu.eu_total;
+		value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
 		if (!value)
 			return -ENODEV;
 		break;
@@ -380,7 +379,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = HAS_POOLED_EU(dev_priv);
 		break;
 	case I915_PARAM_MIN_EU_IN_POOL:
-		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+		value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
 		break;
 	case I915_PARAM_HUC_STATUS:
 		value = intel_huc_check_status(&dev_priv->huc);
@@ -430,17 +429,17 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = intel_engines_has_context_isolation(dev_priv);
 		break;
 	case I915_PARAM_SLICE_MASK:
-		value = INTEL_INFO(dev_priv)->sseu.slice_mask;
+		value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
 		if (!value)
 			return -ENODEV;
 		break;
 	case I915_PARAM_SUBSLICE_MASK:
-		value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
+		value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
 		if (!value)
 			return -ENODEV;
 		break;
 	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
-		value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+		value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
 		break;
 	case I915_PARAM_MMAP_GTT_COHERENT:
 		value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
@@ -966,7 +965,7 @@ static int i915_mmio_setup(struct drm_i915_private *dev_priv)
 	int mmio_bar;
 	int mmio_size;
 
-	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
+	mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
 	/*
 	 * Before gen4, the registers and the GTT are behind different BARs.
 	 * However, from gen4 onwards, the registers and the GTT are shared
@@ -1341,7 +1340,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
 	/* Need to calculate bandwidth only for Gen9 */
 	if (IS_BROXTON(dev_priv))
 		ret = bxt_get_dram_info(dev_priv);
-	else if (IS_GEN9(dev_priv))
+	else if (IS_GEN(dev_priv, 9))
 		ret = skl_get_dram_info(dev_priv);
 	else
 		ret = skl_dram_get_channels_info(dev_priv);
@@ -1374,7 +1373,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
-	intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
+	intel_device_info_runtime_init(dev_priv);
 
 	if (HAS_PPGTT(dev_priv)) {
 		if (intel_vgpu_active(dev_priv) &&
@@ -1436,7 +1435,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	pci_set_master(pdev);
 
 	/* overlay on gen2 is broken and can't address above 1G */
-	if (IS_GEN2(dev_priv)) {
+	if (IS_GEN(dev_priv, 2)) {
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
@@ -1574,7 +1573,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 		acpi_video_register();
 	}
 
-	if (IS_GEN5(dev_priv))
+	if (IS_GEN(dev_priv, 5))
 		intel_gpu_ips_init(dev_priv);
 
 	intel_audio_init(dev_priv);
@@ -1636,8 +1635,14 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
 	if (drm_debug & DRM_UT_DRIVER) {
 		struct drm_printer p = drm_debug_printer("i915 device info:");
 
-		intel_device_info_dump(&dev_priv->info, &p);
-		intel_device_info_dump_runtime(&dev_priv->info, &p);
+		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+			   INTEL_DEVID(dev_priv),
+			   INTEL_REVID(dev_priv),
+			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
+			   INTEL_GEN(dev_priv));
+
+		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
+		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
 	}
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1674,7 +1679,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Setup the write-once "constant" device info */
 	device_info = mkwrite_device_info(i915);
 	memcpy(device_info, match_info, sizeof(*device_info));
-	device_info->device_id = pdev->device;
+	RUNTIME_INFO(i915)->device_id = pdev->device;
 
 	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
 		     BITS_PER_TYPE(device_info->platform_mask));
@@ -2174,7 +2179,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_power_domains_resume(dev_priv);
 
-	intel_engines_sanitize(dev_priv);
+	intel_engines_sanitize(dev_priv, true);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 
@@ -2226,6 +2231,7 @@ void i915_reset(struct drm_i915_private *i915,
 
 	might_sleep();
 	lockdep_assert_held(&i915->drm.struct_mutex);
+	assert_rpm_wakelock_held(i915);
 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
 
 	if (!test_bit(I915_RESET_HANDOFF, &error->flags))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
@@ -46,7 +46,6 @@
 #include <linux/reservation.h>
 #include <linux/shmem_fs.h>
 
-#include <drm/drmP.h>
 #include <drm/intel-gtt.h>
 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
 #include <drm/drm_gem.h>
@@ -54,6 +53,7 @@
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
 #include <drm/drm_dsc.h>
+#include <drm/drm_connector.h>
 
 #include "i915_fixed.h"
 #include "i915_params.h"
@@ -90,8 +90,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20181204"
-#define DRIVER_TIMESTAMP	1543944377
+#define DRIVER_DATE		"20190110"
+#define DRIVER_TIMESTAMP	1547162337
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -281,16 +281,14 @@ struct drm_i915_display_funcs {
 	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
 			     enum i9xx_plane_id i9xx_plane);
 	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
-	int (*compute_intermediate_wm)(struct drm_device *dev,
-				       struct intel_crtc *intel_crtc,
-				       struct intel_crtc_state *newstate);
+	int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
 	void (*initial_watermarks)(struct intel_atomic_state *state,
 				   struct intel_crtc_state *cstate);
 	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
 					 struct intel_crtc_state *cstate);
 	void (*optimize_watermarks)(struct intel_atomic_state *state,
 				    struct intel_crtc_state *cstate);
-	int (*compute_global_watermarks)(struct drm_atomic_state *state);
+	int (*compute_global_watermarks)(struct intel_atomic_state *state);
 	void (*update_wm)(struct intel_crtc *crtc);
 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
@@ -322,8 +320,8 @@ struct drm_i915_display_funcs {
 	/* display clock increase/decrease */
 	/* pll clock increase/decrease */
 
-	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
-	void (*load_luts)(struct drm_crtc_state *crtc_state);
+	void (*load_csc_matrix)(struct intel_crtc_state *crtc_state);
+	void (*load_luts)(struct intel_crtc_state *crtc_state);
 };
 
 #define CSR_VERSION(major, minor)	((major) << 16 | (minor))
@@ -509,6 +507,7 @@ struct i915_psr {
 	ktime_t last_exit;
 	bool sink_not_reliable;
 	bool irq_aux_error;
+	u16 su_x_granularity;
 };
 
 enum intel_pch {
@@ -936,6 +935,8 @@ struct ddi_vbt_port_info {
 	uint8_t supports_hdmi:1;
 	uint8_t supports_dp:1;
 	uint8_t supports_edp:1;
+	uint8_t supports_typec_usb:1;
+	uint8_t supports_tbt:1;
 
 	uint8_t alternate_aux_channel;
 	uint8_t alternate_ddc_pin;
@@ -1430,7 +1431,8 @@ struct drm_i915_private {
 	struct kmem_cache *dependencies;
 	struct kmem_cache *priorities;
 
-	const struct intel_device_info info;
+	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
 	struct intel_driver_caps caps;
 
 	/**
@@ -1947,7 +1949,6 @@ struct drm_i915_private {
 		struct list_head active_rings;
 		struct list_head closed_vma;
 		u32 active_requests;
-		u32 request_serial;
 
 		/**
 		 * Is the GPU currently considered idle, or busy executing
@@ -2191,17 +2192,12 @@ static inline unsigned int i915_sg_segment_size(void)
 	return size;
 }
 
-static inline const struct intel_device_info *
-intel_info(const struct drm_i915_private *dev_priv)
-{
-	return &dev_priv->info;
-}
-
-#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
+#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
 #define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)
 
-#define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
-#define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)
+#define INTEL_GEN(dev_priv)	(INTEL_INFO(dev_priv)->gen)
+#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)
 
 #define REVID_FOREVER		0xff
 #define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)
@@ -2212,8 +2208,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 	 GENMASK((e) - 1, (s) - 1))
 
 /* Returns true if Gen is in inclusive range [Start, End] */
-#define IS_GEN(dev_priv, s, e) \
-	(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
+#define IS_GEN_RANGE(dev_priv, s, e) \
+	(!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
+
+#define IS_GEN(dev_priv, n) \
+	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
+	 INTEL_INFO(dev_priv)->gen == (n))
 
 /*
  * Return true if revision is in range [since,until] inclusive.
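Taken together with the INTEL_INFO()/RUNTIME_INFO() split above, these macro changes establish the idioms used throughout the rest of this merge: IS_GENx(p) becomes IS_GEN(p, x) (which build-time asserts that x is a compile-time constant), the old inclusive-range IS_GEN(p, s, e) becomes IS_GEN_RANGE(p, s, e), and fields probed at runtime (sseu, device_id, cs_timestamp_frequency_khz) move behind RUNTIME_INFO(). A sketch of converted call sites; the two helper functions are made up for illustration:

    if (IS_GEN(dev_priv, 7))                /* was: IS_GEN7(dev_priv) */
            init_gen7_workarounds();

    if (IS_GEN_RANGE(dev_priv, 6, 7))       /* was: IS_GEN6() || IS_GEN7() */
            setup_snoop(RUNTIME_INFO(dev_priv)->sseu.eu_total);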
@@ -2223,7 +2223,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_REVID(p, since, until) \
 	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 
-#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
 
 #define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
 #define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2245,7 +2245,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
 #define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
 #define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
-				 (dev_priv)->info.gt == 1)
+				 INTEL_INFO(dev_priv)->gt == 1)
 #define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
 #define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
 #define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
@@ -2257,7 +2257,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
 #define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
 #define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
+#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
 				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
@@ -2268,11 +2268,13 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
 				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
 #define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
+#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
+				 INTEL_INFO(dev_priv)->gt == 1)
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
 				 INTEL_DEVID(dev_priv) == 0x0A1E)
@@ -2295,21 +2297,21 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
 				 INTEL_DEVID(dev_priv) == 0x87C0)
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 2)
+				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 4)
+				 INTEL_INFO(dev_priv)->gt == 4)
 #define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 2)
+				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
 #define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 2)
+				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CNL_WITH_PORT_F(dev_priv)   (IS_CANNONLAKE(dev_priv) && \
 					(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
 
@@ -2366,26 +2368,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_ICL_REVID(p, since, until) \
 	(IS_ICELAKE(p) && IS_REVID(p, since, until))
 
-/*
- * The genX designation typically refers to the render engine, so render
- * capability related checks should use IS_GEN, while display and other checks
- * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
- * chips, etc.).
- */
-#define IS_GEN2(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(1)))
-#define IS_GEN3(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(2)))
-#define IS_GEN4(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(3)))
-#define IS_GEN5(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(4)))
-#define IS_GEN6(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(5)))
-#define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
-#define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
-#define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN10(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(9)))
-#define IS_GEN11(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(10)))
-
 #define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv)	(IS_GEN9(dev_priv) && !IS_LP(dev_priv))
+#define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
 
 #define ENGINE_MASK(id)	BIT(id)
 #define RENDER_RING	ENGINE_MASK(RCS)
@ -2399,29 +2384,27 @@ intel_info(const struct drm_i915_private *dev_priv)
|
|||
#define ALL_ENGINES (~0)
|
||||
|
||||
#define HAS_ENGINE(dev_priv, id) \
|
||||
(!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
|
||||
(!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
|
||||
|
||||
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
|
||||
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
|
||||
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
|
||||
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
|
||||
|
||||
#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
|
||||
|
||||
#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
|
||||
#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
|
||||
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
|
||||
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
|
||||
#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
|
||||
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
|
||||
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
|
||||
|
||||
#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
|
||||
#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
|
||||
|
||||
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
|
||||
((dev_priv)->info.has_logical_ring_contexts)
|
||||
(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
|
||||
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
|
||||
((dev_priv)->info.has_logical_ring_elsq)
|
||||
(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
|
||||
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
|
||||
((dev_priv)->info.has_logical_ring_preemption)
|
||||
(INTEL_INFO(dev_priv)->has_logical_ring_preemption)
|
||||
|
||||
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
|
||||
|
||||
|
@ -2435,12 +2418,12 @@ intel_info(const struct drm_i915_private *dev_priv)
|
|||
|
||||
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
|
||||
GEM_BUG_ON((sizes) == 0); \
|
||||
((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
|
||||
((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
|
||||
})
|
||||
|
||||
#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay)
|
||||
#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
|
||||
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
|
||||
((dev_priv)->info.display.overlay_needs_physical)
|
||||
(INTEL_INFO(dev_priv)->display.overlay_needs_physical)
|
||||
|
||||
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
|
||||
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
|
||||
|
@ -2458,42 +2441,42 @@ intel_info(const struct drm_i915_private *dev_priv)
|
|||
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
|
||||
* rows, which changed the alignment requirements and fence programming.
|
||||
*/
|
||||
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
|
||||
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
|
||||
!(IS_I915G(dev_priv) || \
|
||||
IS_I915GM(dev_priv)))
|
||||
#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv)
|
||||
#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug)
|
||||
#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
|
||||
#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
|
||||
|
||||
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
|
||||
#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc)
|
||||
#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc)
|
||||
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
|
||||
|
||||
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
|
||||
|
||||
#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst)
|
||||
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
|
||||
|
||||
#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi)
|
||||
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
|
||||
#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr)
|
||||
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
|
||||
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
|
||||
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
|
||||
|
||||
#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
|
||||
#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
|
||||
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
|
||||
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
|
||||
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
|
||||
|
||||
#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr)
|
||||
#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
|
||||
|
||||
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
|
||||
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
|
||||
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
|
||||
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
|
||||
|
||||
#define HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc)
|
||||
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
|
||||
|
||||
/*
|
||||
* For now, anything with a GuC requires uCode loading, and then supports
|
||||
* command submission once loaded. But these are logically independent
|
||||
* properties, so we have separate macros to test them.
|
||||
*/
|
||||
#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
|
||||
#define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct)
|
||||
#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
|
||||
#define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct)
|
||||
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
|
||||
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
|
||||
|
||||
|
@ -2502,11 +2485,11 @@ intel_info(const struct drm_i915_private *dev_priv)
|
|||
#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
|
||||
|
||||
/* Having a GuC is not the same as using a GuC */
|
||||
#define USES_GUC(dev_priv) intel_uc_is_using_guc()
|
||||
#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission()
|
||||
#define USES_HUC(dev_priv) intel_uc_is_using_huc()
|
||||
#define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv)
|
||||
#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv)
|
||||
#define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv)
|
||||
|
||||
#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
|
||||
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
|
||||
|
||||
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
|
||||
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
|
||||
|
@ -2546,12 +2529,12 @@ intel_info(const struct drm_i915_private *dev_priv)
|
|||
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
|
||||
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
|
||||
|
||||
#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
|
||||
#define HAS_GMCH_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch_display)
|
||||
|
||||
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
|
||||
|
||||
/* DPF == dynamic parity feature */
|
||||
#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
|
||||
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
|
||||
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
|
||||
2 : HAS_L3_DPF(dev_priv))
|
||||
|
||||
|
@ -2916,9 +2899,9 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
|
|||
__i915_gem_object_unpin_pages(obj);
|
||||
}
|
||||
|
||||
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
|
||||
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
|
||||
I915_MM_NORMAL = 0,
|
||||
I915_MM_SHRINKER
|
||||
I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
|
||||
};
|
||||
|
||||
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
|
||||
|
@ -3204,7 +3187,8 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
|
|||
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
|
||||
void i915_gem_shrinker_register(struct drm_i915_private *i915);
|
||||
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
|
||||
void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
|
||||
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
|
||||
struct mutex *mutex);
|
||||
|
||||
/* i915_gem_tiling.c */
|
||||
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
|
||||
|
@ -3313,7 +3297,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
|
|||
static inline struct intel_device_info *
|
||||
mkwrite_device_info(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return (struct intel_device_info *)&dev_priv->info;
|
||||
return (struct intel_device_info *)INTEL_INFO(dev_priv);
|
||||
}
|
||||
|
||||
/* modesetting */
|
||||
|
@ -3599,90 +3583,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
|
|||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
__i915_request_irq_complete(const struct i915_request *rq)
|
||||
{
|
||||
struct intel_engine_cs *engine = rq->engine;
|
||||
u32 seqno;
|
||||
|
||||
/* Note that the engine may have wrapped around the seqno, and
|
||||
* so our request->global_seqno will be ahead of the hardware,
|
||||
* even though it completed the request before wrapping. We catch
|
||||
* this by kicking all the waiters before resetting the seqno
|
||||
* in hardware, and also signal the fence.
|
||||
*/
|
||||
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
|
||||
return true;
|
||||
|
||||
/* The request was dequeued before we were awoken. We check after
|
||||
* inspecting the hw to confirm that this was the same request
|
||||
* that generated the HWS update. The memory barriers within
|
||||
* the request execution are sufficient to ensure that a check
|
||||
* after reading the value from hw matches this request.
|
||||
*/
|
||||
seqno = i915_request_global_seqno(rq);
|
||||
if (!seqno)
|
||||
return false;
|
||||
|
||||
/* Before we do the heavier coherent read of the seqno,
|
||||
* check the value (hopefully) in the CPU cacheline.
|
||||
*/
|
||||
if (__i915_request_completed(rq, seqno))
|
||||
return true;
|
||||
|
||||
/* Ensure our read of the seqno is coherent so that we
|
||||
* do not "miss an interrupt" (i.e. if this is the last
|
||||
* request and the seqno write from the GPU is not visible
|
||||
* by the time the interrupt fires, we will see that the
|
||||
* request is incomplete and go back to sleep awaiting
|
||||
* another interrupt that will never come.)
|
||||
*
|
||||
* Strictly, we only need to do this once after an interrupt,
|
||||
* but it is easier and safer to do it every time the waiter
|
||||
* is woken.
|
||||
*/
|
||||
if (engine->irq_seqno_barrier &&
|
||||
test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
|
||||
/* The ordering of irq_posted versus applying the barrier
|
||||
* is crucial. The clearing of the current irq_posted must
|
||||
* be visible before we perform the barrier operation,
|
||||
* such that if a subsequent interrupt arrives, irq_posted
|
||||
* is reasserted and our task rewoken (which causes us to
|
||||
* do another __i915_request_irq_complete() immediately
|
||||
* and reapply the barrier). Conversely, if the clear
|
||||
* occurs after the barrier, then an interrupt that arrived
|
||||
* whilst we waited on the barrier would not trigger a
|
||||
* barrier on the next pass, and the read may not see the
|
||||
* seqno update.
|
||||
*/
|
||||
engine->irq_seqno_barrier(engine);
|
||||
|
||||
/* If we consume the irq, but we are no longer the bottom-half,
|
||||
* the real bottom-half may not have serialised their own
|
||||
* seqno check with the irq-barrier (i.e. may have inspected
|
||||
* the seqno before we believe it coherent since they see
|
||||
* irq_posted == false but we are still running).
|
||||
*/
|
||||
spin_lock_irq(&b->irq_lock);
|
||||
if (b->irq_wait && b->irq_wait->tsk != current)
|
||||
/* Note that if the bottom-half is changed as we
|
||||
* are sending the wake-up, the new bottom-half will
|
||||
* be woken by whomever made the change. We only have
|
||||
* to worry about when we steal the irq-posted for
|
||||
* ourself.
|
||||
*/
|
||||
wake_up_process(b->irq_wait->tsk);
|
||||
spin_unlock_irq(&b->irq_lock);
|
||||
|
||||
if (__i915_request_completed(rq, seqno))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
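The comments above lean on the seqno being allowed to wrap. The usual
convention (a sketch of the idea, not the exact i915 helper) is to compare
sequence numbers with signed modular arithmetic, so a 32-bit counter can wrap
without breaking ordering as long as in-flight values stay within 2^31 of
each other:

#include <stdbool.h>
#include <stdint.h>

/* true iff seq1 is at or after seq2, modulo 2^32 */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* e.g. seqno_passed(0x00000002, 0xfffffffe) is true: 2 comes four steps
 * after 0xfffffffe once the counter wraps around zero. */
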
void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

@@ -25,7 +25,6 @@
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

@@ -859,58 +858,6 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
	obj->write_domain = 0;
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should

@@ -1030,72 +977,23 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
	else
		ret = __copy_to_user(user_data, vaddr + offset, length);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
	return ret ? -EFAULT : 0;
}

static int

@@ -1104,15 +1002,10 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

@@ -1130,7 +1023,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;
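
The pread loop above walks the object in page-sized steps, clamping each copy
so it never crosses a page boundary (length = min(remain, PAGE_SIZE -
offset)). The same chunking shape as a standalone sketch; the buffer layout
is invented for illustration:

#include <stddef.h>
#include <string.h>

#define PAGE_SZ 4096u

/* Copy 'remain' bytes starting at byte 'start' of a paged source, one
 * page-bounded chunk at a time (mirrors the idx/offset bookkeeping above). */
static void copy_in_page_chunks(char *dst, const char *src,
				size_t start, size_t remain)
{
	size_t idx = start / PAGE_SZ;	 /* which page we are on */
	size_t offset = start % PAGE_SZ; /* offset within that page */

	while (remain) {
		size_t length = remain;

		if (length > PAGE_SZ - offset)
			length = PAGE_SZ - offset; /* stop at the boundary */

		memcpy(dst, src + idx * PAGE_SZ + offset, length);

		dst += length;
		remain -= length;
		idx++;
		offset = 0;	/* subsequent pages start at offset 0 */
	}
}
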
@@ -1470,33 +1362,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
	return ret;
}

static int
shmem_pwrite_slow(struct page *page, int offset, int length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
	else
		ret = __copy_from_user(vaddr + offset, user_data, length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after

@@ -1504,31 +1369,24 @@ shmem_pwrite_slow(struct page *page, int offset, int length,
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);
	vaddr = kmap(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;
	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
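
With the swizzle paths gone, shmem_pwrite() flushes before the copy only for
partial cacheline writes; the caller's test, visible in the next hunk, is the
(offset | length) & (clflush_size - 1) trick: a write starts and ends on
cacheline boundaries iff both offset and length are multiples of the line
size. A sketch of that predicate, assuming a 64-byte line (the driver reads
the real size from the CPU):

#include <stdbool.h>
#include <stddef.h>

#define CACHELINE 64u	/* assumed line size for this sketch */

/* True when a write of 'len' bytes at 'offset' only partially covers a
 * cacheline at either end, so stale data must be flushed in first. */
static bool is_partial_cacheline_write(size_t offset, size_t len)
{
	return (offset | len) & (CACHELINE - 1);
}
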
static int

@@ -1538,7 +1396,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;

@@ -1553,10 +1410,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
	if (ret)
		return ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.

@@ -1573,7 +1426,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)

@@ -3227,13 +3079,6 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
			   struct i915_request *request,
			   bool stalled)
{
	/*
	 * Make sure this write is visible before we re-enable the interrupt
	 * handlers on another CPU, as tasklet_enable() resolves to just
	 * a compiler barrier which is insufficient for our purpose here.
	 */
	smp_store_mb(engine->irq_posted, 0);

	if (request)
		request = i915_gem_reset_request(engine, request, stalled);

@@ -3315,7 +3160,7 @@ static void nop_submit_request(struct i915_request *request)

	spin_lock_irqsave(&request->engine->timeline.lock, flags);
	__i915_request_submit(request);
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
	intel_engine_write_global_seqno(request->engine, request->global_seqno);
	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
}

@@ -3356,7 +3201,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_init_global_seqno, or the one
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu();

@@ -3384,6 +3229,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
		return true;

	if (!i915->gt.scratch) /* Never full initialised, recovery impossible */
		return false;

	GEM_TRACE("start\n");

	/*

@@ -3422,8 +3270,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
	i915_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests);

	if (!intel_gpu_reset(i915, ALL_ENGINES))
		intel_engines_sanitize(i915);
		intel_engines_sanitize(i915, false);

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from

@@ -5027,8 +4874,6 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)

void i915_gem_sanitize(struct drm_i915_private *i915)
{
	int err;

	GEM_TRACE("\n");

	mutex_lock(&i915->drm.struct_mutex);

@@ -5053,11 +4898,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
	 * it may impact the display and we are uncertain about the stability
	 * of the reset, so this could be applied to even earlier gen.
	 */
	err = -ENODEV;
	if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
		err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
	if (!err)
		intel_engines_sanitize(i915);
		intel_engines_sanitize(i915, false);

	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	intel_runtime_pm_put(i915);

@@ -5223,15 +5064,15 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev_priv))
	if (IS_GEN(dev_priv, 5))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev_priv))
	if (IS_GEN(dev_priv, 6))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev_priv))
	else if (IS_GEN(dev_priv, 7))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev_priv))
	else if (IS_GEN(dev_priv, 8))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();

@@ -5253,10 +5094,10 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
	} else if (IS_GEN(dev_priv, 2)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
	} else if (IS_GEN(dev_priv, 3)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
	}

@@ -5580,7 +5421,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
	}

	ret = i915_gem_init_scratch(dev_priv,
				    IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;

@@ -86,7 +86,6 @@
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

@@ -311,7 +310,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
	address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN8(i915))
	if (IS_GEN(i915, 8))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	/* TODO: WaDisableLiteRestore when we start using semaphore

@@ -27,7 +27,6 @@
#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

@@ -26,7 +26,6 @@
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"

@@ -31,7 +31,6 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

@@ -1380,7 +1379,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
	 * batchbuffers.
	 */
	if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    IS_GEN6(eb->i915)) {
	    IS_GEN(eb->i915, 6)) {
		err = i915_vma_bind(target, target->obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(err,

@@ -1896,7 +1895,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
	u32 *cs;
	int i;

	if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

@@ -21,7 +21,6 @@
 * IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

@@ -193,9 +192,9 @@ static void fence_write(struct drm_i915_fence_reg *fence,
	 * and explicitly managed for internal users.
	 */

	if (IS_GEN2(fence->i915))
	if (IS_GEN(fence->i915, 2))
		i830_write_fence_reg(fence, vma);
	else if (IS_GEN3(fence->i915))
	else if (IS_GEN(fence->i915, 3))
		i915_write_fence_reg(fence, vma);
	else
		i965_write_fence_reg(fence, vma);

@@ -596,13 +595,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		}
	}
	} else if (IS_GEN5(dev_priv)) {
	} else if (IS_GEN(dev_priv, 5)) {
		/* On Ironlake whatever DRAM config, GPU always do
		 * same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev_priv)) {
	} else if (IS_GEN(dev_priv, 2)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */

@@ -647,7 +646,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
	}

	/* check for L-shaped memory aka modified enhanced addressing */
	if (IS_GEN4(dev_priv) &&
	if (IS_GEN(dev_priv, 4) &&
	    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
		swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
		swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

@@ -33,7 +33,6 @@

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"

@@ -483,7 +482,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	i915_gem_shrinker_taints_mutex(&vm->mutex);
	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);

@@ -1423,8 +1422,6 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);

@@ -1490,84 +1487,6 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
	return -ENOMEM;
}

static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  u64 start, u64 length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd;
	u32 pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		u64 pd_len = length;
		u64 pd_start = start;
		u32 pde;

		if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			u32 pte;
			gen8_pte_t *pt_vaddr;

			if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
				continue;

			pt_vaddr = kmap_atomic_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				u64 va = (pdpe << GEN8_PDPE_SHIFT |
					  pde << GEN8_PDE_SHIFT |
					  pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->vm;
	const gen8_pte_t scratch_pte = vm->scratch_pte;
	u64 start = 0, length = ppgtt->vm.total;

	if (use_4lvl(vm)) {
		u64 pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
				continue;

			seq_printf(m, "    PML4E #%llu\n", pml4e);
			gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
		}
	} else {
		gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
	}
}

static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;

@@ -1672,7 +1591,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
	ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;

@@ -1688,60 +1606,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
	return ERR_PTR(err);
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
	const gen6_pte_t scratch_pte = base->vm.scratch_pte;
	struct i915_page_table *pt;
	u32 pte, pde;

	gen6_for_all_pdes(pt, &base->pd, pde) {
		gen6_pte_t *vaddr;

		if (pt == base->vm.scratch_pt)
			continue;

		if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
			u32 expected =
				GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
				GEN6_PDE_VALID;
			u32 pd_entry = readl(ppgtt->pd_addr + pde);

			if (pd_entry != expected)
				seq_printf(m,
					   "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
					   pde,
					   pd_entry,
					   expected);

			seq_printf(m, "\tPDE: %x\n", pd_entry);
		}

		vaddr = kmap_atomic_px(base->pd.page_table[pde]);
		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			int i;

			for (i = 0; i < 4; i++)
				if (vaddr[pte + i] != scratch_pte)
					break;
			if (i == 4)
				continue;

			seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
				   pde, pte,
				   (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
			for (i = 0; i < 4; i++) {
				if (vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
				  const unsigned int pde,

@@ -2075,6 +1939,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
	int err;

	/*
	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt

@@ -2090,9 +1955,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	return i915_vma_pin(ppgtt->vma,
			    0, GEN6_PD_ALIGN,
			    PIN_GLOBAL | PIN_HIGH);
	err = i915_vma_pin(ppgtt->vma,
			   0, GEN6_PD_ALIGN,
			   PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto unpin;

	return 0;

unpin:
	ppgtt->pin_count = 0;
	return err;
}
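
gen6_ppgtt_pin() now routes its failure path through a single unwind label so
the optimistic pin_count increment is always undone, the kernel's standard
goto-unwind idiom. A freestanding sketch of the shape (the struct and
resource names are made up):

#include <errno.h>

struct sketch_ctx {
	int pin_count;
};

/* stand-in for the real pinning work, which may fail */
static int acquire_backing(struct sketch_ctx *ctx)
{
	(void)ctx;
	return 0;
}

static int sketch_pin(struct sketch_ctx *ctx)
{
	int err;

	if (ctx->pin_count++)	/* already pinned: nothing more to do */
		return 0;

	err = acquire_backing(ctx);
	if (err)
		goto unpin;

	return 0;

unpin:
	ctx->pin_count = 0;	/* undo the optimistic increment */
	return err;
}
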
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)

@@ -2129,7 +2002,6 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.debug_dump = gen6_dump_ppgtt;

	ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
	ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;

@@ -2195,9 +2067,9 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
	gtt_write_workarounds(dev_priv);

	if (IS_GEN6(dev_priv))
	if (IS_GEN(dev_priv, 6))
		gen6_ppgtt_enable(dev_priv);
	else if (IS_GEN7(dev_priv))
	else if (IS_GEN(dev_priv, 7))
		gen7_ppgtt_enable(dev_priv);

	return 0;

@@ -2279,7 +2151,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
	return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
}

static void gen6_check_faults(struct drm_i915_private *dev_priv)

@@ -2372,7 +2244,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       DMA_ATTR_NO_WARN))
			return 0;

		/* If the DMA remap fails, one cause can be that we have
		/*
		 * If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from

@@ -2382,8 +2255,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT, NULL,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND |
				 I915_SHRINK_ACTIVE));
				 I915_SHRINK_UNBOUND));

	return -ENOSPC;
}
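
The hunk above is a retry-with-reclaim loop: attempt the DMA mapping, and on
failure purge reclaimable objects and try again until the shrinker reports
nothing left to free. The control flow in miniature, with the allocator and
reclaimer as stand-ins:

#include <stdbool.h>
#include <stdlib.h>

static void *try_alloc(size_t sz)
{
	return malloc(sz);	/* stand-in for the real mapping attempt */
}

static bool reclaim_something(void)
{
	return false;		/* stand-in: report nothing reclaimed */
}

static void *alloc_with_reclaim(size_t sz)
{
	do {
		void *p = try_alloc(sz);

		if (p)
			return p;
		/* keep purging until the reclaimer frees nothing */
	} while (reclaim_something());

	return NULL;	/* mirrors the -ENOSPC above */
}
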
@@ -413,8 +413,6 @@ struct i915_hw_ppgtt {
		struct i915_page_directory_pointer pdp;	/* GEN8+ */
		struct i915_page_directory pd;		/* GEN6-7 */
	};

	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

struct gen6_hw_ppgtt {

@@ -22,7 +22,6 @@
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

@@ -29,7 +29,8 @@

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

@@ -30,30 +30,27 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
static bool shrinker_lock(struct drm_i915_private *i915,
			  unsigned int flags,
			  bool *unlock)
{
	switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
	struct mutex *m = &i915->drm.struct_mutex;

	switch (mutex_trylock_recursive(m)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;

	case MUTEX_TRYLOCK_FAILED:
		*unlock = false;
		preempt_disable();
		do {
			cpu_relax();
			if (mutex_trylock(&i915->drm.struct_mutex)) {
				*unlock = true;
				break;
			}
		} while (!need_resched());
		preempt_enable();
		if (flags & I915_SHRINK_ACTIVE &&
		    mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
			*unlock = true;
		return *unlock;

	case MUTEX_TRYLOCK_SUCCESS:
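
The direct-reclaim change above swaps an unbounded trylock spin for a
sleeping, killable acquisition, and only when the caller explicitly opted
into the expensive (ACTIVE) pass. A loose userspace analogue of "cheap
attempt first, block only when the work justifies it", using pthreads and an
illustrative flag:

#include <pthread.h>
#include <stdbool.h>

#define SHRINK_ACTIVE 0x1	/* caller asked for the heavyweight pass */

static bool lock_for_shrink(pthread_mutex_t *m, unsigned int flags)
{
	if (pthread_mutex_trylock(m) == 0)
		return true;	/* fast path: lock was uncontended */

	/* Contended: block (and risk waiting on a slow holder) only when
	 * the caller requested the expensive pass. */
	if (flags & SHRINK_ACTIVE)
		return pthread_mutex_lock(m) == 0;

	return false;	/* cheap pass: bail rather than stall reclaim */
}
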
@@ -160,7 +157,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
	unsigned long scanned = 0;
	bool unlock;

	if (!shrinker_lock(i915, &unlock))
	if (!shrinker_lock(i915, flags, &unlock))
		return 0;

	/*

@@ -357,7 +354,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)

	sc->nr_scanned = 0;

	if (!shrinker_lock(i915, &unlock))
	if (!shrinker_lock(i915, 0, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(i915,

@@ -388,31 +385,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
	return sc->nr_scanned ? freed : SHRINK_STOP;
}

static bool
shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
			      int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(i915,
					   0, MAX_SCHEDULE_TIMEOUT) == 0 &&
		    shrinker_lock(i915, unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	return true;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{

@@ -421,7 +393,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

	freed_pages = i915_gem_shrink_all(i915);
	intel_runtime_pm_get(i915);
	freed_pages = i915_gem_shrink(i915, -1UL, NULL,
				      I915_SHRINK_BOUND |
				      I915_SHRINK_UNBOUND);
	intel_runtime_pm_put(i915);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not

@@ -447,10 +423,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;

@@ -464,23 +436,20 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	bool unlock;
	int ret;

	if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
	if (!shrinker_lock(i915, 0, &unlock))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (ret)
	if (i915_gem_wait_for_idle(i915,
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT))
		goto out;

	intel_runtime_pm_get(i915);
	freed_pages += i915_gem_shrink(i915, -1UL, NULL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(i915);

@@ -533,13 +502,40 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
	unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
	bool unlock = false;

	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
		mutex_acquire(&i915->drm.struct_mutex.dep_map,
			      I915_MM_NORMAL, 0, _RET_IP_);
		unlock = true;
	}

	fs_reclaim_acquire(GFP_KERNEL);
	mutex_lock(mutex);
	mutex_unlock(mutex);

	/*
	 * As we invariably rely on the struct_mutex within the shrinker,
	 * but have a complicated recursion dance, taint all the mutexes used
	 * within the shrinker with the struct_mutex. For completeness, we
	 * taint with all subclass of struct_mutex, even though we should
	 * only need tainting by I915_MM_NORMAL to catch possible ABBA
	 * deadlocks from using struct_mutex inside @mutex.
	 */
	mutex_acquire(&i915->drm.struct_mutex.dep_map,
		      I915_MM_SHRINKER, 0, _RET_IP_);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, 0, _RET_IP_);

	mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);

	if (unlock)
		mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}

@@ -26,7 +26,6 @@
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

@@ -102,7 +101,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
		resource_size_t ggtt_start;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev_priv))
		if (IS_GEN(dev_priv, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else

@@ -156,7 +155,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev_priv)) {
		if (r == NULL && !IS_GEN(dev_priv, 3)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);

@@ -194,7 +193,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
	WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

@@ -27,7 +27,6 @@

#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

@@ -87,7 +86,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
	}

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(i915))
	if (IS_GEN(i915, 3))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

@@ -162,7 +161,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
			return false;
	}

	if (IS_GEN2(i915) ||
	if (IS_GEN(i915, 2) ||
	    (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
		tile_width = 128;
	else

@@ -22,7 +22,6 @@
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

@@ -594,13 +594,14 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info,
				   const struct intel_runtime_info *runtime,
				   const struct intel_driver_caps *caps)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_dump_flags(info, &p);
	intel_driver_caps_print(caps, &p);
	intel_device_info_dump_topology(&info->sseu, &p);
	intel_device_info_dump_topology(&runtime->sseu, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,

@@ -664,7 +665,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s\n", init_utsname()->release);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);

@@ -735,7 +738,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(m->i915))
	if (IS_GEN(m->i915, 7))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {

@@ -844,7 +847,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
	if (error->display)
		intel_display_print_error_state(m, error->display);

	err_print_capabilities(m, &error->device_info, &error->driver_caps);
	err_print_capabilities(m, &error->device_info, &error->runtime_info,
			       &error->driver_caps);
	err_print_params(m, &error->params);
	err_print_uc(m, &error->uc);
}

@@ -963,17 +967,10 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
	kfree(obj);
}

static __always_inline void free_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		kfree(*(void **)x);
}

static void cleanup_params(struct i915_gpu_state *error)
{
#define FREE(T, x, ...) free_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(FREE);
#undef FREE
	i915_params_free(&error->params);
}

static void cleanup_uc_state(struct i915_gpu_state *error)

@@ -1037,7 +1034,7 @@ i915_error_object_create(struct drm_i915_private *i915,
	dma_addr_t dma;
	int ret;

	if (!vma)
	if (!vma || !vma->pages)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;

@@ -1314,7 +1311,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
		if (IS_GEN(dev_priv, 7)) {
			switch (engine->id) {
			default:
			case RCS:

@@ -1330,7 +1327,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
		} else if (IS_GEN(engine->i915, 6)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */

@@ -1352,10 +1349,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
		if (IS_GEN(dev_priv, 6))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
		else if (IS_GEN(dev_priv, 7))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)

@@ -1725,7 +1722,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev_priv))
	if (IS_GEN(dev_priv, 7))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_GEN(dev_priv) >= 8) {

@@ -1733,7 +1730,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev_priv)) {
	if (IS_GEN(dev_priv, 6)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);

@@ -1753,7 +1750,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
	error->ccid = I915_READ(CCID);

	/* 3: Feature specific registers */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
	if (IS_GEN_RANGE(dev_priv, 6, 7)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

@@ -1777,7 +1774,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
		error->ngtier = 1;
	} else if (IS_GEN2(dev_priv)) {
	} else if (IS_GEN(dev_priv, 2)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev_priv)) {
		error->ier = I915_READ(IER);

@@ -1831,21 +1828,15 @@ static void capture_gen_state(struct i915_gpu_state *error)
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	error->driver_caps = i915->caps;
}

static __always_inline void dup_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}

static void capture_params(struct i915_gpu_state *error)
{
	error->params = i915_modparams;
#define DUP(T, x, ...) dup_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(DUP);
#undef DUP
	i915_params_copy(&error->params, &i915_modparams);
}
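
capture_params() and cleanup_params() now delegate to the new
i915_params_copy()/i915_params_free() helpers, which centralise the trick
shown in the removed code: expand an X-macro parameter list and deep-copy
only the char * members by matching the stringified type. A condensed
standalone sketch of the technique; the parameter list here is invented:

#include <stdlib.h>
#include <string.h>

#define SKETCH_PARAMS_FOR_EACH(param) \
	param(char *, name) \
	param(int, level)

struct sketch_params {
#define MEMBER(T, x) T x;
	SKETCH_PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

static void dup_param(const char *type, void *x)
{
	char **strp = x;

	if (strcmp(type, "char *"))	/* only strings need deep copies */
		return;
	if (*strp)
		*strp = strdup(*strp);
}

static void sketch_params_copy(struct sketch_params *dst,
			       const struct sketch_params *src)
{
	*dst = *src;			/* shallow copy first */
#define DUP(T, x) dup_param(#T, &dst->x);
	SKETCH_PARAMS_FOR_EACH(DUP)
#undef DUP
}
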
static unsigned long capture_find_epoch(const struct i915_gpu_state *error)

@@ -1907,9 +1898,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error)
		return NULL;
	if (!error) {
		i915_disable_error_state(i915, -ENOMEM);
		return ERR_PTR(-ENOMEM);
	}

	kref_init(&error->ref);
	error->i915 = i915;
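
Storing ERR_PTR(-ENOMEM) in first_error keeps "capture disabled" as a
self-describing sentinel instead of a bare NULL. The ERR_PTR convention
encodes a small negative errno at the very top of the pointer range; a
minimal userspace rendition of the helpers it relies on:

#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO 4095	/* last page of the address space holds errnos */

static inline void *err_ptr(long error)	/* error is a -errno value */
{
	return (void *)(uintptr_t)error;
}

static inline long ptr_err(const void *ptr)
{
	return (long)(intptr_t)ptr;
}

static inline bool is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
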
|
||||
|
@ -1945,11 +1943,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
|
|||
return;
|
||||
|
||||
error = i915_capture_gpu_state(i915);
|
||||
if (!error) {
|
||||
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
|
||||
i915_disable_error_state(i915, -ENOMEM);
|
||||
if (IS_ERR(error))
|
||||
return;
|
||||
}
|
||||
|
||||
i915_error_capture_msg(i915, error, engine_mask, error_msg);
|
||||
DRM_INFO("%s\n", error->error_msg);
|
||||
|
@ -1987,7 +1982,7 @@ i915_first_error_state(struct drm_i915_private *i915)
|
|||
|
||||
spin_lock_irq(&i915->gpu_error.lock);
|
||||
error = i915->gpu_error.first_error;
|
||||
if (error)
|
||||
if (!IS_ERR_OR_NULL(error))
|
||||
i915_gpu_state_get(error);
|
||||
spin_unlock_irq(&i915->gpu_error.lock);
|
||||
|
||||
|
@ -2000,10 +1995,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
|
|||
|
||||
spin_lock_irq(&i915->gpu_error.lock);
|
||||
error = i915->gpu_error.first_error;
|
||||
i915->gpu_error.first_error = NULL;
|
||||
if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
|
||||
i915->gpu_error.first_error = NULL;
|
||||
spin_unlock_irq(&i915->gpu_error.lock);
|
||||
|
||||
if (!IS_ERR(error))
|
||||
if (!IS_ERR_OR_NULL(error))
|
||||
i915_gpu_state_put(error);
|
||||
}
|
||||
|
||||
|
|
|
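[Editor's note] The hunks above switch the error-state bookkeeping to the kernel's ERR_PTR convention: gpu_error.first_error now doubles as a sticky "capture disabled" marker (ERR_PTR(-ENOMEM) or ERR_PTR(-ENODEV)), which is why the readers use IS_ERR_OR_NULL(). Below is a minimal, self-contained userspace sketch of that encoding trick; the helper names and the capture function are illustrative, not the kernel's implementation.

    /* Sketch of the ERR_PTR/IS_ERR idiom (hypothetical names, C99). */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095 /* errno codes fit in the top, unmapped page */

    static inline void *err_ptr(long err) { return (void *)err; }
    static inline long ptr_err(const void *p) { return (long)p; }
    static inline int is_err(const void *p)
    {
    	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    /* NULL: nothing captured yet; ERR_PTR: capture permanently disabled. */
    static void *first_error;

    static void *capture_gpu_state(void)
    {
    	void *state;

    	if (is_err(first_error)) /* once disabled, stay disabled */
    		return first_error;

    	state = malloc(64);
    	if (!state) {
    		first_error = err_ptr(-12); /* latch -ENOMEM */
    		return first_error;
    	}
    	return state;
    }

    int main(void)
    {
    	void *s = capture_gpu_state();

    	if (is_err(s))
    		printf("capture disabled: %ld\n", ptr_err(s));
    	else
    		free(s);
    	return 0;
    }

One pointer-sized field thus carries three states (no error, a real capture, or a latched errno), so the sysfs reader in a later hunk can return PTR_ERR(gpu) without extra flags.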
@@ -45,6 +45,7 @@ struct i915_gpu_state {
 	u32 reset_count;
 	u32 suspend_count;
 	struct intel_device_info device_info;
+	struct intel_runtime_info runtime_info;
 	struct intel_driver_caps driver_caps;
 	struct i915_params params;
 
@@ -28,7 +28,6 @@
  */
 #include <linux/compat.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -31,7 +31,6 @@
 #include <linux/sysrq.h>
 #include <linux/slab.h>
 #include <linux/circ_buf.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"

@@ -950,7 +949,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		vtotal /= 2;
 
-	if (IS_GEN2(dev_priv))
+	if (IS_GEN(dev_priv, 2))
 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
 	else
 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

@@ -1030,7 +1029,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	if (stime)
 		*stime = ktime_get();
 
-	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+	if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */

@@ -1090,7 +1089,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	else
 		position += vtotal - vbl_end;
 
-	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+	if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
 		*vpos = position;
 		*hpos = 0;
 	} else {

@@ -1189,13 +1188,6 @@ static void notify_ring(struct intel_engine_cs *engine)
 			rq = i915_request_get(waiter);
 
 			tsk = wait->tsk;
-		} else {
-			if (engine->irq_seqno_barrier &&
-			    i915_seqno_passed(seqno, wait->seqno - 1)) {
-				set_bit(ENGINE_IRQ_BREADCRUMB,
-					&engine->irq_posted);
-				tsk = wait->tsk;
-			}
 		}
 
 		engine->breadcrumbs.irq_count++;

@@ -2547,7 +2539,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 		I915_WRITE(SDEIIR, pch_iir);
 	}
 
-	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
 		ironlake_rps_change_irq_handler(dev_priv);
 }
 

@@ -3243,7 +3235,7 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
 {
 	u32 eir;
 
-	if (!IS_GEN2(dev_priv))
+	if (!IS_GEN(dev_priv, 2))
 		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
 
 	if (INTEL_GEN(dev_priv) < 4)

@@ -3586,11 +3578,8 @@ static void ironlake_irq_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (IS_GEN5(dev_priv))
-		I915_WRITE(HWSTAM, 0xffffffff);
-
 	GEN3_IRQ_RESET(DE);
-	if (IS_GEN7(dev_priv))
+	if (IS_GEN(dev_priv, 7))
 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
 
 	if (IS_HASWELL(dev_priv)) {

@@ -4045,7 +4034,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 	}
 
 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
-	if (IS_GEN5(dev_priv)) {
+	if (IS_GEN(dev_priv, 5)) {
 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
 	} else {
 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

@@ -4183,9 +4172,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 		  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
 	};
 
-	if (HAS_L3_DPF(dev_priv))
-		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
 	dev_priv->pm_ier = 0x0;
 	dev_priv->pm_imr = ~dev_priv->pm_ier;
 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);

@@ -4368,8 +4354,6 @@ static void i8xx_irq_reset(struct drm_device *dev)
 
 	i9xx_pipestat_irq_reset(dev_priv);
 
-	I915_WRITE16(HWSTAM, 0xffff);
-
 	GEN2_IRQ_RESET();
 }
 

@@ -4537,8 +4521,6 @@ static void i915_irq_reset(struct drm_device *dev)
 
 	i9xx_pipestat_irq_reset(dev_priv);
 
-	I915_WRITE(HWSTAM, 0xffffffff);
-
 	GEN3_IRQ_RESET();
 }
 

@@ -4648,8 +4630,6 @@ static void i965_irq_reset(struct drm_device *dev)
 
 	i9xx_pipestat_irq_reset(dev_priv);
 
-	I915_WRITE(HWSTAM, 0xffffffff);
-
 	GEN3_IRQ_RESET();
 }
 

@@ -4836,7 +4816,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) >= 8)
 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
-	if (IS_GEN2(dev_priv)) {
+	if (IS_GEN(dev_priv, 2)) {
 		/* Gen2 doesn't have a hardware frame counter */
 		dev->max_vblank_count = 0;
 	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {

@@ -4852,7 +4832,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	 * Gen2 doesn't have a hardware frame counter and so depends on
 	 * vblank interrupts to produce sane vblank seuquence numbers.
 	 */
-	if (!IS_GEN2(dev_priv))
+	if (!IS_GEN(dev_priv, 2))
 		dev->vblank_disable_immediate = true;
 
 	/* Most platforms treat the display irq block as an always-on

@@ -4924,14 +4904,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->disable_vblank = ironlake_disable_vblank;
 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
 	} else {
-		if (IS_GEN2(dev_priv)) {
+		if (IS_GEN(dev_priv, 2)) {
 			dev->driver->irq_preinstall = i8xx_irq_reset;
 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
 			dev->driver->irq_handler = i8xx_irq_handler;
 			dev->driver->irq_uninstall = i8xx_irq_reset;
 			dev->driver->enable_vblank = i8xx_enable_vblank;
 			dev->driver->disable_vblank = i8xx_disable_vblank;
-		} else if (IS_GEN3(dev_priv)) {
+		} else if (IS_GEN(dev_priv, 3)) {
 			dev->driver->irq_preinstall = i915_irq_reset;
 			dev->driver->irq_postinstall = i915_irq_postinstall;
 			dev->driver->irq_uninstall = i915_irq_reset;
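[Editor's note] Many hunks in this series mechanically convert IS_GEN2()/IS_GEN5()/... to IS_GEN(dev_priv, n), and paired checks such as IS_GEN6() || IS_GEN7() to IS_GEN_RANGE(dev_priv, 6, 7). One attraction of a parameterized, mask-based form is that a whole range test compiles down to a single AND against a per-device generation bitmask. The following is a compilable sketch of that idea with made-up macro names; the real i915 macros differ in detail.

    /* Hypothetical mask-based gen checks; MY_ prefix marks invented names. */
    #include <assert.h>

    struct device_info { unsigned int gen_mask; };

    #define GEN_BIT(n)		(1u << ((n) - 1))
    #define GEN_RANGE_MASK(s, e)	((~0u >> (32 - (e))) & ~(GEN_BIT(s) - 1))

    #define MY_IS_GEN(info, n)	(!!((info)->gen_mask & GEN_BIT(n)))
    #define MY_IS_GEN_RANGE(info, s, e) \
    	(!!((info)->gen_mask & GEN_RANGE_MASK((s), (e))))

    int main(void)
    {
    	struct device_info hsw = { .gen_mask = GEN_BIT(7) }; /* gen7 part */

    	assert(MY_IS_GEN(&hsw, 7));
    	assert(MY_IS_GEN_RANGE(&hsw, 6, 7));  /* one AND, no chained ORs */
    	assert(!MY_IS_GEN_RANGE(&hsw, 8, 9));
    	return 0;
    }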
@@ -77,7 +77,7 @@ i915_param_named(error_capture, bool, 0600,
 	"triaging and debugging hangs.");
 #endif
 
-i915_param_named_unsafe(enable_hangcheck, bool, 0644,
+i915_param_named_unsafe(enable_hangcheck, bool, 0600,
 	"Periodically check GPU activity for detecting hangs. "
 	"WARNING: Disabling this can cause system wide hangs. "
 	"(default: true)");

@@ -203,3 +203,33 @@ void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
 	I915_PARAMS_FOR_EACH(PRINT);
 #undef PRINT
 }
+
+static __always_inline void dup_param(const char *type, void *x)
+{
+	if (!__builtin_strcmp(type, "char *"))
+		*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
+}
+
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src)
+{
+	*dest = *src;
+#define DUP(T, x, ...) dup_param(#T, &dest->x);
+	I915_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+static __always_inline void free_param(const char *type, void *x)
+{
+	if (!__builtin_strcmp(type, "char *")) {
+		kfree(*(void **)x);
+		*(void **)x = NULL;
+	}
+}
+
+/* free the allocated members, *not* the passed in params itself */
+void i915_params_free(struct i915_params *params)
+{
+#define FREE(T, x, ...) free_param(#T, &params->x);
+	I915_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
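[Editor's note] The new i915_params_copy()/i915_params_free() above lean on the I915_PARAMS_FOR_EACH() X-macro so the field list lives in exactly one place and only `char *` members get deep-copied or freed; the helpers dispatch on the stringified type (#T). A standalone restatement of the pattern with invented fields, substituting plain strdup for kstrdup/GFP_ATOMIC:

    #include <stdlib.h>
    #include <string.h>

    #define PARAMS_FOR_EACH(param)			\
    	param(char *, firmware_path, NULL)	\
    	param(int, verbosity, 0)

    struct params {
    #define MEMBER(T, name, ...) T name;
    	PARAMS_FOR_EACH(MEMBER)
    #undef MEMBER
    };

    static void dup_param(const char *type, void *x)
    {
    	if (!strcmp(type, "char *") && *(char **)x)
    		*(char **)x = strdup(*(char **)x);
    }

    static void free_param(const char *type, void *x)
    {
    	if (!strcmp(type, "char *")) {
    		free(*(char **)x);
    		*(char **)x = NULL;
    	}
    }

    void params_copy(struct params *dst, const struct params *src)
    {
    	*dst = *src;			/* shallow struct copy... */
    #define DUP(T, name, ...) dup_param(#T, &dst->name);
    	PARAMS_FOR_EACH(DUP)		/* ...then deep-copy the strings */
    #undef DUP
    }

    void params_free(struct params *p)
    {
    #define FREE(T, name, ...) free_param(#T, &p->name);
    	PARAMS_FOR_EACH(FREE)
    #undef FREE
    }

Adding a parameter later means touching only the PARAMS_FOR_EACH list; dump, copy and free all pick it up automatically.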
@@ -33,6 +33,15 @@ struct drm_printer;
 #define ENABLE_GUC_SUBMISSION		BIT(0)
 #define ENABLE_GUC_LOAD_HUC		BIT(1)
 
+/*
+ * Invoke param, a function-like macro, for each i915 param, with arguments:
+ *
+ *   param(type, name, value)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ */
 #define I915_PARAMS_FOR_EACH(param) \
 	param(char *, vbt_firmware, NULL) \
 	param(int, modeset, -1) \

@@ -78,6 +87,8 @@ struct i915_params {
 extern struct i915_params i915_modparams __read_mostly;
 
 void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src);
+void i915_params_free(struct i915_params *params);
 
 #endif
@@ -82,6 +82,7 @@
 	.display.has_overlay = 1, \
 	.display.overlay_needs_physical = 1, \
 	.display.has_gmch_display = 1, \
+	.gpu_reset_clobbers_display = true, \
 	.hws_needs_physical = 1, \
 	.unfenced_needs_alignment = 1, \
 	.ring_mask = RENDER_RING, \

@@ -122,6 +123,7 @@ static const struct intel_device_info intel_i865g_info = {
 	GEN(3), \
 	.num_pipes = 2, \
 	.display.has_gmch_display = 1, \
+	.gpu_reset_clobbers_display = true, \
 	.ring_mask = RENDER_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \

@@ -198,6 +200,7 @@ static const struct intel_device_info intel_pineview_info = {
 	.num_pipes = 2, \
 	.display.has_hotplug = 1, \
 	.display.has_gmch_display = 1, \
+	.gpu_reset_clobbers_display = true, \
 	.ring_mask = RENDER_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \

@@ -228,6 +231,7 @@ static const struct intel_device_info intel_g45_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_G45),
 	.ring_mask = RENDER_RING | BSD_RING,
+	.gpu_reset_clobbers_display = false,
 };
 
 static const struct intel_device_info intel_gm45_info = {

@@ -237,6 +241,7 @@ static const struct intel_device_info intel_gm45_info = {
 	.display.has_fbc = 1,
 	.display.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	.gpu_reset_clobbers_display = false,
 };
 
 #define GEN5_FEATURES \

@@ -532,7 +537,6 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 	.display.has_fbc = 1, \
 	.display.has_psr = 1, \
 	.has_runtime_pm = 1, \
-	.has_pooled_eu = 0, \
 	.display.has_csr = 1, \
 	.has_rc6 = 1, \
 	.display.has_dp_mst = 1, \
@@ -1796,7 +1796,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 	 * be read back from automatically triggered reports, as part of the
 	 * RPT_ID field.
 	 */
-	if (IS_GEN(dev_priv, 9, 11)) {
+	if (IS_GEN_RANGE(dev_priv, 9, 11)) {
 		I915_WRITE(GEN8_OA_DEBUG,
 			   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
 					      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));

@@ -2646,7 +2646,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
 {
 	return div64_u64(1000000000ULL * (2ULL << exponent),
-			 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
+			 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 }
 
 /**

@@ -3415,7 +3415,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		dev_priv->perf.oa.ops.read = gen8_oa_read;
 		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
 
-		if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
+		if (IS_GEN_RANGE(dev_priv, 8, 9)) {
 			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
 				gen7_is_valid_b_counter_addr;
 			dev_priv->perf.oa.ops.is_valid_mux_reg =

@@ -3431,7 +3431,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
 			dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
 
-			if (IS_GEN8(dev_priv)) {
+			if (IS_GEN(dev_priv, 8)) {
 				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
 				dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
 

@@ -3442,7 +3442,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 
 				dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
 			}
-		} else if (IS_GEN(dev_priv, 10, 11)) {
+		} else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
 			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
 				gen7_is_valid_b_counter_addr;
 			dev_priv->perf.oa.ops.is_valid_mux_reg =

@@ -3471,7 +3471,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
 
 		oa_sample_rate_hard_limit = 1000 *
-			(INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
+			(RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
 		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
 
 		mutex_init(&dev_priv->perf.metrics_lock);
@@ -13,7 +13,7 @@
 static int query_topology_info(struct drm_i915_private *dev_priv,
 			       struct drm_i915_query_item *query_item)
 {
-	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
 	struct drm_i915_query_topology_info topo;
 	u32 slice_length, subslice_length, eu_length, total_length;
 
@@ -139,6 +139,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 	return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
 }
 
+#define VLV_DISPLAY_BASE		0x180000
+#define VLV_MIPI_BASE			VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE			0x60000
+
+#define DISPLAY_MMIO_BASE(dev_priv)	(INTEL_INFO(dev_priv)->display_mmio_offset)
+
 /*
  * Given the first two numbers __a and __b of arbitrarily many evenly spaced
  * numbers, pick the 0-based __index'th value.

@@ -179,15 +185,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  * Device info offset array based helpers for groups of registers with unevenly
  * spaced base offsets.
  */
-#define _MMIO_PIPE2(pipe, reg)		_MMIO(dev_priv->info.pipe_offsets[pipe] - \
-					      dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
-					      dev_priv->info.display_mmio_offset)
-#define _MMIO_TRANS2(pipe, reg)		_MMIO(dev_priv->info.trans_offsets[(pipe)] - \
-					      dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
-					      dev_priv->info.display_mmio_offset)
-#define _CURSOR2(pipe, reg)		_MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
-					      dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
-					      dev_priv->info.display_mmio_offset)
+#define _MMIO_PIPE2(pipe, reg)		_MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
+					      INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
+					      DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_TRANS2(pipe, reg)		_MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
+					      INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
+					      DISPLAY_MMIO_BASE(dev_priv))
+#define _CURSOR2(pipe, reg)		_MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
+					      INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
+					      DISPLAY_MMIO_BASE(dev_priv))
 
 #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value) ({					 \

@@ -347,6 +353,24 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN11_GRDOM_MEDIA4		(1 << 8)
 #define  GEN11_GRDOM_VECS		(1 << 13)
 #define  GEN11_GRDOM_VECS2		(1 << 14)
+#define  GEN11_GRDOM_SFC0		(1 << 17)
+#define  GEN11_GRDOM_SFC1		(1 << 18)
+
+#define  GEN11_VCS_SFC_RESET_BIT(instance)	(GEN11_GRDOM_SFC0 << ((instance) >> 1))
+#define  GEN11_VECS_SFC_RESET_BIT(instance)	(GEN11_GRDOM_SFC0 << (instance))
+
+#define GEN11_VCS_SFC_FORCED_LOCK(engine)	_MMIO((engine)->mmio_base + 0x88C)
+#define   GEN11_VCS_SFC_FORCED_LOCK_BIT		(1 << 0)
+#define GEN11_VCS_SFC_LOCK_STATUS(engine)	_MMIO((engine)->mmio_base + 0x890)
+#define   GEN11_VCS_SFC_USAGE_BIT		(1 << 0)
+#define   GEN11_VCS_SFC_LOCK_ACK_BIT		(1 << 1)
+
+#define GEN11_VECS_SFC_FORCED_LOCK(engine)	_MMIO((engine)->mmio_base + 0x201C)
+#define   GEN11_VECS_SFC_FORCED_LOCK_BIT	(1 << 0)
+#define GEN11_VECS_SFC_LOCK_ACK(engine)		_MMIO((engine)->mmio_base + 0x2018)
+#define   GEN11_VECS_SFC_LOCK_ACK_BIT		(1 << 0)
+#define GEN11_VECS_SFC_USAGE(engine)		_MMIO((engine)->mmio_base + 0x2014)
+#define   GEN11_VECS_SFC_USAGE_BIT		(1 << 0)
 
 #define RING_PP_DIR_BASE(engine)	_MMIO((engine)->mmio_base + 0x228)
 #define RING_PP_DIR_BASE_READ(engine)	_MMIO((engine)->mmio_base + 0x518)

@@ -1866,6 +1890,10 @@ enum i915_power_well_id {
 
 #define CNL_PORT_TX_DW7_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
 #define CNL_PORT_TX_DW7_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
+#define ICL_PORT_TX_DW7_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(7, port))
+#define ICL_PORT_TX_DW7_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(7, port))
+#define ICL_PORT_TX_DW7_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
+#define ICL_PORT_TX_DW7_LN(port, ln)	_MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
 #define   N_SCALAR(x)			((x) << 24)
 #define   N_SCALAR_MASK			(0x7F << 24)
 

@@ -2592,10 +2620,6 @@ enum i915_power_well_id {
 
 #define GEN11_GFX_DISABLE_LEGACY_MODE	(1 << 3)
 
-#define VLV_DISPLAY_BASE 0x180000
-#define VLV_MIPI_BASE VLV_DISPLAY_BASE
-#define BXT_MIPI_BASE 0x60000
-
 #define VLV_GU_CTL0	_MMIO(VLV_DISPLAY_BASE + 0x2030)
 #define VLV_GU_CTL1	_MMIO(VLV_DISPLAY_BASE + 0x2034)
 #define SCPD0		_MMIO(0x209c) /* 915+ only */

@@ -3152,9 +3176,9 @@ enum i915_power_well_id {
 /*
  * Clock control & power management
  */
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
+#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
+#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
 #define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
 
 #define VGA0	_MMIO(0x6000)

@@ -3251,9 +3275,9 @@ enum i915_power_well_id {
 #define   SDVO_MULTIPLIER_SHIFT_HIRES		4
 #define   SDVO_MULTIPLIER_SHIFT_VGA		0
 
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
-#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
+#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
+#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
 #define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
 
 /*

@@ -3325,7 +3349,7 @@ enum i915_power_well_id {
 #define   DSTATE_PLL_D3_OFF			(1 << 3)
 #define   DSTATE_GFX_CLOCK_GATING		(1 << 1)
 #define   DSTATE_DOT_CLOCK_GATING		(1 << 0)
-#define DSPCLK_GATE_D	_MMIO(dev_priv->info.display_mmio_offset + 0x6200)
+#define DSPCLK_GATE_D	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
 # define DPUNIT_B_CLOCK_GATE_DISABLE		(1 << 30) /* 965 */
 # define VSUNIT_CLOCK_GATE_DISABLE		(1 << 29) /* 965 */
 # define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* 965 */

@@ -3465,7 +3489,7 @@ enum i915_power_well_id {
 #define _PALETTE_A		0xa000
 #define _PALETTE_B		0xa800
 #define _CHV_PALETTE_C		0xc000
-#define PALETTE(pipe, i)	_MMIO(dev_priv->info.display_mmio_offset + \
+#define PALETTE(pipe, i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
 				      _PICK((pipe), _PALETTE_A,		\
 					    _PALETTE_B, _CHV_PALETTE_C) + \
 				      (i) * 4)

@@ -4298,7 +4322,7 @@ enum {
 
 
 /* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN		_MMIO(dev_priv->info.display_mmio_offset + 0x61110)
+#define PORT_HOTPLUG_EN		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
 #define   PORTB_HOTPLUG_INT_EN			(1 << 29)
 #define   PORTC_HOTPLUG_INT_EN			(1 << 28)
 #define   PORTD_HOTPLUG_INT_EN			(1 << 27)

@@ -4328,7 +4352,7 @@ enum {
 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
 
-#define PORT_HOTPLUG_STAT	_MMIO(dev_priv->info.display_mmio_offset + 0x61114)
+#define PORT_HOTPLUG_STAT	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
 /*
  * HDMI/DP bits are g4x+
  *

@@ -4410,7 +4434,7 @@ enum {
 
 #define PORT_DFT_I9XX				_MMIO(0x61150)
 #define   DC_BALANCE_RESET			(1 << 25)
-#define PORT_DFT2_G4X		_MMIO(dev_priv->info.display_mmio_offset + 0x61154)
+#define PORT_DFT2_G4X		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
 #define   DC_BALANCE_RESET_VLV			(1 << 31)
 #define   PIPE_SCRAMBLE_RESET_MASK		((1 << 14) | (0x3 << 0))
 #define   PIPE_C_SCRAMBLE_RESET			(1 << 14) /* chv */

@@ -4695,7 +4719,7 @@ enum {
 #define   PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
 /* Panel fitting */
-#define PFIT_CONTROL	_MMIO(dev_priv->info.display_mmio_offset + 0x61230)
+#define PFIT_CONTROL	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
 #define   PFIT_ENABLE		(1 << 31)
 #define   PFIT_PIPE_MASK	(3 << 29)
 #define   PFIT_PIPE_SHIFT	29

@@ -4713,7 +4737,7 @@ enum {
 #define   PFIT_SCALING_PROGRAMMED	(1 << 26)
 #define   PFIT_SCALING_PILLAR		(2 << 26)
 #define   PFIT_SCALING_LETTER		(3 << 26)
-#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234)
+#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
 /* Pre-965 */
 #define		PFIT_VERT_SCALE_SHIFT		20
 #define		PFIT_VERT_SCALE_MASK		0xfff00000

@@ -4725,25 +4749,25 @@ enum {
 #define		PFIT_HORIZ_SCALE_SHIFT_965	0
 #define		PFIT_HORIZ_SCALE_MASK_965	0x00001fff
 
-#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238)
+#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
 
-#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
 #define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
 					 _VLV_BLC_PWM_CTL2_B)
 
-#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
 #define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
 					_VLV_BLC_PWM_CTL_B)
 
-#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
 #define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
 				       _VLV_BLC_HIST_CTL_B)
 
 /* Backlight control */
-#define BLC_PWM_CTL2	_MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
 #define   BLM_PWM_ENABLE		(1 << 31)
 #define   BLM_COMBINATION_MODE		(1 << 30) /* gen4 only */
 #define   BLM_PIPE_SELECT		(1 << 29)

@@ -4766,7 +4790,7 @@ enum {
 #define   BLM_PHASE_IN_COUNT_MASK	(0xff << 8)
 #define   BLM_PHASE_IN_INCR_SHIFT	(0)
 #define   BLM_PHASE_IN_INCR_MASK	(0xff << 0)
-#define BLC_PWM_CTL	_MMIO(dev_priv->info.display_mmio_offset + 0x61254)
+#define BLC_PWM_CTL	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
 /*
  * This is the most significant 15 bits of the number of backlight cycles in a
  * complete cycle of the modulated backlight control.

@@ -4788,7 +4812,7 @@ enum {
 #define   BACKLIGHT_DUTY_CYCLE_MASK_PNV		(0xfffe)
 #define   BLM_POLARITY_PNV			(1 << 0) /* pnv only */
 
-#define BLC_HIST_CTL	_MMIO(dev_priv->info.display_mmio_offset + 0x61260)
+#define BLC_HIST_CTL	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
 #define  BLM_HISTOGRAM_ENABLE			(1 << 31)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the

@@ -5412,47 +5436,47 @@ enum {
  * is 20 bytes in each direction, hence the 5 fixed
  * data registers
  */
-#define _DPA_AUX_CH_CTL		(dev_priv->info.display_mmio_offset + 0x64010)
-#define _DPA_AUX_CH_DATA1	(dev_priv->info.display_mmio_offset + 0x64014)
-#define _DPA_AUX_CH_DATA2	(dev_priv->info.display_mmio_offset + 0x64018)
-#define _DPA_AUX_CH_DATA3	(dev_priv->info.display_mmio_offset + 0x6401c)
-#define _DPA_AUX_CH_DATA4	(dev_priv->info.display_mmio_offset + 0x64020)
-#define _DPA_AUX_CH_DATA5	(dev_priv->info.display_mmio_offset + 0x64024)
+#define _DPA_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
+#define _DPA_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
+#define _DPA_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
+#define _DPA_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
+#define _DPA_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
+#define _DPA_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
 
-#define _DPB_AUX_CH_CTL		(dev_priv->info.display_mmio_offset + 0x64110)
-#define _DPB_AUX_CH_DATA1	(dev_priv->info.display_mmio_offset + 0x64114)
-#define _DPB_AUX_CH_DATA2	(dev_priv->info.display_mmio_offset + 0x64118)
-#define _DPB_AUX_CH_DATA3	(dev_priv->info.display_mmio_offset + 0x6411c)
-#define _DPB_AUX_CH_DATA4	(dev_priv->info.display_mmio_offset + 0x64120)
-#define _DPB_AUX_CH_DATA5	(dev_priv->info.display_mmio_offset + 0x64124)
+#define _DPB_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPB_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+#define _DPB_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
+#define _DPB_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
+#define _DPB_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
+#define _DPB_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
 
-#define _DPC_AUX_CH_CTL		(dev_priv->info.display_mmio_offset + 0x64210)
-#define _DPC_AUX_CH_DATA1	(dev_priv->info.display_mmio_offset + 0x64214)
-#define _DPC_AUX_CH_DATA2	(dev_priv->info.display_mmio_offset + 0x64218)
-#define _DPC_AUX_CH_DATA3	(dev_priv->info.display_mmio_offset + 0x6421c)
-#define _DPC_AUX_CH_DATA4	(dev_priv->info.display_mmio_offset + 0x64220)
-#define _DPC_AUX_CH_DATA5	(dev_priv->info.display_mmio_offset + 0x64224)
+#define _DPC_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
+#define _DPC_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
+#define _DPC_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
+#define _DPC_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
+#define _DPC_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
+#define _DPC_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
 
-#define _DPD_AUX_CH_CTL		(dev_priv->info.display_mmio_offset + 0x64310)
-#define _DPD_AUX_CH_DATA1	(dev_priv->info.display_mmio_offset + 0x64314)
-#define _DPD_AUX_CH_DATA2	(dev_priv->info.display_mmio_offset + 0x64318)
-#define _DPD_AUX_CH_DATA3	(dev_priv->info.display_mmio_offset + 0x6431c)
-#define _DPD_AUX_CH_DATA4	(dev_priv->info.display_mmio_offset + 0x64320)
-#define _DPD_AUX_CH_DATA5	(dev_priv->info.display_mmio_offset + 0x64324)
+#define _DPD_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
+#define _DPD_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
+#define _DPD_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
+#define _DPD_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
+#define _DPD_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
+#define _DPD_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
 
-#define _DPE_AUX_CH_CTL		(dev_priv->info.display_mmio_offset + 0x64410)
-#define _DPE_AUX_CH_DATA1	(dev_priv->info.display_mmio_offset + 0x64414)
-#define _DPE_AUX_CH_DATA2	(dev_priv->info.display_mmio_offset + 0x64418)
-#define _DPE_AUX_CH_DATA3	(dev_priv->info.display_mmio_offset + 0x6441c)
-#define _DPE_AUX_CH_DATA4	(dev_priv->info.display_mmio_offset + 0x64420)
-#define _DPE_AUX_CH_DATA5	(dev_priv->info.display_mmio_offset + 0x64424)
+#define _DPE_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
+#define _DPE_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
+#define _DPE_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
+#define _DPE_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
+#define _DPE_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
+#define _DPE_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
 
-#define _DPF_AUX_CH_CTL		(dev_priv->info.display_mmio_offset + 0x64510)
-#define _DPF_AUX_CH_DATA1	(dev_priv->info.display_mmio_offset + 0x64514)
-#define _DPF_AUX_CH_DATA2	(dev_priv->info.display_mmio_offset + 0x64518)
-#define _DPF_AUX_CH_DATA3	(dev_priv->info.display_mmio_offset + 0x6451c)
-#define _DPF_AUX_CH_DATA4	(dev_priv->info.display_mmio_offset + 0x64520)
-#define _DPF_AUX_CH_DATA5	(dev_priv->info.display_mmio_offset + 0x64524)
+#define _DPF_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
+#define _DPF_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
+#define _DPF_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
+#define _DPF_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
+#define _DPF_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
+#define _DPF_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
 
 #define DP_AUX_CH_CTL(aux_ch)	_MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
 #define DP_AUX_CH_DATA(aux_ch, i)	_MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */

@@ -5728,7 +5752,7 @@ enum {
 #define   DPINVGTT_STATUS_MASK			0xff
 #define   DPINVGTT_STATUS_MASK_CHV		0xfff
 
-#define DSPARB			_MMIO(dev_priv->info.display_mmio_offset + 0x70030)
+#define DSPARB			_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
 #define   DSPARB_CSTART_MASK	(0x7f << 7)
 #define   DSPARB_CSTART_SHIFT	7
 #define   DSPARB_BSTART_MASK	(0x7f)

@@ -5763,7 +5787,7 @@ enum {
 #define   DSPARB_SPRITEF_MASK_VLV	(0xff << 8)
 
 /* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1		_MMIO(dev_priv->info.display_mmio_offset + 0x70034)
+#define DSPFW1		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
 #define   DSPFW_SR_SHIFT		23
 #define   DSPFW_SR_MASK			(0x1ff << 23)
 #define   DSPFW_CURSORB_SHIFT		16

@@ -5774,7 +5798,7 @@ enum {
 #define   DSPFW_PLANEA_SHIFT		0
 #define   DSPFW_PLANEA_MASK		(0x7f << 0)
 #define   DSPFW_PLANEA_MASK_VLV		(0xff << 0) /* vlv/chv */
-#define DSPFW2		_MMIO(dev_priv->info.display_mmio_offset + 0x70038)
+#define DSPFW2		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
 #define   DSPFW_FBC_SR_EN		(1 << 31)	  /* g4x */
 #define   DSPFW_FBC_SR_SHIFT		28
 #define   DSPFW_FBC_SR_MASK		(0x7 << 28) /* g4x */

@@ -5790,7 +5814,7 @@ enum {
 #define   DSPFW_SPRITEA_SHIFT		0
 #define   DSPFW_SPRITEA_MASK		(0x7f << 0) /* g4x */
 #define   DSPFW_SPRITEA_MASK_VLV	(0xff << 0) /* vlv/chv */
-#define DSPFW3		_MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
+#define DSPFW3		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
 #define   DSPFW_HPLL_SR_EN		(1 << 31)
 #define   PINEVIEW_SELF_REFRESH_EN	(1 << 30)
 #define   DSPFW_CURSOR_SR_SHIFT		24

@@ -6206,35 +6230,35 @@ enum {
  * [10:1f] all
  * [30:32] all
  */
-#define SWF0(i)	_MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
-#define SWF1(i)	_MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
-#define SWF3(i)	_MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
+#define SWF0(i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
 #define SWF_ILK(i)	_MMIO(0x4F000 + (i) * 4)
 
 /* Pipe B */
-#define _PIPEBDSL		(dev_priv->info.display_mmio_offset + 0x71000)
-#define _PIPEBCONF		(dev_priv->info.display_mmio_offset + 0x71008)
-#define _PIPEBSTAT		(dev_priv->info.display_mmio_offset + 0x71024)
+#define _PIPEBDSL		(DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
+#define _PIPEBCONF		(DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
+#define _PIPEBSTAT		(DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
 #define _PIPEBFRAMEHIGH		0x71040
 #define _PIPEBFRAMEPIXEL	0x71044
-#define _PIPEB_FRMCOUNT_G4X	(dev_priv->info.display_mmio_offset + 0x71040)
-#define _PIPEB_FLIPCOUNT_G4X	(dev_priv->info.display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_G4X	(DISPLAY_MMIO_BASE(dev_priv) + 0x71040)
+#define _PIPEB_FLIPCOUNT_G4X	(DISPLAY_MMIO_BASE(dev_priv) + 0x71044)
 
 
 /* Display B control */
-#define _DSPBCNTR		(dev_priv->info.display_mmio_offset + 0x71180)
+#define _DSPBCNTR		(DISPLAY_MMIO_BASE(dev_priv) + 0x71180)
 #define   DISPPLANE_ALPHA_TRANS_ENABLE		(1 << 15)
 #define   DISPPLANE_ALPHA_TRANS_DISABLE		0
 #define   DISPPLANE_SPRITE_ABOVE_DISPLAY	0
 #define   DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
-#define _DSPBADDR		(dev_priv->info.display_mmio_offset + 0x71184)
-#define _DSPBSTRIDE		(dev_priv->info.display_mmio_offset + 0x71188)
-#define _DSPBPOS		(dev_priv->info.display_mmio_offset + 0x7118C)
-#define _DSPBSIZE		(dev_priv->info.display_mmio_offset + 0x71190)
-#define _DSPBSURF		(dev_priv->info.display_mmio_offset + 0x7119C)
-#define _DSPBTILEOFF		(dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBOFFSET		(dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBSURFLIVE		(dev_priv->info.display_mmio_offset + 0x711AC)
+#define _DSPBADDR		(DISPLAY_MMIO_BASE(dev_priv) + 0x71184)
+#define _DSPBSTRIDE		(DISPLAY_MMIO_BASE(dev_priv) + 0x71188)
+#define _DSPBPOS		(DISPLAY_MMIO_BASE(dev_priv) + 0x7118C)
+#define _DSPBSIZE		(DISPLAY_MMIO_BASE(dev_priv) + 0x71190)
+#define _DSPBSURF		(DISPLAY_MMIO_BASE(dev_priv) + 0x7119C)
+#define _DSPBTILEOFF		(DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBOFFSET		(DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBSURFLIVE		(DISPLAY_MMIO_BASE(dev_priv) + 0x711AC)
 
 /* ICL DSI 0 and 1 */
 #define _PIPEDSI0CONF		0x7b008

@@ -8786,7 +8810,7 @@ enum {
 #define   GEN9_ENABLE_GPGPU_PREEMPTION	(1 << 2)
 
 /* Audio */
-#define G4X_AUD_VID_DID		_MMIO(dev_priv->info.display_mmio_offset + 0x62020)
+#define G4X_AUD_VID_DID		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
 #define   INTEL_AUDIO_DEVCL		0x808629FB
 #define   INTEL_AUDIO_DEVBLC		0x80862801
 #define   INTEL_AUDIO_DEVCTG		0x80862802
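[Editor's note] The register-header hunks above replace every open-coded dev_priv->info.display_mmio_offset with a single DISPLAY_MMIO_BASE(dev_priv) macro, so the display block's base (0 on most parts, 0x180000 on VLV/CHV) is spelled once. A minimal compilable sketch of the idea follows; the struct layout is invented and, unlike the real i915 macros (which pick up dev_priv implicitly from the caller's scope), the register macro here takes the device explicitly.

    #include <assert.h>
    #include <stdint.h>

    struct device_info { uint32_t display_mmio_offset; };
    struct drm_i915_private { struct device_info info; };

    #define INTEL_INFO(dp)		(&(dp)->info)
    #define DISPLAY_MMIO_BASE(dp)	(INTEL_INFO(dp)->display_mmio_offset)

    /* a register that floats with the display block, e.g. DSPARB */
    #define DSPARB(dp)		(DISPLAY_MMIO_BASE(dp) + 0x70030)

    int main(void)
    {
    	struct drm_i915_private gen4 = { .info = { 0 } };
    	struct drm_i915_private vlv = { .info = { 0x180000 } };

    	assert(DSPARB(&gen4) == 0x70030);
    	assert(DSPARB(&vlv) == 0x1f0030);
    	return 0;
    }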
@@ -111,99 +111,10 @@ i915_request_remove_from_client(struct i915_request *request)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+static void reserve_gt(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	struct i915_timeline *timeline;
-	enum intel_engine_id id;
-	int ret;
-
-	/* Carefully retire all requests without writing to the rings */
-	ret = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED,
-				     MAX_SCHEDULE_TIMEOUT);
-	if (ret)
-		return ret;
-
-	GEM_BUG_ON(i915->gt.active_requests);
-
-	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-	for_each_engine(engine, i915, id) {
-		GEM_TRACE("%s seqno %d (current %d) -> %d\n",
-			  engine->name,
-			  engine->timeline.seqno,
-			  intel_engine_get_seqno(engine),
-			  seqno);
-
-		if (seqno == engine->timeline.seqno)
-			continue;
-
-		kthread_park(engine->breadcrumbs.signaler);
-
-		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
-			/* Flush any waiters before we reuse the seqno */
-			intel_engine_disarm_breadcrumbs(engine);
-			intel_engine_init_hangcheck(engine);
-			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
-		}
-
-		/* Check we are idle before we fiddle with hw state! */
-		GEM_BUG_ON(!intel_engine_is_idle(engine));
-		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
-
-		/* Finally reset hw state */
-		intel_engine_init_global_seqno(engine, seqno);
-		engine->timeline.seqno = seqno;
-
-		kthread_unpark(engine->breadcrumbs.signaler);
-	}
-
-	list_for_each_entry(timeline, &i915->gt.timelines, link)
-		memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
-
-	i915->gt.request_serial = seqno;
-
-	return 0;
-}
-
-int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
-{
-	struct drm_i915_private *i915 = to_i915(dev);
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	if (seqno == 0)
-		return -EINVAL;
-
-	/* HWS page needs to be set less than what we will inject to ring */
-	return reset_all_global_seqno(i915, seqno - 1);
-}
-
-static int reserve_gt(struct drm_i915_private *i915)
-{
-	int ret;
-
-	/*
-	 * Reservation is fine until we may need to wrap around
-	 *
-	 * By incrementing the serial for every request, we know that no
-	 * individual engine may exceed that serial (as each is reset to 0
-	 * on any wrap). This protects even the most pessimistic of migrations
-	 * of every request from all engines onto just one.
-	 */
-	while (unlikely(++i915->gt.request_serial == 0)) {
-		ret = reset_all_global_seqno(i915, 0);
-		if (ret) {
-			i915->gt.request_serial--;
-			return ret;
-		}
-	}
-
 	if (!i915->gt.active_requests++)
 		i915_gem_unpark(i915);
-
-	return 0;
 }
 
 static void unreserve_gt(struct drm_i915_private *i915)

@@ -566,6 +477,38 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
+static void ring_retire_requests(struct intel_ring *ring)
+{
+	struct i915_request *rq, *rn;
+
+	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
+		if (!i915_request_completed(rq))
+			break;
+
+		i915_request_retire(rq);
+	}
+}
+
+static noinline struct i915_request *
+i915_request_alloc_slow(struct intel_context *ce)
+{
+	struct intel_ring *ring = ce->ring;
+	struct i915_request *rq;
+
+	if (list_empty(&ring->request_list))
+		goto out;
+
+	/* Ratelimit ourselves to prevent oom from malicious clients */
+	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+	cond_synchronize_rcu(rq->rcustate);
+
+	/* Retire our old requests in the hope that we free some */
+	ring_retire_requests(ring);
+
+out:
+	return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+}
+
 /**
  * i915_request_alloc - allocate a request structure
  *

@@ -608,13 +551,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	if (IS_ERR(ce))
 		return ERR_CAST(ce);
 
-	ret = reserve_gt(i915);
-	if (ret)
-		goto err_unpin;
-
-	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
-	if (ret)
-		goto err_unreserve;
+	reserve_gt(i915);
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
 	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);

@@ -654,15 +591,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	rq = kmem_cache_alloc(i915->requests,
 			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		i915_retire_requests(i915);
-
-		/* Ratelimit ourselves to prevent oom from malicious clients */
-		rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
-					 &i915->drm.struct_mutex);
-		if (rq)
-			cond_synchronize_rcu(rq->rcustate);
-
-		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+		rq = i915_request_alloc_slow(ce);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;

@@ -707,9 +636,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * i915_request_add() call can't fail. Note that the reserve may need
 	 * to be redone if the request is not actually submitted straight
 	 * away, e.g. because a GPU scheduler has deferred it.
+	 *
+	 * Note that due to how we add reserved_space to intel_ring_begin()
+	 * we need to double our request to ensure that if we need to wrap
+	 * around inside i915_request_add() there is sufficient space at
+	 * the beginning of the ring as well.
 	 */
-	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
+	rq->reserved_space = 2 * engine->emit_breadcrumb_sz * sizeof(u32);
 
 	/*
 	 * Record the position of the start of the request so that

@@ -719,11 +652,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 */
 	rq->head = rq->ring->emit;
 
-	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
-	if (ret)
-		goto err_unwind;
-
 	ret = engine->request_alloc(rq);
 	if (ret)
 		goto err_unwind;

@@ -748,7 +676,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	kmem_cache_free(i915->requests, rq);
 err_unreserve:
 	unreserve_gt(i915);
-err_unpin:
 	intel_context_unpin(ce);
 	return ERR_PTR(ret);
 }

@@ -776,34 +703,12 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
 						       &from->submit,
 						       I915_FENCE_GFP);
-		return ret < 0 ? ret : 0;
+	} else {
+		ret = i915_sw_fence_await_dma_fence(&to->submit,
+						    &from->fence, 0,
+						    I915_FENCE_GFP);
 	}
 
-	if (to->engine->semaphore.sync_to) {
-		u32 seqno;
-
-		GEM_BUG_ON(!from->engine->semaphore.signal);
-
-		seqno = i915_request_global_seqno(from);
-		if (!seqno)
-			goto await_dma_fence;
-
-		if (seqno <= to->timeline->global_sync[from->engine->id])
-			return 0;
-
-		trace_i915_gem_ring_sync_to(to, from);
-		ret = to->engine->semaphore.sync_to(to, from);
-		if (ret)
-			return ret;
-
-		to->timeline->global_sync[from->engine->id] = seqno;
-		return 0;
-	}
-
-await_dma_fence:
-	ret = i915_sw_fence_await_dma_fence(&to->submit,
-					    &from->fence, 0,
-					    I915_FENCE_GFP);
 	return ret < 0 ? ret : 0;
 }
 

@@ -979,8 +884,8 @@ void i915_request_add(struct i915_request *request)
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
+	GEM_BUG_ON(request->reserved_space > request->ring->space);
 	request->reserved_space = 0;
-	engine->emit_flush(request, EMIT_FLUSH);
 
 	/*
 	 * Record the position of the start of the breadcrumb so that

@@ -1298,13 +1203,7 @@ long i915_request_wait(struct i915_request *rq,
 		set_current_state(state);
 
 wakeup:
-		/*
-		 * Carefully check if the request is complete, giving time
-		 * for the seqno to be visible following the interrupt.
-		 * We also have to check in case we are kicked by the GPU
-		 * reset in order to drop the struct_mutex.
-		 */
-		if (__i915_request_irq_complete(rq))
+		if (i915_request_completed(rq))
 			break;
 
 		/*

@@ -1343,19 +1242,6 @@ long i915_request_wait(struct i915_request *rq,
 	return timeout;
 }
 
-static void ring_retire_requests(struct intel_ring *ring)
-{
-	struct i915_request *request, *next;
-
-	list_for_each_entry_safe(request, next,
-				 &ring->request_list, ring_link) {
-		if (!i915_request_completed(request))
-			break;
-
-		i915_request_retire(request);
-	}
-}
-
 void i915_retire_requests(struct drm_i915_private *i915)
 {
 	struct intel_ring *ring, *tmp;
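[Editor's note] The ring_retire_requests() moved above relies on requests sitting on the ring's list in submission order: it retires completed requests from the oldest end and stops at the first still-busy one, so the walk is cheap even with many requests in flight. A standalone sketch of that stop-at-first-incomplete retire loop, with a simplified singly linked list standing in for the kernel's list_head:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct request {
    	unsigned int seqno;
    	bool completed;
    	struct request *next;	/* next-younger request */
    };

    /* retire from the oldest request forward; stop at the first busy one */
    static void ring_retire_requests(struct request **oldest)
    {
    	while (*oldest && (*oldest)->completed) {
    		struct request *rq = *oldest;

    		*oldest = rq->next;	/* unlink, i.e. "retire" */
    		printf("retired %u\n", rq->seqno);
    		free(rq);
    	}
    }

    int main(void)
    {
    	struct request *c = malloc(sizeof(*c)), *b = malloc(sizeof(*b)),
    		       *a = malloc(sizeof(*a));

    	*c = (struct request){ 3, false, NULL };
    	*b = (struct request){ 2, true, c };
    	*a = (struct request){ 1, true, b };

    	ring_retire_requests(&a);	/* retires 1 and 2, keeps 3 */
    	free(a->next ? NULL : NULL);	/* a now points at request 3 */
    	free(a);
    	return 0;
    }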
@@ -30,7 +30,6 @@
 #include "i915_gem.h"
 #include "i915_scheduler.h"
 #include "i915_sw_fence.h"
-#include "i915_scheduler.h"
 
 #include <uapi/drm/i915_drm.h>
 
@@ -24,7 +24,6 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "intel_drv.h"
 #include "i915_reg.h"

@@ -65,7 +64,7 @@ int i915_save_state(struct drm_i915_private *dev_priv)
 
 	i915_save_display(dev_priv);
 
-	if (IS_GEN4(dev_priv))
+	if (IS_GEN(dev_priv, 4))
 		pci_read_config_word(pdev, GCDGMBUS,
 				     &dev_priv->regfile.saveGCDGMBUS);
 

@@ -77,14 +76,14 @@ int i915_save_state(struct drm_i915_private *dev_priv)
 	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
 
 	/* Scratch space */
-	if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
 		for (i = 0; i < 7; i++) {
 			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
 			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
 		}
 		for (i = 0; i < 3; i++)
 			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
-	} else if (IS_GEN2(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 2)) {
 		for (i = 0; i < 7; i++)
 			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
 	} else if (HAS_GMCH_DISPLAY(dev_priv)) {

@@ -108,7 +107,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
-	if (IS_GEN4(dev_priv))
+	if (IS_GEN(dev_priv, 4))
 		pci_write_config_word(pdev, GCDGMBUS,
 				      dev_priv->regfile.saveGCDGMBUS);
 	i915_restore_display(dev_priv);

@@ -122,14 +121,14 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
 	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
 
 	/* Scratch space */
-	if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
 		for (i = 0; i < 7; i++) {
 			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
 			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
 		}
 		for (i = 0; i < 3; i++)
 			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
-	} else if (IS_GEN2(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 2)) {
 		for (i = 0; i < 7; i++)
 			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
 	} else if (HAS_GMCH_DISPLAY(dev_priv)) {
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 	ssize_t ret;
 
 	gpu = i915_first_error_state(i915);
-	if (gpu) {
+	if (IS_ERR(gpu)) {
+		ret = PTR_ERR(gpu);
+	} else if (gpu) {
 		ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
 		i915_gpu_state_put(gpu);
 	} else {
@@ -63,14 +63,6 @@ struct i915_timeline {
 	 * redundant and we can discard it without loss of generality.
 	 */
 	struct i915_syncmap *sync;
-	/**
-	 * Separately to the inter-context seqno map above, we track the last
-	 * barrier (e.g. semaphore wait) to the global engine timelines. Note
-	 * that this tracks global_seqno rather than the context.seqno, and
-	 * so it is subject to the limitations of hw wraparound and that we
-	 * may need to revoke global_seqno (on pre-emption).
-	 */
-	u32 global_sync[I915_NUM_ENGINES];
 
 	struct list_head link;
 	const char *name;
@@ -6,7 +6,6 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-#include <drm/drmP.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"

@@ -585,35 +584,6 @@ TRACE_EVENT(i915_gem_evict_vm,
 	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_ring_sync_to,
-	    TP_PROTO(struct i915_request *to, struct i915_request *from),
-	    TP_ARGS(to, from),
-
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     __field(u32, from_class)
-			     __field(u32, from_instance)
-			     __field(u32, to_class)
-			     __field(u32, to_instance)
-			     __field(u32, seqno)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->dev = from->i915->drm.primary->index;
-			   __entry->from_class = from->engine->uabi_class;
-			   __entry->from_instance = from->engine->instance;
-			   __entry->to_class = to->engine->uabi_class;
-			   __entry->to_instance = to->engine->instance;
-			   __entry->seqno = from->global_seqno;
-			   ),
-
-	    TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
-		      __entry->dev,
-		      __entry->from_class, __entry->from_instance,
-		      __entry->to_class, __entry->to_instance,
-		      __entry->seqno)
-);
-
 TRACE_EVENT(i915_request_queue,
 	    TP_PROTO(struct i915_request *rq, u32 flags),
 	    TP_ARGS(rq, flags),
@@ -6,7 +6,6 @@
  */
 #include <linux/pci.h>
 #include <linux/acpi.h>
-#include <drm/drmP.h>
 #include "i915_drv.h"
 
 #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
@@ -29,7 +29,6 @@
  * See intel_atomic_plane.c for the plane-specific atomic functionality.
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>

@@ -233,7 +232,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
 		if (plane_state && plane_state->base.fb &&
 		    plane_state->base.fb->format->is_yuv &&
 		    plane_state->base.fb->format->num_planes > 1) {
-			if (IS_GEN9(dev_priv) &&
+			if (IS_GEN(dev_priv, 9) &&
 			    !IS_GEMINILAKE(dev_priv)) {
 				mode = SKL_PS_SCALER_MODE_NV12;
 			} else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
@@ -31,7 +31,6 @@
  * prepare/check/commit/cleanup steps.
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
@@ -27,7 +27,6 @@
 #include <drm/intel_lpe_audio.h>
 #include "intel_drv.h"
 
-#include <drm/drmP.h>
 #include <drm/drm_edid.h>
 #include "i915_drv.h"
 

@@ -758,7 +757,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
-	if (!IS_GEN9(dev_priv))
+	if (!IS_GEN(dev_priv, 9))
 		return;
 
 	i915_audio_component_get_power(kdev);
@@ -26,7 +26,6 @@
  */
 
 #include <drm/drm_dp_helper.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 

@@ -453,7 +452,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
 	 * Only parse SDVO mappings on gens that could have SDVO. This isn't
 	 * accurate and doesn't have to be, as long as it's not too strict.
 	 */
-	if (!IS_GEN(dev_priv, 3, 7)) {
+	if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
 		DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
 		return;
 	}

@@ -1386,8 +1385,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 	info->supports_dp = is_dp;
 	info->supports_edp = is_edp;
 
-	DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
-		      port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+	if (bdb_version >= 195)
+		info->supports_typec_usb = child->dp_usb_type_c;
+
+	if (bdb_version >= 209)
+		info->supports_tbt = child->tbt;
+
+	DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n",
+		      port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt,
+		      info->supports_typec_usb, info->supports_tbt);
 
 	if (is_edp && is_dvi)
 		DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
@@ -166,12 +166,6 @@ static void irq_enable(struct intel_engine_cs *engine)
 	 */
 	GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
 
-	/* Enabling the IRQ may miss the generation of the interrupt, but
-	 * we still need to force the barrier before reading the seqno,
-	 * just in case.
-	 */
-	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
-
 	/* Caller disables interrupts */
 	if (engine->irq_enable) {
 		spin_lock(&engine->i915->irq_lock);

@@ -683,16 +677,6 @@ static int intel_breadcrumbs_signaler(void *arg)
 		}
 
 		if (unlikely(do_schedule)) {
-			/* Before we sleep, check for a missed seqno */
-			if (current->state & TASK_NORMAL &&
-			    !list_empty(&b->signals) &&
-			    engine->irq_seqno_barrier &&
-			    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
-					       &engine->irq_posted)) {
-				engine->irq_seqno_barrier(engine);
-				intel_engine_wakeup(engine);
-			}
-
 sleep:
 			if (kthread_should_park())
 				kthread_parkme();

@@ -859,16 +843,6 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 	else
 		irq_disable(engine);
 
-	/*
-	 * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
-	 * GPU is active and may have already executed the MI_USER_INTERRUPT
-	 * before the CPU is ready to receive. However, the engine is currently
-	 * idle (we haven't started it yet), there is no possibility for a
-	 * missed interrupt as we enabled the irq and so we can clear the
-	 * immediate wakeup (until a real interrupt arrives for the waiter).
-	 */
-	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
-
 	spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
@@ -2140,7 +2140,7 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
 {
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		return DIV_ROUND_UP(pixel_rate, 2);
-	else if (IS_GEN9(dev_priv) ||
+	else if (IS_GEN(dev_priv, 9) ||
 		 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
 		return pixel_rate;
 	else if (IS_CHERRYVIEW(dev_priv))

@@ -2176,7 +2176,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
 		/* Display WA #1145: glk,cnl */
 		min_cdclk = max(316800, min_cdclk);
-	} else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
 		/* Display WA #1144: skl,bxt */
 		min_cdclk = max(432000, min_cdclk);
 	}

@@ -2537,7 +2537,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		return 2 * max_cdclk_freq;
-	else if (IS_GEN9(dev_priv) ||
+	else if (IS_GEN(dev_priv, 9) ||
 		 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
 		return max_cdclk_freq;
 	else if (IS_CHERRYVIEW(dev_priv))

@@ -2785,9 +2785,9 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
 		dev_priv->display.get_cdclk = hsw_get_cdclk;
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		dev_priv->display.get_cdclk = vlv_get_cdclk;
-	else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
 		dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-	else if (IS_GEN5(dev_priv))
+	else if (IS_GEN(dev_priv, 5))
 		dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
 	else if (IS_GM45(dev_priv))
 		dev_priv->display.get_cdclk = gm45_get_cdclk;
@@ -74,12 +74,17 @@
 #define ILK_CSC_COEFF_1_0		\
	((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))

-static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
+static bool lut_is_legacy(struct drm_property_blob *lut)
 {
-	return !state->degamma_lut &&
-		!state->ctm &&
-		state->gamma_lut &&
-		drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH;
+	return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
+}
+
+static bool crtc_state_is_legacy_gamma(struct intel_crtc_state *crtc_state)
+{
+	return !crtc_state->base.degamma_lut &&
+		!crtc_state->base.ctm &&
+		crtc_state->base.gamma_lut &&
+		lut_is_legacy(crtc_state->base.gamma_lut);
 }
 
 /*
@@ -108,10 +113,10 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
 	return result;
 }
 
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
 {
-	int pipe = intel_crtc->pipe;
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	int pipe = crtc->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
 	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
@@ -132,14 +137,12 @@ static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
 	I915_WRITE(PIPE_CSC_MODE(pipe), 0);
 }
 
-static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = crtc_state->crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int i, pipe = intel_crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	int i, pipe = crtc->pipe;
 	uint16_t coeffs[9] = { 0, };
-	struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
 	bool limited_color_range = false;
 
 	/*
@@ -147,14 +150,14 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
	 * do the range compression using the gamma LUT instead.
	 */
	if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
-		limited_color_range = intel_crtc_state->limited_color_range;
+		limited_color_range = crtc_state->limited_color_range;

-	if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-	    intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
-		ilk_load_ycbcr_conversion_matrix(intel_crtc);
+	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+		ilk_load_ycbcr_conversion_matrix(crtc);
 		return;
-	} else if (crtc_state->ctm) {
-		struct drm_color_ctm *ctm = crtc_state->ctm->data;
+	} else if (crtc_state->base.ctm) {
+		struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
 		const u64 *input;
 		u64 temp[9];
 
@@ -253,16 +256,15 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
 /*
  * Set up the pipe CSC unit on CherryView.
  */
-static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+static void cherryview_load_csc_matrix(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = state->crtc;
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int pipe = to_intel_crtc(crtc)->pipe;
+	int pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 	uint32_t mode;
 
-	if (state->ctm) {
-		struct drm_color_ctm *ctm = state->ctm->data;
+	if (crtc_state->base.ctm) {
+		struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
 		uint16_t coeffs[9] = { 0, };
 		int i;
 
@@ -293,17 +295,17 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
 		I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
 	}
 
-	mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
-	if (!crtc_state_is_legacy_gamma(state)) {
-		mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
-			(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+	mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
+	if (!crtc_state_is_legacy_gamma(crtc_state)) {
+		mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+			(crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
 	}
 	I915_WRITE(CGM_PIPE_MODE(pipe), mode);
 }
 
-void intel_color_set_csc(struct drm_crtc_state *crtc_state)
+void intel_color_set_csc(struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc_state->crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (dev_priv->display.load_csc_matrix)
@@ -311,14 +313,12 @@ void intel_color_set_csc(struct drm_crtc_state *crtc_state)
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(struct drm_crtc *crtc,
-				    struct drm_property_blob *blob,
-				    struct intel_crtc_state *crtc_state)
+static void i9xx_load_luts_internal(struct intel_crtc_state *crtc_state,
+				    struct drm_property_blob *blob)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 	int i;
 
 	if (HAS_GMCH_DISPLAY(dev_priv)) {
@@ -353,53 +353,48 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
 	}
 }
 
-static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
+static void i9xx_load_luts(struct intel_crtc_state *crtc_state)
 {
-	i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
-				to_intel_crtc_state(crtc_state));
+	i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
-static void haswell_load_luts(struct drm_crtc_state *crtc_state)
+static void haswell_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = crtc_state->crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *intel_crtc_state =
-		to_intel_crtc_state(crtc_state);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	bool reenable_ips = false;
 
 	/*
	 * Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
-	if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
-	    (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
-		hsw_disable_ips(intel_crtc_state);
+	if (IS_HASWELL(dev_priv) && crtc_state->ips_enabled &&
+	    (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
+		hsw_disable_ips(crtc_state);
 		reenable_ips = true;
 	}
 
-	intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
-	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+	crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+	I915_WRITE(GAMMA_MODE(crtc->pipe), GAMMA_MODE_MODE_8BIT);
 
 	i9xx_load_luts(crtc_state);
 
 	if (reenable_ips)
-		hsw_enable_ips(intel_crtc_state);
+		hsw_enable_ips(crtc_state);
 }
 
-static void bdw_load_degamma_lut(struct drm_crtc_state *state)
+static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-	enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 	uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
 
 	I915_WRITE(PREC_PAL_INDEX(pipe),
		   PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
 
-	if (state->degamma_lut) {
-		struct drm_color_lut *lut = state->degamma_lut->data;
+	if (crtc_state->base.degamma_lut) {
+		struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
 
 		for (i = 0; i < lut_size; i++) {
 			uint32_t word =
@@ -419,10 +414,10 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
 	}
 }
 
-static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
+static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-	enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 	uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 
 	WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
@@ -432,8 +427,8 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
		   PAL_PREC_AUTO_INCREMENT |
		   offset);
 
-	if (state->gamma_lut) {
-		struct drm_color_lut *lut = state->gamma_lut->data;
+	if (crtc_state->base.gamma_lut) {
+		struct drm_color_lut *lut = crtc_state->base.gamma_lut->data;
 
 		for (i = 0; i < lut_size; i++) {
 			uint32_t word =
@@ -467,22 +462,21 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 }
 
 /* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(struct drm_crtc_state *state)
+static void broadwell_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-	struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
-	enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
-	if (crtc_state_is_legacy_gamma(state)) {
-		haswell_load_luts(state);
+	if (crtc_state_is_legacy_gamma(crtc_state)) {
+		haswell_load_luts(crtc_state);
 		return;
 	}
 
-	bdw_load_degamma_lut(state);
-	bdw_load_gamma_lut(state,
+	bdw_load_degamma_lut(crtc_state);
+	bdw_load_gamma_lut(crtc_state,
			   INTEL_INFO(dev_priv)->color.degamma_lut_size);
 
-	intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+	crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
 	I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
 	POSTING_READ(GAMMA_MODE(pipe));
 
@@ -493,10 +487,10 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
 	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
 }
 
-static void glk_load_degamma_lut(struct drm_crtc_state *state)
+static void glk_load_degamma_lut(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-	enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 	const uint32_t lut_size = 33;
 	uint32_t i;
 
@@ -523,49 +517,46 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
 	I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
 }
 
-static void glk_load_luts(struct drm_crtc_state *state)
+static void glk_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = state->crtc;
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
-	enum pipe pipe = to_intel_crtc(crtc)->pipe;
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
-	glk_load_degamma_lut(state);
+	glk_load_degamma_lut(crtc_state);
 
-	if (crtc_state_is_legacy_gamma(state)) {
-		haswell_load_luts(state);
+	if (crtc_state_is_legacy_gamma(crtc_state)) {
+		haswell_load_luts(crtc_state);
 		return;
 	}
 
-	bdw_load_gamma_lut(state, 0);
+	bdw_load_gamma_lut(crtc_state, 0);
 
-	intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
+	crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
 	I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT);
 	POSTING_READ(GAMMA_MODE(pipe));
 }
 
 /* Loads the palette/gamma unit for the CRTC on CherryView. */
-static void cherryview_load_luts(struct drm_crtc_state *state)
+static void cherryview_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = state->crtc;
+	struct drm_crtc *crtc = crtc_state->base.crtc;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	struct drm_color_lut *lut;
 	uint32_t i, lut_size;
 	uint32_t word0, word1;
 
-	if (crtc_state_is_legacy_gamma(state)) {
+	if (crtc_state_is_legacy_gamma(crtc_state)) {
 		/* Turn off degamma/gamma on CGM block. */
 		I915_WRITE(CGM_PIPE_MODE(pipe),
-			   (state->ctm ? CGM_PIPE_MODE_CSC : 0));
-		i9xx_load_luts_internal(crtc, state->gamma_lut,
-					to_intel_crtc_state(state));
+			   (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0));
+		i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
 		return;
 	}
 
-	if (state->degamma_lut) {
-		lut = state->degamma_lut->data;
+	if (crtc_state->base.degamma_lut) {
+		lut = crtc_state->base.degamma_lut->data;
 		lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
 		for (i = 0; i < lut_size; i++) {
 			/* Write LUT in U0.14 format. */
@@ -579,8 +570,8 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 		}
 	}
 
-	if (state->gamma_lut) {
-		lut = state->gamma_lut->data;
+	if (crtc_state->base.gamma_lut) {
+		lut = crtc_state->base.gamma_lut->data;
 		lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 		for (i = 0; i < lut_size; i++) {
 			/* Write LUT in U0.10 format. */
@@ -595,29 +586,28 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 	}
 
 	I915_WRITE(CGM_PIPE_MODE(pipe),
-		   (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
-		   (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
-		   (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
+		   (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0) |
+		   (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+		   (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
 
 	/*
	 * Also program a linear LUT in the legacy block (behind the
	 * CGM block).
	 */
-	i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
+	i9xx_load_luts_internal(crtc_state, NULL);
 }
 
-void intel_color_load_luts(struct drm_crtc_state *crtc_state)
+void intel_color_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc_state->crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	dev_priv->display.load_luts(crtc_state);
 }
 
-int intel_color_check(struct drm_crtc *crtc,
-		      struct drm_crtc_state *crtc_state)
+int intel_color_check(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	size_t gamma_length, degamma_length;
 
 	degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
@@ -627,10 +617,10 @@ int intel_color_check(struct drm_crtc *crtc,
	 * We allow both degamma & gamma luts at the right size or
	 * NULL.
	 */
-	if ((!crtc_state->degamma_lut ||
-	     drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) &&
-	    (!crtc_state->gamma_lut ||
-	     drm_color_lut_size(crtc_state->gamma_lut) == gamma_length))
+	if ((!crtc_state->base.degamma_lut ||
+	     drm_color_lut_size(crtc_state->base.degamma_lut) == degamma_length) &&
+	    (!crtc_state->base.gamma_lut ||
+	     drm_color_lut_size(crtc_state->base.gamma_lut) == gamma_length))
 		return 0;
 
 	/*
@@ -643,11 +633,11 @@ int intel_color_check(struct drm_crtc *crtc,
 	return -EINVAL;
 }
 
-void intel_color_init(struct drm_crtc *crtc)
+void intel_color_init(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	drm_mode_crtc_set_gamma_size(crtc, 256);
+	drm_mode_crtc_set_gamma_size(&crtc->base, 256);
 
 	if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
@@ -669,7 +659,7 @@ void intel_color_init(struct drm_crtc *crtc)
 	/* Enable color management support when we have degamma & gamma LUTs. */
 	if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
	    INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
-		drm_crtc_enable_color_mgmt(crtc,
+		drm_crtc_enable_color_mgmt(&crtc->base,
					   INTEL_INFO(dev_priv)->color.degamma_lut_size,
					   true,
					   INTEL_INFO(dev_priv)->color.gamma_lut_size);
@@ -27,7 +27,6 @@
 #include <linux/i2c.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drmP.h>
 #include "intel_drv.h"
 #include "i915_drv.h"
 
@@ -27,7 +27,6 @@
 #include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
@@ -322,7 +321,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
		 * DAC limit supposedly 355 MHz.
		 */
		max_clock = 270000;
-	else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
+	else if (IS_GEN_RANGE(dev_priv, 3, 4))
		max_clock = 400000;
	else
		max_clock = 350000;
@@ -667,7 +666,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
	/* Set the border color to purple. */
	I915_WRITE(bclrpat_reg, 0x500050);

-	if (!IS_GEN2(dev_priv)) {
+	if (!IS_GEN(dev_priv, 2)) {
		uint32_t pipeconf = I915_READ(pipeconf_reg);
		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
		POSTING_READ(pipeconf_reg);
@@ -982,7 +981,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
	else
		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

-	if (IS_GEN2(dev_priv))
+	if (IS_GEN(dev_priv, 2))
		connector->interlace_allowed = 0;
	else
		connector->interlace_allowed = 1;
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
	{ 0x2, 0x7F, 0x3F, 0x00, 0x00 },	/* 400   400      0.0   */
 };

-struct icl_combo_phy_ddi_buf_trans {
-	u32 dw2_swing_select;
-	u32 dw2_swing_scalar;
-	u32 dw4_scaling;
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x35, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x4F, 0x37, 0x00, 0x08 },	/* 350   500      3.1   */
+	{ 0xC, 0x71, 0x2F, 0x00, 0x10 },	/* 350   700      6.0   */
+	{ 0x6, 0x7F, 0x2B, 0x00, 0x14 },	/* 350   900      8.2   */
+	{ 0xA, 0x4C, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x73, 0x34, 0x00, 0x0B },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x2F, 0x00, 0x10 },	/* 500   900      5.1   */
+	{ 0xC, 0x6C, 0x3C, 0x00, 0x03 },	/* 650   700      0.6   */
+	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
 };
 
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
-				/* Voltage mV  db    */
-	{ 0x2, 0x98, 0x0018 },	/* 400         0.0   */
-	{ 0x2, 0x98, 0x3015 },	/* 400         3.5   */
-	{ 0x2, 0x98, 0x6012 },	/* 400         6.0   */
-	{ 0x2, 0x98, 0x900F },	/* 400         9.5   */
-	{ 0xB, 0x70, 0x0018 },	/* 600         0.0   */
-	{ 0xB, 0x70, 0x3015 },	/* 600         3.5   */
-	{ 0xB, 0x70, 0x6012 },	/* 600         6.0   */
-	{ 0x5, 0x00, 0x0018 },	/* 800         0.0   */
-	{ 0x5, 0x00, 0x3015 },	/* 800         3.5   */
-	{ 0x6, 0x98, 0x0018 },	/* 1200        0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+						/* NT mV Trans mV db    */
+	{ 0x0, 0x7F, 0x3F, 0x00, 0x00 },	/* 200   200      0.0   */
+	{ 0x8, 0x7F, 0x38, 0x00, 0x07 },	/* 200   250      1.9   */
+	{ 0x1, 0x7F, 0x33, 0x00, 0x0C },	/* 200   300      3.5   */
+	{ 0x9, 0x7F, 0x31, 0x00, 0x0E },	/* 200   350      4.9   */
+	{ 0x8, 0x7F, 0x3F, 0x00, 0x00 },	/* 250   250      0.0   */
+	{ 0x1, 0x7F, 0x38, 0x00, 0x07 },	/* 250   300      1.6   */
+	{ 0x9, 0x7F, 0x35, 0x00, 0x0A },	/* 250   350      2.9   */
+	{ 0x1, 0x7F, 0x3F, 0x00, 0x00 },	/* 300   300      0.0   */
+	{ 0x9, 0x7F, 0x38, 0x00, 0x07 },	/* 300   350      1.3   */
+	{ 0x9, 0x7F, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
 };
 
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
-				/* Voltage mV  db    */
-	{ 0x0, 0x00, 0x00 },	/* 200         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 200         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         6.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 250         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x35, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x4F, 0x37, 0x00, 0x08 },	/* 350   500      3.1   */
+	{ 0xC, 0x71, 0x2F, 0x00, 0x10 },	/* 350   700      6.0   */
+	{ 0x6, 0x7F, 0x2B, 0x00, 0x14 },	/* 350   900      8.2   */
+	{ 0xA, 0x4C, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x73, 0x34, 0x00, 0x0B },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x2F, 0x00, 0x10 },	/* 500   900      5.1   */
+	{ 0xC, 0x6C, 0x3C, 0x00, 0x03 },	/* 650   700      0.6   */
+	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
 };
 
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
-				/* Voltage mV  db    */
-	{ 0x2, 0x98, 0x0018 },	/* 400         0.0   */
-	{ 0x2, 0x98, 0x3015 },	/* 400         3.5   */
-	{ 0x2, 0x98, 0x6012 },	/* 400         6.0   */
-	{ 0x2, 0x98, 0x900F },	/* 400         9.5   */
-	{ 0x4, 0x98, 0x0018 },	/* 600         0.0   */
-	{ 0x4, 0x98, 0x3015 },	/* 600         3.5   */
-	{ 0x4, 0x98, 0x6012 },	/* 600         6.0   */
-	{ 0x5, 0x76, 0x0018 },	/* 800         0.0   */
-	{ 0x5, 0x76, 0x3015 },	/* 800         3.5   */
-	{ 0x6, 0x98, 0x0018 },	/* 1200        0.0   */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
-				/* Voltage mV  db    */
-	{ 0x0, 0x00, 0x00 },	/* 200         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 200         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         6.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 250         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = {
-				/* Voltage mV  db    */
-	{ 0x2, 0x98, 0x0018 },	/* 400         0.0   */
-	{ 0x2, 0x98, 0x3015 },	/* 400         3.5   */
-	{ 0x2, 0x98, 0x6012 },	/* 400         6.0   */
-	{ 0x2, 0x98, 0x900F },	/* 400         9.5   */
-	{ 0x4, 0x98, 0x0018 },	/* 600         0.0   */
-	{ 0x4, 0x98, 0x3015 },	/* 600         3.5   */
-	{ 0x4, 0x98, 0x6012 },	/* 600         6.0   */
-	{ 0x5, 0x71, 0x0018 },	/* 800         0.0   */
-	{ 0x5, 0x71, 0x3015 },	/* 800         3.5   */
-	{ 0x6, 0x98, 0x0018 },	/* 1200        0.0   */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = {
-				/* Voltage mV  db    */
-	{ 0x0, 0x00, 0x00 },	/* 200         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 200         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         6.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 250         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x60, 0x3F, 0x00, 0x00 },	/* 450   450      0.0   */
+	{ 0xB, 0x73, 0x36, 0x00, 0x09 },	/* 450   650      3.2   */
+	{ 0x6, 0x7F, 0x31, 0x00, 0x0E },	/* 450   850      5.5   */
+	{ 0xB, 0x73, 0x3F, 0x00, 0x00 },	/* 650   650      0.0 ALS */
+	{ 0x6, 0x7F, 0x37, 0x00, 0x08 },	/* 650   850      2.3   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 850   850      0.0   */
+	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   850      3.0   */
 };
 
 struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 	}
 }
 
-static const struct icl_combo_phy_ddi_buf_trans *
+static const struct cnl_ddi_buf_trans *
 icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
-			int type, int *n_entries)
+			int type, int rate, int *n_entries)
 {
-	u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK;
-
-	if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
-		switch (voltage) {
-		case VOLTAGE_INFO_0_85V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V);
-			return icl_combo_phy_ddi_translations_edp_0_85V;
-		case VOLTAGE_INFO_0_95V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V);
-			return icl_combo_phy_ddi_translations_edp_0_95V;
-		case VOLTAGE_INFO_1_05V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
-			return icl_combo_phy_ddi_translations_edp_1_05V;
-		default:
-			MISSING_CASE(voltage);
-			return NULL;
-		}
-	} else {
-		switch (voltage) {
-		case VOLTAGE_INFO_0_85V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
-			return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
-		case VOLTAGE_INFO_0_95V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
-			return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
-		case VOLTAGE_INFO_1_05V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
-			return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
-		default:
-			MISSING_CASE(voltage);
-			return NULL;
-		}
+	if (type == INTEL_OUTPUT_HDMI) {
+		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+		return icl_combo_phy_ddi_translations_hdmi;
+	} else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
+		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+		return icl_combo_phy_ddi_translations_edp_hbr3;
+	} else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+		return icl_combo_phy_ddi_translations_edp_hbr2;
 	}
+
+	*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+	return icl_combo_phy_ddi_translations_dp_hbr2;
 }
 
 static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)

	if (IS_ICELAKE(dev_priv)) {
		if (intel_port_is_combophy(dev_priv, port))
-			icl_get_combo_buf_trans(dev_priv, port,
-						INTEL_OUTPUT_HDMI, &n_entries);
+			icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+						0, &n_entries);
		else
			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
		default_entry = n_entries - 1;
@@ -1361,6 +1296,9 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
	dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
		     1000) / 0x8000;

+	if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+		return 0;
+
	return dco_freq / (p0 * p1 * p2 * 5);
 }

@@ -1880,7 +1818,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
		temp |= TRANS_DDI_MODE_SELECT_DVI;

		if (crtc_state->hdmi_scrambling)
-			temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
+			temp |= TRANS_DDI_HDMI_SCRAMBLING;
		if (crtc_state->hdmi_high_tmds_clock_ratio)
			temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
@@ -2275,13 +2213,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 {
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;
	int n_entries;

	if (IS_ICELAKE(dev_priv)) {
		if (intel_port_is_combophy(dev_priv, port))
			icl_get_combo_buf_trans(dev_priv, port, encoder->type,
-						&n_entries);
+						intel_dp->link_rate, &n_entries);
		else
			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
	} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2401,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 }
 
 static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
-					 u32 level, enum port port, int type)
+					 u32 level, enum port port, int type,
+					 int rate)
 {
-	const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL;
+	const struct cnl_ddi_buf_trans *ddi_translations = NULL;
	u32 n_entries, val;
	int ln;

	ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
-						   &n_entries);
+						   rate, &n_entries);
	if (!ddi_translations)
		return;

@@ -2478,34 +2418,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
		level = n_entries - 1;
	}

-	/* Set PORT_TX_DW5 Rterm Sel to 110b. */
+	/* Set PORT_TX_DW5 */
	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-	val &= ~RTERM_SELECT_MASK;
+	val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
+		 TAP2_DISABLE | TAP3_DISABLE);
+	val |= SCALING_MODE_SEL(0x2);
	val |= RTERM_SELECT(0x6);
-	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
-
-	/* Program PORT_TX_DW5 */
-	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-	/* Set DisableTap2 and DisableTap3 if MIPI DSI
-	 * Clear DisableTap2 and DisableTap3 for all other Ports
-	 */
-	if (type == INTEL_OUTPUT_DSI) {
-		val |= TAP2_DISABLE;
-		val |= TAP3_DISABLE;
-	} else {
-		val &= ~TAP2_DISABLE;
-		val &= ~TAP3_DISABLE;
-	}
+	val |= TAP3_DISABLE;
	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);

	/* Program PORT_TX_DW2 */
	val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
	val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
		 RCOMP_SCALAR_MASK);
-	val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select);
-	val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select);
-	val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar);
+	val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
+	val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
+	/* Program Rcomp scalar for every table entry */
+	val |= RCOMP_SCALAR(0x98);
	I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);

	/* Program PORT_TX_DW4 */
@@ -2514,9 +2443,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
		val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
		val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
			 CURSOR_COEFF_MASK);
-		val |= ddi_translations[level].dw4_scaling;
+		val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
+		val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
+		val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
		I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
	}
+
+	/* Program PORT_TX_DW7 */
+	val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
+	val &= ~N_SCALAR_MASK;
+	val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
+	I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
 }
@@ -2581,7 +2518,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);

	/* 5. Program swing and de-emphasis */
-	icl_ddi_combo_vswing_program(dev_priv, level, port, type);
+	icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);

	/* 6. Set training enable to trigger update */
	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
@@ -3603,6 +3540,24 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
	intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
 }

+static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
+				     const struct intel_crtc_state *crtc_state,
+				     const struct drm_connector_state *conn_state)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	intel_psr_enable(intel_dp, crtc_state);
+	intel_edp_drrs_enable(intel_dp, crtc_state);
+}
+
+static void intel_ddi_update_pipe(struct intel_encoder *encoder,
+				  const struct intel_crtc_state *crtc_state,
+				  const struct drm_connector_state *conn_state)
+{
+	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+		intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+}
+
 static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
					 const struct intel_crtc_state *pipe_config,
					 enum port port)
@@ -3793,8 +3748,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
	if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
		pipe_config->has_infoframe = true;

-	if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
-	    TRANS_DDI_HDMI_SCRAMBLING_MASK)
+	if (temp & TRANS_DDI_HDMI_SCRAMBLING)
		pipe_config->hdmi_scrambling = true;
	if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
		pipe_config->hdmi_high_tmds_clock_ratio = true;
@@ -3901,9 +3855,50 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 
 }
 
+static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+	intel_dp_encoder_suspend(encoder);
+
+	/*
+	 * TODO: disconnect also from USB DP alternate mode once we have a
+	 * way to handle the modeset restore in that mode during resume
+	 * even if the sink has disappeared while being suspended.
+	 */
+	if (dig_port->tc_legacy_port)
+		icl_tc_phy_disconnect(i915, dig_port);
+}
+
+static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
+{
+	struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
+	struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
+
+	if (intel_port_is_tc(i915, dig_port->base.port))
+		intel_digital_port_connected(&dig_port->base);
+
+	intel_dp_encoder_reset(drm_encoder);
+}
+
+static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	struct drm_i915_private *i915 = to_i915(encoder->dev);
+
+	intel_dp_encoder_flush_work(encoder);
+
+	if (intel_port_is_tc(i915, dig_port->base.port))
+		icl_tc_phy_disconnect(i915, dig_port);
+
+	drm_encoder_cleanup(encoder);
+	kfree(dig_port);
+}
+
 static const struct drm_encoder_funcs intel_ddi_funcs = {
-	.reset = intel_dp_encoder_reset,
-	.destroy = intel_dp_encoder_destroy,
+	.reset = intel_ddi_encoder_reset,
+	.destroy = intel_ddi_encoder_destroy,
 };
 
 static struct intel_connector *
@@ -4147,16 +4142,16 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
 
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 {
+	struct ddi_vbt_port_info *port_info =
+		&dev_priv->vbt.ddi_port_info[port];
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	bool init_hdmi, init_dp, init_lspcon = false;
	enum pipe pipe;

-
-	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
-		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
-	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+	init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
+	init_dp = port_info->supports_dp;

	if (intel_bios_is_lspcon_present(dev_priv, port)) {
		/*
@@ -4195,9 +4190,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
	intel_encoder->pre_enable = intel_ddi_pre_enable;
	intel_encoder->disable = intel_disable_ddi;
	intel_encoder->post_disable = intel_ddi_post_disable;
+	intel_encoder->update_pipe = intel_ddi_update_pipe;
	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
	intel_encoder->get_config = intel_ddi_get_config;
-	intel_encoder->suspend = intel_dp_encoder_suspend;
+	intel_encoder->suspend = intel_ddi_encoder_suspend;
	intel_encoder->get_power_domains = intel_ddi_get_power_domains;
	intel_encoder->type = INTEL_OUTPUT_DDI;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -4216,6 +4212,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
	intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);

+	intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
+					 !port_info->supports_typec_usb &&
+					 !port_info->supports_tbt;
+
	switch (port) {
	case PORT_A:
		intel_dig_port->ddi_io_power_domain =
@@ -4274,6 +4274,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
	}

	intel_infoframe_init(intel_dig_port);
+
+	if (intel_port_is_tc(dev_priv, port))
+		intel_digital_port_connected(intel_encoder);
+
	return;

 err:
@@ -104,7 +104,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
 }

-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
				    struct drm_printer *p)
 {
	sseu_dump(&info->sseu, p);
@@ -113,21 +113,6 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info,
		   info->cs_timestamp_frequency_khz);
 }

-void intel_device_info_dump(const struct intel_device_info *info,
-			    struct drm_printer *p)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(info, struct drm_i915_private, info);
-
-	drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
-		   INTEL_DEVID(dev_priv),
-		   INTEL_REVID(dev_priv),
-		   intel_platform_name(info->platform),
-		   info->gen);
-
-	intel_device_info_dump_flags(info, p);
-}
-
 void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
 {
@@ -164,7 +149,7 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
 
 static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
@@ -203,7 +188,7 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
@@ -280,7 +265,7 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;

	fuse = I915_READ(CHV_FUSE_GT);
@@ -334,7 +319,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
-	struct sseu_dev_info *sseu = &info->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;
@@ -437,7 +422,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

@@ -519,8 +504,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
-	struct sseu_dev_info *sseu = &info->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	int s, ss;

@@ -528,9 +512,9 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
-	switch (info->gt) {
+	switch (INTEL_INFO(dev_priv)->gt) {
	default:
-		MISSING_CASE(info->gt);
+		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
@@ -725,7 +709,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 
 /**
  * intel_device_info_runtime_init - initialize runtime info
- * @info: intel device info struct
+ * @dev_priv: the i915 device
  *
  * Determine various intel_device_info fields at runtime.
  *
@@ -739,29 +723,29 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
  * - after the PCH has been detected,
  * - before the first usage of the fields it can tweak.
  */
-void intel_device_info_runtime_init(struct intel_device_info *info)
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(info, struct drm_i915_private, info);
+	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
-			info->num_scalers[pipe] = 2;
-	} else if (IS_GEN9(dev_priv)) {
-		info->num_scalers[PIPE_A] = 2;
-		info->num_scalers[PIPE_B] = 2;
-		info->num_scalers[PIPE_C] = 1;
+			runtime->num_scalers[pipe] = 2;
+	} else if (IS_GEN(dev_priv, 9)) {
+		runtime->num_scalers[PIPE_A] = 2;
+		runtime->num_scalers[PIPE_B] = 2;
+		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));

-	if (IS_GEN11(dev_priv))
+	if (IS_GEN(dev_priv, 11))
		for_each_pipe(dev_priv, pipe)
-			info->num_sprites[pipe] = 6;
-	else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
+			runtime->num_sprites[pipe] = 6;
+	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
-			info->num_sprites[pipe] = 3;
+			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
@@ -772,22 +756,22 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
		 * down the line.
		 */

-		info->num_sprites[PIPE_A] = 2;
-		info->num_sprites[PIPE_B] = 2;
-		info->num_sprites[PIPE_C] = 1;
+		runtime->num_sprites[PIPE_A] = 2;
+		runtime->num_sprites[PIPE_B] = 2;
+		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
-			info->num_sprites[pipe] = 2;
+			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
-			info->num_sprites[pipe] = 1;
+			runtime->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (HAS_DISPLAY(dev_priv) &&
-		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
+		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -811,7 +795,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
-	} else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
+	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
@@ -851,20 +835,20 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
-	else if (IS_GEN9(dev_priv))
+	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
-	else if (IS_GEN10(dev_priv))
+	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

-	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
+	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
-	info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
 }

 void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -884,35 +868,44 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
 void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 {
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
-	u32 media_fuse;
+	unsigned int logical_vdbox = 0;
	unsigned int i;
+	u32 media_fuse;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

-	info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
-	info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
-			     GEN11_GT_VEBOX_DISABLE_SHIFT;
+	RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+	RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+		GEN11_GT_VEBOX_DISABLE_SHIFT;

-	DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
+	DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

-		if (!(BIT(i) & info->vdbox_enable)) {
+		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
			info->ring_mask &= ~ENGINE_MASK(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+			continue;
		}
+
+		/*
+		 * In Gen11, only even numbered logical VDBOXes are
+		 * hooked up to an SFC (Scaler & Format Converter) unit.
+		 */
+		if (logical_vdbox++ % 2 == 0)
+			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}

-	DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
+	DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

-		if (!(BIT(i) & info->vebox_enable)) {
+		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
			info->ring_mask &= ~ENGINE_MASK(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
@@ -89,6 +89,7 @@ enum intel_ppgtt {
	func(is_alpha_support); \
	/* Keep has_* in alphabetical order */ \
	func(has_64bit_reloc); \
+	func(gpu_reset_clobbers_display); \
	func(has_reset_engine); \
	func(has_fpga_dbg); \
	func(has_guc); \
@@ -152,12 +153,10 @@ struct sseu_dev_info {
 typedef u8 intel_ring_mask_t;
 
 struct intel_device_info {
-	u16 device_id;
	u16 gen_mask;

	u8 gen;
	u8 gt; /* GT number, 0 if undefined */
-	u8 num_rings;
	intel_ring_mask_t ring_mask; /* Rings supported by the HW */

	enum intel_platform platform;
@@ -169,8 +168,6 @@ struct intel_device_info {
	u32 display_mmio_offset;

	u8 num_pipes;
-	u8 num_sprites[I915_MAX_PIPES];
-	u8 num_scalers[I915_MAX_PIPES];

 #define DEFINE_FLAG(name) u8 name:1
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
@@ -189,6 +186,20 @@ struct intel_device_info {
	int trans_offsets[I915_MAX_TRANSCODERS];
	int cursor_offsets[I915_MAX_PIPES];

+	struct color_luts {
+		u16 degamma_lut_size;
+		u16 gamma_lut_size;
+	} color;
+};
+
+struct intel_runtime_info {
+	u16 device_id;
+
+	u8 num_sprites[I915_MAX_PIPES];
+	u8 num_scalers[I915_MAX_PIPES];
+
+	u8 num_rings;
+
	/* Slice/subslice/EU info */
	struct sseu_dev_info sseu;

@@ -198,10 +209,8 @@ struct intel_device_info {
	u8 vdbox_enable;
	u8 vebox_enable;

-	struct color_luts {
-		u16 degamma_lut_size;
-		u16 gamma_lut_size;
-	} color;
+	/* Media engine access to SFC per instance */
+	u8 vdbox_sfc_access;
 };
 
 struct intel_driver_caps {
@@ -258,12 +267,10 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
 
 const char *intel_platform_name(enum intel_platform platform);
 
-void intel_device_info_runtime_init(struct intel_device_info *info);
-void intel_device_info_dump(const struct intel_device_info *info,
-			    struct drm_printer *p);
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
 void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p);
-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
				    struct drm_printer *p);
 void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p);
@@ -31,7 +31,6 @@
 #include <linux/slab.h>
 #include <linux/vgaarb.h>
 #include <drm/drm_edid.h>
-#include <drm/drmP.h>
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>

@@ -984,7 +983,7 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
     u32 line1, line2;
     u32 line_mask;

-    if (IS_GEN2(dev_priv))
+    if (IS_GEN(dev_priv, 2))
         line_mask = DSL_LINEMASK_GEN2;
     else
         line_mask = DSL_LINEMASK_GEN3;

@@ -1110,7 +1109,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
     u32 val;

     /* ILK FDI PLL is always enabled */
-    if (IS_GEN5(dev_priv))
+    if (IS_GEN(dev_priv, 5))
         return;

     /* On Haswell, DDI ports are responsible for the FDI PLL setup */

@@ -1850,7 +1849,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)

 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
 {
-    return IS_GEN2(dev_priv) ? 2048 : 4096;
+    return IS_GEN(dev_priv, 2) ? 2048 : 4096;
 }

 static unsigned int

@@ -1863,7 +1862,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
     case DRM_FORMAT_MOD_LINEAR:
         return cpp;
     case I915_FORMAT_MOD_X_TILED:
-        if (IS_GEN2(dev_priv))
+        if (IS_GEN(dev_priv, 2))
             return 128;
         else
             return 512;

@@ -1872,7 +1871,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
             return 128;
         /* fall through */
     case I915_FORMAT_MOD_Y_TILED:
-        if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
+        if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
             return 128;
         else
             return 512;
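The IS_GENn(dev_priv) to IS_GEN(dev_priv, n) rewrites above (and throughout the rest of this series) are one mechanical pattern. A hedged sketch of how such a macro can be built on a per-generation bitmask, so a compile-time-constant n folds to a single AND, is below; the driver's exact definition may differ, and all ex_* names are invented.

#include <assert.h>

/* Sketch: "is generation n" encoded as one bit in a mask. */
#define EX_GEN_MASK(n)  (1u << ((n) - 1))

struct ex_i915 {
    unsigned int gen_mask;  /* EX_GEN_MASK(gen) set at probe time */
};

#define EX_IS_GEN(i915, n) \
    (!!((i915)->gen_mask & EX_GEN_MASK(n)))

/* Range test: one mask covering generations s..e inclusive,
 * replacing chained IS_GEN5 || IS_GEN6 style checks. */
#define EX_GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
#define EX_IS_GEN_RANGE(i915, s, e) \
    (!!((i915)->gen_mask & EX_GENMASK((e) - 1, (s) - 1)))

int main(void)
{
    struct ex_i915 snb = { .gen_mask = EX_GEN_MASK(6) };

    assert(EX_IS_GEN(&snb, 6));
    assert(!EX_IS_GEN(&snb, 7));
    assert(EX_IS_GEN_RANGE(&snb, 5, 6));
    return 0;
}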
@@ -3193,8 +3192,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,

     dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

-    if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
-        IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+    if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+        IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
         dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

     if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))

@@ -3746,8 +3745,8 @@ __intel_display_resume(struct drm_device *dev,

 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
 {
-    return intel_has_gpu_reset(dev_priv) &&
-        INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
+    return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
+        intel_has_gpu_reset(dev_priv));
 }

 void intel_prepare_reset(struct drm_i915_private *dev_priv)

@@ -4120,7 +4119,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
     temp = I915_READ(reg);
     temp &= ~FDI_LINK_TRAIN_NONE;
     temp |= FDI_LINK_TRAIN_PATTERN_2;
-    if (IS_GEN6(dev_priv)) {
+    if (IS_GEN(dev_priv, 6)) {
         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
         /* SNB-B */
         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;

@@ -4919,10 +4918,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
     /* range checks */
     if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
         dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
-        (IS_GEN11(dev_priv) &&
+        (IS_GEN(dev_priv, 11) &&
          (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
           dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
-        (!IS_GEN11(dev_priv) &&
+        (!IS_GEN(dev_priv, 11) &&
          (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
           dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
         DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "

@@ -5213,7 +5212,7 @@ intel_post_enable_primary(struct drm_crtc *crtc,
     * FIXME: Need to fix the logic to work when we turn off all planes
     * but leave the pipe running.
     */
-    if (IS_GEN2(dev_priv))
+    if (IS_GEN(dev_priv, 2))
         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

     /* Underruns don't always raise interrupts, so check manually. */

@@ -5234,7 +5233,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
     * Gen2 reports pipe underruns whenever all planes are disabled.
     * So disable underrun reporting before all the planes get disabled.
     */
-    if (IS_GEN2(dev_priv))
+    if (IS_GEN(dev_priv, 2))
         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

     hsw_disable_ips(to_intel_crtc_state(crtc->state));

@@ -5292,7 +5291,7 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
         return false;

     /* WA Display #0827: Gen9:all */
-    if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+    if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
         return true;

     return false;

@@ -5365,7 +5364,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
     * Gen2 reports pipe underruns whenever all planes are disabled.
     * So disable underrun reporting before all the planes get disabled.
     */
-    if (IS_GEN2(dev_priv) && old_primary_state->visible &&
+    if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
         (modeset || !new_primary_state->base.visible))
         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
 }
@@ -5578,6 +5577,26 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
     }
 }

+static void intel_encoders_update_pipe(struct drm_crtc *crtc,
+                       struct intel_crtc_state *crtc_state,
+                       struct drm_atomic_state *old_state)
+{
+    struct drm_connector_state *conn_state;
+    struct drm_connector *conn;
+    int i;
+
+    for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+        struct intel_encoder *encoder =
+            to_intel_encoder(conn_state->best_encoder);
+
+        if (conn_state->crtc != crtc)
+            continue;
+
+        if (encoder->update_pipe)
+            encoder->update_pipe(encoder, crtc_state, conn_state);
+    }
+}
+
 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                  struct drm_atomic_state *old_state)
 {
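The new intel_encoders_update_pipe() above gives encoders a fastset hook: the pipe keeps running while per-encoder features are refreshed against the new CRTC state. As a hedged illustration of the shape of such a hook (ex_* types below are invented stand-ins, not the intel_* ones):

struct ex_crtc_state { int has_psr; int has_drrs; };

struct ex_encoder {
    /* Called on a fastset instead of the full enable sequence. */
    void (*update_pipe)(struct ex_encoder *encoder,
                        const struct ex_crtc_state *crtc_state);
};

static void ex_update_pipe(struct ex_encoder *encoder,
                           const struct ex_crtc_state *crtc_state)
{
    if (crtc_state->has_psr) {
        /* re-enable PSR against the new state here */
    }
    if (crtc_state->has_drrs) {
        /* re-enable DRRS against the new state here */
    }
}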
@@ -5641,7 +5660,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
-    intel_color_load_luts(&pipe_config->base);
+    intel_color_load_luts(pipe_config);

     if (dev_priv->display.initial_watermarks != NULL)
         dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

@@ -5752,7 +5771,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,

     haswell_set_pipemisc(pipe_config);

-    intel_color_set_csc(&pipe_config->base);
+    intel_color_set_csc(pipe_config);

     intel_crtc->active = true;

@@ -5771,7 +5790,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
-    intel_color_load_luts(&pipe_config->base);
+    intel_color_load_luts(pipe_config);

     /*
     * Display WA #1153: enable hardware to bypass the alpha math

@@ -6117,7 +6136,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,

     i9xx_set_pipeconf(pipe_config);

-    intel_color_set_csc(&pipe_config->base);
+    intel_color_set_csc(pipe_config);

     intel_crtc->active = true;

@@ -6137,7 +6156,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,

     i9xx_pfit_enable(pipe_config);

-    intel_color_load_luts(&pipe_config->base);
+    intel_color_load_luts(pipe_config);

     dev_priv->display.initial_watermarks(old_intel_state,
                          pipe_config);

@@ -6184,7 +6203,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,

     intel_crtc->active = true;

-    if (!IS_GEN2(dev_priv))
+    if (!IS_GEN(dev_priv, 2))
         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

     intel_encoders_pre_enable(crtc, pipe_config, old_state);

@@ -6193,7 +6212,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,

     i9xx_pfit_enable(pipe_config);

-    intel_color_load_luts(&pipe_config->base);
+    intel_color_load_luts(pipe_config);

     if (dev_priv->display.initial_watermarks != NULL)
         dev_priv->display.initial_watermarks(old_intel_state,

@@ -6236,7 +6255,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
     * On gen2 planes are double buffered but the pipe isn't, so we must
     * wait for planes to fully turn off before disabling the pipe.
     */
-    if (IS_GEN2(dev_priv))
+    if (IS_GEN(dev_priv, 2))
         intel_wait_for_vblank(dev_priv, pipe);

     intel_encoders_disable(crtc, old_crtc_state, old_state);

@@ -6261,7 +6280,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,

     intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

-    if (!IS_GEN2(dev_priv))
+    if (!IS_GEN(dev_priv, 2))
         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

     if (!dev_priv->display.initial_watermarks)

@@ -6868,7 +6887,7 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
     * Strictly speaking some registers are available before
     * gen7, but we only support DRRS on gen7+
     */
-    return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
+    return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
 }

 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,

@@ -9005,7 +9024,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
     /* We currently do not free assignements of panel fitters on
     * ivb/hsw (since we don't use the higher upscaling modes which
     * differentiates them) so just WARN about this case for now. */
-    if (IS_GEN7(dev_priv)) {
+    if (IS_GEN(dev_priv, 7)) {
         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
             PF_PIPE_SEL_IVB(crtc->pipe));
     }

@@ -9995,7 +10014,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
     struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
     u32 cntl = 0;

-    if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+    if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
         cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

     if (INTEL_GEN(dev_priv) <= 10) {

@@ -10468,7 +10487,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
         return dev_priv->vbt.lvds_ssc_freq;
     else if (HAS_PCH_SPLIT(dev_priv))
         return 120000;
-    else if (!IS_GEN2(dev_priv))
+    else if (!IS_GEN(dev_priv, 2))
         return 96000;
     else
         return 48000;

@@ -10501,7 +10520,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
         clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
     }

-    if (!IS_GEN2(dev_priv)) {
+    if (!IS_GEN(dev_priv, 2)) {
         if (IS_PINEVIEW(dev_priv))
             clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                        DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);

@@ -10653,20 +10672,17 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)

 /**
  * intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
+ * @cur: current plane state
+ * @new: new plane state
  *
  * Check current plane state versus the new one to determine whether
  * watermarks need to be recalculated.
  *
  * Returns true or false.
  */
-static bool intel_wm_need_update(struct drm_plane *plane,
-                 struct drm_plane_state *state)
+static bool intel_wm_need_update(struct intel_plane_state *cur,
+                 struct intel_plane_state *new)
 {
-    struct intel_plane_state *new = to_intel_plane_state(state);
-    struct intel_plane_state *cur = to_intel_plane_state(plane->state);
-
     /* Update watermarks on tiling or size changes. */
     if (new->base.visible != cur->base.visible)
         return true;
@@ -10775,7 +10791,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
         /* must disable cxsr around plane enable/disable */
         if (plane->id != PLANE_CURSOR)
             pipe_config->disable_cxsr = true;
-    } else if (intel_wm_need_update(&plane->base, plane_state)) {
+    } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
+                    to_intel_plane_state(plane_state))) {
         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
             /* FIXME bollocks */
             pipe_config->update_wm_pre = true;

@@ -10817,7 +10834,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
     * the w/a on all three platforms.
     */
     if (plane->id == PLANE_SPRITE0 &&
-        (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+        (IS_GEN_RANGE(dev_priv, 5, 6) ||
          IS_IVYBRIDGE(dev_priv)) &&
         (turn_on || (!needs_scaling(old_plane_state) &&
              needs_scaling(to_intel_plane_state(plane_state)))))

@@ -10954,8 +10971,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                    struct drm_crtc_state *crtc_state)
 {
-    struct drm_device *dev = crtc->dev;
-    struct drm_i915_private *dev_priv = to_i915(dev);
+    struct drm_i915_private *dev_priv = to_i915(crtc->dev);
     struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
     struct intel_crtc_state *pipe_config =
         to_intel_crtc_state(crtc_state);

@@ -10975,7 +10991,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
     }

     if (crtc_state->color_mgmt_changed) {
-        ret = intel_color_check(crtc, crtc_state);
+        ret = intel_color_check(pipe_config);
         if (ret)
             return ret;

@@ -11004,9 +11020,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
         * old state and the new state. We can program these
         * immediately.
         */
-        ret = dev_priv->display.compute_intermediate_wm(dev,
-                                intel_crtc,
-                                pipe_config);
+        ret = dev_priv->display.compute_intermediate_wm(pipe_config);
         if (ret) {
             DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
             return ret;

@@ -11014,7 +11028,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
     }

     if (INTEL_GEN(dev_priv) >= 9) {
-        if (mode_changed)
+        if (mode_changed || pipe_config->update_pipe)
             ret = skl_update_scaler_crtc(pipe_config);

         if (!ret)

@@ -11967,7 +11981,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
     if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
         return;

-    skl_pipe_wm_get_hw_state(crtc, &hw_wm);
+    skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
     sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

     skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);

@@ -12381,7 +12395,7 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
     * However if queried just before the start of vblank we'll get an
     * answer that's slightly in the future.
     */
-    if (IS_GEN2(dev_priv)) {
+    if (IS_GEN(dev_priv, 2)) {
         const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
         int vtotal;

@@ -12622,9 +12636,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
 * phase. The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 */
-static int calc_watermark_data(struct drm_atomic_state *state)
+static int calc_watermark_data(struct intel_atomic_state *state)
 {
-    struct drm_device *dev = state->dev;
+    struct drm_device *dev = state->base.dev;
     struct drm_i915_private *dev_priv = to_i915(dev);

     /* Is there platform-specific watermark information to calculate? */

@@ -12720,7 +12734,7 @@ static int intel_atomic_check(struct drm_device *dev,
         return ret;

     intel_fbc_choose_crtc(dev_priv, intel_state);
-    return calc_watermark_data(state);
+    return calc_watermark_data(intel_state);
 }

 static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -12762,9 +12776,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
     } else {
         intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
                        pipe_config);
+
+        if (pipe_config->update_pipe)
+            intel_encoders_update_pipe(crtc, pipe_config, state);
     }

-    if (new_plane_state)
+    if (pipe_config->update_pipe && !pipe_config->enable_fbc)
+        intel_fbc_disable(intel_crtc);
+    else if (new_plane_state)
         intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

     intel_begin_crtc_commit(crtc, old_crtc_state);

@@ -13559,8 +13578,8 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
     if (!modeset &&
         (intel_cstate->base.color_mgmt_changed ||
          intel_cstate->update_pipe)) {
-        intel_color_set_csc(&intel_cstate->base);
-        intel_color_load_luts(&intel_cstate->base);
+        intel_color_set_csc(intel_cstate);
+        intel_color_load_luts(intel_cstate);
     }

     /* Perform vblank evasion around commit operation */

@@ -13585,7 +13604,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
 {
     struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

-    if (!IS_GEN2(dev_priv))
+    if (!IS_GEN(dev_priv, 2))
         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

     if (crtc_state->has_pch_encoder) {

@@ -14047,7 +14066,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
     struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
     int i;

-    crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
+    crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
     if (!crtc->num_scalers)
         return;

@@ -14133,7 +14152,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)

     drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

-    intel_color_init(&intel_crtc->base);
+    intel_color_init(intel_crtc);

     WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

@@ -14192,7 +14211,7 @@ static bool has_edp_a(struct drm_i915_private *dev_priv)
     if ((I915_READ(DP_A) & DP_DETECTED) == 0)
         return false;

-    if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+    if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
         return false;

     return true;

@@ -14404,7 +14423,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
         }

         vlv_dsi_init(dev_priv);
-    } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
+    } else if (!IS_GEN(dev_priv, 2) && !IS_PINEVIEW(dev_priv)) {
         bool found = false;

         if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {

@@ -14438,7 +14457,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)

         if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
             intel_dp_init(dev_priv, DP_D, PORT_D);
-    } else if (IS_GEN2(dev_priv))
+    } else if (IS_GEN(dev_priv, 2))
         intel_dvo_init(dev_priv);

     if (SUPPORTS_TV(dev_priv))

@@ -14636,7 +14655,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
         * require the entire fb to accommodate that to avoid
         * potential runtime errors at plane configuration time.
         */
-        if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
+        if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
             is_ccs_modifier(fb->modifier))
             stride_alignment *= 4;

@@ -14841,7 +14860,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
         dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
         dev_priv->display.crtc_enable = i9xx_crtc_enable;
         dev_priv->display.crtc_disable = i9xx_crtc_disable;
-    } else if (!IS_GEN2(dev_priv)) {
+    } else if (!IS_GEN(dev_priv, 2)) {
         dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
         dev_priv->display.get_initial_plane_config =
             i9xx_get_initial_plane_config;

@@ -14857,9 +14876,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
         dev_priv->display.crtc_disable = i9xx_crtc_disable;
     }

-    if (IS_GEN5(dev_priv)) {
+    if (IS_GEN(dev_priv, 5)) {
         dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
-    } else if (IS_GEN6(dev_priv)) {
+    } else if (IS_GEN(dev_priv, 6)) {
         dev_priv->display.fdi_link_train = gen6_fdi_link_train;
     } else if (IS_IVYBRIDGE(dev_priv)) {
         /* FIXME: detect B0+ stepping and use auto training */

@@ -14991,12 +15010,12 @@ static void sanitize_watermarks(struct drm_device *dev)

 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
 {
-    if (IS_GEN5(dev_priv)) {
+    if (IS_GEN(dev_priv, 5)) {
         u32 fdi_pll_clk =
             I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

         dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
-    } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+    } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
         dev_priv->fdi_pll_freq = 270000;
     } else {
         return;

@@ -15112,10 +15131,10 @@ int intel_modeset_init(struct drm_device *dev)
     }

     /* maximum framebuffer dimensions */
-    if (IS_GEN2(dev_priv)) {
+    if (IS_GEN(dev_priv, 2)) {
         dev->mode_config.max_width = 2048;
         dev->mode_config.max_height = 2048;
-    } else if (IS_GEN3(dev_priv)) {
+    } else if (IS_GEN(dev_priv, 3)) {
         dev->mode_config.max_width = 4096;
         dev->mode_config.max_height = 4096;
     } else {

@@ -15126,7 +15145,7 @@ int intel_modeset_init(struct drm_device *dev)
     if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
         dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
         dev->mode_config.cursor_height = 1023;
-    } else if (IS_GEN2(dev_priv)) {
+    } else if (IS_GEN(dev_priv, 2)) {
         dev->mode_config.cursor_width = 64;
         dev->mode_config.cursor_height = 64;
     } else {

@@ -15850,15 +15869,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
     }

     if (IS_G4X(dev_priv)) {
-        g4x_wm_get_hw_state(dev);
+        g4x_wm_get_hw_state(dev_priv);
         g4x_wm_sanitize(dev_priv);
     } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-        vlv_wm_get_hw_state(dev);
+        vlv_wm_get_hw_state(dev_priv);
         vlv_wm_sanitize(dev_priv);
     } else if (INTEL_GEN(dev_priv) >= 9) {
-        skl_wm_get_hw_state(dev);
+        skl_wm_get_hw_state(dev_priv);
     } else if (HAS_PCH_SPLIT(dev_priv)) {
-        ilk_wm_get_hw_state(dev);
+        ilk_wm_get_hw_state(dev_priv);
     }

     for_each_intel_crtc(dev, crtc) {
@@ -121,7 +121,7 @@ enum i9xx_plane_id {
 };

 #define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')

 /*
 * Per-pipe plane identifier.

@@ -311,12 +311,12 @@ struct intel_link_m_n {

 #define for_each_universal_plane(__dev_priv, __pipe, __p) \
     for ((__p) = 0; \
-         (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+         (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
         (__p)++)

 #define for_each_sprite(__dev_priv, __p, __s) \
     for ((__s) = 0; \
-         (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+         (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
         (__s)++)

 #define for_each_port_masked(__port, __ports_mask) \
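These macro changes only swap the info accessor; usage is unchanged. A small hedged sketch of the iteration the macros express, with ex_* names invented for illustration and the sprite count now taken from the runtime info:

struct ex_runtime { unsigned char num_sprites[4]; };

static void ex_walk_sprites(const struct ex_runtime *rt, int pipe)
{
    int s;

    /* for_each_sprite(dev_priv, pipe, s) is essentially: */
    for (s = 0; s < rt->num_sprites[pipe]; s++) {
        /* per-sprite work; sprite_name(pipe, s) style naming
         * indexes off the same runtime count */
    }
}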
@@ -32,7 +32,6 @@
 #include <linux/notifier.h>
 #include <linux/reboot.h>
 #include <asm/byteorder.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>

@@ -304,9 +303,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
 static int icl_max_source_rate(struct intel_dp *intel_dp)
 {
     struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+    struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
     enum port port = dig_port->base.port;

-    if (port == PORT_B)
+    if (intel_port_is_combophy(dev_priv, port) &&
+        !intel_dp_is_edp(intel_dp))
         return 540000;

     return 810000;

@@ -344,7 +345,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
     if (INTEL_GEN(dev_priv) >= 10) {
         source_rates = cnl_rates;
         size = ARRAY_SIZE(cnl_rates);
-        if (IS_GEN10(dev_priv))
+        if (IS_GEN(dev_priv, 10))
             max_rate = cnl_max_source_rate(intel_dp);
         else
             max_rate = icl_max_source_rate(intel_dp);

@@ -1128,7 +1129,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
         to_i915(intel_dig_port->base.base.dev);
     uint32_t precharge, timeout;

-    if (IS_GEN6(dev_priv))
+    if (IS_GEN(dev_priv, 6))
         precharge = 3;
     else
         precharge = 5;

@@ -2055,7 +2056,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                        &limits);

     /* enable compression if the mode doesn't fit available BW */
-    if (ret) {
+    DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
+    if (ret || intel_dp->force_dsc_en) {
         ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
                           conn_state, &limits);
         if (ret < 0)

@@ -2590,7 +2592,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)

     pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
     pp = ironlake_get_pp_control(intel_dp);
-    if (IS_GEN5(dev_priv)) {
+    if (IS_GEN(dev_priv, 5)) {
         /* ILK workaround: disable reset around power sequence */
         pp &= ~PANEL_POWER_RESET;
         I915_WRITE(pp_ctrl_reg, pp);

@@ -2598,7 +2600,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
     }

     pp |= PANEL_POWER_ON;
-    if (!IS_GEN5(dev_priv))
+    if (!IS_GEN(dev_priv, 5))
         pp |= PANEL_POWER_RESET;

     I915_WRITE(pp_ctrl_reg, pp);

@@ -2607,7 +2609,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
     wait_panel_on(intel_dp);
     intel_dp->last_power_on = jiffies;

-    if (IS_GEN5(dev_priv)) {
+    if (IS_GEN(dev_priv, 5)) {
         pp |= PANEL_POWER_RESET; /* restore panel reset bit */
         I915_WRITE(pp_ctrl_reg, pp);
         POSTING_READ(pp_ctrl_reg);

@@ -2836,7 +2838,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
     * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
     * 2. Program DP PLL enable
     */
-    if (IS_GEN5(dev_priv))
+    if (IS_GEN(dev_priv, 5))
         intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

     intel_dp->DP |= DP_PLL_ENABLE;

@@ -3854,7 +3856,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
     } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
         signal_levels = ivb_cpu_edp_signal_levels(train_set);
         mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
-    } else if (IS_GEN6(dev_priv) && port == PORT_A) {
+    } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
         signal_levels = snb_cpu_edp_signal_levels(train_set);
         mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
     } else {
@@ -3996,6 +3998,42 @@ intel_dp_link_down(struct intel_encoder *encoder,
     }
 }

+static void
+intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
+{
+    u8 dpcd_ext[6];
+
+    /*
+     * Prior to DP1.3 the bit represented by
+     * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+     * if it is set DP_DPCD_REV at 0000h could be at a value less than
+     * the true capability of the panel. The only way to check is to
+     * then compare 0000h and 2200h.
+     */
+    if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+          DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+        return;
+
+    if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
+                 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
+        DRM_ERROR("DPCD failed read at extended capabilities\n");
+        return;
+    }
+
+    if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+        DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
+        return;
+    }
+
+    if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
+        return;
+
+    DRM_DEBUG_KMS("Base DPCD: %*ph\n",
+              (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
+
+    memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
+}
+
 bool
 intel_dp_read_dpcd(struct intel_dp *intel_dp)
 {

@@ -4003,6 +4041,8 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
                  sizeof(intel_dp->dpcd)) < 0)
         return false; /* aux transfer failed */

+    intel_dp_extended_receiver_capabilities(intel_dp);
+
     DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

     return intel_dp->dpcd[DP_DPCD_REV] != 0;
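The helper added above mirrors DP 1.3's extended receiver capability field: when DPCD 0x000e advertises the "extended cap present" bit, the true capabilities live at 0x2200 (DP_DP13_DPCD_REV) rather than at 0x0000. A hedged kernel-context sketch of the same base-versus-extended comparison, with error handling trimmed and a valid aux channel assumed (not the driver's code):

#include <drm/drm_dp_helper.h>

static u8 ex_effective_dpcd_rev(struct drm_dp_aux *aux)
{
    u8 base = 0;
    u8 ext[6];

    drm_dp_dpcd_readb(aux, DP_DPCD_REV, &base);

    /* DP_DP13_DPCD_REV is the 0x2200 mirror defined by DP 1.3+. */
    if (drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, ext, sizeof(ext)) ==
        sizeof(ext) && ext[0] >= base)
        return ext[0];

    return base;
}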
@@ -5033,28 +5073,38 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
     return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
 }

+static const char *tc_type_name(enum tc_port_type type)
+{
+    static const char * const names[] = {
+        [TC_PORT_UNKNOWN] = "unknown",
+        [TC_PORT_LEGACY] = "legacy",
+        [TC_PORT_TYPEC] = "typec",
+        [TC_PORT_TBT] = "tbt",
+    };
+
+    if (WARN_ON(type >= ARRAY_SIZE(names)))
+        type = TC_PORT_UNKNOWN;
+
+    return names[type];
+}
+
 static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
                     struct intel_digital_port *intel_dig_port,
                     bool is_legacy, bool is_typec, bool is_tbt)
 {
     enum port port = intel_dig_port->base.port;
     enum tc_port_type old_type = intel_dig_port->tc_type;
-    const char *type_str;

     WARN_ON(is_legacy + is_typec + is_tbt != 1);

-    if (is_legacy) {
+    if (is_legacy)
         intel_dig_port->tc_type = TC_PORT_LEGACY;
-        type_str = "legacy";
-    } else if (is_typec) {
+    else if (is_typec)
         intel_dig_port->tc_type = TC_PORT_TYPEC;
-        type_str = "typec";
-    } else if (is_tbt) {
+    else if (is_tbt)
         intel_dig_port->tc_type = TC_PORT_TBT;
-        type_str = "tbt";
-    } else {
+    else
         return;
-    }

     /* Types are not supposed to be changed at runtime. */
     WARN_ON(old_type != TC_PORT_UNKNOWN &&

@@ -5062,12 +5112,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,

     if (old_type != intel_dig_port->tc_type)
         DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
-                  type_str);
+                  tc_type_name(intel_dig_port->tc_type));
 }

-static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
-                  struct intel_digital_port *dig_port);
-
 /*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading

@@ -5102,6 +5149,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
     val = I915_READ(PORT_TX_DFLEXDPPMS);
     if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
         DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
+        WARN_ON(dig_port->tc_legacy_port);
         return false;
     }

@@ -5133,8 +5181,8 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
-static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
-                  struct intel_digital_port *dig_port)
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+               struct intel_digital_port *dig_port)
 {
     enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);

@@ -5154,6 +5202,10 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
         I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
     }

+    DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
+              port_name(dig_port->base.port),
+              tc_type_name(dig_port->tc_type));
+
     dig_port->tc_type = TC_PORT_UNKNOWN;
 }

@@ -5175,7 +5227,14 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
     bool is_legacy, is_typec, is_tbt;
     u32 dpsp;

-    is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port);
+    /*
+     * WARN if we got a legacy port HPD, but VBT didn't mark the port as
+     * legacy. Treat the port as legacy from now on.
+     */
+    if (WARN_ON(!intel_dig_port->tc_legacy_port &&
+            I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
+        intel_dig_port->tc_legacy_port = true;
+    is_legacy = intel_dig_port->tc_legacy_port;

     /*
     * The spec says we shouldn't be using the ISR bits for detecting

@@ -5187,6 +5246,7 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,

     if (!is_legacy && !is_typec && !is_tbt) {
         icl_tc_phy_disconnect(dev_priv, intel_dig_port);
+
         return false;
     }

@@ -5238,17 +5298,17 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)

     if (INTEL_GEN(dev_priv) >= 11)
         return icl_digital_port_connected(encoder);
-    else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+    else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
         return spt_digital_port_connected(encoder);
     else if (IS_GEN9_LP(dev_priv))
         return bxt_digital_port_connected(encoder);
-    else if (IS_GEN8(dev_priv))
+    else if (IS_GEN(dev_priv, 8))
         return bdw_digital_port_connected(encoder);
-    else if (IS_GEN7(dev_priv))
+    else if (IS_GEN(dev_priv, 7))
         return ivb_digital_port_connected(encoder);
-    else if (IS_GEN6(dev_priv))
+    else if (IS_GEN(dev_priv, 6))
         return snb_digital_port_connected(encoder);
-    else if (IS_GEN5(dev_priv))
+    else if (IS_GEN(dev_priv, 5))
         return ilk_digital_port_connected(encoder);

     MISSING_CASE(INTEL_GEN(dev_priv));

@@ -5495,7 +5555,7 @@ intel_dp_connector_unregister(struct drm_connector *connector)
     intel_connector_unregister(connector);
 }

-void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
 {
     struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
     struct intel_dp *intel_dp = &intel_dig_port->dp;

@@ -5518,9 +5578,14 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
     }

     intel_dp_aux_fini(intel_dp);
+}
+
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+    intel_dp_encoder_flush_work(encoder);

     drm_encoder_cleanup(encoder);
-    kfree(intel_dig_port);
+    kfree(enc_to_dig_port(encoder));
 }

 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)

@@ -5583,7 +5648,12 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
     }

     reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
-    return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO;
+    if (reply != DP_AUX_NATIVE_REPLY_ACK) {
+        DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
+                  reply);
+        return -EIO;
+    }
+    return 0;
 }

 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,

@@ -6366,8 +6436,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
     }

     mutex_lock(&dev_priv->drrs.mutex);
-    if (WARN_ON(dev_priv->drrs.dp)) {
-        DRM_ERROR("DRRS already enabled\n");
+    if (dev_priv->drrs.dp) {
+        DRM_DEBUG_KMS("DRRS already enabled\n");
         goto unlock;
     }
@@ -23,7 +23,6 @@
 *
 */

-#include <drm/drmP.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include <drm/drm_atomic_helper.h>
@@ -243,6 +243,9 @@ struct intel_encoder {
     void (*post_pll_disable)(struct intel_encoder *,
                  const struct intel_crtc_state *,
                  const struct drm_connector_state *);
+    void (*update_pipe)(struct intel_encoder *,
+                const struct intel_crtc_state *,
+                const struct drm_connector_state *);
     /* Read out the current hw state of this connector, returning true if
     * the encoder is active. If the encoder is enabled it also set the pipe
     * it is connected to in the pipe parameter. */

@@ -1208,6 +1211,9 @@ struct intel_dp {

     /* Displayport compliance testing */
     struct intel_dp_compliance compliance;
+
+    /* Display stream compression testing */
+    bool force_dsc_en;
 };

 enum lspcon_vendor {

@@ -1233,6 +1239,7 @@ struct intel_digital_port {
     /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
     enum aux_ch aux_ch;
     enum intel_display_power_domain ddi_io_power_domain;
+    bool tc_legacy_port:1;
     enum tc_port_type tc_type;

     void (*write_infoframe)(struct intel_encoder *encoder,

@@ -1805,7 +1812,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
                        bool enable);
 void intel_dp_encoder_reset(struct drm_encoder *encoder);
 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
-void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
 int intel_dp_compute_config(struct intel_encoder *encoder,
                 struct intel_crtc_state *pipe_config,
                 struct drm_connector_state *conn_state);

@@ -1873,6 +1880,8 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
 bool intel_digital_port_connected(struct intel_encoder *encoder);
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+               struct intel_digital_port *dig_port);

 /* intel_dp_aux_backlight.c */
 int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);

@@ -2199,16 +2208,16 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
-void g4x_wm_get_hw_state(struct drm_device *dev);
-void vlv_wm_get_hw_state(struct drm_device *dev);
-void ilk_wm_get_hw_state(struct drm_device *dev);
-void skl_wm_get_hw_state(struct drm_device *dev);
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
                    struct skl_ddb_entry *ddb_y,
                    struct skl_ddb_entry *ddb_uv);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
               struct skl_ddb_allocation *ddb /* out */);
-void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
                   struct skl_pipe_wm *out);
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
 void vlv_wm_sanitize(struct drm_i915_private *dev_priv);

@@ -2326,10 +2335,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
                     struct intel_plane_state *intel_state);

 /* intel_color.c */
-void intel_color_init(struct drm_crtc *crtc);
-int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void intel_color_set_csc(struct drm_crtc_state *crtc_state);
-void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+void intel_color_init(struct intel_crtc *crtc);
+int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_set_csc(struct intel_crtc_state *crtc_state);
+void intel_color_load_luts(struct intel_crtc_state *crtc_state);

 /* intel_lspcon.c */
 bool lspcon_init(struct intel_digital_port *intel_dig_port);
@@ -24,7 +24,6 @@
 #ifndef _INTEL_DSI_H
 #define _INTEL_DSI_H

-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_mipi_dsi.h>
 #include "intel_drv.h"
@@ -24,15 +24,15 @@
 *
 */

-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/i915_drm.h>
 #include <linux/gpio/consumer.h>
+#include <linux/mfd/intel_soc_pmic.h>
 #include <linux/slab.h>
-#include <video/mipi_display.h>
-#include <asm/intel-mid.h>
+#include <video/mipi_display.h>
+#include <asm/unaligned.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_dsi.h"
@@ -393,7 +393,25 @@ static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)

 static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
 {
-    DRM_DEBUG_KMS("Skipping PMIC element execution\n");
+#ifdef CONFIG_PMIC_OPREGION
+    u32 value, mask, reg_address;
+    u16 i2c_address;
+    int ret;
+
+    /* byte 0 aka PMIC Flag is reserved */
+    i2c_address = get_unaligned_le16(data + 1);
+    reg_address = get_unaligned_le32(data + 3);
+    value = get_unaligned_le32(data + 7);
+    mask = get_unaligned_le32(data + 11);
+
+    ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
+                            reg_address,
+                            value, mask);
+    if (ret)
+        DRM_ERROR("%s failed, error: %d\n", __func__, ret);
+#else
+    DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
+#endif

     return data + 15;
 }
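For reference, the PMIC sequence element decoded above is a fixed 15-byte record. A hedged sketch of the layout implied by the get_unaligned_le*() offsets; the struct name is invented, and kernel types (u8/u16/u32, __packed) are assumed:

struct ex_mipi_pmic_element {
    u8  flags;          /* byte 0: reserved "PMIC flag"     */
    u16 i2c_address;    /* bytes 1-2,  little endian        */
    u32 reg_address;    /* bytes 3-6,  little endian        */
    u32 value;          /* bytes 7-10, little endian        */
    u32 mask;           /* bytes 11-14, little endian       */
} __packed;             /* sizeof() == 15, hence "data + 15" */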
@@ -26,7 +26,6 @@
 */
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include "intel_drv.h"
@@ -261,6 +261,31 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
             info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
 }

+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
+{
+    struct drm_i915_private *dev_priv = engine->i915;
+    i915_reg_t hwstam;
+
+    /*
+     * Though they added more rings on g4x/ilk, they did not add
+     * per-engine HWSTAM until gen6.
+     */
+    if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
+        return;
+
+    hwstam = RING_HWSTAM(engine->mmio_base);
+    if (INTEL_GEN(dev_priv) >= 3)
+        I915_WRITE(hwstam, mask);
+    else
+        I915_WRITE16(hwstam, mask);
+}
+
+static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
+{
+    /* Mask off all writes into the unknown HWSP */
+    intel_engine_set_hwsp_writemask(engine, ~0u);
+}
+
 static int
 intel_engine_setup(struct drm_i915_private *dev_priv,
            enum intel_engine_id id)
@@ -312,6 +337,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,

     ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

+    /* Scrub mmio state on takeover */
+    intel_engine_sanitize_mmio(engine);
+
     dev_priv->engine_class[info->class][info->instance] = engine;
     dev_priv->engine[id] = engine;
     return 0;

@@ -365,7 +393,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
         goto cleanup;
     }

-    device_info->num_rings = hweight32(mask);
+    RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);

     i915_check_and_clear_faults(dev_priv);

@@ -426,27 +454,9 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
     return err;
 }

-void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
     struct drm_i915_private *dev_priv = engine->i915;

-    /* Our semaphore implementation is strictly monotonic (i.e. we proceed
-     * so long as the semaphore value in the register/page is greater
-     * than the sync value), so whenever we reset the seqno,
-     * so long as we reset the tracking semaphore value to 0, it will
-     * always be before the next request's seqno. If we don't reset
-     * the semaphore value, then when the seqno moves backwards all
-     * future waits will complete instantly (causing rendering corruption).
-     */
-    if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
-        I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
-        I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-        if (HAS_VEBOX(dev_priv))
-            I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
-    }
-
     intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
     clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

     /* After manually advancing the seqno, fake the interrupt in case
     * there are any waiters for that seqno.

@@ -495,6 +505,9 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)

 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
+    /* Prevent writes into HWSP after returning the page to the system */
+    intel_engine_set_hwsp_writemask(engine, ~0u);
+
     if (HWS_NEEDS_PHYSICAL(engine->i915)) {
         void *addr = fetch_and_zero(&engine->status_page.page_addr);

@@ -769,12 +782,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)

 u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
 {
-    const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+    const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
     u32 mcr_s_ss_select;
     u32 slice = fls(sseu->slice_mask);
     u32 subslice = fls(sseu->subslice_mask[slice]);

-    if (IS_GEN10(dev_priv))
+    if (IS_GEN(dev_priv, 10))
         mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
                   GEN8_MCR_SUBSLICE(subslice);
     else if (INTEL_GEN(dev_priv) >= 11)

@@ -1030,22 +1043,34 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
         engine->set_default_submission(engine);
 }

+static bool reset_engines(struct drm_i915_private *i915)
+{
+    if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+        return false;
+
+    return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+}
+
 /**
 * intel_engines_sanitize: called after the GPU has lost power
 * @i915: the i915 device
+ * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_engines_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
-void intel_engines_sanitize(struct drm_i915_private *i915)
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
 {
     struct intel_engine_cs *engine;
     enum intel_engine_id id;

     GEM_TRACE("\n");

+    if (!reset_engines(i915) && !force)
+        return;
+
     for_each_engine(engine, i915, id) {
         if (engine->reset.reset)
             engine->reset.reset(engine, NULL);

@@ -1248,7 +1273,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
         &engine->execlists;
     u64 addr;

-    if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
+    if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
         drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
     drm_printf(m, "\tRING_START: 0x%08x\n",
            I915_READ(RING_START(engine->mmio_base)));

@@ -1269,16 +1294,6 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
         drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
     }

-    if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
-        drm_printf(m, "\tSYNC_0: 0x%08x\n",
-               I915_READ(RING_SYNC_0(engine->mmio_base)));
-        drm_printf(m, "\tSYNC_1: 0x%08x\n",
-               I915_READ(RING_SYNC_1(engine->mmio_base)));
-        if (HAS_VEBOX(dev_priv))
-            drm_printf(m, "\tSYNC_2: 0x%08x\n",
-                   I915_READ(RING_SYNC_2(engine->mmio_base)));
-    }
-
     addr = intel_engine_get_active_head(engine);
     drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
            upper_32_bits(addr), lower_32_bits(addr));

@@ -1532,11 +1547,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
     spin_unlock(&b->rb_lock);
     local_irq_restore(flags);

-    drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
-           engine->irq_posted,
-           yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
-                  &engine->irq_posted)));
-
     drm_printf(m, "HWSP:\n");
     hexdump(m, engine->status_page.page_addr, PAGE_SIZE);
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
     int lines;

     intel_fbc_get_plane_source_size(cache, NULL, &lines);
-    if (IS_GEN7(dev_priv))
+    if (IS_GEN(dev_priv, 7))
         lines = min(lines, 2048);
     else if (INTEL_GEN(dev_priv) >= 8)
         lines = min(lines, 2560);

@@ -127,7 +127,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
         cfb_pitch = params->fb.stride;

     /* FBC_CTL wants 32B or 64B units */
-    if (IS_GEN2(dev_priv))
+    if (IS_GEN(dev_priv, 2))
         cfb_pitch = (cfb_pitch / 32) - 1;
     else
         cfb_pitch = (cfb_pitch / 64) - 1;

@@ -136,7 +136,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
     for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
         I915_WRITE(FBC_TAG(i), 0);

-    if (IS_GEN4(dev_priv)) {
+    if (IS_GEN(dev_priv, 4)) {
         u32 fbc_ctl2;

         /* Set it up... */

@@ -233,9 +233,9 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)

     if (params->flags & PLANE_HAS_FENCE) {
         dpfc_ctl |= DPFC_CTL_FENCE_EN;
-        if (IS_GEN5(dev_priv))
+        if (IS_GEN(dev_priv, 5))
             dpfc_ctl |= params->vma->fence->id;
-        if (IS_GEN6(dev_priv)) {
+        if (IS_GEN(dev_priv, 6)) {
             I915_WRITE(SNB_DPFC_CTL_SA,
                    SNB_CPU_FENCE_ENABLE |
                    params->vma->fence->id);

@@ -243,7 +243,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                    params->crtc.fence_y_offset);
         }
     } else {
-        if (IS_GEN6(dev_priv)) {
+        if (IS_GEN(dev_priv, 6)) {
             I915_WRITE(SNB_DPFC_CTL_SA, 0);
             I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
         }

@@ -282,7 +282,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
     int threshold = dev_priv->fbc.threshold;

     /* Display WA #0529: skl, kbl, bxt. */
-    if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+    if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
         u32 val = I915_READ(CHICKEN_MISC_4);

         val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

@@ -581,10 +581,10 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
     if (stride < 512)
         return false;

-    if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
+    if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
         return stride == 4096 || stride == 8192;

-    if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
+    if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
         return false;

     if (stride > 16384)

@@ -603,7 +603,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
     case DRM_FORMAT_XRGB1555:
     case DRM_FORMAT_RGB565:
         /* 16bpp not supported on gen2 */
-        if (IS_GEN2(dev_priv))
+        if (IS_GEN(dev_priv, 2))
             return false;
         /* WaFbcOnly1to1Ratio:ctg */
         if (IS_G4X(dev_priv))

@@ -626,7 +626,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
     struct intel_fbc *fbc = &dev_priv->fbc;
     unsigned int effective_w, effective_h, max_w, max_h;

-    if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
+    if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+        max_w = 5120;
+        max_h = 4096;
+    } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
         max_w = 4096;
         max_h = 4096;
     } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {

@@ -784,7 +787,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
     * having a Y offset that isn't divisible by 4 causes FIFO underrun
     * and screen flicker.
     */
-    if (IS_GEN(dev_priv, 9, 10) &&
+    if (IS_GEN_RANGE(dev_priv, 9, 10) &&
         (fbc->state_cache.plane.adjusted_y & 3)) {
         fbc->no_fbc_reason = "plane Y offset is misaligned";
         return false;

@@ -839,7 +842,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,

     params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

-    if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+    if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
         params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
                               32 * fbc->threshold) * 8;
 }

@@ -1126,8 +1129,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
     if (!fbc_supported(dev_priv))
         return;

-    WARN_ON(crtc->active);
-
     mutex_lock(&fbc->lock);
     if (fbc->crtc == crtc)
         __intel_fbc_disable(dev_priv);
@@ -37,7 +37,6 @@
 #include <linux/init.h>
 #include <linux/vga_switcheroo.h>

-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include "intel_drv.h"
@@ -260,9 +260,9 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,

     if (HAS_GMCH_DISPLAY(dev_priv))
         i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
-    else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+    else if (IS_GEN_RANGE(dev_priv, 5, 6))
         ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
-    else if (IS_GEN7(dev_priv))
+    else if (IS_GEN(dev_priv, 7))
         ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
     else if (INTEL_GEN(dev_priv) >= 8)
         broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

@@ -423,7 +423,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)

         if (HAS_GMCH_DISPLAY(dev_priv))
             i9xx_check_fifo_underruns(crtc);
-        else if (IS_GEN7(dev_priv))
+        else if (IS_GEN(dev_priv, 7))
             ivybridge_check_fifo_underruns(crtc);
     }
@@ -60,7 +60,6 @@
 * functions is deprecated and should be avoided.
 */

-#include <drm/drmP.h>

 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
@@ -77,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
 		guc_fw->path = I915_KBL_GUC_UCODE;
 		guc_fw->major_ver_wanted = KBL_FW_MAJOR;
 		guc_fw->minor_ver_wanted = KBL_FW_MINOR;
-	} else {
-		dev_info(dev_priv->drm.dev,
-			 "%s: No firmware known for this platform!\n",
-			 intel_uc_fw_type_repr(guc_fw->type));
 	}
 }
@@ -115,7 +111,7 @@ static void guc_prepare_xfer(struct intel_guc *guc)
 	else
 		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

-	if (IS_GEN9(dev_priv)) {
+	if (IS_GEN(dev_priv, 9)) {
 		/* DOP Clock Gating Enable for GuC clocks */
 		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
 					    I915_READ(GEN7_MISCCPCTL)));
@@ -572,7 +572,8 @@ static void inject_preempt_context(struct work_struct *work)
 	if (engine->id == RCS) {
 		cs = gen8_emit_ggtt_write_rcs(cs,
 					      GUC_PREEMPT_FINISHED,
-					      addr);
+					      addr,
+					      PIPE_CONTROL_CS_STALL);
 	} else {
 		cs = gen8_emit_ggtt_write(cs,
 					  GUC_PREEMPT_FINISHED,

@@ -24,144 +24,6 @@

 #include "i915_drv.h"

-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
-	ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
-	return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
-			 MI_SEMAPHORE_REGISTER);
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
-				 u64 offset)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-	struct intel_engine_cs *signaller;
-	enum intel_engine_id id;
-
-	for_each_engine(signaller, dev_priv, id) {
-		if (engine == signaller)
-			continue;
-
-		if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
-			return signaller;
-	}
-
-	DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x\n",
-			 engine->name, ipehr);
-
-	return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	void __iomem *vaddr;
-	u32 cmd, ipehr, head;
-	u64 offset = 0;
-	int i, backwards;
-
-	/*
-	 * This function does not support execlist mode - any attempt to
-	 * proceed further into this function will result in a kernel panic
-	 * when dereferencing ring->buffer, which is not set up in execlist
-	 * mode.
-	 *
-	 * The correct way of doing it would be to derive the currently
-	 * executing ring buffer from the current context, which is derived
-	 * from the currently running request. Unfortunately, to get the
-	 * current request we would have to grab the struct_mutex before doing
-	 * anything else, which would be ill-advised since some other thread
-	 * might have grabbed it already and managed to hang itself, causing
-	 * the hang checker to deadlock.
-	 *
-	 * Therefore, this function does not support execlist mode in its
-	 * current form. Just return NULL and move on.
-	 */
-	if (engine->buffer == NULL)
-		return NULL;
-
-	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-	if (!ipehr_is_semaphore_wait(engine, ipehr))
-		return NULL;
-
-	/*
-	 * HEAD is likely pointing to the dword after the actual command,
-	 * so scan backwards until we find the MBOX. But limit it to just 3
-	 * or 4 dwords depending on the semaphore wait command size.
-	 * Note that we don't care about ACTHD here since that might
-	 * point at at batch, and semaphores are always emitted into the
-	 * ringbuffer itself.
-	 */
-	head = I915_READ_HEAD(engine) & HEAD_ADDR;
-	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
-	vaddr = (void __iomem *)engine->buffer->vaddr;
-
-	for (i = backwards; i; --i) {
-		/*
-		 * Be paranoid and presume the hw has gone off into the wild -
-		 * our ring is smaller than what the hardware (and hence
-		 * HEAD_ADDR) allows. Also handles wrap-around.
-		 */
-		head &= engine->buffer->size - 1;
-
-		/* This here seems to blow up */
-		cmd = ioread32(vaddr + head);
-		if (cmd == ipehr)
-			break;
-
-		head -= 4;
-	}
-
-	if (!i)
-		return NULL;
-
-	*seqno = ioread32(vaddr + head + 4) + 1;
-	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_engine_cs *signaller;
-	u32 seqno;
-
-	engine->hangcheck.deadlock++;
-
-	signaller = semaphore_waits_for(engine, &seqno);
-	if (signaller == NULL)
-		return -1;
-
-	if (IS_ERR(signaller))
-		return 0;
-
-	/* Prevent pathological recursion due to driver bugs */
-	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
-		return -1;
-
-	if (intel_engine_signaled(signaller, seqno))
-		return 1;
-
-	/* cursory check for an unkickable deadlock */
-	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
-	    semaphore_passed(signaller) < 0)
-		return -1;
-
-	return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, dev_priv, id)
-		engine->hangcheck.deadlock = 0;
-}
-
 static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
 {
 	u32 tmp = current_instdone | *old_instdone;

@@ -236,7 +98,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 	if (ha != ENGINE_DEAD)
 		return ha;

-	if (IS_GEN2(dev_priv))
+	if (IS_GEN(dev_priv, 2))
 		return ENGINE_DEAD;

 	/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -252,37 +114,12 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 		return ENGINE_WAIT_KICK;
 	}

-	if (IS_GEN(dev_priv, 6, 7) && tmp & RING_WAIT_SEMAPHORE) {
-		switch (semaphore_passed(engine)) {
-		default:
-			return ENGINE_DEAD;
-		case 1:
-			i915_handle_error(dev_priv, ALL_ENGINES, 0,
-					  "stuck semaphore on %s",
-					  engine->name);
-			I915_WRITE_CTL(engine, tmp);
-			return ENGINE_WAIT_KICK;
-		case 0:
-			return ENGINE_WAIT;
-		}
-	}
-
 	return ENGINE_DEAD;
 }

 static void hangcheck_load_sample(struct intel_engine_cs *engine,
 				  struct intel_engine_hangcheck *hc)
 {
-	/* We don't strictly need an irq-barrier here, as we are not
-	 * serving an interrupt request, be paranoid in case the
-	 * barrier has side-effects (such as preventing a broken
-	 * cacheline snoop) and so be sure that we can see the seqno
-	 * advance. If the seqno should stick, due to a stale
-	 * cacheline, we would erroneously declare the GPU hung.
-	 */
-	if (engine->irq_seqno_barrier)
-		engine->irq_seqno_barrier(engine);
-
 	hc->acthd = intel_engine_get_active_head(engine);
 	hc->seqno = intel_engine_get_seqno(engine);
 }
@@ -433,8 +270,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	for_each_engine(engine, dev_priv, id) {
 		struct intel_engine_hangcheck hc;

-		semaphore_clear_deadlocks(dev_priv);
-
 		hangcheck_load_sample(engine, &hc);
 		hangcheck_accumulate_sample(engine, &hc);
 		hangcheck_store_sample(engine, &hc);

@@ -6,7 +6,6 @@
  * Sean Paul <seanpaul@chromium.org>
  */

-#include <drm/drmP.h>
 #include <drm/drm_hdcp.h>
 #include <linux/i2c.h>
 #include <linux/random.h>
@@ -15,6 +14,7 @@
 #include "i915_reg.h"

 #define KEY_LOAD_TRIES	5
+#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50

 static
 bool intel_hdcp_is_ksv_valid(u8 *ksv)
@@ -157,10 +157,11 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 	/*
	 * Initiate loading the HDCP key from fuses.
	 *
-	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL
-	 * differ in the key load trigger process from other platforms.
+	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
+	 * platforms except BXT and GLK, differ in the key load trigger process
+	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		mutex_lock(&dev_priv->pcu_lock);
 		ret = sandybridge_pcode_write(dev_priv,
 					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
@@ -636,7 +637,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,

 	/* Wait for encryption confirmation */
 	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
-				    HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
+				    HDCP_STATUS_ENC, HDCP_STATUS_ENC,
+				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
 		DRM_ERROR("Timed out waiting for encryption\n");
 		return -ETIMEDOUT;
 	}
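The two intel_wait_for_register() call sites in these HDCP hunks read differently because of their (mask, value) pair: the enable path waits for the HDCP_STATUS_ENC bit to become set, while the disable path below passes (~0, 0), i.e. waits for every bit of the status register to clear. A hedged sketch of the polling contract these calls assume (kernel context; the helper below is illustrative, not the i915 implementation):

    /* Sketch: succeed once (read(reg) & mask) == value, else time out. */
    static int wait_for_reg(u32 (*rd)(void), u32 mask, u32 value,
                            unsigned int timeout_ms)
    {
            while (timeout_ms--) {
                    if ((rd() & mask) == value)
                            return 0;
                    msleep(1);      /* 1 ms poll interval */
            }
            return -ETIMEDOUT;
    }

Replacing the bare "20" with ENCRYPT_STATUS_CHANGE_TIMEOUT_MS keeps the enable and disable timeouts from silently drifting apart.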

@@ -666,7 +668,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)

 	I915_WRITE(PORT_HDCP_CONF(port), 0);
 	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
-				    20)) {
+				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
 		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
 		return -ETIMEDOUT;
 	}
@@ -768,8 +770,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
 {
 	/* PORT E doesn't have HDCP, and PORT F is disabled */
-	return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
-		!IS_CHERRYVIEW(dev_priv) && port < PORT_E);
+	return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
 }

 int intel_hdcp_init(struct intel_connector *connector,

@@ -30,7 +30,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/hdmi.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>

@@ -23,7 +23,6 @@

 #include <linux/kernel.h>

-#include <drm/drmP.h>
 #include <drm/i915_drm.h>

 #include "i915_drv.h"

@@ -23,8 +23,8 @@
  */

 #define BXT_HUC_FW_MAJOR 01
-#define BXT_HUC_FW_MINOR 07
-#define BXT_BLD_NUM 1398
+#define BXT_HUC_FW_MINOR 8
+#define BXT_BLD_NUM 2893

 #define SKL_HUC_FW_MAJOR 01
 #define SKL_HUC_FW_MINOR 07
@@ -76,9 +76,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
 		huc_fw->path = I915_KBL_HUC_UCODE;
 		huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
 		huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
-	} else {
-		DRM_WARN("%s: No firmware known for this platform!\n",
-			 intel_uc_fw_type_repr(huc_fw->type));
 	}
 }

@@ -29,7 +29,6 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/export.h>
-#include <drm/drmP.h>
 #include <drm/drm_hdcp.h>
 #include "intel_drv.h"
 #include <drm/i915_drm.h>

@@ -133,7 +133,6 @@
  */
 #include <linux/interrupt.h>

-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_gem_render_state.h"
@@ -363,31 +362,12 @@ execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
 	trace_i915_request_out(rq);
 }

-static void
-execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
-{
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-}
-
 static u64 execlists_update_context(struct i915_request *rq)
 {
-	struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
 	struct intel_context *ce = rq->hw_context;
-	u32 *reg_state = ce->lrc_reg_state;
-
-	reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);

-	/*
-	 * True 32b PPGTT with dynamic page allocation: update PDP
-	 * registers and point the unallocated PDPs to scratch page.
-	 * PML4 is allocated during ppgtt init, so this is not needed
-	 * in 48-bit mode.
-	 */
-	if (!i915_vm_is_48bit(&ppgtt->vm))
-		execlists_update_context_pdps(ppgtt, reg_state);
+	ce->lrc_reg_state[CTX_RING_TAIL + 1] =
+		intel_ring_set_tail(rq->ring, rq->tail);

 	/*
	 * Make sure the context image is complete before we submit it to HW.
@@ -770,6 +750,13 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 	execlists_clear_all_active(execlists);
 }

+static inline void
+invalidate_csb_entries(const u32 *first, const u32 *last)
+{
+	clflush((void *)first);
+	clflush((void *)last);
+}
+
 static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 {
 	const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
@@ -785,6 +772,9 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
	 */
 	execlists->csb_head = reset_value;
 	WRITE_ONCE(*execlists->csb_write, reset_value);
+
+	invalidate_csb_entries(&execlists->csb_status[0],
+			       &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
 }

 static void nop_submission_tasklet(unsigned long data)
@@ -1020,6 +1010,19 @@ static void process_csb(struct intel_engine_cs *engine)
 	} while (head != tail);

 	execlists->csb_head = head;
+
+	/*
+	 * Gen11 has proven to fail wrt global observation point between
+	 * entry and tail update, failing on the ordering and thus
+	 * we see an old entry in the context status buffer.
+	 *
+	 * Forcibly evict out entries for the next gpu csb update,
+	 * to increase the odds that we get a fresh entries with non
+	 * working hardware. The cost for doing so comes out mostly with
+	 * the wash as hardware, working or not, will need to do the
+	 * invalidation before.
+	 */
+	invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
 }

 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
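The new invalidate_csb_entries() helper uses clflush to evict the CPU's cached copy of the hardware-written context status buffer, forcing the next read to fetch fresh data from memory. A minimal user-space-flavoured sketch of the same idea, assuming x86 and the GCC/Clang SSE2 intrinsics (the kernel uses its own clflush() wrapper instead):

    /* Illustrative sketch: force a refetch of a HW-updated buffer on x86. */
    #include <stddef.h>
    #include <emmintrin.h>          /* _mm_clflush, _mm_mfence */

    static void refetch_hint(const volatile unsigned int *buf, size_t bytes)
    {
            for (size_t off = 0; off < bytes; off += 64)    /* 64B lines */
                    _mm_clflush((const char *)buf + off);
            _mm_mfence();           /* order the flushes before the next read */
    }

Flushing both ends of the CSB covers it because the whole buffer fits within two cachelines on these parts.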

@@ -1247,29 +1250,88 @@ execlists_context_pin(struct intel_engine_cs *engine,
 	return __execlists_context_pin(engine, ctx, ce);
 }

+static int emit_pdps(struct i915_request *rq)
+{
+	const struct intel_engine_cs * const engine = rq->engine;
+	struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
+	int err, i;
+	u32 *cs;
+
+	GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+	/*
+	 * Beware ye of the dragons, this sequence is magic!
+	 *
+	 * Small changes to this sequence can cause anything from
+	 * GPU hangs to forcewake errors and machine lockups!
+	 */
+
+	/* Flush any residual operations from the context load */
+	err = engine->emit_flush(rq, EMIT_FLUSH);
+	if (err)
+		return err;
+
+	/* Magic required to prevent forcewake errors! */
+	err = engine->emit_flush(rq, EMIT_INVALIDATE);
+	if (err)
+		return err;
+
+	cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	/* Ensure the LRI have landed before we invalidate & continue */
+	*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+	for (i = GEN8_3LVL_PDPES; i--; ) {
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+		*cs++ = upper_32_bits(pd_daddr);
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+		*cs++ = lower_32_bits(pd_daddr);
+	}
+	*cs++ = MI_NOOP;
+
+	intel_ring_advance(rq, cs);
+
+	/* Be doubly sure the LRI have landed before proceeding */
+	err = engine->emit_flush(rq, EMIT_FLUSH);
+	if (err)
+		return err;
+
+	/* Re-invalidate the TLB for luck */
+	return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
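The intel_ring_begin() request in emit_pdps() reserves exactly what the loop emits. With GEN8_3LVL_PDPES page-directory pointers (four on these platforms) the dword budget is: 1 dword for the MI_LOAD_REGISTER_IMM header, 4 dwords per PDP (upper and lower register offset/value pairs), and 1 trailing MI_NOOP, i.e. 4 * GEN8_3LVL_PDPES + 2 = 18 dwords. A compile-time restatement of that bookkeeping (standalone sketch; the macro value is assumed from the gen8 3-level ppgtt):

    /* Sketch: sanity-check the dword budget of the LRI sequence above. */
    #define GEN8_3LVL_PDPES 4
    #define LRI_HEADER      1   /* MI_LOAD_REGISTER_IMM(2 * PDPES) */
    #define PER_PDP_DWORDS  4   /* UDW offset, UDW value, LDW offset, LDW value */
    #define TRAILING_NOOP   1

    _Static_assert(LRI_HEADER + GEN8_3LVL_PDPES * PER_PDP_DWORDS + TRAILING_NOOP
                   == 4 * GEN8_3LVL_PDPES + 2,
                   "ring allocation must match command emission");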

 static int execlists_request_alloc(struct i915_request *request)
 {
 	int ret;

 	GEM_BUG_ON(!request->hw_context->pin_count);

-	/* Flush enough space to reduce the likelihood of waiting after
+	/*
+	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
 	request->reserved_space += EXECLISTS_REQUEST_SIZE;

 	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
 	if (ret)
 		return ret;

-	/* Note that after this point, we have committed to using
+	/*
+	 * Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

 	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+	if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+		ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+	else
+		ret = emit_pdps(request);
 	if (ret)
 		return ret;

 	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
 	return 0;
 }
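The reserved_space adjustment around the flush is a general trick: temporarily enlarge the request's reservation so the potentially blocking wait for ring space happens once, up front, and the closing breadcrumb emission can never fail for lack of room. A stripped-down sketch of the pattern (struct and helper names here are hypothetical, not i915 API):

    /* Illustrative sketch of the reserve-then-release pattern. */
    struct ring_request {
            unsigned int reserved_space;  /* space held back for the breadcrumb */
    };

    static int request_alloc(struct ring_request *rq, unsigned int breadcrumb_sz)
    {
            int ret;

            rq->reserved_space += breadcrumb_sz;  /* reserve worst case up front */
            ret = wait_for_ring_space(rq);        /* may block; hypothetical helper */
            if (ret)
                    return ret;

            rq->reserved_space -= breadcrumb_sz;  /* body may now use the space */
            return 0;
    }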

@@ -1592,7 +1654,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;

-	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */

 	/*
	 * Make sure we're not enabling the new 12-deep CSB
@@ -1633,6 +1695,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
 	intel_engine_apply_workarounds(engine);
+	intel_engine_apply_whitelist(engine);

 	intel_mocs_init_engine(engine);

@@ -1649,43 +1712,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 	return 0;
 }

-static int gen8_init_render_ring(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen8_init_common_ring(engine);
-	if (ret)
-		return ret;
-
-	intel_engine_apply_whitelist(engine);
-
-	/* We need to disable the AsyncFlip performance optimisations in order
-	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
-	 * programmed to '1' on all products.
-	 *
-	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
-	 */
-	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
-
-	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
-
-	return 0;
-}
-
-static int gen9_init_render_ring(struct intel_engine_cs *engine)
-{
-	int ret;
-
-	ret = gen8_init_common_ring(engine);
-	if (ret)
-		return ret;
-
-	intel_engine_apply_whitelist(engine);
-
-	return 0;
-}
-
 static struct i915_request *
 execlists_reset_prepare(struct intel_engine_cs *engine)
 {
@@ -1841,56 +1867,11 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
		  atomic_read(&execlists->tasklet.count));
 }

-static int intel_logical_ring_emit_pdps(struct i915_request *rq)
-{
-	struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
-	struct intel_engine_cs *engine = rq->engine;
-	const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
-	u32 *cs;
-	int i;
-
-	cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	*cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
-	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
-		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
-		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
-		*cs++ = upper_32_bits(pd_daddr);
-		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
-		*cs++ = lower_32_bits(pd_daddr);
-	}
-
-	*cs++ = MI_NOOP;
-	intel_ring_advance(rq, cs);
-
-	return 0;
-}
-
 static int gen8_emit_bb_start(struct i915_request *rq,
			      u64 offset, u32 len,
			      const unsigned int flags)
 {
 	u32 *cs;
-	int ret;
-
-	/* Don't rely in hw updating PDPs, specially in lite-restore.
-	 * Ideally, we should set Force PD Restore in ctx descriptor,
-	 * but we can't. Force Restore would be a second option, but
-	 * it is unsafe in case of lite-restore (because the ctx is
-	 * not idle). PML4 is allocated during ppgtt init so this is
-	 * not needed in 48-bit.*/
-	if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
-	    !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
-	    !intel_vgpu_active(rq->i915)) {
-		ret = intel_logical_ring_emit_pdps(rq);
-		if (ret)
-			return ret;
-
-		rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
-	}

 	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
@@ -1923,6 +1904,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,

 	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 	*cs++ = MI_NOOP;
+
 	intel_ring_advance(rq, cs);

 	return 0;
@@ -2007,7 +1989,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
	 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
	 * pipe control.
	 */
-	if (IS_GEN9(request->i915))
+	if (IS_GEN(request->i915, 9))
 		vf_flush_wa = true;

 	/* WaForGAMHang:kbl */
@@ -2078,10 +2060,18 @@ static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 	/* We're using qword write, seqno should be aligned to 8 bytes. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);

-	cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
-				      intel_hws_seqno_address(request->engine));
+	cs = gen8_emit_ggtt_write_rcs(cs,
+				      request->global_seqno,
+				      intel_hws_seqno_address(request->engine),
+				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+				      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+				      PIPE_CONTROL_DC_FLUSH_ENABLE |
+				      PIPE_CONTROL_FLUSH_ENABLE |
+				      PIPE_CONTROL_CS_STALL);

 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

 	request->tail = intel_ring_offset(request, cs);
 	assert_ring_tail_valid(request->ring, request->tail);

@@ -2244,6 +2234,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;

+	intel_engine_init_workarounds(engine);
+
 	if (HAS_LOGICAL_RING_ELSQ(i915)) {
 		execlists->submit_reg = i915->regs +
 			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2276,19 +2268,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)

 int logical_render_ring_init(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;

 	logical_ring_setup(engine);

 	if (HAS_L3_DPF(dev_priv))
 		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

 	/* Override some for render ring. */
-	if (INTEL_GEN(dev_priv) >= 9)
-		engine->init_hw = gen9_init_render_ring;
-	else
-		engine->init_hw = gen8_init_render_ring;
 	engine->init_context = gen8_init_rcs_context;
 	engine->emit_flush = gen8_emit_flush_render;
 	engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
@@ -2310,7 +2294,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 	}

 	intel_engine_init_whitelist(engine);
-	intel_engine_init_workarounds(engine);

 	return 0;
 }
@@ -2325,9 +2308,9 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine)
 static u32
 make_rpcs(struct drm_i915_private *dev_priv)
 {
-	bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg;
-	u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
-	u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]);
+	bool subslice_pg = RUNTIME_INFO(dev_priv)->sseu.has_subslice_pg;
+	u8 slices = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+	u8 subslices = hweight8(RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0]);
 	u32 rpcs = 0;

 	/*
@@ -2362,7 +2345,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
	 * subslices are enabled, or a count between one and four on the first
	 * slice.
	 */
-	if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) {
+	if (IS_GEN(dev_priv, 11) && slices == 1 && subslices >= 4) {
 		GEM_BUG_ON(subslices & 1);

 		subslice_pg = false;
@@ -2375,7 +2358,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
	 * must make an explicit request through RPCS for full
	 * enablement.
	 */
-	if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
+	if (RUNTIME_INFO(dev_priv)->sseu.has_slice_pg) {
 		u32 mask, val = slices;

 		if (INTEL_GEN(dev_priv) >= 11) {
@@ -2403,17 +2386,17 @@ make_rpcs(struct drm_i915_private *dev_priv)
 		rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
 	}

-	if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+	if (RUNTIME_INFO(dev_priv)->sseu.has_eu_pg) {
 		u32 val;

-		val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+		val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
			GEN8_RPCS_EU_MIN_SHIFT;
 		GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
 		val &= GEN8_RPCS_EU_MIN_MASK;

 		rpcs |= val;

-		val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+		val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
			GEN8_RPCS_EU_MAX_SHIFT;
 		GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
 		val &= GEN8_RPCS_EU_MAX_MASK;
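make_rpcs() derives its slice and subslice counts from hardware fuse masks via hweight8(), a population count over an 8-bit mask. A standalone sketch of that computation, using a plain-C stand-in for the kernel's hweight8():

    /* Illustrative sketch: counting enabled slices in a fuse mask. */
    #include <stdint.h>

    static unsigned int popcount8(uint8_t mask)
    {
            unsigned int n = 0;

            for (; mask; mask &= mask - 1)  /* clear lowest set bit each step */
                    n++;
            return n;
    }

    /* e.g. slice_mask = 0x07 -> 3 slices enabled */

The RUNTIME_INFO() switch in the hunk above only changes where the sseu masks are stored (runtime-probed device info rather than the const static table); the counting itself is unchanged.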

@@ -2538,6 +2521,11 @@ static void execlists_init_reg_state(u32 *regs,
		 * other PDP Descriptors are ignored.
		 */
 		ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+	} else {
+		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
+		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
+		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
+		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
 	}

 	if (rcs) {

@@ -32,7 +32,6 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -279,7 +278,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
	 * special lvds dither control bit on pch-split platforms, dithering is
	 * only controlled through the PIPECONF reg.
	 */
-	if (IS_GEN4(dev_priv)) {
+	if (IS_GEN(dev_priv, 4)) {
		/*
		 * Bspec wording suggests that LVDS port dithering only exists
		 * for 18bpp panels.
@@ -919,7 +918,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	intel_encoder->cloneable = 0;
 	if (HAS_PCH_SPLIT(dev_priv))
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-	else if (IS_GEN4(dev_priv))
+	else if (IS_GEN(dev_priv, 4))
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	else
 		intel_encoder->crtc_mask = (1 << 1);

@@ -193,7 +193,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
 	}

 	/* WaDisableSkipCaching:skl,bxt,kbl,glk */
-	if (IS_GEN9(dev_priv)) {
+	if (IS_GEN(dev_priv, 9)) {
 		int i;

 		for (i = 0; i < table->size; i++)

@@ -49,7 +49,6 @@
  * context handling keep the MOCS in step.
  */

-#include <drm/drmP.h>
 #include "i915_drv.h"

 int intel_rcs_context_init_mocs(struct i915_request *rq);

@@ -30,7 +30,6 @@
 #include <linux/firmware.h>
 #include <acpi/video.h>

-#include <drm/drmP.h>
 #include <drm/i915_drm.h>

 #include "intel_opregion.h"

@@ -25,7 +25,6 @@
  *
  * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
  */
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_reg.h"
@@ -541,7 +540,7 @@ static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
 {
 	u32 sw;

-	if (IS_GEN2(dev_priv))
+	if (IS_GEN(dev_priv, 2))
 		sw = ALIGN((offset & 31) + width, 32);
 	else
 		sw = ALIGN((offset & 63) + width, 64);
@@ -778,7 +777,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	u32 oconfig;

 	oconfig = OCONF_CC_OUT_8BIT;
-	if (IS_GEN4(dev_priv))
+	if (IS_GEN(dev_priv, 4))
 		oconfig |= OCONF_CSC_MODE_BT709;
 	oconfig |= pipe == 0 ?
		OCONF_PIPE_A : OCONF_PIPE_B;
@@ -1012,7 +1011,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,

 	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
 		return -EINVAL;
-	if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
+	if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
 		return -EINVAL;

 	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1246,7 +1245,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 	attrs->contrast   = overlay->contrast;
 	attrs->saturation = overlay->saturation;

-	if (!IS_GEN2(dev_priv)) {
+	if (!IS_GEN(dev_priv, 2)) {
 		attrs->gamma0 = I915_READ(OGAMC0);
 		attrs->gamma1 = I915_READ(OGAMC1);
 		attrs->gamma2 = I915_READ(OGAMC2);
@@ -1270,7 +1269,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 		update_reg_attrs(overlay, overlay->regs);

 		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
-			if (IS_GEN2(dev_priv))
+			if (IS_GEN(dev_priv, 2))
 				goto out_unlock;

 			if (overlay->active) {

@@ -563,7 +563,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
 		pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
 	}

-	if (IS_GEN4(dev_priv)) {
+	if (IS_GEN(dev_priv, 4)) {
 		mask = BACKLIGHT_DUTY_CYCLE_MASK;
 	} else {
 		level <<= 1;
@@ -929,7 +929,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
	 * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
	 * that has backlight.
	 */
-	if (IS_GEN2(dev_priv))
+	if (IS_GEN(dev_priv, 2))
 		I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
 }

@@ -1557,7 +1557,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)

 	ctl = I915_READ(BLC_PWM_CTL);

-	if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+	if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
 		panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;

 	if (IS_PINEVIEW(dev_priv))
@@ -1886,7 +1886,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 			panel->backlight.get = vlv_get_backlight;
 			panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
 		}
-	} else if (IS_GEN4(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 4)) {
 		panel->backlight.setup = i965_setup_backlight;
 		panel->backlight.enable = i965_enable_backlight;
 		panel->backlight.disable = i965_disable_backlight;

@@ -427,13 +427,13 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
			       enum intel_pipe_crc_source *source, u32 *val,
			       bool set_wa)
 {
-	if (IS_GEN2(dev_priv))
+	if (IS_GEN(dev_priv, 2))
 		return i8xx_pipe_crc_ctl_reg(source, val);
 	else if (INTEL_GEN(dev_priv) < 5)
 		return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
-	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+	else if (IS_GEN_RANGE(dev_priv, 5, 6))
 		return ilk_pipe_crc_ctl_reg(source, val);
 	else
 		return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
@@ -544,13 +544,13 @@ static int
 intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
			  const enum intel_pipe_crc_source source)
 {
-	if (IS_GEN2(dev_priv))
+	if (IS_GEN(dev_priv, 2))
 		return i8xx_crc_source_valid(dev_priv, source);
 	else if (INTEL_GEN(dev_priv) < 5)
 		return i9xx_crc_source_valid(dev_priv, source);
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		return vlv_crc_source_valid(dev_priv, source);
-	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+	else if (IS_GEN_RANGE(dev_priv, 5, 6))
 		return ilk_crc_source_valid(dev_priv, source);
 	else
 		return ivb_crc_source_valid(dev_priv, source);

[diff for one file suppressed by the viewer because it is too large]
@@ -51,7 +51,6 @@
  * must be correctly synchronized/cancelled when shutting down the pipe."
  */

-#include <drm/drmP.h>

 #include "intel_drv.h"
 #include "i915_drv.h"
@@ -261,6 +260,32 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
 	return val;
 }

+static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
+{
+	u16 val;
+	ssize_t r;
+
+	/*
+	 * Returning the default X granularity if granularity not required or
+	 * if DPCD read fails
+	 */
+	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
+		return 4;
+
+	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
+	if (r != 2)
+		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+
+	/*
+	 * Spec says that if the value read is 0 the default granularity should
+	 * be used instead.
+	 */
+	if (r != 2 || val == 0)
+		val = 4;
+
+	return val;
+}
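intel_dp_get_su_x_granulartiy() (the misspelling is in the actual function name) feeds the PSR2 validity check added later in this series: the sink only accepts selective-update blocks whose width is a multiple of the advertised X granularity. A worked example: with su_x_granularity == 4, a 3640-pixel-wide mode passes (3640 % 4 == 0), while a 3642-pixel mode would be rejected. As a standalone sketch:

    /* Illustrative sketch of the PSR2 selective-update width check. */
    #include <stdbool.h>

    static bool su_width_ok(unsigned int crtc_hdisplay,
                            unsigned int su_x_granularity)
    {
            /* SU blocks span the full scanline, so the line width itself
             * must be a multiple of the X granularity. */
            return (crtc_hdisplay % su_x_granularity) == 0;
    }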

 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv =
@@ -274,10 +299,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

+	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+		return;
+	}
+
+	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+		return;
+	}
+
 	dev_priv->psr.sink_support = true;
 	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);
@@ -309,6 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 		if (dev_priv->psr.sink_psr2_support) {
 			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
+			dev_priv->psr.su_x_granularity =
+				intel_dp_get_su_x_granulartiy(intel_dp);
 		}
 	}
 }
@@ -388,13 +421,15 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 	if (dev_priv->psr.psr2_enabled) {
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
-		dpcd_val |= DP_PSR_ENABLE_PSR2;
+		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+	} else {
+		if (dev_priv->psr.link_standby)
+			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+
+		if (INTEL_GEN(dev_priv) >= 8)
+			dpcd_val |= DP_PSR_CRC_VERIFICATION;
 	}

-	if (dev_priv->psr.link_standby)
-		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
-	if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
-		dpcd_val |= DP_PSR_CRC_VERIFICATION;
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
@@ -468,9 +503,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
 	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

-	/* FIXME: selective update is probably totally broken because it doesn't
-	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
-	 * good enough. */
 	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		val |= EDP_Y_COORDINATE_ENABLE;
@@ -519,7 +551,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
 		psr_max_h = 4096;
 		psr_max_v = 2304;
-	} else if (IS_GEN9(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 9)) {
 		psr_max_h = 3640;
 		psr_max_v = 2304;
 	}
@@ -531,6 +563,18 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}

+	/*
+	 * HW sends SU blocks of size four scan lines, which means the starting
+	 * X coordinate and Y granularity requirements will always be met. We
+	 * only need to validate the SU block width is a multiple of
+	 * x granularity.
+	 */
+	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
+		return false;
+	}
+
 	return true;
 }

|
@ -641,17 +685,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
|
|||
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
||||
hsw_psr_setup_aux(intel_dp);
|
||||
|
||||
if (dev_priv->psr.psr2_enabled) {
|
||||
if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
|
||||
!IS_GEMINILAKE(dev_priv))) {
|
||||
i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
|
||||
cpu_transcoder);
|
||||
u32 chicken = I915_READ(reg);
|
||||
|
||||
if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
|
||||
chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
|
||||
| PSR2_ADD_VERTICAL_LINE_COUNT);
|
||||
|
||||
else
|
||||
chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
|
||||
chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
|
||||
PSR2_ADD_VERTICAL_LINE_COUNT;
|
||||
I915_WRITE(reg, chicken);
|
||||
}
|
||||
|
||||
|
|
|
@@ -29,7 +29,6 @@

 #include <linux/log2.h>

-#include <drm/drmP.h>
 #include <drm/i915_drm.h>

 #include "i915_drv.h"
@@ -133,7 +132,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 	cmd = MI_FLUSH;
 	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_EXE_FLUSH;
-		if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
+		if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
 			cmd |= MI_INVALIDATE_ISP;
 	}

@@ -217,7 +216,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
  * really our business. That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
+gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
 	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
 	u32 *cs;
@@ -257,7 +256,7 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 	int ret;

 	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(rq);
+	ret = gen6_emit_post_sync_nonzero_flush(rq);
 	if (ret)
 		return ret;

@@ -300,6 +299,37 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 	return 0;
 }

+static void gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*cs++ = 0;
+	*cs++ = 0;
+
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_QW_WRITE;
+	*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0;
+
+	/* Finally we can flush and with it emit the breadcrumb */
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+		 PIPE_CONTROL_DC_FLUSH_ENABLE |
+		 PIPE_CONTROL_QW_WRITE |
+		 PIPE_CONTROL_CS_STALL);
+	*cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = rq->global_seqno;
+
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen6_rcs_emit_breadcrumb_sz = 14;
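The _sz constant must equal the number of dwords the function writes, since the caller reserves exactly that much ring space: two 4-dword PIPE_CONTROLs for the SNB post-sync workaround, one 4-dword flushing PIPE_CONTROL that carries the seqno write, then MI_USER_INTERRUPT and MI_NOOP — 4 + 4 + 4 + 2 = 14. Restated as a compile-time check (standalone sketch):

    /* Sketch: gen6 rcs breadcrumb dword budget. */
    _Static_assert(4 /* w/a PIPE_CONTROL */ + 4 /* QW_WRITE PIPE_CONTROL */ +
                   4 /* flush + seqno PIPE_CONTROL */ + 2 /* irq + noop */
                   == 14, "gen6_rcs_emit_breadcrumb_sz must match emission");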

 static int
 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 {

@@ -379,11 +409,86 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 	return 0;
 }

-static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+static void gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+		 PIPE_CONTROL_DC_FLUSH_ENABLE |
+		 PIPE_CONTROL_FLUSH_ENABLE |
+		 PIPE_CONTROL_QW_WRITE |
+		 PIPE_CONTROL_GLOBAL_GTT_IVB |
+		 PIPE_CONTROL_CS_STALL);
+	*cs++ = intel_hws_seqno_address(rq->engine);
+	*cs++ = rq->global_seqno;
+
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen7_rcs_emit_breadcrumb_sz = 6;
+
+static void gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
+	*cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT;
+	*cs++ = rq->global_seqno;
+	*cs++ = MI_USER_INTERRUPT;
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen6_xcs_emit_breadcrumb_sz = 4;
+
+#define GEN7_XCS_WA 32
+static void gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	int i;
+
+	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
+	*cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT;
+	*cs++ = rq->global_seqno;
+
+	for (i = 0; i < GEN7_XCS_WA; i++) {
+		*cs++ = MI_STORE_DWORD_INDEX;
+		*cs++ = I915_GEM_HWS_INDEX_ADDR;
+		*cs++ = rq->global_seqno;
+	}
+
+	*cs++ = MI_FLUSH_DW;
+	*cs++ = 0;
+	*cs++ = 0;
+
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen7_xcs_emit_breadcrumb_sz = 8 + GEN7_XCS_WA * 3;
+#undef GEN7_XCS_WA
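gen7_xcs_emit_breadcrumb_sz works out the same way: 3 dwords for the MI_FLUSH_DW that writes the seqno, 3 dwords per workaround store repeated GEN7_XCS_WA (32) times, 3 dwords for the trailing empty MI_FLUSH_DW, and 2 for MI_USER_INTERRUPT plus MI_NOOP — (3 + 3 + 2) + 32 * 3 = 8 + 96 = 104 dwords, which is exactly 8 + GEN7_XCS_WA * 3. As a standalone compile-time check:

    /* Sketch: 8 fixed dwords + 3 per workaround store (GEN7_XCS_WA == 32). */
    _Static_assert(3 + 32 * 3 + 3 + 2 == 8 + 32 * 3,
                   "gen7_xcs breadcrumb size must match emission");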

+static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
+{
+	/*
+	 * Keep the render interrupt unmasked as this papers over
+	 * lost interrupts following a reset.
+	 */
+	if (engine->class == RENDER_CLASS) {
+		if (INTEL_GEN(engine->i915) >= 6)
+			mask &= ~BIT(0);
+		else
+			mask &= ~I915_USER_INTERRUPT;
+	}
+
+	intel_engine_set_hwsp_writemask(engine, mask);
+}
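The new set_hwstam() helper centralises the HWSTAM (hardware status mask) programming that was previously open-coded at each call site; the one quirk it preserves is that the render engine's user-interrupt bit is always left unmasked, so a status-page write still lands after a reset. The masking step in isolation (bit positions here are illustrative, not authoritative register definitions):

    /* Sketch: always keep the user-interrupt bit unmasked for render. */
    #include <stdint.h>

    static uint32_t render_hwstam_mask(uint32_t mask, int gen)
    {
            /* bit 0 on gen6+, a different low bit on earlier parts */
            const uint32_t user_irq = (gen >= 6) ? (1u << 0) : (1u << 1);

            return mask & ~user_irq;
    }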

 static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct page *page = virt_to_page(engine->status_page.page_addr);
-	phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
 	u32 addr;

 	addr = lower_32_bits(phys);
@@ -393,15 +498,25 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 	I915_WRITE(HWS_PGA, addr);
 }

-static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+{
+	struct page *page = virt_to_page(engine->status_page.page_addr);
+	phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
+
+	set_hws_pga(engine, phys);
+	set_hwstam(engine, ~0u);
+}
+
+static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	i915_reg_t mmio;
+	i915_reg_t hwsp;

-	/* The ring status page addresses are no longer next to the rest of
+	/*
+	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
-	if (IS_GEN7(dev_priv)) {
+	if (IS_GEN(dev_priv, 7)) {
 		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
@@ -410,56 +525,55 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 		default:
 			GEM_BUG_ON(engine->id);
 		case RCS:
-			mmio = RENDER_HWS_PGA_GEN7;
+			hwsp = RENDER_HWS_PGA_GEN7;
 			break;
 		case BCS:
-			mmio = BLT_HWS_PGA_GEN7;
+			hwsp = BLT_HWS_PGA_GEN7;
 			break;
 		case VCS:
-			mmio = BSD_HWS_PGA_GEN7;
+			hwsp = BSD_HWS_PGA_GEN7;
 			break;
 		case VECS:
-			mmio = VEBOX_HWS_PGA_GEN7;
+			hwsp = VEBOX_HWS_PGA_GEN7;
 			break;
 		}
-	} else if (IS_GEN6(dev_priv)) {
-		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
+	} else if (IS_GEN(dev_priv, 6)) {
+		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
 	} else {
-		mmio = RING_HWS_PGA(engine->mmio_base);
+		hwsp = RING_HWS_PGA(engine->mmio_base);
 	}

-	if (INTEL_GEN(dev_priv) >= 6) {
-		u32 mask = ~0u;
+	I915_WRITE(hwsp, offset);
+	POSTING_READ(hwsp);
+}

-		/*
-		 * Keep the render interrupt unmasked as this papers over
-		 * lost interrupts following a reset.
-		 */
-		if (engine->id == RCS)
-			mask &= ~BIT(0);
+static void flush_cs_tlb(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	i915_reg_t instpm = RING_INSTPM(engine->mmio_base);

-		I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
-	}
+	if (!IS_GEN_RANGE(dev_priv, 6, 7))
+		return;

-	I915_WRITE(mmio, engine->status_page.ggtt_offset);
-	POSTING_READ(mmio);
+	/* ring should be idle before issuing a sync flush*/
+	WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

-	/* Flush the TLB for this page */
-	if (IS_GEN(dev_priv, 6, 7)) {
-		i915_reg_t reg = RING_INSTPM(engine->mmio_base);
+	I915_WRITE(instpm,
+		   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+				      INSTPM_SYNC_FLUSH));
+	if (intel_wait_for_register(dev_priv,
+				    instpm, INSTPM_SYNC_FLUSH, 0,
+				    1000))
+		DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+			  engine->name);
+}

-		/* ring should be idle before issuing a sync flush*/
-		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+static void ring_setup_status_page(struct intel_engine_cs *engine)
+{
+	set_hwsp(engine, engine->status_page.ggtt_offset);
+	set_hwstam(engine, ~0u);

-		I915_WRITE(reg,
-			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
-					      INSTPM_SYNC_FLUSH));
-		if (intel_wait_for_register(dev_priv,
-					    reg, INSTPM_SYNC_FLUSH, 0,
-					    1000))
-			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-				  engine->name);
-	}
+	flush_cs_tlb(engine);
 }

 static bool stop_ring(struct intel_engine_cs *engine)
@@ -529,17 +643,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	if (HWS_NEEDS_PHYSICAL(dev_priv))
 		ring_setup_phys_status_page(engine);
 	else
-		intel_ring_setup_status_page(engine);
+		ring_setup_status_page(engine);

 	intel_engine_reset_breadcrumbs(engine);

-	if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
-		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
-		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-		if (HAS_VEBOX(dev_priv))
-			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
-	}
-
 	/* Enforce ordering by reading HEAD register back */
 	I915_READ_HEAD(engine);

@@ -603,10 +710,6 @@ static int init_ring_common(struct intel_engine_cs *engine)
 static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
 {
 	intel_engine_stop_cs(engine);
-
-	if (engine->irq_seqno_barrier)
-		engine->irq_seqno_barrier(engine);
-
 	return i915_gem_find_active_request(engine);
 }

@@ -679,7 +782,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
 		return ret;

 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-	if (IS_GEN(dev_priv, 4, 6))
+	if (IS_GEN_RANGE(dev_priv, 4, 6))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

 	/* We need to disable the AsyncFlip performance optimisations in order
@@ -688,22 +791,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
-	if (IS_GEN(dev_priv, 6, 7))
+	if (IS_GEN_RANGE(dev_priv, 6, 7))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

 	/* Required for the hardware to program scanline values for waiting */
 	/* WaEnableFlushTlbInvalidationMode:snb */
-	if (IS_GEN6(dev_priv))
+	if (IS_GEN(dev_priv, 6))
 		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

 	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-	if (IS_GEN7(dev_priv))
+	if (IS_GEN(dev_priv, 7))
 		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

-	if (IS_GEN6(dev_priv)) {
+	if (IS_GEN(dev_priv, 6)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 * policy. [...] This bit must be reset. LRA replacement
@@ -713,7 +816,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}

-	if (IS_GEN(dev_priv, 6, 7))
+	if (IS_GEN_RANGE(dev_priv, 6, 7))
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

 	if (INTEL_GEN(dev_priv) >= 6)
|
@ -722,33 +825,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = rq->i915;
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
int num_rings = 0;
|
||||
|
||||
for_each_engine(engine, dev_priv, id) {
|
||||
i915_reg_t mbox_reg;
|
||||
|
||||
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
|
||||
continue;
|
||||
|
||||
mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
|
||||
if (i915_mmio_reg_valid(mbox_reg)) {
|
||||
*cs++ = MI_LOAD_REGISTER_IMM(1);
|
||||
*cs++ = i915_mmio_reg_offset(mbox_reg);
|
||||
*cs++ = rq->global_seqno;
|
||||
num_rings++;
|
||||
}
|
||||
}
|
||||
if (num_rings & 1)
|
||||
*cs++ = MI_NOOP;
|
||||
|
||||
return cs;
|
||||
}
|
||||
|
||||
static void cancel_requests(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct i915_request *request;
|
||||
|
@ -788,92 +864,41 @@ static void i9xx_submit_request(struct i915_request *request)
|
|||
|
||||
static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
|
||||
{
|
||||
*cs++ = MI_FLUSH;
|
||||
|
||||
*cs++ = MI_STORE_DWORD_INDEX;
|
||||
*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
|
||||
*cs++ = I915_GEM_HWS_INDEX_ADDR;
|
||||
*cs++ = rq->global_seqno;
|
||||
|
||||
*cs++ = MI_USER_INTERRUPT;
|
||||
*cs++ = MI_NOOP;
|
||||
|
||||
rq->tail = intel_ring_offset(rq, cs);
|
||||
assert_ring_tail_valid(rq->ring, rq->tail);
|
||||
}
|
||||
static const int i9xx_emit_breadcrumb_sz = 6;
|
||||
|
||||
#define GEN5_WA_STORES 8 /* must be at least 1! */
|
||||
static void gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
|
||||
{
|
||||
int i;
|
||||
|
||||
*cs++ = MI_FLUSH;
|
||||
|
||||
BUILD_BUG_ON(GEN5_WA_STORES < 1);
|
||||
for (i = 0; i < GEN5_WA_STORES; i++) {
|
||||
*cs++ = MI_STORE_DWORD_INDEX;
|
||||
*cs++ = I915_GEM_HWS_INDEX_ADDR;
|
||||
*cs++ = rq->global_seqno;
|
||||
}
|
||||
|
||||
*cs++ = MI_USER_INTERRUPT;
|
||||
|
||||
rq->tail = intel_ring_offset(rq, cs);
|
||||
assert_ring_tail_valid(rq->ring, rq->tail);
|
||||
}
|
||||
|
||||
static const int i9xx_emit_breadcrumb_sz = 4;
|
||||
|
||||
static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
|
||||
{
|
||||
return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
|
||||
}
|
||||
|
||||
static int
|
||||
gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
|
||||
{
|
||||
u32 dw1 = MI_SEMAPHORE_MBOX |
|
||||
MI_SEMAPHORE_COMPARE |
|
||||
MI_SEMAPHORE_REGISTER;
|
||||
u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
|
||||
u32 *cs;
|
||||
|
||||
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
|
||||
|
||||
cs = intel_ring_begin(rq, 4);
|
||||
if (IS_ERR(cs))
|
||||
return PTR_ERR(cs);
|
||||
|
||||
*cs++ = dw1 | wait_mbox;
|
||||
/* Throughout all of the GEM code, seqno passed implies our current
|
||||
* seqno is >= the last seqno executed. However for hardware the
|
||||
* comparison is strictly greater than.
|
||||
*/
|
||||
*cs++ = signal->global_seqno - 1;
|
||||
*cs++ = 0;
|
||||
*cs++ = MI_NOOP;
|
||||
intel_ring_advance(rq, cs);
|
||||
|
||||
return 0;
|
||||
}

static void
gen5_seqno_barrier(struct intel_engine_cs *engine)
{
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
	 */
	usleep_range(125, 250);
}

static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}
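
Both barriers serve the same purpose: do not let the CPU sample a stale seqno from the status page. Gen5 waits out the write latency; gen6+ forces an MMIO round trip instead. A simplified caller sketch with hypothetical types (not the driver's actual structures):

#include <stdint.h>

struct engine {
	void (*irq_seqno_barrier)(struct engine *engine);
	volatile uint32_t *status_page;	/* seqno lives here */
};

static uint32_t read_hw_seqno(struct engine *engine)
{
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);	/* make the GPU write visible */
	return *engine->status_page;
}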
static const int gen5_emit_breadcrumb_sz = GEN5_WA_STORES * 3 + 2;
#undef GEN5_WA_STORES

static void
gen5_irq_enable(struct intel_engine_cs *engine)

@@ -948,6 +973,10 @@ gen6_irq_enable(struct intel_engine_cs *engine)
	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	POSTING_READ_FW(RING_IMR(engine->mmio_base));

	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

@@ -966,6 +995,10 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	POSTING_READ_FW(RING_IMR(engine->mmio_base));

	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}
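
These two hunks are the "Flush RING_IMR changes before changing the global GT IMR on gen6 and HSW" fix from the summary: the posting read forces the per-ring mask write to land before the global mask is opened. A minimal standalone sketch of the idiom, with volatile pointers standing in for MMIO and a hypothetical helper name:

#include <stdint.h>

static void unmask_engine_irq(volatile uint32_t *ring_imr,
			      volatile uint32_t *gt_imr,
			      uint32_t enable_mask)
{
	*ring_imr = ~enable_mask;	/* unmask in the per-ring register */
	(void)*ring_imr;		/* posting read: flush it to hardware */
	*gt_imr &= ~enable_mask;	/* only then open the global GT mask */
}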

@@ -1581,10 +1614,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
	struct intel_engine_cs *engine = rq->engine;
	enum intel_engine_id id;
	const int num_rings =
		/* Use an extended w/a on gen7 if signalling from other rings */
		(HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
		INTEL_INFO(i915)->num_rings - 1 :
		0;
		IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

@@ -1597,7 +1627,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

	len = 4;
	if (IS_GEN7(i915))
	if (IS_GEN(i915, 7))
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);

@@ -1611,7 +1641,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN7(i915)) {
	if (IS_GEN(i915, 7)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_rings) {
			struct intel_engine_cs *signaller;

@@ -1658,7 +1688,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN7(i915)) {
	if (IS_GEN(i915, 7)) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

@@ -1829,17 +1859,19 @@ static int ring_request_alloc(struct i915_request *request)

	GEM_BUG_ON(!request->hw_context->pin_count);

	/* Flush enough space to reduce the likelihood of waiting after
	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
	ret = switch_context(request);
	if (ret)
		return ret;

	ret = switch_context(request);
	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

@@ -1881,22 +1913,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
	return 0;
}

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
	GEM_BUG_ON(bytes > ring->effective_size);
	if (unlikely(bytes > ring->effective_size - ring->emit))
		bytes += ring->size - ring->emit;

	if (unlikely(bytes > ring->space)) {
		int ret = wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
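
intel_ring_wait_for_space above over-allocates when a request would straddle the end of the ring, because the unused tail is skipped (padded with NOOPs) rather than split. A self-contained restatement of that arithmetic with illustrative values:

#include <assert.h>

/* If `bytes` does not fit between the current emit offset and the end of
 * the usable ring, the leftover tail is wasted and the allocation must
 * also cover that wasted span. */
static unsigned int bytes_needed(unsigned int size,
				 unsigned int effective_size,
				 unsigned int emit, unsigned int bytes)
{
	if (bytes > effective_size - emit)	/* would wrap */
		bytes += size - emit;		/* account for the skipped tail */
	return bytes;
}

int main(void)
{
	/* 4096-byte ring, 3968 usable, writing at offset 3900 */
	assert(bytes_needed(4096, 3968, 3900, 256) == 256 + (4096 - 3900));
	/* no wrap needed at the start of the ring */
	assert(bytes_needed(4096, 3968, 0, 256) == 256);
	return 0;
}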

u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;

@@ -2129,77 +2145,15 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}

static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	int i;

	if (!HAS_LEGACY_SEMAPHORES(dev_priv))
		return;

	GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
	engine->semaphore.sync_to = gen6_ring_sync_to;
	engine->semaphore.signal = gen6_signal;

	/*
	 * The current semaphore is only applied on pre-gen8
	 * platform. And there is no VCS2 ring on the pre-gen8
	 * platform. So the semaphore between RCS and VCS2 is
	 * initialized as INVALID.
	 */
	for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
		static const struct {
			u32 wait_mbox;
			i915_reg_t mbox_reg;
		} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
			[RCS_HW] = {
				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
			},
			[VCS_HW] = {
				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
			},
			[BCS_HW] = {
				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
			},
			[VECS_HW] = {
				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
			},
		};
		u32 wait_mbox;
		i915_reg_t mbox_reg;

		if (i == engine->hw_id) {
			wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
			mbox_reg = GEN6_NOSYNC;
		} else {
			wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
			mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
		}

		engine->semaphore.mbox.wait[i] = wait_mbox;
		engine->semaphore.mbox.signal[i] = mbox_reg;
	}
}
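
intel_ring_init_semaphores fills two per-engine arrays: mbox.wait[] (consumed by gen6_ring_sync_to earlier in this diff) and mbox.signal[] (consumed by gen6_signal). A simplified sketch of the lookup, with hypothetical types, showing which side each index comes from:

#include <stdint.h>

#define NUM_ENGINES 4

struct engine_sem {
	int hw_id;
	uint32_t wait[NUM_ENGINES];	/* our mbox, written by others */
	uint32_t signal[NUM_ENGINES];	/* mboxes this ring signals to */
};

/* mirrors signal->engine->semaphore.mbox.wait[rq->engine->hw_id] above:
 * the *signaller's* table, indexed by the *waiter's* hw_id */
static uint32_t wait_encoding(const struct engine_sem *waiter,
			      const struct engine_sem *signaller)
{
	return signaller->wait[waiter->hw_id];
}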

static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;

@@ -2231,7 +2185,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
	GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);

	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset.prepare = reset_prepare;

@@ -2243,15 +2196,9 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,

	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		int num_rings;

		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;

		num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
		engine->emit_breadcrumb_sz += num_rings * 3;
		if (num_rings & 1)
			engine->emit_breadcrumb_sz++;
	if (IS_GEN(dev_priv, 5)) {
		engine->emit_breadcrumb = gen5_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen5_emit_breadcrumb_sz;
	}

	engine->set_default_submission = i9xx_set_default_submission;

@@ -2278,12 +2225,17 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 6) {
	if (INTEL_GEN(dev_priv) >= 7) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_breadcrumb = gen7_rcs_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen7_rcs_emit_breadcrumb_sz;
	} else if (IS_GEN(dev_priv, 6)) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen6_render_ring_flush;
		engine->emit_breadcrumb = gen6_rcs_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen6_rcs_emit_breadcrumb_sz;
	} else if (IS_GEN(dev_priv, 5)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)

@@ -2313,13 +2265,21 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
		if (IS_GEN(dev_priv, 6))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (IS_GEN(dev_priv, 6)) {
			engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb;
			engine->emit_breadcrumb_sz = gen6_xcs_emit_breadcrumb_sz;
		} else {
			engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
			engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz;
		}
	} else {
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
		if (IS_GEN(dev_priv, 5))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;

@@ -2332,11 +2292,21 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (IS_GEN(dev_priv, 6)) {
		engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen6_xcs_emit_breadcrumb_sz;
	} else {
		engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz;
	}

	return intel_init_ring_buffer(engine);
}

@@ -2344,6 +2314,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

@@ -2351,5 +2323,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
	engine->irq_enable = hsw_vebox_irq_enable;
	engine->irq_disable = hsw_vebox_irq_disable;

	engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
	engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz;

	return intel_init_ring_buffer(engine);
}

@@ -94,12 +94,12 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
#define I915_MAX_SUBSLICES 8

#define instdone_slice_mask(dev_priv__) \
	(IS_GEN7(dev_priv__) ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(IS_GEN7(dev_priv__) ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \

@@ -365,9 +365,6 @@ struct intel_engine_cs {
	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the

@@ -501,69 +498,8 @@ struct intel_engine_cs {
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		  VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90)  | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		  VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90)  | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
		struct {
			/* our mbox written by others */
			u32		wait[GEN6_NUM_SEMAPHORES];
			/* mboxes this ring signals to */
			i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
		} mbox;

		/* AKA wait() */
		int	(*sync_to)(struct i915_request *rq,
				   struct i915_request *signal);
		u32	*(*signal)(struct i915_request *rq, u32 *cs);
	} semaphore;
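
The comment above warns "never trust comments" - and indeed its two generalizations can be spot-checked in a few lines of standalone C, using the offsets copied from the tables (seqno_size is 8 bytes, NUM_RINGS is 5):

#include <assert.h>

enum { RCS = 0, VCS, BCS, VECS, VCS2, NUM_RINGS };
#define SEQNO_SIZE 8

static int f(int x, int y) { return x * NUM_RINGS * SEQNO_SIZE + SEQNO_SIZE * y; }
static int g(int x, int y) { return y * NUM_RINGS * SEQNO_SIZE + SEQNO_SIZE * x; }

int main(void)
{
	assert(f(RCS, VCS) == 0x08);	/* RCS signals to VCS */
	assert(f(VCS2, VECS) == 0xb8);	/* VCS2 signals to VECS */
	assert(g(RCS, VCS) == 0x28);	/* RCS syncs from VCS */
	assert(g(VCS2, VECS) == 0x98);	/* VCS2 syncs from VECS */
	assert(f(VCS, RCS) == g(RCS, VCS));	/* transpose property */
	return 0;
}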

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last

@@ -808,7 +744,6 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)

@@ -889,7 +824,7 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);

@@ -903,6 +838,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

@@ -947,15 +884,6 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;

@@ -1055,7 +983,7 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

@@ -1065,8 +993,7 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;

@@ -1092,7 +1019,7 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
	return cs;
}
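
The signature change above lets each caller choose its own PIPE_CONTROL flags instead of hardcoding CS_STALL. A self-contained restatement of the 6-dword qword-write packet it emits; the opcode and flag constants below are written from memory and should be treated as assumptions, not the authoritative register encodings:

#include <assert.h>
#include <stdint.h>

#define GFX_OP_PIPE_CONTROL_6	0x7a000004u	/* assumed 6-dword encoding */
#define QW_WRITE		(1u << 14)	/* assumed PIPE_CONTROL_QW_WRITE */
#define GLOBAL_GTT_IVB		(1u << 24)	/* assumed PIPE_CONTROL_GLOBAL_GTT_IVB */

static uint32_t *emit_ggtt_write(uint32_t *cs, uint32_t value,
				 uint32_t gtt_offset, uint32_t flags)
{
	assert((gtt_offset & 7) == 0);	/* qword write: 8-byte aligned */
	*cs++ = GFX_OP_PIPE_CONTROL_6;
	*cs++ = flags | QW_WRITE | GLOBAL_GTT_IVB;
	*cs++ = gtt_offset;
	*cs++ = 0;		/* upper address dword */
	*cs++ = value;		/* lower data dword */
	*cs++ = 0;		/* upper data dword (thrashes one HWS dword) */
	return cs;
}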

void intel_engines_sanitize(struct drm_i915_private *i915);
void intel_engines_sanitize(struct drm_i915_private *i915, bool force);

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

@@ -509,7 +509,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

@@ -3058,7 +3058,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_GEN9_LP(dev_priv)) {

@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

@@ -29,7 +29,6 @@
 * registers; newer ones are much simpler and we can use the new DRM plane
 * support.
 */
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>

@@ -1087,7 +1086,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,

	dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE;

	if (IS_GEN6(dev_priv))
	if (IS_GEN(dev_priv, 6))
		dvscntr |= DVS_TRICKLE_FEED_DISABLE;

	switch (fb->format->format) {

@@ -1983,7 +1982,7 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return false;

	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
		return false;

	if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)

@@ -2163,7 +2162,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
		plane->check_plane = g4x_sprite_check;

		modifiers = i9xx_plane_format_modifiers;
		if (IS_GEN6(dev_priv)) {
		if (IS_GEN(dev_priv, 6)) {
			formats = snb_plane_formats;
			num_formats = ARRAY_SIZE(snb_plane_formats);

@@ -30,7 +30,6 @@
 * Integrated TV-out support for the 915GM and 945GM.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

@@ -71,7 +71,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915)
{
	int guc_log_level;

	if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
	if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915))
		guc_log_level = GUC_LOG_LEVEL_DISABLED;
	else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
		 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))

@@ -112,11 +112,11 @@ static void sanitize_options_early(struct drm_i915_private *i915)

	DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
			 i915_modparams.enable_guc,
			 yesno(intel_uc_is_using_guc_submission()),
			 yesno(intel_uc_is_using_huc()));
			 yesno(intel_uc_is_using_guc_submission(i915)),
			 yesno(intel_uc_is_using_huc(i915)));

	/* Verify GuC firmware availability */
	if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
	if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "enable_guc", i915_modparams.enable_guc,
			 !HAS_GUC(i915) ? "no GuC hardware" :

@@ -124,7 +124,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
	}

	/* Verify HuC firmware availability */
	if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
	if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "enable_guc", i915_modparams.enable_guc,
			 !HAS_HUC(i915) ? "no HuC hardware" :

@@ -136,7 +136,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
		i915_modparams.guc_log_level =
			__get_default_guc_log_level(i915);

	if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
	if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 !HAS_GUC(i915) ? "no GuC hardware" :

@@ -354,7 +354,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN9(i915))
	if (IS_GEN(i915, 9))
		attempts = 3;
	else
		attempts = 1;

@@ -41,19 +41,19 @@ void intel_uc_fini(struct drm_i915_private *dev_priv);
int intel_uc_suspend(struct drm_i915_private *dev_priv);
int intel_uc_resume(struct drm_i915_private *dev_priv);

static inline bool intel_uc_is_using_guc(void)
static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
{
	GEM_BUG_ON(i915_modparams.enable_guc < 0);
	return i915_modparams.enable_guc > 0;
}

static inline bool intel_uc_is_using_guc_submission(void)
static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915)
{
	GEM_BUG_ON(i915_modparams.enable_guc < 0);
	return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}

static inline bool intel_uc_is_using_huc(void)
static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915)
{
	GEM_BUG_ON(i915_modparams.enable_guc < 0);
	return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
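
The three helpers above all decode the same enable_guc modparam bitmask. A standalone restatement of that decoding; the bit assignments match my reading of i915_params.h at this point in history and should be treated as assumptions:

#include <assert.h>

#define ENABLE_GUC_SUBMISSION	(1 << 0)	/* assumed bit layout */
#define ENABLE_GUC_LOAD_HUC	(1 << 1)

int main(void)
{
	int enable_guc = ENABLE_GUC_LOAD_HUC;	/* e.g. i915.enable_guc=2 */

	assert(enable_guc > 0);				/* "is using GuC" */
	assert(!(enable_guc & ENABLE_GUC_SUBMISSION));	/* but not for submission */
	assert(enable_guc & ENABLE_GUC_LOAD_HUC);	/* only to authenticate HuC */
	return 0;
}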

@@ -46,12 +46,17 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
	size_t size;
	int err;

	if (!uc_fw->path) {
		dev_info(dev_priv->drm.dev,
			 "%s: No firmware was defined for %s!\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 intel_platform_name(INTEL_INFO(dev_priv)->platform));
		return;
	}

	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);

	if (!uc_fw->path)
		return;

	uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
			 intel_uc_fw_type_repr(uc_fw->type),

@@ -528,7 +528,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(dev_priv);

	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
	if (IS_GEN_RANGE(dev_priv, 6, 7))
		ret |= gen6_check_for_fifo_debug(dev_priv);

	return ret;

@@ -556,7 +556,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
		dev_priv->uncore.funcs.force_wake_get(dev_priv,
						      restore_forcewake);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		if (IS_GEN_RANGE(dev_priv, 6, 7))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
		spin_unlock_irq(&dev_priv->uncore.lock);

@@ -1398,7 +1398,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
		return;

	if (IS_GEN6(dev_priv)) {
	if (IS_GEN(dev_priv, 6)) {
		dev_priv->uncore.fw_reset = 0;
		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
		dev_priv->uncore.fw_clear = 0;

@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_fallback;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;

@@ -1503,7 +1503,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}
	} else if (IS_GEN6(dev_priv)) {
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;

@@ -1567,13 +1567,13 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
	if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
	} else if (IS_GEN5(dev_priv)) {
	} else if (IS_GEN(dev_priv, 5)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
	} else if (IS_GEN(dev_priv, 6, 7)) {
	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);

		if (IS_VALLEYVIEW(dev_priv)) {

@@ -1582,7 +1582,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
		} else {
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else if (IS_GEN8(dev_priv)) {
	} else if (IS_GEN(dev_priv, 8)) {
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);

@@ -1592,7 +1592,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else if (IS_GEN(dev_priv, 9, 10)) {
	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);

@@ -1931,6 +1931,103 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
	return gen6_hw_domain_reset(dev_priv, hw_mask);
}

static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
			  struct intel_engine_cs *engine)
{
	u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * Tell the engine that a software reset is going to happen. The engine
	 * will then try to force lock the SFC (if currently locked, it will
	 * remain so until we tell the engine it is safe to unlock; if currently
	 * unlocked, it will ignore this and all new lock requests). If SFC
	 * ends up being locked to the engine we want to reset, we have to reset
	 * it as well (we will unlock it once the reset sequence is completed).
	 */
	I915_WRITE_FW(sfc_forced_lock,
		      I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);

	if (__intel_wait_for_register_fw(dev_priv,
					 sfc_forced_lock_ack,
					 sfc_forced_lock_ack_bit,
					 sfc_forced_lock_ack_bit,
					 1000, 0, NULL)) {
		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
		return 0;
	}

	if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
		return sfc_reset_bit;

	return 0;
}

static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
			     struct intel_engine_cs *engine)
{
	u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	I915_WRITE_FW(sfc_forced_lock,
		      I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
}

/**
 * gen11_reset_engines - reset individual engines
 * @dev_priv: i915 device

@@ -1947,7 +2044,6 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
static int gen11_reset_engines(struct drm_i915_private *dev_priv,
			       unsigned int engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN11_GRDOM_RENDER,
		[BCS] = GEN11_GRDOM_BLT,

@@ -1958,21 +2054,30 @@ static int gen11_reset_engines(struct drm_i915_private *dev_priv,
		[VECS] = GEN11_GRDOM_VECS,
		[VECS2] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	unsigned int tmp;
	u32 hw_mask;
	int ret;

	BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
			hw_mask |= hw_engine_mask[engine->id];
			hw_mask |= gen11_lock_sfc(dev_priv, engine);
		}
	}

	return gen6_hw_domain_reset(dev_priv, hw_mask);
	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			gen11_unlock_sfc(dev_priv, engine);

	return ret;
}
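
The control flow of the SFC handling above, restated as a compilable sketch with hypothetical helper names (the lock is taken before the domain reset, contributes an extra reset bit only when the shared unit is actually in use, and is dropped unconditionally afterwards):

#include <stdint.h>

struct gpu;					/* hypothetical handle */
uint32_t engine_domain_bit(struct gpu *g, int id);	/* hypothetical */
uint32_t sfc_lock(struct gpu *g, int id);	/* extra reset bit, or 0 */
void sfc_unlock(struct gpu *g, int id);		/* hypothetical */
int hw_domain_reset(struct gpu *g, uint32_t mask);	/* hypothetical */

static int reset_engines_with_sfc(struct gpu *gpu, uint32_t engine_mask,
				  int num_engines)
{
	uint32_t hw_mask = 0;
	int id, ret;

	for (id = 0; id < num_engines; id++)
		if (engine_mask & (1u << id))
			hw_mask |= engine_domain_bit(gpu, id) |
				   sfc_lock(gpu, id);

	ret = hw_domain_reset(gpu, hw_mask);

	for (id = 0; id < num_engines; id++)
		if (engine_mask & (1u << id))
			sfc_unlock(gpu, id);	/* drop the forced lock */

	return ret;
}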

/**

@@ -2173,7 +2278,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
		return gen8_reset_engines;
	else if (INTEL_GEN(dev_priv) >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
	else if (IS_GEN(dev_priv, 5))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;

@@ -2256,7 +2361,7 @@ bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)

bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
	return (dev_priv->info.has_reset_engine &&
	return (INTEL_INFO(dev_priv)->has_reset_engine &&
		i915_modparams.reset >= 2);
}

@@ -2321,7 +2426,7 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
		fw_domains = 0;
	}

@@ -2341,12 +2446,12 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
		fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
	} else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
	} else if (IS_GEN(dev_priv, 8)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
		fw_domains = 0;
	}

@@ -6,7 +6,6 @@
 * Manasi Navare <manasi.d.navare@intel.com>
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"

@@ -130,11 +130,11 @@ static inline int check_hw_restriction(struct drm_i915_private *i915,
{
	int err = 0;

	if (IS_GEN9(i915))
	if (IS_GEN(i915, 9))
		err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);

	if (!err &&
	    (IS_GEN9(i915) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
	    (IS_GEN(i915, 9) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
		err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);

	return err;

@@ -163,7 +163,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
	u32 guc_wopcm_rsvd;
	int err;

	if (!USES_GUC(dev_priv))
	if (!USES_GUC(i915))
		return 0;

	GEM_BUG_ON(!wopcm->size);

@@ -366,7 +366,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
		if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
			continue;

		/*

@@ -375,7 +375,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
		 *
		 * -> 0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
		ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

@@ -639,10 +639,9 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
	wa_write_masked_or(wal, reg, val, val);
}

static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915))
		wa_write_or(wal,

@@ -666,11 +665,10 @@ static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
			    BDW_DISABLE_HDC_INVALIDATION);
}

static void skl_gt_workarounds_init(struct drm_i915_private *i915)
static void
skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	gen9_gt_workarounds_init(i915);
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,

@@ -684,11 +682,10 @@ static void skl_gt_workarounds_init(struct drm_i915_private *i915)
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
static void
bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	gen9_gt_workarounds_init(i915);
	gen9_gt_workarounds_init(i915, wal);

	/* WaInPlaceDecompressionHang:bxt */
	wa_write_or(wal,

@@ -696,11 +693,10 @@ static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
static void
kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	gen9_gt_workarounds_init(i915);
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))

@@ -719,16 +715,16 @@ static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void glk_gt_workarounds_init(struct drm_i915_private *i915)
static void
glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915);
	gen9_gt_workarounds_init(i915, wal);
}

static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
static void
cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	gen9_gt_workarounds_init(i915);
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,

@@ -741,10 +737,10 @@ static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void wa_init_mcr(struct drm_i915_private *dev_priv)
static void
wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
	struct i915_wa_list *wal = &dev_priv->gt_wa_list;
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 mcr_slice_subslice_mask;

	/*

@@ -804,11 +800,10 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
		     intel_calculate_mcr_s_ss_select(dev_priv));
}

static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
static void
cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_mcr(i915);
	wa_init_mcr(i915, wal);

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))

@@ -822,11 +817,10 @@ static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void icl_gt_workarounds_init(struct drm_i915_private *i915)
static void
icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_mcr(i915);
	wa_init_mcr(i915, wal);

	/* WaInPlaceDecompressionHang:icl */
	wa_write_or(wal,

@@ -879,12 +873,9 @@ static void icl_gt_workarounds_init(struct drm_i915_private *i915)
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)
static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT");

	if (INTEL_GEN(i915) < 8)
		return;
	else if (IS_BROADWELL(i915))

@@ -892,22 +883,29 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
	else if (IS_CHERRYVIEW(i915))
		return;
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(i915);
		skl_gt_workarounds_init(i915, wal);
	else if (IS_BROXTON(i915))
		bxt_gt_workarounds_init(i915);
		bxt_gt_workarounds_init(i915, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(i915);
		kbl_gt_workarounds_init(i915, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(i915);
		glk_gt_workarounds_init(i915, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_gt_workarounds_init(i915);
		cfl_gt_workarounds_init(i915, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_gt_workarounds_init(i915);
		cnl_gt_workarounds_init(i915, wal);
	else if (IS_ICELAKE(i915))
		icl_gt_workarounds_init(i915);
		icl_gt_workarounds_init(i915, wal);
	else
		MISSING_CASE(INTEL_GEN(i915));
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT");
	gt_init_workarounds(i915, wal);
	wa_init_finish(wal);
}
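
The refactor above separates building a workaround list from owning its storage, so the same *_init helpers can fill the device-global GT list or, in selftests, a scratch list. A lifecycle sketch with simplified, hypothetical types:

#include <stdio.h>

struct wa_list { const char *name; unsigned int count; };

static void wa_init_start(struct wa_list *wal, const char *name)
{
	wal->name = name;
	wal->count = 0;
}

static void wa_init_finish(struct wa_list *wal)
{
	(void)wal;	/* in the driver this trims the allocation */
}

static void fill_gt_workarounds(struct wa_list *wal)
{
	wal->count++;	/* stands in for the per-platform *_init helpers */
}

int main(void)
{
	struct wa_list gt = { 0 }, scratch = { 0 };

	/* the same builder fills either list */
	wa_init_start(&gt, "GT");
	fill_gt_workarounds(&gt);
	wa_init_finish(&gt);

	wa_init_start(&scratch, "GT-verify");
	fill_gt_workarounds(&scratch);
	wa_init_finish(&scratch);

	printf("%s: %u, %s: %u\n", gt.name, gt.count, scratch.name, scratch.count);
	return 0;
}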

@@ -955,8 +953,6 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)

	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);

	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}

void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)

@@ -1126,14 +1122,12 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
			   i915_mmio_reg_offset(RING_NOPID(base)));

	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}

static void rcs_engine_wa_init(struct intel_engine_cs *engine)
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *wal = &engine->wa_list;

	if (IS_ICELAKE(i915)) {
		/* This is not a Wa. Enable for better image quality */

@@ -1190,7 +1184,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
			    GEN7_DISABLE_SAMPLER_PREFETCH);
	}

	if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
	if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) {
		/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,

@@ -1211,7 +1205,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (IS_GEN9(i915)) {
	if (IS_GEN(i915, 9)) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,

@@ -1237,10 +1231,10 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
	}
}

static void xcs_engine_wa_init(struct intel_engine_cs *engine)
static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *wal = &engine->wa_list;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {

@@ -1250,6 +1244,18 @@ static void xcs_engine_wa_init(struct intel_engine_cs *engine)
	}
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
		return;

	if (engine->id == RCS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

@@ -1258,12 +1264,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
		return;

	wa_init_start(wal, engine->name);

	if (engine->id == RCS)
		rcs_engine_wa_init(engine);
	else
		xcs_engine_wa_init(engine);

	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

@@ -1273,11 +1274,5 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
					    const char *from)
{
	return wa_list_verify(engine->i915, &engine->wa_list, from);
}

#include "selftests/intel_workarounds.c"
#endif

@@ -972,7 +972,6 @@ static int gpu_write(struct i915_vma *vma,
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int flags = 0;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

@@ -981,14 +980,14 @@ static int gpu_write(struct i915_vma *vma,
	if (err)
		return err;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	batch = gpu_write_dw(vma, dword * sizeof(u32), value);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_request;
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = i915_vma_move_to_active(batch, rq, 0);

@@ -996,21 +995,21 @@ static int gpu_write(struct i915_vma *vma,
		goto err_request;

	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		i915_request_skip(rq, err);
		goto err_request;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    0);
err_request:
	if (err)
		i915_request_skip(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	return err;
}
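
The selftest rework above follows a general unwind pattern: allocate the object that cannot be unwound (the request, which must be added once taken) last, so every earlier failure has a trivial cleanup path, and route all later failures through a single commit point. A compilable sketch of that shape with hypothetical helpers:

#include <stddef.h>

struct ctx;				/* hypothetical types */
struct batch;
struct request;

struct batch *make_batch(struct ctx *c);	/* hypothetical helpers */
void batch_free(struct batch *b);
struct request *request_alloc(struct ctx *c);
void request_commit(struct request *rq);	/* mandatory once allocated */
int attach(struct request *rq, struct batch *b);

static int run_once(struct ctx *c)
{
	struct batch *b;
	struct request *rq;
	int err;

	b = make_batch(c);		/* created first: trivially freed on error */
	if (!b)
		return -1;

	rq = request_alloc(c);		/* created last: cannot be unwound */
	if (!rq) {
		err = -1;
		goto out_batch;
	}

	err = attach(rq, b);		/* any failure still flows into commit */
	request_commit(rq);		/* single exit path for the request */
out_batch:
	batch_free(b);
	return err;
}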

@@ -1703,7 +1702,6 @@ int i915_gem_huge_page_mock_selftests(void)
	};
	struct drm_i915_private *dev_priv;
	struct i915_hw_ppgtt *ppgtt;
	struct pci_dev *pdev;
	int err;

	dev_priv = mock_gem_device();

@@ -1713,9 +1711,6 @@ int i915_gem_huge_page_mock_selftests(void)
	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;

	pdev = dev_priv->drm.pdev;
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
	if (IS_ERR(ppgtt)) {