Merge tag 'drm-intel-next-2015-07-17' of git://anongit.freedesktop.org/drm-intel into drm-next
- prelim hw support dropped for skl after Damien fixed an ABI issue around planes
- legacy modesetting is done using atomic infrastructure now (Maarten)!
- more gen9 workarounds (Arun & Nick)
- MOCS programming (cache control for better performance) for skl/bxt
- vlv/chv dpll improvements (Ville)
- PSR fixes from Rodrigo
- fbc improvements from Paulo
- plumb requests into execlist submit functions (Mika)
- opregion code cleanup from Jani
- resource streamer support from Abdiel for mesa
- final fixes for 12bpc hdmi + enabling support from Ville

drm-intel-next-2015-07-03:
- dsi improvements (Gaurav)
- bxt ddi dpll hw state readout (Imre)
- chv dvfs support and overall wm improvements for both vlv and chv (Ville)
- ppgtt polish from Mika and Michel
- cdclk support for bxt (Bob Paauwe)
- make frontbuffer tracking more precise
- OLR removal (John Harrison)
- per-ctx WA batch buffer support (Arun Siluvery)
- remove KMS Kconfig option (Chris)
- more hpd handling refactoring from Jani
- use atomic states throughout modeset code and integrate with atomic plane update (Maarten)

drm-intel-next-2015-06-19:
- refactoring hpd irq handlers (Jani)
- polish skl dpll code a bit (Damien)
- dynamic cdclk adjustment (Ville & Mika)
- fix up 12bpc hdmi and enable it for real again (Ville)
- extend hsw cmd parser to be useful for atomic configuration (Francisco Jerez)
- even more atomic conversion and rolling state handling out across modeset code from Maarten & Ander
- fix DRRS idleness detection (Ramalingam)
- clean up dsp address alignment handling (Ville)
- some fbc cleanup patches from Paulo
- prevent hard-hangs when trying to reset the gpu on skl (Mika)

* tag 'drm-intel-next-2015-07-17' of git://anongit.freedesktop.org/drm-intel: (386 commits)
  drm/i915: Update DRIVER_DATE to 20150717
  drm/i915/skl: Drop the preliminary_hw_support flag
  drm/i915/skl: Don't expose the top most plane on gen9 display
  drm/i915: Fix divide by zero on watermark update
  drm/i915: Invert fastboot check
  drm/i915: Clarify logic for initial modeset
  drm/i915: Unconditionally check gmch pfit state
  drm/i915: always disable irqs in intel_pipe_update_start
  drm/i915: Remove use of runtime pm in atomic commit functions
  drm/i915: Call plane update functions directly from intel_atomic_commit.
  drm/i915: Use full atomic modeset.
  drm/i915/gen9: Add WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken
  drm/i915/gen9: Add WaFlushCoherentL3CacheLinesAtContextSwitch workaround
  drm/i915/gen9: Add WaDisableCtxRestoreArbitration workaround
  drm/i915: Enable WA batch buffers for Gen9
  drm/i915/gen9: Implement WaDisableKillLogic for gen 9
  drm/i915: Use expcitly fixed type in compat32 structs
  drm/i915: Fix noatomic crtc disabling, v2.
  drm/i915: fill in more mode members
  drm/i915: Added BXT check in HAS_CORE_RING_FREQ macro
  ...

commit f60de97674
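For reference, a pull like this is normally applied with a single git command; a minimal sketch, assuming a local drm-next checkout (the remote URL and tag name are taken from the subject line above):

    git pull git://anongit.freedesktop.org/drm-intel tags/drm-intel-next-2015-07-17
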
@@ -4012,7 +4012,6 @@ int num_ioctls;</synopsis>
<title>Frontbuffer Tracking</title>
!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
!Idrivers/gpu/drm/i915/intel_frontbuffer.c
!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
</sect2>
<sect2>
@@ -4044,6 +4043,11 @@ int num_ioctls;</synopsis>
probing, so those sections fully apply.
</para>
</sect2>
<sect2>
<title>Hotplug</title>
!Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug
!Idrivers/gpu/drm/i915/intel_hotplug.c
</sect2>
<sect2>
<title>High Definition Audio</title>
!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port

@@ -207,7 +207,6 @@ CONFIG_AGP_AMD64=y
CONFIG_AGP_INTEL=y
CONFIG_DRM=y
CONFIG_DRM_I915=y
CONFIG_DRM_I915_KMS=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y

@@ -1408,8 +1408,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);

void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
		   phys_addr_t *mappable_base, unsigned long *mappable_end)
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
		   phys_addr_t *mappable_base, u64 *mappable_end)
{
	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
	*stolen_size = intel_private.stolen_size;

@@ -4471,9 +4471,7 @@ static int drm_property_replace_global_blob(struct drm_device *dev,
		goto err_created;
	}

	if (old_blob)
		drm_property_unreference_blob(old_blob);

	drm_property_unreference_blob(old_blob);
	*replace = new_blob;

	return 0;

@@ -36,15 +36,6 @@ config DRM_I915
	  i810 driver instead, and the Atom z5xx series has an entirely
	  different implementation.

config DRM_I915_KMS
	bool "Enable modesetting on intel by default"
	depends on DRM_I915
	default y
	help
	  Choose this option if you want kernel modesetting enabled by default.

	  If in doubt, say "Y".

config DRM_I915_FBDEV
	bool "Enable legacy fbdev support for the modesetting intel driver"
	depends on DRM_I915

@@ -34,7 +34,9 @@ i915-y += i915_cmd_parser.o \
	  i915_gpu_error.o \
	  i915_irq.o \
	  i915_trace_points.o \
	  intel_hotplug.o \
	  intel_lrc.o \
	  intel_mocs.o \
	  intel_ringbuffer.o \
	  intel_uncore.o

@@ -22,6 +22,7 @@
 *
 * Authors:
 * Eric Anholt <eric@anholt.net>
 * Thomas Richter <thor@math.tu-berlin.de>
 *
 * Minor modifications (Dithering enable):
 * Thomas Richter <thor@math.tu-berlin.de>
@@ -90,7 +91,7 @@
/*
 * LCD Vertical Display Size
 */
#define VR21 0x20
#define VR21 0x21

/*
 * Panel power down status
@@ -155,16 +156,33 @@
# define VR8F_POWER_MASK (0x3c)
# define VR8F_POWER_POS (2)

/* Some Bios implementations do not restore the DVO state upon
 * resume from standby. Thus, this driver has to handle it
 * instead. The following list contains all registers that
 * require saving.
 */
static const uint16_t backup_addresses[] = {
	0x11, 0x12,
	0x18, 0x19, 0x1a, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x8e, 0x8f,
	0x10 /* this must come last */
};

struct ivch_priv {
	bool quiet;

	uint16_t width, height;

	/* Register backup */
	uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
};

static void ivch_dump_regs(struct intel_dvo_device *dvo);

/**
 * Reads a register on the ivch.
 *
@@ -246,6 +264,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
{
	struct ivch_priv *priv;
	uint16_t temp;
	int i;

	priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
	if (priv == NULL)
@@ -273,6 +292,14 @@ static bool ivch_init(struct intel_dvo_device *dvo,
	ivch_read(dvo, VR20, &priv->width);
	ivch_read(dvo, VR21, &priv->height);

	/* Make a backup of the registers to be able to restore them
	 * upon suspend.
	 */
	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
		ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);

	ivch_dump_regs(dvo);

	return true;

out:
@@ -294,12 +321,31 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
	return MODE_OK;
}

/* Restore the DVO registers after a resume
 * from RAM. Registers have been saved during
 * the initialization.
 */
static void ivch_reset(struct intel_dvo_device *dvo)
{
	struct ivch_priv *priv = dvo->dev_priv;
	int i;

	DRM_DEBUG_KMS("Resetting the IVCH registers\n");

	ivch_write(dvo, VR10, 0x0000);

	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
		ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
}

/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
	int i;
	uint16_t vr01, vr30, backlight;

	ivch_reset(dvo);

	/* Set the new power state of the panel. */
	if (!ivch_read(dvo, VR01, &vr01))
		return;
@@ -308,6 +354,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
		backlight = 1;
	else
		backlight = 0;

	ivch_write(dvo, VR80, backlight);

	if (enable)
@@ -334,6 +381,8 @@ static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
	uint16_t vr01;

	ivch_reset(dvo);

	/* Set the new power state of the panel. */
	if (!ivch_read(dvo, VR01, &vr01))
		return false;
@@ -348,11 +397,15 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct ivch_priv *priv = dvo->dev_priv;
	uint16_t vr40 = 0;
	uint16_t vr01 = 0;
	uint16_t vr10;

	ivch_read(dvo, VR10, &vr10);
	ivch_reset(dvo);

	vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];

	/* Enable dithering for 18 bpp pipelines */
	vr10 &= VR10_INTERFACE_DEPTH_MASK;
	if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
@@ -366,7 +419,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
		uint16_t x_ratio, y_ratio;

		vr01 |= VR01_PANEL_FIT_ENABLE;
		vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
		vr40 |= VR40_CLOCK_GATING_ENABLE;
		x_ratio = (((mode->hdisplay - 1) << 16) /
			   (adjusted_mode->hdisplay - 1)) >> 2;
		y_ratio = (((mode->vdisplay - 1) << 16) /
@@ -381,8 +434,6 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,

	ivch_write(dvo, VR01, vr01);
	ivch_write(dvo, VR40, vr40);

	ivch_dump_regs(dvo);
}

static void ivch_dump_regs(struct intel_dvo_device *dvo)

@@ -131,7 +131,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }}, ),
	CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
	CMD( MI_LOAD_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
@@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
	 * only MI_LOAD_REGISTER_IMM commands.
	 */
	if (reg_addr == OACONTROL) {
		if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
		if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
			DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
			return false;
		}
@@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
	 * allowed mask/value pair given in the whitelist entry.
	 */
	if (reg->mask) {
		if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
		if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
			DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
					 reg_addr);
			return false;
@ -117,6 +117,20 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
|
|||
return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
|
||||
}
|
||||
|
||||
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
u64 size = 0;
|
||||
struct i915_vma *vma;
|
||||
|
||||
list_for_each_entry(vma, &obj->vma_list, vma_link) {
|
||||
if (i915_is_ggtt(vma->vm) &&
|
||||
drm_mm_node_allocated(&vma->node))
|
||||
size += vma->node.size;
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static void
|
||||
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
|
||||
{
|
||||
|
@ -156,13 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
|
|||
if (obj->fence_reg != I915_FENCE_REG_NONE)
|
||||
seq_printf(m, " (fence: %d)", obj->fence_reg);
|
||||
list_for_each_entry(vma, &obj->vma_list, vma_link) {
|
||||
if (!i915_is_ggtt(vma->vm))
|
||||
seq_puts(m, " (pp");
|
||||
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
|
||||
i915_is_ggtt(vma->vm) ? "g" : "pp",
|
||||
vma->node.start, vma->node.size);
|
||||
if (i915_is_ggtt(vma->vm))
|
||||
seq_printf(m, ", type: %u)", vma->ggtt_view.type);
|
||||
else
|
||||
seq_puts(m, " (g");
|
||||
seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
|
||||
vma->node.start, vma->node.size,
|
||||
vma->ggtt_view.type);
|
||||
seq_puts(m, ")");
|
||||
}
|
||||
if (obj->stolen)
|
||||
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
|
||||
|
@ -198,7 +212,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_address_space *vm = &dev_priv->gtt.base;
|
||||
struct i915_vma *vma;
|
||||
size_t total_obj_size, total_gtt_size;
|
||||
u64 total_obj_size, total_gtt_size;
|
||||
int count, ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
|
@ -231,7 +245,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
|
|||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
|
||||
seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
|
||||
count, total_obj_size, total_gtt_size);
|
||||
return 0;
|
||||
}
|
||||
|
@ -253,7 +267,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
|
|||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_object *obj;
|
||||
size_t total_obj_size, total_gtt_size;
|
||||
u64 total_obj_size, total_gtt_size;
|
||||
LIST_HEAD(stolen);
|
||||
int count, ret;
|
||||
|
||||
|
@ -269,7 +283,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
|
|||
list_add(&obj->obj_exec_link, &stolen);
|
||||
|
||||
total_obj_size += obj->base.size;
|
||||
total_gtt_size += i915_gem_obj_ggtt_size(obj);
|
||||
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
|
||||
count++;
|
||||
}
|
||||
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
|
||||
|
@ -292,14 +306,14 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
|
|||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
|
||||
seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
|
||||
count, total_obj_size, total_gtt_size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define count_objects(list, member) do { \
|
||||
list_for_each_entry(obj, list, member) { \
|
||||
size += i915_gem_obj_ggtt_size(obj); \
|
||||
size += i915_gem_obj_total_ggtt_size(obj); \
|
||||
++count; \
|
||||
if (obj->map_and_fenceable) { \
|
||||
mappable_size += i915_gem_obj_ggtt_size(obj); \
|
||||
|
@ -310,10 +324,10 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
|
|||
|
||||
struct file_stats {
|
||||
struct drm_i915_file_private *file_priv;
|
||||
int count;
|
||||
size_t total, unbound;
|
||||
size_t global, shared;
|
||||
size_t active, inactive;
|
||||
unsigned long count;
|
||||
u64 total, unbound;
|
||||
u64 global, shared;
|
||||
u64 active, inactive;
|
||||
};
|
||||
|
||||
static int per_file_stats(int id, void *ptr, void *data)
|
||||
|
@ -370,7 +384,7 @@ static int per_file_stats(int id, void *ptr, void *data)
|
|||
|
||||
#define print_file_stats(m, name, stats) do { \
|
||||
if (stats.count) \
|
||||
seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
|
||||
seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
|
||||
name, \
|
||||
stats.count, \
|
||||
stats.total, \
|
||||
|
@ -405,7 +419,7 @@ static void print_batch_pool_stats(struct seq_file *m,
|
|||
|
||||
#define count_vmas(list, member) do { \
|
||||
list_for_each_entry(vma, list, member) { \
|
||||
size += i915_gem_obj_ggtt_size(vma->obj); \
|
||||
size += i915_gem_obj_total_ggtt_size(vma->obj); \
|
||||
++count; \
|
||||
if (vma->obj->map_and_fenceable) { \
|
||||
mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
|
||||
|
@ -420,7 +434,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
|
|||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 count, mappable_count, purgeable_count;
|
||||
size_t size, mappable_size, purgeable_size;
|
||||
u64 size, mappable_size, purgeable_size;
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct i915_address_space *vm = &dev_priv->gtt.base;
|
||||
struct drm_file *file;
|
||||
|
@ -437,17 +451,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
|
|||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
count_objects(&dev_priv->mm.bound_list, global_list);
|
||||
seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
|
||||
seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
|
||||
count, mappable_count, size, mappable_size);
|
||||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
count_vmas(&vm->active_list, mm_list);
|
||||
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
|
||||
seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
|
||||
count, mappable_count, size, mappable_size);
|
||||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
count_vmas(&vm->inactive_list, mm_list);
|
||||
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
|
||||
seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
|
||||
count, mappable_count, size, mappable_size);
|
||||
|
||||
size = count = purgeable_size = purgeable_count = 0;
|
||||
|
@ -456,7 +470,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
|
|||
if (obj->madv == I915_MADV_DONTNEED)
|
||||
purgeable_size += obj->base.size, ++purgeable_count;
|
||||
}
|
||||
seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
|
||||
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
|
||||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
|
||||
|
@ -473,16 +487,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
|
|||
++purgeable_count;
|
||||
}
|
||||
}
|
||||
seq_printf(m, "%u purgeable objects, %zu bytes\n",
|
||||
seq_printf(m, "%u purgeable objects, %llu bytes\n",
|
||||
purgeable_count, purgeable_size);
|
||||
seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
|
||||
seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
|
||||
mappable_count, mappable_size);
|
||||
seq_printf(m, "%u fault mappable objects, %zu bytes\n",
|
||||
seq_printf(m, "%u fault mappable objects, %llu bytes\n",
|
||||
count, size);
|
||||
|
||||
seq_printf(m, "%zu [%lu] gtt total\n",
|
||||
seq_printf(m, "%llu [%llu] gtt total\n",
|
||||
dev_priv->gtt.base.total,
|
||||
dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
|
||||
(u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
|
||||
|
||||
seq_putc(m, '\n');
|
||||
print_batch_pool_stats(m, dev_priv);
|
||||
|
@ -519,7 +533,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
|
|||
uintptr_t list = (uintptr_t) node->info_ent->data;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_object *obj;
|
||||
size_t total_obj_size, total_gtt_size;
|
||||
u64 total_obj_size, total_gtt_size;
|
||||
int count, ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
|
@ -535,13 +549,13 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
|
|||
describe_obj(m, obj);
|
||||
seq_putc(m, '\n');
|
||||
total_obj_size += obj->base.size;
|
||||
total_gtt_size += i915_gem_obj_ggtt_size(obj);
|
||||
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
|
||||
count++;
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
|
||||
seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
|
||||
count, total_obj_size, total_gtt_size);
|
||||
|
||||
return 0;
|
||||
|
@ -1132,9 +1146,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
|||
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
|
||||
} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
|
||||
IS_BROADWELL(dev) || IS_GEN9(dev)) {
|
||||
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
|
||||
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
|
||||
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
|
||||
u32 rp_state_limits;
|
||||
u32 gt_perf_status;
|
||||
u32 rp_state_cap;
|
||||
u32 rpmodectl, rpinclimit, rpdeclimit;
|
||||
u32 rpstat, cagf, reqf;
|
||||
u32 rpupei, rpcurup, rpprevup;
|
||||
|
@ -1142,6 +1156,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
|||
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
|
||||
int max_freq;
|
||||
|
||||
rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
|
||||
if (IS_BROXTON(dev)) {
|
||||
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
|
||||
gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
|
||||
} else {
|
||||
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
|
||||
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
|
||||
}
|
||||
|
||||
/* RPSTAT1 is in the GT power well */
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
|
@ -1229,7 +1252,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
|||
seq_printf(m, "Down threshold: %d%%\n",
|
||||
dev_priv->rps.down_threshold);
|
||||
|
||||
max_freq = (rp_state_cap & 0xff0000) >> 16;
|
||||
max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
|
||||
rp_state_cap >> 16) & 0xff;
|
||||
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
|
||||
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
|
||||
intel_gpu_freq(dev_priv, max_freq));
|
||||
|
@ -1239,7 +1263,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
|||
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
|
||||
intel_gpu_freq(dev_priv, max_freq));
|
||||
|
||||
max_freq = rp_state_cap & 0xff;
|
||||
max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
|
||||
rp_state_cap >> 0) & 0xff;
|
||||
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
|
||||
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
|
||||
intel_gpu_freq(dev_priv, max_freq));
|
||||
|
@ -1581,6 +1606,21 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
|
|||
return ironlake_drpc_info(m);
|
||||
}
|
||||
|
||||
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
|
||||
dev_priv->fb_tracking.busy_bits);
|
||||
|
||||
seq_printf(m, "FB tracking flip bits: 0x%08x\n",
|
||||
dev_priv->fb_tracking.flip_bits);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_fbc_status(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_info_node *node = m->private;
|
||||
|
@ -1593,51 +1633,20 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
|
|||
}
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
mutex_lock(&dev_priv->fbc.lock);
|
||||
|
||||
if (intel_fbc_enabled(dev)) {
|
||||
if (intel_fbc_enabled(dev_priv))
|
||||
seq_puts(m, "FBC enabled\n");
|
||||
} else {
|
||||
seq_puts(m, "FBC disabled: ");
|
||||
switch (dev_priv->fbc.no_fbc_reason) {
|
||||
case FBC_OK:
|
||||
seq_puts(m, "FBC actived, but currently disabled in hardware");
|
||||
break;
|
||||
case FBC_UNSUPPORTED:
|
||||
seq_puts(m, "unsupported by this chipset");
|
||||
break;
|
||||
case FBC_NO_OUTPUT:
|
||||
seq_puts(m, "no outputs");
|
||||
break;
|
||||
case FBC_STOLEN_TOO_SMALL:
|
||||
seq_puts(m, "not enough stolen memory");
|
||||
break;
|
||||
case FBC_UNSUPPORTED_MODE:
|
||||
seq_puts(m, "mode not supported");
|
||||
break;
|
||||
case FBC_MODE_TOO_LARGE:
|
||||
seq_puts(m, "mode too large");
|
||||
break;
|
||||
case FBC_BAD_PLANE:
|
||||
seq_puts(m, "FBC unsupported on plane");
|
||||
break;
|
||||
case FBC_NOT_TILED:
|
||||
seq_puts(m, "scanout buffer not tiled");
|
||||
break;
|
||||
case FBC_MULTIPLE_PIPES:
|
||||
seq_puts(m, "multiple pipes are enabled");
|
||||
break;
|
||||
case FBC_MODULE_PARAM:
|
||||
seq_puts(m, "disabled per module param (default off)");
|
||||
break;
|
||||
case FBC_CHIP_DEFAULT:
|
||||
seq_puts(m, "disabled per chip default");
|
||||
break;
|
||||
default:
|
||||
seq_puts(m, "unknown reason");
|
||||
}
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
else
|
||||
seq_printf(m, "FBC disabled: %s\n",
|
||||
intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 7)
|
||||
seq_printf(m, "Compressing: %s\n",
|
||||
yesno(I915_READ(FBC_STATUS2) &
|
||||
FBC_COMPRESSION_MASK));
|
||||
|
||||
mutex_unlock(&dev_priv->fbc.lock);
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
|
||||
return 0;
|
||||
|
@ -1651,9 +1660,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
|
|||
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
|
||||
return -ENODEV;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
*val = dev_priv->fbc.false_color;
|
||||
drm_modeset_unlock_all(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1667,7 +1674,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
|
|||
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
|
||||
return -ENODEV;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
mutex_lock(&dev_priv->fbc.lock);
|
||||
|
||||
reg = I915_READ(ILK_DPFC_CONTROL);
|
||||
dev_priv->fbc.false_color = val;
|
||||
|
@ -1676,7 +1683,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
|
|||
(reg | FBC_CTL_FALSE_COLOR) :
|
||||
(reg & ~FBC_CTL_FALSE_COLOR));
|
||||
|
||||
drm_modeset_unlock_all(dev);
|
||||
mutex_unlock(&dev_priv->fbc.lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1778,8 +1785,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret = 0;
|
||||
int gpu_freq, ia_freq;
|
||||
unsigned int max_gpu_freq, min_gpu_freq;
|
||||
|
||||
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
|
||||
if (!HAS_CORE_RING_FREQ(dev)) {
|
||||
seq_puts(m, "unsupported on this chipset\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -1792,17 +1800,27 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
|
|||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (IS_SKYLAKE(dev)) {
|
||||
/* Convert GT frequency to 50 HZ units */
|
||||
min_gpu_freq =
|
||||
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
|
||||
max_gpu_freq =
|
||||
dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
|
||||
} else {
|
||||
min_gpu_freq = dev_priv->rps.min_freq_softlimit;
|
||||
max_gpu_freq = dev_priv->rps.max_freq_softlimit;
|
||||
}
|
||||
|
||||
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
|
||||
|
||||
for (gpu_freq = dev_priv->rps.min_freq_softlimit;
|
||||
gpu_freq <= dev_priv->rps.max_freq_softlimit;
|
||||
gpu_freq++) {
|
||||
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
|
||||
ia_freq = gpu_freq;
|
||||
sandybridge_pcode_read(dev_priv,
|
||||
GEN6_PCODE_READ_MIN_FREQ_TABLE,
|
||||
&ia_freq);
|
||||
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
|
||||
intel_gpu_freq(dev_priv, gpu_freq),
|
||||
intel_gpu_freq(dev_priv, (gpu_freq *
|
||||
(IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
|
||||
((ia_freq >> 0) & 0xff) * 100,
|
||||
((ia_freq >> 8) & 0xff) * 100);
|
||||
}
|
||||
|
@ -2248,7 +2266,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
|
|||
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
|
||||
seq_puts(m, "aliasing PPGTT:\n");
|
||||
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
|
||||
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
|
||||
|
||||
ppgtt->debug_dump(ppgtt, m);
|
||||
}
|
||||
|
@ -2479,13 +2497,13 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int i915_pc8_status(struct seq_file *m, void *unused)
|
||||
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
|
||||
if (!HAS_RUNTIME_PM(dev)) {
|
||||
seq_puts(m, "not supported\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -2493,6 +2511,12 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
|
|||
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
|
||||
seq_printf(m, "IRQs disabled: %s\n",
|
||||
yesno(!intel_irqs_enabled(dev_priv)));
|
||||
#ifdef CONFIG_PM
|
||||
seq_printf(m, "Usage count: %d\n",
|
||||
atomic_read(&dev->dev->power.usage_count));
|
||||
#else
|
||||
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2780,13 +2804,16 @@ static int i915_display_info(struct seq_file *m, void *unused)
|
|||
seq_printf(m, "---------\n");
|
||||
for_each_intel_crtc(dev, crtc) {
|
||||
bool active;
|
||||
struct intel_crtc_state *pipe_config;
|
||||
int x, y;
|
||||
|
||||
pipe_config = to_intel_crtc_state(crtc->base.state);
|
||||
|
||||
seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
|
||||
crtc->base.base.id, pipe_name(crtc->pipe),
|
||||
yesno(crtc->active), crtc->config->pipe_src_w,
|
||||
crtc->config->pipe_src_h);
|
||||
if (crtc->active) {
|
||||
yesno(pipe_config->base.active),
|
||||
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
|
||||
if (pipe_config->base.active) {
|
||||
intel_crtc_info(m, crtc);
|
||||
|
||||
active = cursor_position(dev, crtc->pipe, &x, &y);
|
||||
|
@ -3027,7 +3054,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
|
|||
|
||||
seq_puts(m, "\n\n");
|
||||
|
||||
if (intel_crtc->config->has_drrs) {
|
||||
if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
|
||||
struct intel_panel *panel;
|
||||
|
||||
mutex_lock(&drrs->mutex);
|
||||
|
@ -3079,7 +3106,7 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
|
|||
for_each_intel_crtc(dev, intel_crtc) {
|
||||
drm_modeset_lock(&intel_crtc->base.mutex, NULL);
|
||||
|
||||
if (intel_crtc->active) {
|
||||
if (intel_crtc->base.state->active) {
|
||||
active_crtc_cnt++;
|
||||
seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
|
||||
|
||||
|
@ -3621,22 +3648,33 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc =
|
||||
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
|
||||
struct intel_crtc_state *pipe_config;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
pipe_config = to_intel_crtc_state(crtc->base.state);
|
||||
|
||||
/*
|
||||
* If we use the eDP transcoder we need to make sure that we don't
|
||||
* bypass the pfit, since otherwise the pipe CRC source won't work. Only
|
||||
* relevant on hsw with pipe A when using the always-on power well
|
||||
* routing.
|
||||
*/
|
||||
if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
|
||||
!crtc->config->pch_pfit.enabled) {
|
||||
crtc->config->pch_pfit.force_thru = true;
|
||||
if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
|
||||
!pipe_config->pch_pfit.enabled) {
|
||||
bool active = pipe_config->base.active;
|
||||
|
||||
if (active) {
|
||||
intel_crtc_control(&crtc->base, false);
|
||||
pipe_config = to_intel_crtc_state(crtc->base.state);
|
||||
}
|
||||
|
||||
pipe_config->pch_pfit.force_thru = true;
|
||||
|
||||
intel_display_power_get(dev_priv,
|
||||
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
|
||||
|
||||
intel_crtc_reset(crtc);
|
||||
if (active)
|
||||
intel_crtc_control(&crtc->base, true);
|
||||
}
|
||||
drm_modeset_unlock_all(dev);
|
||||
}
|
||||
|
@ -3646,6 +3684,7 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc =
|
||||
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
|
||||
struct intel_crtc_state *pipe_config;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
/*
|
||||
|
@ -3654,13 +3693,22 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
|
|||
* relevant on hsw with pipe A when using the always-on power well
|
||||
* routing.
|
||||
*/
|
||||
if (crtc->config->pch_pfit.force_thru) {
|
||||
crtc->config->pch_pfit.force_thru = false;
|
||||
pipe_config = to_intel_crtc_state(crtc->base.state);
|
||||
if (pipe_config->pch_pfit.force_thru) {
|
||||
bool active = pipe_config->base.active;
|
||||
|
||||
intel_crtc_reset(crtc);
|
||||
if (active) {
|
||||
intel_crtc_control(&crtc->base, false);
|
||||
pipe_config = to_intel_crtc_state(crtc->base.state);
|
||||
}
|
||||
|
||||
pipe_config->pch_pfit.force_thru = false;
|
||||
|
||||
intel_display_power_put(dev_priv,
|
||||
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
|
||||
|
||||
if (active)
|
||||
intel_crtc_control(&crtc->base, true);
|
||||
}
|
||||
drm_modeset_unlock_all(dev);
|
||||
}
|
||||
|
@ -3776,7 +3824,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
|
|||
pipe_name(pipe));
|
||||
|
||||
drm_modeset_lock(&crtc->base.mutex, NULL);
|
||||
if (crtc->active)
|
||||
if (crtc->base.state->active)
|
||||
intel_wait_for_vblank(dev, pipe);
|
||||
drm_modeset_unlock(&crtc->base.mutex);
|
||||
|
||||
|
@ -4183,8 +4231,15 @@ static const struct file_operations i915_displayport_test_type_fops = {
|
|||
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
|
||||
{
|
||||
struct drm_device *dev = m->private;
|
||||
int num_levels = ilk_wm_max_level(dev) + 1;
|
||||
int level;
|
||||
int num_levels;
|
||||
|
||||
if (IS_CHERRYVIEW(dev))
|
||||
num_levels = 3;
|
||||
else if (IS_VALLEYVIEW(dev))
|
||||
num_levels = 1;
|
||||
else
|
||||
num_levels = ilk_wm_max_level(dev) + 1;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
|
||||
|
@ -4193,9 +4248,9 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
|
|||
|
||||
/*
|
||||
* - WM1+ latency values in 0.5us units
|
||||
* - latencies are in us on gen9
|
||||
* - latencies are in us on gen9/vlv/chv
|
||||
*/
|
||||
if (INTEL_INFO(dev)->gen >= 9)
|
||||
if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
|
||||
latency *= 10;
|
||||
else if (level > 0)
|
||||
latency *= 5;
|
||||
|
@ -4259,7 +4314,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
|
|||
{
|
||||
struct drm_device *dev = inode->i_private;
|
||||
|
||||
if (HAS_GMCH_DISPLAY(dev))
|
||||
if (INTEL_INFO(dev)->gen < 5)
|
||||
return -ENODEV;
|
||||
|
||||
return single_open(file, pri_wm_latency_show, dev);
|
||||
|
@ -4291,11 +4346,18 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
|
|||
struct seq_file *m = file->private_data;
|
||||
struct drm_device *dev = m->private;
|
||||
uint16_t new[8] = { 0 };
|
||||
int num_levels = ilk_wm_max_level(dev) + 1;
|
||||
int num_levels;
|
||||
int level;
|
||||
int ret;
|
||||
char tmp[32];
|
||||
|
||||
if (IS_CHERRYVIEW(dev))
|
||||
num_levels = 3;
|
||||
else if (IS_VALLEYVIEW(dev))
|
||||
num_levels = 1;
|
||||
else
|
||||
num_levels = ilk_wm_max_level(dev) + 1;
|
||||
|
||||
if (len >= sizeof(tmp))
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -5027,6 +5089,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
|
|||
{"i915_drpc_info", i915_drpc_info, 0},
|
||||
{"i915_emon_status", i915_emon_status, 0},
|
||||
{"i915_ring_freq_table", i915_ring_freq_table, 0},
|
||||
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
|
||||
{"i915_fbc_status", i915_fbc_status, 0},
|
||||
{"i915_ips_status", i915_ips_status, 0},
|
||||
{"i915_sr_status", i915_sr_status, 0},
|
||||
|
@ -5042,7 +5105,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
|
|||
{"i915_edp_psr_status", i915_edp_psr_status, 0},
|
||||
{"i915_sink_crc_eDP1", i915_sink_crc, 0},
|
||||
{"i915_energy_uJ", i915_energy_uJ, 0},
|
||||
{"i915_pc8_status", i915_pc8_status, 0},
|
||||
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
|
||||
{"i915_power_domain_info", i915_power_domain_info, 0},
|
||||
{"i915_display_info", i915_display_info, 0},
|
||||
{"i915_semaphore_status", i915_semaphore_status, 0},
|
||||
|
|
|
@ -163,6 +163,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
|
|||
if (!value)
|
||||
return -ENODEV;
|
||||
break;
|
||||
case I915_PARAM_HAS_GPU_RESET:
|
||||
value = i915.enable_hangcheck &&
|
||||
intel_has_gpu_reset(dev);
|
||||
break;
|
||||
case I915_PARAM_HAS_RESOURCE_STREAMER:
|
||||
value = HAS_RESOURCE_STREAMER(dev);
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG("Unknown parameter %d\n", param->param);
|
||||
return -EINVAL;
|
||||
|
@ -719,11 +726,19 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
|
|||
|
||||
info = (struct intel_device_info *)&dev_priv->info;
|
||||
|
||||
/*
|
||||
* Skylake and Broxton currently don't expose the topmost plane as its
|
||||
* use is exclusive with the legacy cursor and we only want to expose
|
||||
* one of those, not both. Until we can safely expose the topmost plane
|
||||
* as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
|
||||
* we don't expose the topmost plane at all to prevent ABI breakage
|
||||
* down the line.
|
||||
*/
|
||||
if (IS_BROXTON(dev)) {
|
||||
info->num_sprites[PIPE_A] = 3;
|
||||
info->num_sprites[PIPE_B] = 3;
|
||||
info->num_sprites[PIPE_C] = 2;
|
||||
} else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
|
||||
info->num_sprites[PIPE_A] = 2;
|
||||
info->num_sprites[PIPE_B] = 2;
|
||||
info->num_sprites[PIPE_C] = 1;
|
||||
} else if (IS_VALLEYVIEW(dev))
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
info->num_sprites[pipe] = 2;
|
||||
else
|
||||
|
@ -933,8 +948,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
|||
goto out_mtrrfree;
|
||||
}
|
||||
|
||||
dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
|
||||
if (dev_priv->dp_wq == NULL) {
|
||||
dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
|
||||
if (dev_priv->hotplug.dp_wq == NULL) {
|
||||
DRM_ERROR("Failed to create our dp workqueue.\n");
|
||||
ret = -ENOMEM;
|
||||
goto out_freewq;
|
||||
|
@ -1029,7 +1044,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
|||
pm_qos_remove_request(&dev_priv->pm_qos);
|
||||
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
|
||||
out_freedpwq:
|
||||
destroy_workqueue(dev_priv->dp_wq);
|
||||
destroy_workqueue(dev_priv->hotplug.dp_wq);
|
||||
out_freewq:
|
||||
destroy_workqueue(dev_priv->wq);
|
||||
out_mtrrfree:
|
||||
|
@ -1116,6 +1131,7 @@ int i915_driver_unload(struct drm_device *dev)
|
|||
i915_gem_cleanup_ringbuffer(dev);
|
||||
i915_gem_context_fini(dev);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
intel_fbc_cleanup_cfb(dev_priv);
|
||||
i915_gem_cleanup_stolen(dev);
|
||||
|
||||
intel_csr_ucode_fini(dev);
|
||||
|
@ -1123,7 +1139,7 @@ int i915_driver_unload(struct drm_device *dev)
|
|||
intel_teardown_gmbus(dev);
|
||||
intel_teardown_mchbar(dev);
|
||||
|
||||
destroy_workqueue(dev_priv->dp_wq);
|
||||
destroy_workqueue(dev_priv->hotplug.dp_wq);
|
||||
destroy_workqueue(dev_priv->wq);
|
||||
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
|
||||
pm_qos_remove_request(&dev_priv->pm_qos);
|
||||
|
|
|
@ -356,7 +356,6 @@ static const struct intel_device_info intel_cherryview_info = {
|
|||
};
|
||||
|
||||
static const struct intel_device_info intel_skylake_info = {
|
||||
.is_preliminary = 1,
|
||||
.is_skylake = 1,
|
||||
.gen = 9, .num_pipes = 3,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
|
@ -369,7 +368,6 @@ static const struct intel_device_info intel_skylake_info = {
|
|||
};
|
||||
|
||||
static const struct intel_device_info intel_skylake_gt3_info = {
|
||||
.is_preliminary = 1,
|
||||
.is_skylake = 1,
|
||||
.gen = 9, .num_pipes = 3,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
|
@ -440,9 +438,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
|
|||
{0, 0, 0}
|
||||
};
|
||||
|
||||
#if defined(CONFIG_DRM_I915_KMS)
|
||||
MODULE_DEVICE_TABLE(pci, pciidlist);
|
||||
#endif
|
||||
|
||||
void intel_detect_pch(struct drm_device *dev)
|
||||
{
|
||||
|
@ -541,21 +537,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
|
|||
return true;
|
||||
}
|
||||
|
||||
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
||||
dev_priv->long_hpd_port_mask = 0;
|
||||
dev_priv->short_hpd_port_mask = 0;
|
||||
dev_priv->hpd_event_bits = 0;
|
||||
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
cancel_work_sync(&dev_priv->dig_port_work);
|
||||
cancel_work_sync(&dev_priv->hotplug_work);
|
||||
cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
|
||||
}
|
||||
|
||||
void i915_firmware_load_error_print(const char *fw_path, int err)
|
||||
{
|
||||
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
|
||||
|
@ -601,7 +582,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
|
|||
static int i915_drm_suspend(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc;
|
||||
pci_power_t opregion_target_state;
|
||||
int error;
|
||||
|
||||
|
@ -632,8 +612,7 @@ static int i915_drm_suspend(struct drm_device *dev)
|
|||
* for _thaw. Also, power gate the CRTC power wells.
|
||||
*/
|
||||
drm_modeset_lock_all(dev);
|
||||
for_each_crtc(dev, crtc)
|
||||
intel_crtc_control(crtc, false);
|
||||
intel_display_suspend(dev);
|
||||
drm_modeset_unlock_all(dev);
|
||||
|
||||
intel_dp_mst_suspend(dev);
|
||||
|
@ -760,7 +739,7 @@ static int i915_drm_resume(struct drm_device *dev)
|
|||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
intel_modeset_setup_hw_state(dev, true);
|
||||
intel_display_resume(dev);
|
||||
drm_modeset_unlock_all(dev);
|
||||
|
||||
intel_dp_mst_resume(dev);
|
||||
|
@ -865,9 +844,6 @@ int i915_reset(struct drm_device *dev)
|
|||
bool simulated;
|
||||
int ret;
|
||||
|
||||
if (!i915.reset)
|
||||
return 0;
|
||||
|
||||
intel_reset_gt_powersave(dev);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
@ -1727,20 +1703,14 @@ static int __init i915_init(void)
|
|||
driver.num_ioctls = i915_max_ioctl;
|
||||
|
||||
/*
|
||||
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
|
||||
* explicitly disabled with the module pararmeter.
|
||||
*
|
||||
* Otherwise, just follow the parameter (defaulting to off).
|
||||
*
|
||||
* Allow optional vga_text_mode_force boot option to override
|
||||
* the default behavior.
|
||||
* Enable KMS by default, unless explicitly overriden by
|
||||
* either the i915.modeset prarameter or by the
|
||||
* vga_text_mode_force boot option.
|
||||
*/
|
||||
#if defined(CONFIG_DRM_I915_KMS)
|
||||
if (i915.modeset != 0)
|
||||
driver.driver_features |= DRIVER_MODESET;
|
||||
#endif
|
||||
if (i915.modeset == 1)
|
||||
driver.driver_features |= DRIVER_MODESET;
|
||||
driver.driver_features |= DRIVER_MODESET;
|
||||
|
||||
if (i915.modeset == 0)
|
||||
driver.driver_features &= ~DRIVER_MODESET;
|
||||
|
||||
#ifdef CONFIG_VGA_CONSOLE
|
||||
if (vgacon_text_force() && i915.modeset == -1)
|
||||
|
@ -1759,7 +1729,7 @@ static int __init i915_init(void)
|
|||
* to the atomic ioctl and the atomic properties. Only plane operations on
|
||||
* a single CRTC will actually work.
|
||||
*/
|
||||
if (i915.nuclear_pageflip)
|
||||
if (driver.driver_features & DRIVER_MODESET)
|
||||
driver.driver_features |= DRIVER_ATOMIC;
|
||||
|
||||
return drm_pci_init(&driver, &i915_pci_driver);
|
||||
|
|
|
@ -56,7 +56,7 @@
|
|||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20150522"
|
||||
#define DRIVER_DATE "20150717"
|
||||
|
||||
#undef WARN_ON
|
||||
/* Many gcc seem to no see through this and fall over :( */
|
||||
|
@ -217,6 +217,39 @@ enum hpd_pin {
|
|||
HPD_NUM_PINS
|
||||
};
|
||||
|
||||
#define for_each_hpd_pin(__pin) \
|
||||
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
|
||||
|
||||
struct i915_hotplug {
|
||||
struct work_struct hotplug_work;
|
||||
|
||||
struct {
|
||||
unsigned long last_jiffies;
|
||||
int count;
|
||||
enum {
|
||||
HPD_ENABLED = 0,
|
||||
HPD_DISABLED = 1,
|
||||
HPD_MARK_DISABLED = 2
|
||||
} state;
|
||||
} stats[HPD_NUM_PINS];
|
||||
u32 event_bits;
|
||||
struct delayed_work reenable_work;
|
||||
|
||||
struct intel_digital_port *irq_port[I915_MAX_PORTS];
|
||||
u32 long_port_mask;
|
||||
u32 short_port_mask;
|
||||
struct work_struct dig_port_work;
|
||||
|
||||
/*
|
||||
* if we get a HPD irq from DP and a HPD irq from non-DP
|
||||
* the non-DP HPD could block the workqueue on a mode config
|
||||
* mutex getting, that userspace may have taken. However
|
||||
* userspace is waiting on the DP workqueue to run which is
|
||||
* blocked behind the non-DP one.
|
||||
*/
|
||||
struct workqueue_struct *dp_wq;
|
||||
};
|
||||
|
||||
#define I915_GEM_GPU_DOMAINS \
|
||||
(I915_GEM_DOMAIN_RENDER | \
|
||||
I915_GEM_DOMAIN_SAMPLER | \
|
||||
|
@ -243,6 +276,12 @@ enum hpd_pin {
|
|||
&dev->mode_config.plane_list, \
|
||||
base.head)
|
||||
|
||||
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
|
||||
list_for_each_entry(intel_plane, \
|
||||
&(dev)->mode_config.plane_list, \
|
||||
base.head) \
|
||||
if ((intel_plane)->pipe == (intel_crtc)->pipe)
|
||||
|
||||
#define for_each_intel_crtc(dev, intel_crtc) \
|
||||
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
|
||||
|
||||
|
@ -333,7 +372,8 @@ struct intel_dpll_hw_state {
|
|||
uint32_t cfgcr1, cfgcr2;
|
||||
|
||||
/* bxt */
|
||||
uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
|
||||
uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
|
||||
pcsdw12;
|
||||
};
|
||||
|
||||
struct intel_shared_dpll_config {
|
||||
|
@ -343,7 +383,6 @@ struct intel_shared_dpll_config {
|
|||
|
||||
struct intel_shared_dpll {
|
||||
struct intel_shared_dpll_config config;
|
||||
struct intel_shared_dpll_config *new_config;
|
||||
|
||||
int active; /* count of number of active CRTCs (i.e. DPMS on) */
|
||||
bool on; /* is the PLL actually active? Disabled during modeset */
|
||||
|
@ -559,9 +598,6 @@ struct intel_limit;
|
|||
struct dpll;
|
||||
|
||||
struct drm_i915_display_funcs {
|
||||
bool (*fbc_enabled)(struct drm_device *dev);
|
||||
void (*enable_fbc)(struct drm_crtc *crtc);
|
||||
void (*disable_fbc)(struct drm_device *dev);
|
||||
int (*get_display_clock_speed)(struct drm_device *dev);
|
||||
int (*get_fifo_size)(struct drm_device *dev, int plane);
|
||||
/**
|
||||
|
@ -587,7 +623,8 @@ struct drm_i915_display_funcs {
|
|||
struct drm_crtc *crtc,
|
||||
uint32_t sprite_width, uint32_t sprite_height,
|
||||
int pixel_size, bool enable, bool scaled);
|
||||
void (*modeset_global_resources)(struct drm_atomic_state *state);
|
||||
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
|
||||
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
|
||||
/* Returns the active state of the crtc, and if the crtc is active,
|
||||
* fills out the pipe-config with the hw state. */
|
||||
bool (*get_pipe_config)(struct intel_crtc *,
|
||||
|
@ -598,7 +635,6 @@ struct drm_i915_display_funcs {
|
|||
struct intel_crtc_state *crtc_state);
|
||||
void (*crtc_enable)(struct drm_crtc *crtc);
|
||||
void (*crtc_disable)(struct drm_crtc *crtc);
|
||||
void (*off)(struct drm_crtc *crtc);
|
||||
void (*audio_codec_enable)(struct drm_connector *connector,
|
||||
struct intel_encoder *encoder,
|
||||
struct drm_display_mode *mode);
|
||||
|
@ -608,7 +644,7 @@ struct drm_i915_display_funcs {
|
|||
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *ring,
|
||||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags);
|
||||
void (*update_primary_plane)(struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
|
@ -805,11 +841,15 @@ struct i915_ctx_hang_stats {
|
|||
|
||||
/* This must match up with the value previously used for execbuf2.rsvd1. */
|
||||
#define DEFAULT_CONTEXT_HANDLE 0
|
||||
|
||||
#define CONTEXT_NO_ZEROMAP (1<<0)
|
||||
/**
|
||||
* struct intel_context - as the name implies, represents a context.
|
||||
* @ref: reference count.
|
||||
* @user_handle: userspace tracking identity for this context.
|
||||
* @remap_slice: l3 row remapping information.
|
||||
* @flags: context specific flags:
|
||||
* CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
|
||||
* @file_priv: filp associated with this context (NULL for global default
|
||||
* context).
|
||||
* @hang_stats: information about the role of this context in possible GPU
|
||||
|
@ -827,6 +867,7 @@ struct intel_context {
|
|||
int user_handle;
|
||||
uint8_t remap_slice;
|
||||
struct drm_i915_private *i915;
|
||||
int flags;
|
||||
struct drm_i915_file_private *file_priv;
|
||||
struct i915_ctx_hang_stats hang_stats;
|
||||
struct i915_hw_ppgtt *ppgtt;
|
||||
|
@ -856,6 +897,9 @@ enum fb_op_origin {
|
|||
};
|
||||
|
||||
struct i915_fbc {
|
||||
/* This is always the inner lock when overlapping with struct_mutex and
|
||||
* it's the outer lock when overlapping with stolen_lock. */
|
||||
struct mutex lock;
|
||||
unsigned long uncompressed_size;
|
||||
unsigned threshold;
|
||||
unsigned int fb_id;
|
||||
|
@ -875,7 +919,7 @@ struct i915_fbc {
|
|||
|
||||
struct intel_fbc_work {
|
||||
struct delayed_work work;
|
||||
struct drm_crtc *crtc;
|
||||
struct intel_crtc *crtc;
|
||||
struct drm_framebuffer *fb;
|
||||
} *fbc_work;
|
||||
|
||||
|
@ -891,7 +935,13 @@ struct i915_fbc {
|
|||
FBC_MULTIPLE_PIPES, /* more than one pipe active */
|
||||
FBC_MODULE_PARAM,
|
||||
FBC_CHIP_DEFAULT, /* disabled by default on this chip */
|
||||
FBC_ROTATION, /* rotation is not supported */
|
||||
FBC_IN_DBG_MASTER, /* kernel debugger is active */
|
||||
} no_fbc_reason;
|
||||
|
||||
bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
|
||||
void (*enable_fbc)(struct intel_crtc *crtc);
|
||||
void (*disable_fbc)(struct drm_i915_private *dev_priv);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -1201,6 +1251,10 @@ struct intel_l3_parity {
|
|||
struct i915_gem_mm {
|
||||
/** Memory allocator for GTT stolen memory */
|
||||
struct drm_mm stolen;
|
||||
/** Protects the usage of the GTT stolen memory allocator. This is
|
||||
* always the inner lock when overlapping with struct_mutex. */
|
||||
struct mutex stolen_lock;
|
||||
|
||||
/** List of all objects in gtt_space. Used to restore gtt
|
||||
* mappings on resume */
|
||||
struct list_head bound_list;
|
||||
|
@ -1461,23 +1515,27 @@ struct ilk_wm_values {
|
|||
enum intel_ddb_partitioning partitioning;
|
||||
};
|
||||
|
||||
struct vlv_pipe_wm {
|
||||
uint16_t primary;
|
||||
uint16_t sprite[2];
|
||||
uint8_t cursor;
|
||||
};
|
||||
|
||||
struct vlv_sr_wm {
|
||||
uint16_t plane;
|
||||
uint8_t cursor;
|
||||
};
|
||||
|
||||
struct vlv_wm_values {
|
||||
struct {
|
||||
uint16_t primary;
|
||||
uint16_t sprite[2];
|
||||
uint8_t cursor;
|
||||
} pipe[3];
|
||||
|
||||
struct {
|
||||
uint16_t plane;
|
||||
uint8_t cursor;
|
||||
} sr;
|
||||
|
||||
struct vlv_pipe_wm pipe[3];
|
||||
struct vlv_sr_wm sr;
|
||||
struct {
|
||||
uint8_t cursor;
|
||||
uint8_t sprite[2];
|
||||
uint8_t primary;
|
||||
} ddl[3];
|
||||
uint8_t level;
|
||||
bool cxsr;
|
||||
};
|
||||
|
||||
struct skl_ddb_entry {
|
||||
|
@ -1611,6 +1669,18 @@ struct i915_virtual_gpu {
|
|||
bool active;
|
||||
};
|
||||
|
||||
struct i915_execbuffer_params {
|
||||
struct drm_device *dev;
|
||||
struct drm_file *file;
|
||||
uint32_t dispatch_flags;
|
||||
uint32_t args_batch_start_offset;
|
||||
uint32_t batch_obj_vm_offset;
|
||||
struct intel_engine_cs *ring;
|
||||
struct drm_i915_gem_object *batch_obj;
|
||||
struct intel_context *ctx;
|
||||
struct drm_i915_gem_request *request;
|
||||
};
|
||||
|
||||
struct drm_i915_private {
|
||||
struct drm_device *dev;
|
||||
struct kmem_cache *objects;
|
||||
|
@ -1680,19 +1750,7 @@ struct drm_i915_private {
|
|||
u32 pm_rps_events;
|
||||
u32 pipestat_irq_mask[I915_MAX_PIPES];
|
||||
|
||||
struct work_struct hotplug_work;
|
||||
struct {
|
||||
unsigned long hpd_last_jiffies;
|
||||
int hpd_cnt;
|
||||
enum {
|
||||
HPD_ENABLED = 0,
|
||||
HPD_DISABLED = 1,
|
||||
HPD_MARK_DISABLED = 2
|
||||
} hpd_mark;
|
||||
} hpd_stats[HPD_NUM_PINS];
|
||||
u32 hpd_event_bits;
|
||||
struct delayed_work hotplug_reenable_work;
|
||||
|
||||
struct i915_hotplug hotplug;
|
||||
struct i915_fbc fbc;
|
||||
struct i915_drrs drrs;
|
||||
struct intel_opregion opregion;
|
||||
|
@ -1718,7 +1776,7 @@ struct drm_i915_private {
|
|||
|
||||
unsigned int fsb_freq, mem_freq, is_ddr3;
|
||||
unsigned int skl_boot_cdclk;
|
||||
unsigned int cdclk_freq;
|
||||
unsigned int cdclk_freq, max_cdclk_freq;
|
||||
unsigned int hpll_freq;
|
||||
|
||||
/**
|
||||
|
@ -1769,9 +1827,6 @@ struct drm_i915_private {
|
|||
|
||||
/* Reclocking support */
|
||||
bool render_reclock_avail;
|
||||
bool lvds_downclock_avail;
|
||||
/* indicates the reduced downclock for LVDS*/
|
||||
int lvds_downclock;
|
||||
|
||||
struct i915_frontbuffer_tracking fb_tracking;
|
||||
|
||||
|
@ -1858,29 +1913,11 @@ struct drm_i915_private {
|
|||
|
||||
struct i915_runtime_pm pm;
|
||||
|
||||
struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
|
||||
u32 long_hpd_port_mask;
|
||||
u32 short_hpd_port_mask;
|
||||
struct work_struct dig_port_work;
|
||||
|
||||
/*
|
||||
* if we get a HPD irq from DP and a HPD irq from non-DP
|
||||
* the non-DP HPD could block the workqueue on a mode config
|
||||
* mutex getting, that userspace may have taken. However
|
||||
* userspace is waiting on the DP workqueue to run which is
|
||||
* blocked behind the non-DP one.
|
||||
*/
|
||||
struct workqueue_struct *dp_wq;
|
||||
|
||||
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
|
||||
struct {
|
||||
int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
|
||||
struct intel_engine_cs *ring,
|
||||
struct intel_context *ctx,
|
||||
int (*execbuf_submit)(struct i915_execbuffer_params *params,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct list_head *vmas,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
u64 exec_start, u32 flags);
|
||||
struct list_head *vmas);
|
||||
int (*init_rings)(struct drm_device *dev);
|
||||
void (*cleanup_ring)(struct intel_engine_cs *ring);
|
||||
void (*stop_ring)(struct intel_engine_cs *ring);
|
||||
|
@ -2148,7 +2185,8 @@ struct drm_i915_gem_request {
|
|||
struct intel_context *ctx;
|
||||
struct intel_ringbuffer *ringbuf;
|
||||
|
||||
/** Batch buffer related to this request if any */
|
||||
/** Batch buffer related to this request if any (used for
|
||||
error state dump only) */
|
||||
struct drm_i915_gem_object *batch_obj;
|
||||
|
||||
/** Time at which this request was emitted, in jiffies. */
|
||||
|
@ -2186,8 +2224,12 @@ struct drm_i915_gem_request {
|
|||
};
|
||||
|
||||
int i915_gem_request_alloc(struct intel_engine_cs *ring,
|
||||
struct intel_context *ctx);
|
||||
struct intel_context *ctx,
|
||||
struct drm_i915_gem_request **req_out);
|
||||
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
|
||||
void i915_gem_request_free(struct kref *req_ref);
|
||||
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
|
||||
struct drm_file *file);
|
||||
|
||||
static inline uint32_t
|
||||
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
|
||||
|
@ -2391,6 +2433,9 @@ struct drm_i915_cmd_table {
|
|||
((INTEL_DEVID(dev) & 0xf) == 0x6 || \
|
||||
(INTEL_DEVID(dev) & 0xf) == 0xb || \
|
||||
(INTEL_DEVID(dev) & 0xf) == 0xe))
|
||||
/* ULX machines are also considered ULT. */
|
||||
#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
|
||||
(INTEL_DEVID(dev) & 0xf) == 0xe)
|
||||
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
|
||||
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
|
||||
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
|
||||
|
@ -2400,6 +2445,14 @@ struct drm_i915_cmd_table {
|
|||
/* ULX machines are also considered ULT. */
|
||||
#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
|
||||
INTEL_DEVID(dev) == 0x0A1E)
|
||||
#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
|
||||
INTEL_DEVID(dev) == 0x1913 || \
|
||||
INTEL_DEVID(dev) == 0x1916 || \
|
||||
INTEL_DEVID(dev) == 0x1921 || \
|
||||
INTEL_DEVID(dev) == 0x1926)
|
||||
#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
|
||||
INTEL_DEVID(dev) == 0x1915 || \
|
||||
INTEL_DEVID(dev) == 0x191E)
|
||||
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
|
||||
|
||||
#define SKL_REVID_A0 (0x0)
|
||||
|
@ -2466,9 +2519,6 @@ struct drm_i915_cmd_table {
|
|||
*/
|
||||
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
|
||||
IS_I915GM(dev)))
|
||||
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
|
||||
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
|
||||
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
|
||||
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
|
||||
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
|
||||
|
||||
|
@ -2494,6 +2544,12 @@ struct drm_i915_cmd_table {
|
|||
|
||||
#define HAS_CSR(dev) (IS_SKYLAKE(dev))
|
||||
|
||||
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
|
||||
INTEL_INFO(dev)->gen >= 8)
|
||||
|
||||
#define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \
|
||||
!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
|
||||
|
||||
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
|
||||
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
|
||||
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
|
||||
|
@ -2533,7 +2589,6 @@ struct i915_params {
|
|||
int modeset;
|
||||
int panel_ignore_lid;
|
||||
int semaphores;
|
||||
unsigned int lvds_downclock;
|
||||
int lvds_channel_mode;
|
||||
int panel_use_ssc;
|
||||
int vbt_sdvo_panel_type;
|
||||
|
@ -2558,7 +2613,6 @@ struct i915_params {
|
|||
int use_mmio_flip;
|
||||
int mmio_debug;
|
||||
bool verbose_state_checks;
|
||||
bool nuclear_pageflip;
|
||||
int edp_vswing;
|
||||
};
|
||||
extern struct i915_params i915 __read_mostly;
|
||||
|
@ -2578,15 +2632,22 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
|
|||
unsigned long arg);
|
||||
#endif
|
||||
extern int intel_gpu_reset(struct drm_device *dev);
|
||||
extern bool intel_has_gpu_reset(struct drm_device *dev);
|
||||
extern int i915_reset(struct drm_device *dev);
|
||||
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
|
||||
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
|
||||
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
|
||||
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
|
||||
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
|
||||
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
|
||||
void i915_firmware_load_error_print(const char *fw_path, int err);
|
||||
|
||||
/* intel_hotplug.c */
|
||||
void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
|
||||
void intel_hpd_init(struct drm_i915_private *dev_priv);
|
||||
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
|
||||
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
|
||||
enum port intel_hpd_pin_to_port(enum hpd_pin pin);
|
||||
|
||||
/* i915_irq.c */
|
||||
void i915_queue_hangcheck(struct drm_device *dev);
|
||||
__printf(3, 4)
|
||||
|
@ -2594,7 +2655,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
|
|||
const char *fmt, ...);
|
||||
|
||||
extern void intel_irq_init(struct drm_i915_private *dev_priv);
|
||||
extern void intel_hpd_init(struct drm_i915_private *dev_priv);
|
||||
int intel_irq_install(struct drm_i915_private *dev_priv);
|
||||
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
|
||||
|
||||
|
@ -2661,19 +2721,11 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
|||
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
|
||||
struct intel_engine_cs *ring);
|
||||
void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
|
||||
struct drm_file *file,
|
||||
struct intel_engine_cs *ring,
|
||||
struct drm_i915_gem_object *obj);
|
||||
int i915_gem_ringbuffer_submission(struct drm_device *dev,
|
||||
struct drm_file *file,
|
||||
struct intel_engine_cs *ring,
|
||||
struct intel_context *ctx,
|
||||
struct drm_i915_gem_request *req);
|
||||
void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
|
||||
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct list_head *vmas,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
u64 exec_start, u32 flags);
|
||||
struct list_head *vmas);
|
||||
int i915_gem_execbuffer(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
|
||||
|
@ -2780,9 +2832,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
|
|||
|
||||
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
|
||||
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *to);
|
||||
struct intel_engine_cs *to,
|
||||
struct drm_i915_gem_request **to_req);
|
||||
void i915_vma_move_to_active(struct i915_vma *vma,
|
||||
struct intel_engine_cs *ring);
|
||||
struct drm_i915_gem_request *req);
|
||||
int i915_gem_dumb_create(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
|
@ -2824,7 +2877,6 @@ bool i915_gem_retire_requests(struct drm_device *dev);
|
|||
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
|
||||
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
|
||||
bool interruptible);
|
||||
int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
|
||||
|
||||
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
|
||||
{
|
||||
|
@ -2859,16 +2911,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
|
|||
int __must_check i915_gem_init(struct drm_device *dev);
|
||||
int i915_gem_init_rings(struct drm_device *dev);
|
||||
int __must_check i915_gem_init_hw(struct drm_device *dev);
|
||||
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
|
||||
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
|
||||
void i915_gem_init_swizzling(struct drm_device *dev);
|
||||
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
|
||||
int __must_check i915_gpu_idle(struct drm_device *dev);
|
||||
int __must_check i915_gem_suspend(struct drm_device *dev);
|
||||
int __i915_add_request(struct intel_engine_cs *ring,
|
||||
struct drm_file *file,
|
||||
struct drm_i915_gem_object *batch_obj);
|
||||
#define i915_add_request(ring) \
|
||||
__i915_add_request(ring, NULL, NULL)
|
||||
void __i915_add_request(struct drm_i915_gem_request *req,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
bool flush_caches);
|
||||
#define i915_add_request(req) \
|
||||
__i915_add_request(req, NULL, true)
|
||||
#define i915_add_request_no_flush(req) \
|
||||
__i915_add_request(req, NULL, false)
|
||||
int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
unsigned reset_counter,
|
||||
bool interruptible,
|
||||
|
@ -2888,6 +2942,7 @@ int __must_check
|
|||
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
|
||||
u32 alignment,
|
||||
struct intel_engine_cs *pipelined,
|
||||
struct drm_i915_gem_request **pipelined_request,
|
||||
const struct i915_ggtt_view *view);
|
||||
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
|
||||
const struct i915_ggtt_view *view);
|
||||
|
@ -3012,10 +3067,9 @@ int __must_check i915_gem_context_init(struct drm_device *dev);
|
|||
void i915_gem_context_fini(struct drm_device *dev);
|
||||
void i915_gem_context_reset(struct drm_device *dev);
|
||||
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
|
||||
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
|
||||
int i915_gem_context_enable(struct drm_i915_gem_request *req);
|
||||
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
|
||||
int i915_switch_context(struct intel_engine_cs *ring,
|
||||
struct intel_context *to);
|
||||
int i915_switch_context(struct drm_i915_gem_request *req);
|
||||
struct intel_context *
|
||||
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
|
||||
void i915_gem_context_free(struct kref *ctx_ref);
|
||||
|
@ -3065,9 +3119,12 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
|
|||
}
|
||||
|
||||
/* i915_gem_stolen.c */
|
||||
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
|
||||
struct drm_mm_node *node, u64 size,
|
||||
unsigned alignment);
|
||||
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
|
||||
struct drm_mm_node *node);
|
||||
int i915_gem_init_stolen(struct drm_device *dev);
|
||||
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
|
||||
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
|
||||
void i915_gem_cleanup_stolen(struct drm_device *dev);
|
||||
struct drm_i915_gem_object *
|
||||
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
|
||||
|
@ -3222,8 +3279,7 @@ extern void intel_modeset_gem_init(struct drm_device *dev);
|
|||
extern void intel_modeset_cleanup(struct drm_device *dev);
|
||||
extern void intel_connector_unregister(struct intel_connector *);
|
||||
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
|
||||
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
|
||||
bool force_restore);
|
||||
extern void intel_display_resume(struct drm_device *dev);
|
||||
extern void i915_redisable_vga(struct drm_device *dev);
|
||||
extern void i915_redisable_vga_power_on(struct drm_device *dev);
|
||||
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
|
||||
@ -149,14 +149,18 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	struct i915_gtt *ggtt = &dev_priv->gtt;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
		if (vma->pin_count)
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
		if (vma->pin_count)
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
@ -347,7 +351,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
|
||||
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
|
||||
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
|
||||
unsigned long unwritten;
|
||||
|
||||
|
@ -368,7 +372,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
|
|||
i915_gem_chipset_flush(dev);
|
||||
|
||||
out:
|
||||
intel_fb_obj_flush(obj, false);
|
||||
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -801,7 +805,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
|
|||
|
||||
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
|
||||
|
||||
intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
|
||||
intel_fb_obj_invalidate(obj, ORIGIN_GTT);
|
||||
|
||||
while (remain > 0) {
|
||||
/* Operation in this page
|
||||
|
@ -832,7 +836,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
|
|||
}
|
||||
|
||||
out_flush:
|
||||
intel_fb_obj_flush(obj, false);
|
||||
intel_fb_obj_flush(obj, false, ORIGIN_GTT);
|
||||
out_unpin:
|
||||
i915_gem_object_ggtt_unpin(obj);
|
||||
out:
|
||||
|
@ -945,7 +949,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
|
||||
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
|
||||
|
||||
i915_gem_object_pin_pages(obj);
|
||||
|
||||
|
@ -1025,7 +1029,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
|
|||
if (needs_clflush_after)
|
||||
i915_gem_chipset_flush(dev);
|
||||
|
||||
intel_fb_obj_flush(obj, false);
|
||||
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
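The intel_fb_obj_invalidate()/intel_fb_obj_flush() changes above replace the ring argument with an explicit origin (ORIGIN_CPU, ORIGIN_GTT, ORIGIN_CS), so frontbuffer tracking knows which kind of write it is bracketing. A rough sketch of the bracket around a CPU write path, simplified from the pwrite functions above; the helper name and the trimmed error handling are illustrative:

/*
 * Sketch: bracketing a CPU write to a framebuffer object with
 * frontbuffer invalidate/flush, using the new explicit origin.
 * Simplified from the pwrite paths above; not a complete function.
 */
static int example_cpu_write(struct drm_i915_gem_object *obj,
			     void *vaddr, const void __user *user_data,
			     size_t size)
{
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, size)) {
		intel_fb_obj_flush(obj, false, ORIGIN_CPU);
		return -EFAULT;
	}

	/* ... chipset flush etc. as in i915_gem_phys_pwrite() ... */

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return 0;
}
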
@ -1146,23 +1150,6 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
	return 0;
}

/*
 * Compare arbitrary request against outstanding lazy request. Emit on match.
 */
int
i915_gem_check_olr(struct drm_i915_gem_request *req)
{
	int ret;

	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));

	ret = 0;
	if (req == req->ring->outstanding_lazy_request)
		ret = i915_add_request(req->ring);

	return ret;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
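With i915_gem_check_olr() and the outstanding lazy request gone, work is emitted against an explicitly allocated request: allocate it, emit commands, then either add it or cancel it so the reserved ring space is returned. A sketch of that pattern, mirroring the call sites added later in this series (i915_gpu_idle(), i915_gem_init_hw()); the wrapper function itself is illustrative:

/*
 * Sketch of the explicit-request pattern that replaces the OLR: allocate a
 * request, emit work against it, then either add it or cancel it.
 */
static int example_emit_on_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (ret)
		return ret;

	ret = i915_switch_context(req);		/* any emission helper */
	if (ret) {
		/* Nothing was submitted, give the reserved ring space back. */
		i915_gem_request_cancel(req);
		return ret;
	}

	/* Queue it; __i915_add_request() is no longer allowed to fail. */
	i915_add_request_no_flush(req);
	return 0;
}
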
@ -1334,6 +1321,33 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
|||
return ret;
|
||||
}
|
||||
|
||||
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_private *dev_private;
|
||||
struct drm_i915_file_private *file_priv;
|
||||
|
||||
WARN_ON(!req || !file || req->file_priv);
|
||||
|
||||
if (!req || !file)
|
||||
return -EINVAL;
|
||||
|
||||
if (req->file_priv)
|
||||
return -EINVAL;
|
||||
|
||||
dev_private = req->ring->dev->dev_private;
|
||||
file_priv = file->driver_priv;
|
||||
|
||||
spin_lock(&file_priv->mm.lock);
|
||||
req->file_priv = file_priv;
|
||||
list_add_tail(&req->client_list, &file_priv->mm.request_list);
|
||||
spin_unlock(&file_priv->mm.lock);
|
||||
|
||||
req->pid = get_pid(task_pid(current));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
|
||||
{
|
||||
|
@ -1346,6 +1360,9 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
|
|||
list_del(&request->client_list);
|
||||
request->file_priv = NULL;
|
||||
spin_unlock(&file_priv->mm.lock);
|
||||
|
||||
put_pid(request->pid);
|
||||
request->pid = NULL;
|
||||
}
|
||||
|
||||
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
|
||||
|
@ -1365,8 +1382,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
|
|||
list_del_init(&request->list);
|
||||
i915_gem_request_remove_from_client(request);
|
||||
|
||||
put_pid(request->pid);
|
||||
|
||||
i915_gem_request_unreference(request);
|
||||
}
|
||||
|
||||
|
@ -1415,10 +1430,6 @@ i915_wait_request(struct drm_i915_gem_request *req)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_gem_check_olr(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = __i915_wait_request(req,
|
||||
atomic_read(&dev_priv->gpu_error.reset_counter),
|
||||
interruptible, NULL, NULL);
|
||||
|
@ -1518,10 +1529,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
|
|||
if (req == NULL)
|
||||
return 0;
|
||||
|
||||
ret = i915_gem_check_olr(req);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
requests[n++] = i915_gem_request_reference(req);
|
||||
} else {
|
||||
for (i = 0; i < I915_NUM_RINGS; i++) {
|
||||
|
@ -1531,10 +1538,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
|
|||
if (req == NULL)
|
||||
continue;
|
||||
|
||||
ret = i915_gem_check_olr(req);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
requests[n++] = i915_gem_request_reference(req);
|
||||
}
|
||||
}
|
||||
|
@ -1545,7 +1548,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
|
|||
NULL, rps);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
err:
|
||||
for (i = 0; i < n; i++) {
|
||||
if (ret == 0)
|
||||
i915_gem_object_retire_request(obj, requests[i]);
|
||||
|
@ -1613,6 +1615,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
|||
else
|
||||
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
|
||||
|
||||
if (write_domain != 0)
|
||||
intel_fb_obj_invalidate(obj,
|
||||
write_domain == I915_GEM_DOMAIN_GTT ?
|
||||
ORIGIN_GTT : ORIGIN_CPU);
|
||||
|
||||
unref:
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
unlock:
|
||||
|
@ -2349,9 +2356,12 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
|
|||
}
|
||||
|
||||
void i915_vma_move_to_active(struct i915_vma *vma,
|
||||
struct intel_engine_cs *ring)
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
struct intel_engine_cs *ring;
|
||||
|
||||
ring = i915_gem_request_get_ring(req);
|
||||
|
||||
/* Add a reference if we're newly entering the active list. */
|
||||
if (obj->active == 0)
|
||||
|
@ -2359,8 +2369,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
|
|||
obj->active |= intel_ring_flag(ring);
|
||||
|
||||
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
|
||||
i915_gem_request_assign(&obj->last_read_req[ring->id],
|
||||
intel_ring_get_request(ring));
|
||||
i915_gem_request_assign(&obj->last_read_req[ring->id], req);
|
||||
|
||||
list_move_tail(&vma->mm_list, &vma->vm->active_list);
|
||||
}
|
||||
|
@ -2372,7 +2381,7 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
|
|||
RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
|
||||
|
||||
i915_gem_request_assign(&obj->last_write_req, NULL);
|
||||
intel_fb_obj_flush(obj, true);
|
||||
intel_fb_obj_flush(obj, true, ORIGIN_CS);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -2472,24 +2481,34 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int __i915_add_request(struct intel_engine_cs *ring,
|
||||
struct drm_file *file,
|
||||
struct drm_i915_gem_object *obj)
|
||||
/*
|
||||
* NB: This function is not allowed to fail. Doing so would mean the
|
||||
* request is not being tracked for completion but the work itself is
|
||||
* going to happen on the hardware. This would be a Bad Thing(tm).
|
||||
*/
|
||||
void __i915_add_request(struct drm_i915_gem_request *request,
|
||||
struct drm_i915_gem_object *obj,
|
||||
bool flush_caches)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = ring->dev->dev_private;
|
||||
struct drm_i915_gem_request *request;
|
||||
struct intel_engine_cs *ring;
|
||||
struct drm_i915_private *dev_priv;
|
||||
struct intel_ringbuffer *ringbuf;
|
||||
u32 request_start;
|
||||
int ret;
|
||||
|
||||
request = ring->outstanding_lazy_request;
|
||||
if (WARN_ON(request == NULL))
|
||||
return -ENOMEM;
|
||||
return;
|
||||
|
||||
if (i915.enable_execlists) {
|
||||
ringbuf = request->ctx->engine[ring->id].ringbuf;
|
||||
} else
|
||||
ringbuf = ring->buffer;
|
||||
ring = request->ring;
|
||||
dev_priv = ring->dev->dev_private;
|
||||
ringbuf = request->ringbuf;
|
||||
|
||||
/*
|
||||
* To ensure that this call will not fail, space for its emissions
|
||||
* should already have been reserved in the ring buffer. Let the ring
|
||||
* know that it is time to use that space up.
|
||||
*/
|
||||
intel_ring_reserved_space_use(ringbuf);
|
||||
|
||||
request_start = intel_ring_get_tail(ringbuf);
|
||||
/*
|
||||
|
@ -2499,14 +2518,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
|
|||
* is that the flush _must_ happen before the next request, no matter
|
||||
* what.
|
||||
*/
|
||||
if (i915.enable_execlists) {
|
||||
ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
ret = intel_ring_flush_all_caches(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (flush_caches) {
|
||||
if (i915.enable_execlists)
|
||||
ret = logical_ring_flush_all_caches(request);
|
||||
else
|
||||
ret = intel_ring_flush_all_caches(request);
|
||||
/* Not allowed to fail! */
|
||||
WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
|
||||
}
|
||||
|
||||
/* Record the position of the start of the request so that
|
||||
|
@ -2516,17 +2534,15 @@ int __i915_add_request(struct intel_engine_cs *ring,
|
|||
*/
|
||||
request->postfix = intel_ring_get_tail(ringbuf);
|
||||
|
||||
if (i915.enable_execlists) {
|
||||
ret = ring->emit_request(ringbuf, request);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
ret = ring->add_request(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (i915.enable_execlists)
|
||||
ret = ring->emit_request(request);
|
||||
else {
|
||||
ret = ring->add_request(request);
|
||||
|
||||
request->tail = intel_ring_get_tail(ringbuf);
|
||||
}
|
||||
/* Not allowed to fail! */
|
||||
WARN(ret, "emit|add_request failed: %d!\n", ret);
|
||||
|
||||
request->head = request_start;
|
||||
|
||||
|
@ -2538,34 +2554,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
|
|||
*/
|
||||
request->batch_obj = obj;
|
||||
|
||||
if (!i915.enable_execlists) {
|
||||
/* Hold a reference to the current context so that we can inspect
|
||||
* it later in case a hangcheck error event fires.
|
||||
*/
|
||||
request->ctx = ring->last_context;
|
||||
if (request->ctx)
|
||||
i915_gem_context_reference(request->ctx);
|
||||
}
|
||||
|
||||
request->emitted_jiffies = jiffies;
|
||||
ring->last_submitted_seqno = request->seqno;
|
||||
list_add_tail(&request->list, &ring->request_list);
|
||||
request->file_priv = NULL;
|
||||
|
||||
if (file) {
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
|
||||
spin_lock(&file_priv->mm.lock);
|
||||
request->file_priv = file_priv;
|
||||
list_add_tail(&request->client_list,
|
||||
&file_priv->mm.request_list);
|
||||
spin_unlock(&file_priv->mm.lock);
|
||||
|
||||
request->pid = get_pid(task_pid(current));
|
||||
}
|
||||
|
||||
trace_i915_gem_request_add(request);
|
||||
ring->outstanding_lazy_request = NULL;
|
||||
|
||||
i915_queue_hangcheck(ring->dev);
|
||||
|
||||
|
@ -2574,7 +2567,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
|
|||
round_jiffies_up_relative(HZ));
|
||||
intel_mark_busy(dev_priv->dev);
|
||||
|
||||
return 0;
|
||||
/* Sanity check that the reserved size was large enough. */
|
||||
intel_ring_reserved_space_end(ringbuf);
|
||||
}
|
||||
|
||||
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
|
||||
|
@ -2628,12 +2622,13 @@ void i915_gem_request_free(struct kref *req_ref)
|
|||
typeof(*req), ref);
|
||||
struct intel_context *ctx = req->ctx;
|
||||
|
||||
if (req->file_priv)
|
||||
i915_gem_request_remove_from_client(req);
|
||||
|
||||
if (ctx) {
|
||||
if (i915.enable_execlists) {
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
|
||||
if (ctx != ring->default_context)
|
||||
intel_lr_context_unpin(ring, ctx);
|
||||
if (ctx != req->ring->default_context)
|
||||
intel_lr_context_unpin(req);
|
||||
}
|
||||
|
||||
i915_gem_context_unreference(ctx);
|
||||
|
@ -2643,36 +2638,63 @@ void i915_gem_request_free(struct kref *req_ref)
|
|||
}
|
||||
|
||||
int i915_gem_request_alloc(struct intel_engine_cs *ring,
|
||||
struct intel_context *ctx)
|
||||
struct intel_context *ctx,
|
||||
struct drm_i915_gem_request **req_out)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(ring->dev);
|
||||
struct drm_i915_gem_request *req;
|
||||
int ret;
|
||||
|
||||
if (ring->outstanding_lazy_request)
|
||||
return 0;
|
||||
if (!req_out)
|
||||
return -EINVAL;
|
||||
|
||||
*req_out = NULL;
|
||||
|
||||
req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
|
||||
if (req == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
kref_init(&req->ref);
|
||||
req->i915 = dev_priv;
|
||||
|
||||
ret = i915_gem_get_seqno(ring->dev, &req->seqno);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
kref_init(&req->ref);
|
||||
req->i915 = dev_priv;
|
||||
req->ring = ring;
|
||||
req->ctx = ctx;
|
||||
i915_gem_context_reference(req->ctx);
|
||||
|
||||
if (i915.enable_execlists)
|
||||
ret = intel_logical_ring_alloc_request_extras(req, ctx);
|
||||
ret = intel_logical_ring_alloc_request_extras(req);
|
||||
else
|
||||
ret = intel_ring_alloc_request_extras(req);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
i915_gem_context_unreference(req->ctx);
|
||||
goto err;
|
||||
}
|
||||
|
||||
ring->outstanding_lazy_request = req;
|
||||
/*
|
||||
* Reserve space in the ring buffer for all the commands required to
|
||||
* eventually emit this request. This is to guarantee that the
|
||||
* i915_add_request() call can't fail. Note that the reserve may need
|
||||
* to be redone if the request is not actually submitted straight
|
||||
* away, e.g. because a GPU scheduler has deferred it.
|
||||
*/
|
||||
if (i915.enable_execlists)
|
||||
ret = intel_logical_ring_reserve_space(req);
|
||||
else
|
||||
ret = intel_ring_reserve_space(req);
|
||||
if (ret) {
|
||||
/*
|
||||
* At this point, the request is fully allocated even if not
|
||||
* fully prepared. Thus it can be cleaned up using the proper
|
||||
* free code.
|
||||
*/
|
||||
i915_gem_request_cancel(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
*req_out = req;
|
||||
return 0;
|
||||
|
||||
err:
|
||||
|
@ -2680,6 +2702,13 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
|
|||
return ret;
|
||||
}
|
||||
|
||||
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
|
||||
{
|
||||
intel_ring_reserved_space_cancel(req->ringbuf);
|
||||
|
||||
i915_gem_request_unreference(req);
|
||||
}
|
||||
|
||||
struct drm_i915_gem_request *
|
||||
i915_gem_find_active_request(struct intel_engine_cs *ring)
|
||||
{
|
||||
|
@ -2741,7 +2770,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
|
|||
list_del(&submit_req->execlist_link);
|
||||
|
||||
if (submit_req->ctx != ring->default_context)
|
||||
intel_lr_context_unpin(ring, submit_req->ctx);
|
||||
intel_lr_context_unpin(submit_req);
|
||||
|
||||
i915_gem_request_unreference(submit_req);
|
||||
}
|
||||
|
@ -2762,9 +2791,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
|
|||
|
||||
i915_gem_request_retire(request);
|
||||
}
|
||||
|
||||
/* This may not have been flushed before the reset, so clean it now */
|
||||
i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
|
||||
}
|
||||
|
||||
void i915_gem_restore_fences(struct drm_device *dev)
|
||||
|
@ -2947,7 +2973,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
|
|||
static int
|
||||
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
int ret, i;
|
||||
int i;
|
||||
|
||||
if (!obj->active)
|
||||
return 0;
|
||||
|
@ -2962,10 +2988,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
|
|||
if (list_empty(&req->list))
|
||||
goto retire;
|
||||
|
||||
ret = i915_gem_check_olr(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (i915_gem_request_completed(req, true)) {
|
||||
__i915_gem_request_retire__upto(req);
|
||||
retire:
|
||||
|
@ -3068,25 +3090,22 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
|||
static int
|
||||
__i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *to,
|
||||
struct drm_i915_gem_request *req)
|
||||
struct drm_i915_gem_request *from_req,
|
||||
struct drm_i915_gem_request **to_req)
|
||||
{
|
||||
struct intel_engine_cs *from;
|
||||
int ret;
|
||||
|
||||
from = i915_gem_request_get_ring(req);
|
||||
from = i915_gem_request_get_ring(from_req);
|
||||
if (to == from)
|
||||
return 0;
|
||||
|
||||
if (i915_gem_request_completed(req, true))
|
||||
if (i915_gem_request_completed(from_req, true))
|
||||
return 0;
|
||||
|
||||
ret = i915_gem_check_olr(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!i915_semaphore_is_enabled(obj->base.dev)) {
|
||||
struct drm_i915_private *i915 = to_i915(obj->base.dev);
|
||||
ret = __i915_wait_request(req,
|
||||
ret = __i915_wait_request(from_req,
|
||||
atomic_read(&i915->gpu_error.reset_counter),
|
||||
i915->mm.interruptible,
|
||||
NULL,
|
||||
|
@ -3094,16 +3113,24 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_object_retire_request(obj, req);
|
||||
i915_gem_object_retire_request(obj, from_req);
|
||||
} else {
|
||||
int idx = intel_ring_sync_index(from, to);
|
||||
u32 seqno = i915_gem_request_get_seqno(req);
|
||||
u32 seqno = i915_gem_request_get_seqno(from_req);
|
||||
|
||||
WARN_ON(!to_req);
|
||||
|
||||
if (seqno <= from->semaphore.sync_seqno[idx])
|
||||
return 0;
|
||||
|
||||
trace_i915_gem_ring_sync_to(from, to, req);
|
||||
ret = to->semaphore.sync_to(to, from, seqno);
|
||||
if (*to_req == NULL) {
|
||||
ret = i915_gem_request_alloc(to, to->default_context, to_req);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
trace_i915_gem_ring_sync_to(*to_req, from, from_req);
|
||||
ret = to->semaphore.sync_to(*to_req, from, seqno);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -3123,11 +3150,14 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
|||
*
|
||||
* @obj: object which may be in use on another ring.
|
||||
* @to: ring we wish to use the object on. May be NULL.
|
||||
* @to_req: request we wish to use the object for. See below.
|
||||
* This will be allocated and returned if a request is
|
||||
* required but not passed in.
|
||||
*
|
||||
* This code is meant to abstract object synchronization with the GPU.
|
||||
* Calling with NULL implies synchronizing the object with the CPU
|
||||
* rather than a particular GPU ring. Conceptually we serialise writes
|
||||
* between engines inside the GPU. We only allow on engine to write
|
||||
* between engines inside the GPU. We only allow one engine to write
|
||||
* into a buffer at any time, but multiple readers. To ensure each has
|
||||
* a coherent view of memory, we must:
|
||||
*
|
||||
|
@ -3138,11 +3168,22 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
|||
* - If we are a write request (pending_write_domain is set), the new
|
||||
* request must wait for outstanding read requests to complete.
|
||||
*
|
||||
* For CPU synchronisation (NULL to) no request is required. For syncing with
|
||||
* rings to_req must be non-NULL. However, a request does not have to be
|
||||
* pre-allocated. If *to_req is NULL and sync commands will be emitted then a
|
||||
* request will be allocated automatically and returned through *to_req. Note
|
||||
* that it is not guaranteed that commands will be emitted (because the system
|
||||
* might already be idle). Hence there is no need to create a request that
|
||||
* might never have any work submitted. Note further that if a request is
|
||||
* returned in *to_req, it is the responsibility of the caller to submit
|
||||
* that request (after potentially adding more work to it).
|
||||
*
|
||||
* Returns 0 if successful, else propagates up the lower layer error.
|
||||
*/
|
||||
int
|
||||
i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *to)
|
||||
struct intel_engine_cs *to,
|
||||
struct drm_i915_gem_request **to_req)
|
||||
{
|
||||
const bool readonly = obj->base.pending_write_domain == 0;
|
||||
struct drm_i915_gem_request *req[I915_NUM_RINGS];
|
||||
|
@ -3164,7 +3205,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
|||
req[n++] = obj->last_read_req[i];
|
||||
}
|
||||
for (i = 0; i < n; i++) {
|
||||
ret = __i915_gem_object_sync(obj, to, req[i]);
|
||||
ret = __i915_gem_object_sync(obj, to, req[i], to_req);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
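The i915_gem_object_sync() kerneldoc above defines the *to_req contract: the caller passes a request pointer that may point to NULL, and if sync commands had to be emitted a request is allocated and returned for the caller to submit. A hedged sketch of a caller, not taken verbatim from this patch:

/*
 * Sketch: syncing an object to a target ring with the new to_req
 * out-parameter.  If semaphore commands were emitted, a request comes
 * back and it is the caller's job to submit it.
 */
static int example_sync_and_submit(struct drm_i915_gem_object *obj,
				   struct intel_engine_cs *to)
{
	struct drm_i915_gem_request *req = NULL;
	int ret;

	ret = i915_gem_object_sync(obj, to, &req);
	if (ret)
		return ret;

	/* ... emit whatever work needed the object on 'to' ... */

	if (req)	/* only allocated if sync commands were emitted */
		i915_add_request_no_flush(req);

	return 0;
}
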
@ -3275,9 +3316,19 @@ int i915_gpu_idle(struct drm_device *dev)
|
|||
/* Flush everything onto the inactive list. */
|
||||
for_each_ring(ring, dev_priv, i) {
|
||||
if (!i915.enable_execlists) {
|
||||
ret = i915_switch_context(ring, ring->default_context);
|
||||
struct drm_i915_gem_request *req;
|
||||
|
||||
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_switch_context(req);
|
||||
if (ret) {
|
||||
i915_gem_request_cancel(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
i915_add_request_no_flush(req);
|
||||
}
|
||||
|
||||
ret = intel_ring_idle(ring);
|
||||
|
@ -3673,9 +3724,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
|
|||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 size, fence_size, fence_alignment, unfenced_alignment;
|
||||
unsigned long start =
|
||||
u64 start =
|
||||
flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
|
||||
unsigned long end =
|
||||
u64 end =
|
||||
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
|
||||
struct i915_vma *vma;
|
||||
int ret;
|
||||
|
@ -3731,7 +3782,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
|
|||
* attempt to find space.
|
||||
*/
|
||||
if (size > end) {
|
||||
DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
|
||||
DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
|
||||
ggtt_view ? ggtt_view->type : 0,
|
||||
size,
|
||||
flags & PIN_MAPPABLE ? "mappable" : "total",
|
||||
|
@ -3853,7 +3904,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
|
|||
old_write_domain = obj->base.write_domain;
|
||||
obj->base.write_domain = 0;
|
||||
|
||||
intel_fb_obj_flush(obj, false);
|
||||
intel_fb_obj_flush(obj, false, ORIGIN_GTT);
|
||||
|
||||
trace_i915_gem_object_change_domain(obj,
|
||||
obj->base.read_domains,
|
||||
|
@ -3875,7 +3926,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
|
|||
old_write_domain = obj->base.write_domain;
|
||||
obj->base.write_domain = 0;
|
||||
|
||||
intel_fb_obj_flush(obj, false);
|
||||
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
|
||||
|
||||
trace_i915_gem_object_change_domain(obj,
|
||||
obj->base.read_domains,
|
||||
|
@ -3937,9 +3988,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
|
|||
obj->dirty = 1;
|
||||
}
|
||||
|
||||
if (write)
|
||||
intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
|
||||
|
||||
trace_i915_gem_object_change_domain(obj,
|
||||
old_read_domains,
|
||||
old_write_domain);
|
||||
|
@ -4094,12 +4142,13 @@ int
|
|||
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
|
||||
u32 alignment,
|
||||
struct intel_engine_cs *pipelined,
|
||||
struct drm_i915_gem_request **pipelined_request,
|
||||
const struct i915_ggtt_view *view)
|
||||
{
|
||||
u32 old_read_domains, old_write_domain;
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_object_sync(obj, pipelined);
|
||||
ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -4210,9 +4259,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
|
|||
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
|
||||
}
|
||||
|
||||
if (write)
|
||||
intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
|
||||
|
||||
trace_i915_gem_object_change_domain(obj,
|
||||
old_read_domains,
|
||||
old_write_domain);
|
||||
|
@ -4253,6 +4299,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
|
|||
if (time_after_eq(request->emitted_jiffies, recent_enough))
|
||||
break;
|
||||
|
||||
/*
|
||||
* Note that the request might not have been submitted yet.
|
||||
* In which case emitted_jiffies will be zero.
|
||||
*/
|
||||
if (!request->emitted_jiffies)
|
||||
continue;
|
||||
|
||||
target = request;
|
||||
}
|
||||
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
|
||||
|
@ -4810,8 +4863,9 @@ i915_gem_suspend(struct drm_device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
|
||||
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
|
||||
|
@ -4821,7 +4875,7 @@ int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
|
|||
if (!HAS_L3_DPF(dev) || !remap_info)
|
||||
return 0;
|
||||
|
||||
ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
|
||||
ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -4967,7 +5021,7 @@ i915_gem_init_hw(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *ring;
|
||||
int ret, i;
|
||||
int ret, i, j;
|
||||
|
||||
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
|
||||
return -EIO;
|
||||
|
@ -5004,27 +5058,55 @@ i915_gem_init_hw(struct drm_device *dev)
|
|||
*/
|
||||
init_unused_rings(dev);
|
||||
|
||||
BUG_ON(!dev_priv->ring[RCS].default_context);
|
||||
|
||||
ret = i915_ppgtt_init_hw(dev);
|
||||
if (ret) {
|
||||
DRM_ERROR("PPGTT enable HW failed %d\n", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Need to do basic initialisation of all rings first: */
|
||||
for_each_ring(ring, dev_priv, i) {
|
||||
ret = ring->init_hw(ring);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < NUM_L3_SLICES(dev); i++)
|
||||
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
|
||||
/* Now it is safe to go back round and do everything else: */
|
||||
for_each_ring(ring, dev_priv, i) {
|
||||
struct drm_i915_gem_request *req;
|
||||
|
||||
ret = i915_ppgtt_init_hw(dev);
|
||||
if (ret && ret != -EIO) {
|
||||
DRM_ERROR("PPGTT enable failed %d\n", ret);
|
||||
i915_gem_cleanup_ringbuffer(dev);
|
||||
}
|
||||
WARN_ON(!ring->default_context);
|
||||
|
||||
ret = i915_gem_context_enable(dev_priv);
|
||||
if (ret && ret != -EIO) {
|
||||
DRM_ERROR("Context enable failed %d\n", ret);
|
||||
i915_gem_cleanup_ringbuffer(dev);
|
||||
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
|
||||
if (ret) {
|
||||
i915_gem_cleanup_ringbuffer(dev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
goto out;
|
||||
if (ring->id == RCS) {
|
||||
for (j = 0; j < NUM_L3_SLICES(dev); j++)
|
||||
i915_gem_l3_remap(req, j);
|
||||
}
|
||||
|
||||
ret = i915_ppgtt_init_ring(req);
|
||||
if (ret && ret != -EIO) {
|
||||
DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
|
||||
i915_gem_request_cancel(req);
|
||||
i915_gem_cleanup_ringbuffer(dev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = i915_gem_context_enable(req);
|
||||
if (ret && ret != -EIO) {
|
||||
DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
|
||||
i915_gem_request_cancel(req);
|
||||
i915_gem_cleanup_ringbuffer(dev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
i915_add_request_no_flush(req);
|
||||
}
|
||||
|
||||
out:
|
||||
|
@ -5111,6 +5193,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
|
|||
|
||||
for_each_ring(ring, dev_priv, i)
|
||||
dev_priv->gt.cleanup_ring(ring);
|
||||
|
||||
if (i915.enable_execlists)
|
||||
/*
|
||||
* Neither the BIOS, ourselves or any other kernel
|
||||
* expects the system to be in execlists mode on startup,
|
||||
* so we need to reset the GPU back to legacy mode.
|
||||
*/
|
||||
intel_gpu_reset(dev);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -5387,4 +5477,3 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
|
|||
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -407,32 +407,23 @@ void i915_gem_context_fini(struct drm_device *dev)
|
|||
i915_gem_context_unreference(dctx);
|
||||
}
|
||||
|
||||
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
|
||||
int i915_gem_context_enable(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring;
|
||||
int ret, i;
|
||||
|
||||
BUG_ON(!dev_priv->ring[RCS].default_context);
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
if (i915.enable_execlists) {
|
||||
for_each_ring(ring, dev_priv, i) {
|
||||
if (ring->init_context) {
|
||||
ret = ring->init_context(ring,
|
||||
ring->default_context);
|
||||
if (ret) {
|
||||
DRM_ERROR("ring init context: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (ring->init_context == NULL)
|
||||
return 0;
|
||||
|
||||
ret = ring->init_context(req);
|
||||
} else
|
||||
for_each_ring(ring, dev_priv, i) {
|
||||
ret = i915_switch_context(ring, ring->default_context);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
ret = i915_switch_context(req);
|
||||
|
||||
if (ret) {
|
||||
DRM_ERROR("ring init context: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -485,10 +476,9 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
|
|||
}
|
||||
|
||||
static inline int
|
||||
mi_set_context(struct intel_engine_cs *ring,
|
||||
struct intel_context *new_context,
|
||||
u32 hw_flags)
|
||||
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
u32 flags = hw_flags | MI_MM_SPACE_GTT;
|
||||
const int num_rings =
|
||||
/* Use an extended w/a on ivb+ if signalling from other rings */
|
||||
|
@ -503,13 +493,15 @@ mi_set_context(struct intel_engine_cs *ring,
|
|||
* itlb_before_ctx_switch.
|
||||
*/
|
||||
if (IS_GEN6(ring->dev)) {
|
||||
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
|
||||
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* These flags are for resource streamer on HSW+ */
|
||||
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
|
||||
if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
|
||||
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
|
||||
else if (INTEL_INFO(ring->dev)->gen < 8)
|
||||
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
|
||||
|
||||
|
||||
|
@ -517,7 +509,7 @@ mi_set_context(struct intel_engine_cs *ring,
|
|||
if (INTEL_INFO(ring->dev)->gen >= 7)
|
||||
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
|
||||
|
||||
ret = intel_ring_begin(ring, len);
|
||||
ret = intel_ring_begin(req, len);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -540,7 +532,7 @@ mi_set_context(struct intel_engine_cs *ring,
|
|||
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_SET_CONTEXT);
|
||||
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
|
||||
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
|
||||
flags);
|
||||
/*
|
||||
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
|
||||
|
@ -621,9 +613,10 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
|
|||
return false;
|
||||
}
|
||||
|
||||
static int do_switch(struct intel_engine_cs *ring,
|
||||
struct intel_context *to)
|
||||
static int do_switch(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_context *to = req->ctx;
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
struct drm_i915_private *dev_priv = ring->dev->dev_private;
|
||||
struct intel_context *from = ring->last_context;
|
||||
u32 hw_flags = 0;
|
||||
|
@ -659,7 +652,7 @@ static int do_switch(struct intel_engine_cs *ring,
|
|||
* Register Immediate commands in Ring Buffer before submitting
|
||||
* a context."*/
|
||||
trace_switch_mm(ring, to);
|
||||
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
|
||||
ret = to->ppgtt->switch_mm(to->ppgtt, req);
|
||||
if (ret)
|
||||
goto unpin_out;
|
||||
|
||||
|
@ -701,7 +694,7 @@ static int do_switch(struct intel_engine_cs *ring,
|
|||
WARN_ON(needs_pd_load_pre(ring, to) &&
|
||||
needs_pd_load_post(ring, to, hw_flags));
|
||||
|
||||
ret = mi_set_context(ring, to, hw_flags);
|
||||
ret = mi_set_context(req, hw_flags);
|
||||
if (ret)
|
||||
goto unpin_out;
|
||||
|
||||
|
@ -710,7 +703,7 @@ static int do_switch(struct intel_engine_cs *ring,
|
|||
*/
|
||||
if (needs_pd_load_post(ring, to, hw_flags)) {
|
||||
trace_switch_mm(ring, to);
|
||||
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
|
||||
ret = to->ppgtt->switch_mm(to->ppgtt, req);
|
||||
/* The hardware context switch is emitted, but we haven't
|
||||
* actually changed the state - so it's probably safe to bail
|
||||
* here. Still, let the user know something dangerous has
|
||||
|
@ -726,7 +719,7 @@ static int do_switch(struct intel_engine_cs *ring,
|
|||
if (!(to->remap_slice & (1<<i)))
|
||||
continue;
|
||||
|
||||
ret = i915_gem_l3_remap(ring, i);
|
||||
ret = i915_gem_l3_remap(req, i);
|
||||
/* If it failed, try again next round */
|
||||
if (ret)
|
||||
DRM_DEBUG_DRIVER("L3 remapping failed\n");
|
||||
|
@ -742,7 +735,7 @@ static int do_switch(struct intel_engine_cs *ring,
|
|||
*/
|
||||
if (from != NULL) {
|
||||
from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
|
||||
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
|
||||
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
|
||||
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
|
||||
* whole damn pipeline, we don't need to explicitly mark the
|
||||
* object dirty. The only exception is that the context must be
|
||||
|
@ -766,7 +759,7 @@ static int do_switch(struct intel_engine_cs *ring,
|
|||
|
||||
if (uninitialized) {
|
||||
if (ring->init_context) {
|
||||
ret = ring->init_context(ring, to);
|
||||
ret = ring->init_context(req);
|
||||
if (ret)
|
||||
DRM_ERROR("ring init context: %d\n", ret);
|
||||
}
|
||||
|
@ -782,8 +775,7 @@ static int do_switch(struct intel_engine_cs *ring,
|
|||
|
||||
/**
|
||||
* i915_switch_context() - perform a GPU context switch.
|
||||
* @ring: ring for which we'll execute the context switch
|
||||
* @to: the context to switch to
|
||||
* @req: request for which we'll execute the context switch
|
||||
*
|
||||
* The context life cycle is simple. The context refcount is incremented and
|
||||
* decremented by 1 and create and destroy. If the context is in use by the GPU,
|
||||
|
@ -794,25 +786,25 @@ static int do_switch(struct intel_engine_cs *ring,
|
|||
* switched by writing to the ELSP and requests keep a reference to their
|
||||
* context.
|
||||
*/
|
||||
int i915_switch_context(struct intel_engine_cs *ring,
|
||||
struct intel_context *to)
|
||||
int i915_switch_context(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
struct drm_i915_private *dev_priv = ring->dev->dev_private;
|
||||
|
||||
WARN_ON(i915.enable_execlists);
|
||||
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
|
||||
|
||||
if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
|
||||
if (to != ring->last_context) {
|
||||
i915_gem_context_reference(to);
|
||||
if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
|
||||
if (req->ctx != ring->last_context) {
|
||||
i915_gem_context_reference(req->ctx);
|
||||
if (ring->last_context)
|
||||
i915_gem_context_unreference(ring->last_context);
|
||||
ring->last_context = to;
|
||||
ring->last_context = req->ctx;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
return do_switch(ring, to);
|
||||
return do_switch(req);
|
||||
}
|
||||
|
||||
static bool contexts_enabled(struct drm_device *dev)
|
||||
|
@ -898,6 +890,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
|
|||
case I915_CONTEXT_PARAM_BAN_PERIOD:
|
||||
args->value = ctx->hang_stats.ban_period_seconds;
|
||||
break;
|
||||
case I915_CONTEXT_PARAM_NO_ZEROMAP:
|
||||
args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
|
@ -935,6 +930,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
|
|||
else
|
||||
ctx->hang_stats.ban_period_seconds = args->value;
|
||||
break;
|
||||
case I915_CONTEXT_PARAM_NO_ZEROMAP:
|
||||
if (args->size) {
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
ctx->flags &= ~CONTEXT_NO_ZEROMAP;
|
||||
ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
|
|
|
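The I915_CONTEXT_PARAM_NO_ZEROMAP parameter handled above is driven from userspace through the context setparam ioctl; when set, execbuffer biases every mapping away from GTT offset zero (see the __EXEC_OBJECT_NEEDS_BIAS hunk below). A userspace sketch, assuming an installed uapi header that already contains the new parameter; the wrapper and include path are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* or libdrm's i915_drm.h */

/* Sketch: opt a context out of zero-offset GTT mappings. */
static int context_set_no_zeromap(int drm_fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_NO_ZEROMAP;
	p.value = 1;	/* size stays 0 for a plain integer value */

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
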
@ -677,6 +677,7 @@ eb_vma_misplaced(struct i915_vma *vma)
|
|||
static int
|
||||
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
|
||||
struct list_head *vmas,
|
||||
struct intel_context *ctx,
|
||||
bool *need_relocs)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
@ -699,6 +700,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
|
|||
obj = vma->obj;
|
||||
entry = vma->exec_entry;
|
||||
|
||||
if (ctx->flags & CONTEXT_NO_ZEROMAP)
|
||||
entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
|
||||
|
||||
if (!has_fenced_gpu_access)
|
||||
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
|
||||
need_fence =
|
||||
|
@ -776,7 +780,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
|
|||
struct drm_file *file,
|
||||
struct intel_engine_cs *ring,
|
||||
struct eb_vmas *eb,
|
||||
struct drm_i915_gem_exec_object2 *exec)
|
||||
struct drm_i915_gem_exec_object2 *exec,
|
||||
struct intel_context *ctx)
|
||||
{
|
||||
struct drm_i915_gem_relocation_entry *reloc;
|
||||
struct i915_address_space *vm;
|
||||
|
@ -862,7 +867,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
|
|||
goto err;
|
||||
|
||||
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
|
||||
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
|
||||
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
|
@ -887,10 +892,10 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
|
|||
}
|
||||
|
||||
static int
|
||||
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
|
||||
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
||||
struct list_head *vmas)
|
||||
{
|
||||
const unsigned other_rings = ~intel_ring_flag(ring);
|
||||
const unsigned other_rings = ~intel_ring_flag(req->ring);
|
||||
struct i915_vma *vma;
|
||||
uint32_t flush_domains = 0;
|
||||
bool flush_chipset = false;
|
||||
|
@ -900,7 +905,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
|
|||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
|
||||
if (obj->active & other_rings) {
|
||||
ret = i915_gem_object_sync(obj, ring);
|
||||
ret = i915_gem_object_sync(obj, req->ring, &req);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -912,7 +917,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
if (flush_chipset)
|
||||
i915_gem_chipset_flush(ring->dev);
|
||||
i915_gem_chipset_flush(req->ring->dev);
|
||||
|
||||
if (flush_domains & I915_GEM_DOMAIN_GTT)
|
||||
wmb();
|
||||
|
@ -920,7 +925,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
|
|||
/* Unconditionally invalidate gpu caches and ensure that we do flush
|
||||
* any residual writes from the previous batch.
|
||||
*/
|
||||
return intel_ring_invalidate_all_caches(ring);
|
||||
return intel_ring_invalidate_all_caches(req);
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -953,6 +958,9 @@ validate_exec_list(struct drm_device *dev,
|
|||
if (exec[i].flags & invalid_flags)
|
||||
return -EINVAL;
|
||||
|
||||
if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
|
||||
return -EINVAL;
|
||||
|
||||
/* First check for malicious input causing overflow in
|
||||
* the worst case where we need to allocate the entire
|
||||
* relocation tree as a single array.
|
||||
|
@ -1013,9 +1021,9 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
|
|||
|
||||
void
|
||||
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
|
||||
struct intel_engine_cs *ring)
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct drm_i915_gem_request *req = intel_ring_get_request(ring);
|
||||
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
|
||||
struct i915_vma *vma;
|
||||
|
||||
list_for_each_entry(vma, vmas, exec_list) {
|
||||
|
@ -1029,12 +1037,12 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
|
|||
obj->base.pending_read_domains |= obj->base.read_domains;
|
||||
obj->base.read_domains = obj->base.pending_read_domains;
|
||||
|
||||
i915_vma_move_to_active(vma, ring);
|
||||
i915_vma_move_to_active(vma, req);
if (obj->base.write_domain) {
obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);

intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
intel_fb_obj_invalidate(obj, ORIGIN_CS);

/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

@@ -1053,22 +1061,20 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
}

void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj)
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
params->ring->gpu_caches_dirty = true;

/* Add a breadcrumb for the completion of the batch buffer */
(void)__i915_add_request(ring, file, obj);
__i915_add_request(params->request, params->batch_obj, true);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_engine_cs *ring)
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;

@@ -1077,7 +1083,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return -EINVAL;
}

ret = intel_ring_begin(ring, 4 * 3);
ret = intel_ring_begin(req, 4 * 3);
if (ret)
return ret;

@@ -1093,10 +1099,11 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
}

static int
i915_emit_box(struct intel_engine_cs *ring,
i915_emit_box(struct drm_i915_gem_request *req,
struct drm_clip_rect *box,
int DR1, int DR4)
{
struct intel_engine_cs *ring = req->ring;
int ret;

if (box->y2 <= box->y1 || box->x2 <= box->x1 ||

@@ -1107,7 +1114,7 @@ i915_emit_box(struct intel_engine_cs *ring,
}

if (INTEL_INFO(ring->dev)->gen >= 4) {
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;

@@ -1116,7 +1123,7 @@ i915_emit_box(struct intel_engine_cs *ring,
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
intel_ring_emit(ring, DR4);
} else {
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;

@@ -1186,17 +1193,15 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
}

int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 dispatch_flags)
struct list_head *vmas)
{
struct drm_clip_rect *cliprects = NULL;
struct drm_device *dev = params->dev;
struct intel_engine_cs *ring = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_len;
u64 exec_start, exec_len;
int instp_mode;
u32 instp_mask;
int i, ret = 0;

@@ -1244,15 +1249,15 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
}
}

ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
if (ret)
goto error;

ret = i915_switch_context(ring, ctx);
ret = i915_switch_context(params->request);
if (ret)
goto error;

WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
"%s didn't clear reload\n", ring->name);

instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;

@@ -1294,7 +1299,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,

if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(params->request, 4);
if (ret)
goto error;

@@ -1308,37 +1313,40 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
}

if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
ret = i915_reset_gen7_sol_offsets(dev, params->request);
if (ret)
goto error;
}

exec_len = args->batch_len;
exec_len = args->batch_len;
exec_start = params->batch_obj_vm_offset +
params->args_batch_start_offset;

if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(ring, &cliprects[i],
ret = i915_emit_box(params->request, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;

ret = ring->dispatch_execbuffer(ring,
ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
dispatch_flags);
params->dispatch_flags);
if (ret)
goto error;
}
} else {
ret = ring->dispatch_execbuffer(ring,
ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
dispatch_flags);
params->dispatch_flags);
if (ret)
return ret;
}

trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
i915_gem_execbuffer_move_to_active(vmas, params->request);
i915_gem_execbuffer_retire_commands(params);

error:
kfree(cliprects);

@@ -1408,8 +1416,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
struct i915_execbuffer_params *params = &params_master;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u64 exec_start = args->batch_start_offset;
u32 dispatch_flags;
int ret;
bool need_relocs;

@@ -1482,6 +1491,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}

if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
if (!HAS_RESOURCE_STREAMER(dev)) {
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
if (ring->id != RCS) {
DRM_DEBUG("RS is not available on %s\n",
ring->name);
return -EINVAL;
}

dispatch_flags |= I915_DISPATCH_RS;
}

intel_runtime_pm_get(dev_priv);

ret = i915_mutex_lock_interruptible(dev);

@@ -1502,6 +1525,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
else
vm = &dev_priv->gtt.base;

memset(&params_master, 0x00, sizeof(params_master));

eb = eb_create(args);
if (eb == NULL) {
i915_gem_context_unreference(ctx);

@@ -1520,7 +1545,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,

/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
if (ret)
goto err;

@@ -1530,7 +1555,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec);
eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)

@@ -1544,6 +1569,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}

params->args_batch_start_offset = args->batch_start_offset;
if (i915_needs_cmd_parser(ring) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;

@@ -1575,7 +1601,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* command parser has accepted.
*/
dispatch_flags |= I915_DISPATCH_SECURE;
exec_start = 0;
params->args_batch_start_offset = 0;
batch_obj = parsed_batch_obj;
}
}

@@ -1600,14 +1626,35 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;

exec_start += i915_gem_obj_ggtt_offset(batch_obj);
params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
} else
exec_start += i915_gem_obj_offset(batch_obj, vm);
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);

ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
&eb->vmas, batch_obj, exec_start,
dispatch_flags);
/* Allocate a request for this batch buffer nice and early. */
ret = i915_gem_request_alloc(ring, ctx, &params->request);
if (ret)
goto err_batch_unpin;

ret = i915_gem_request_add_to_client(params->request, file);
if (ret)
goto err_batch_unpin;

/*
* Save assorted stuff away to pass through to *_submission().
* NB: This data should be 'persistent' and not local as it will
* kept around beyond the duration of the IOCTL once the GPU
* scheduler arrives.
*/
params->dev = dev;
params->file = file;
params->ring = ring;
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;

ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);

err_batch_unpin:
/*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
* batch vma for correctness. For less ugly and less fragility this

@@ -1616,11 +1663,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
if (dispatch_flags & I915_DISPATCH_SECURE)
i915_gem_object_ggtt_unpin(batch_obj);

err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
eb_destroy(eb);

/*
* If the request was created but not successfully submitted then it
* must be freed again. If it was submitted then it is being tracked
* on the active request list and no clean up is required here.
*/
if (ret && params->request)
i915_gem_request_cancel(params->request);

mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
File diff suppressed because it is too large
@@ -126,6 +128,8 @@ struct intel_rotation_info {
unsigned int pitch;
uint32_t pixel_format;
uint64_t fb_modifier;
unsigned int width_pages, height_pages;
uint64_t size;
};

struct i915_ggtt_view {

@@ -205,19 +207,34 @@ struct i915_vma {
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};

struct i915_page_table {
struct i915_page_dma {
struct page *page;
dma_addr_t daddr;
union {
dma_addr_t daddr;

/* For gen6/gen7 only. This is the offset in the GGTT
* where the page directory entries for PPGTT begin
*/
uint32_t ggtt_offset;
};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_scratch {
struct i915_page_dma base;
};

struct i915_page_table {
struct i915_page_dma base;

unsigned long *used_ptes;
};

struct i915_page_directory {
struct page *page; /* NULL for GEN6-GEN7 */
union {
uint32_t pd_offset;
dma_addr_t daddr;
};
struct i915_page_dma base;

unsigned long *used_pdes;
struct i915_page_table *page_table[I915_PDES]; /* PDEs */

@@ -233,13 +250,12 @@ struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
struct list_head global_link;
unsigned long start; /* Start offset always 0 for dri2 */
size_t total; /* size addr space maps (ex. 2GB for ggtt) */
u64 start; /* Start offset always 0 for dri2 */
u64 total; /* size addr space maps (ex. 2GB for ggtt) */

struct {
dma_addr_t addr;
struct page *page;
} scratch;
struct i915_page_scratch *scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;

/**
* List of objects currently involved in rendering.

@@ -300,9 +316,9 @@ struct i915_address_space {
*/
struct i915_gtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */

unsigned long mappable_end; /* End offset that we can CPU map */
size_t stolen_size; /* Total size of stolen memory */
u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */

@@ -314,9 +330,9 @@ struct i915_gtt {
int mtrr;

/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
u64 *mappable_end);
};

struct i915_hw_ppgtt {

@@ -329,16 +345,13 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd;
};

struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;

struct drm_i915_file_private *file_priv;

gen6_pte_t __iomem *pd_addr;

int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring);
struct drm_i915_gem_request *req);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

@@ -468,6 +481,14 @@ static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
return test_bit(n, ppgtt->pdp.used_pdpes) ?
px_dma(ppgtt->pdp.page_directory[n]) :
px_dma(ppgtt->base.scratch_pd);
}

int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);

@@ -475,6 +496,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev);

int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
@@ -152,29 +152,26 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
return 0;
}

int i915_gem_render_state_init(struct intel_engine_cs *ring)
int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
struct render_state so;
int ret;

ret = i915_gem_render_state_prepare(ring, &so);
ret = i915_gem_render_state_prepare(req->ring, &so);
if (ret)
return ret;

if (so.rodata == NULL)
return 0;

ret = ring->dispatch_execbuffer(ring,
so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;

i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);

ret = __i915_add_request(ring, NULL, so.obj);
/* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
return ret;

@@ -39,7 +39,7 @@ struct render_state {
int gen;
};

int i915_gem_render_state_init(struct intel_engine_cs *ring);
int i915_gem_render_state_init(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so);
@@ -42,6 +42,31 @@
* for is a boon.
*/

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
int ret;

if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;

mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
DRM_MM_SEARCH_DEFAULT);
mutex_unlock(&dev_priv->mm.stolen_lock);

return ret;
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node)
{
mutex_lock(&dev_priv->mm.stolen_lock);
drm_mm_remove_node(node);
mutex_unlock(&dev_priv->mm.stolen_lock);
}

static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

@@ -151,134 +176,6 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return base;
}

static int find_compression_threshold(struct drm_device *dev,
struct drm_mm_node *node,
int size,
int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int compression_threshold = 1;
int ret;

/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
*
* The enable_fbc code will attempt to use one of our 2 compression
* thresholds, therefore, in that case, we only have 1 resort.
*/

/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret == 0)
return compression_threshold;

again:
/* HW's ability to limit the CFB is 1:4 */
if (compression_threshold > 4 ||
(fb_cpp == 2 && compression_threshold == 2))
return 0;

ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size >>= 1, 4096,
DRM_MM_SEARCH_DEFAULT);
if (ret && INTEL_INFO(dev)->gen <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
goto again;
} else {
return compression_threshold;
}
}

static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;

ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
else if (ret > 1) {
DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

}

dev_priv->fbc.threshold = ret;

if (INTEL_INFO(dev_priv)->gen >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;

ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
4096, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret)
goto err_fb;

dev_priv->fbc.compressed_llb = compressed_llb;

I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}

dev_priv->fbc.uncompressed_size = size;

DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);

return 0;

err_fb:
kfree(compressed_llb);
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}

int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;

if (size <= dev_priv->fbc.uncompressed_size)
return 0;

/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);

return i915_setup_compression(dev, size, fb_cpp);
}

void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (dev_priv->fbc.uncompressed_size == 0)
return;

drm_mm_remove_node(&dev_priv->fbc.compressed_fb);

if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
}

dev_priv->fbc.uncompressed_size = 0;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

@@ -286,7 +183,6 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;

i915_gem_stolen_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
}

@@ -296,6 +192,8 @@ int i915_gem_init_stolen(struct drm_device *dev)
u32 tmp;
int bios_reserved = 0;

mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");

@@ -386,8 +284,10 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

if (obj->stolen) {
drm_mm_remove_node(obj->stolen);
i915_gem_stolen_remove_node(dev_priv, obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}

@@ -448,8 +348,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (!stolen)
return NULL;

ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
4096, DRM_MM_SEARCH_DEFAULT);
ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
if (ret) {
kfree(stolen);
return NULL;

@@ -459,7 +358,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (obj)
return obj;

drm_mm_remove_node(stolen);
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}

@@ -494,7 +393,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,

stolen->start = stolen_offset;
stolen->size = size;
mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
mutex_unlock(&dev_priv->mm.stolen_lock);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
kfree(stolen);

@@ -504,7 +405,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
drm_mm_remove_node(stolen);
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}

@@ -545,7 +446,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
err_vma:
i915_gem_vma_destroy(vma);
err_out:
drm_mm_remove_node(stolen);
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
drm_gem_object_unreference(&obj->base);
return NULL;
@@ -35,107 +35,20 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"

typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* mulitpass with multiple cliprects? */
u32 cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer32_t;

static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_batchbuffer32_t batchbuffer32;
drm_i915_batchbuffer_t __user *batchbuffer;

if (copy_from_user
(&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
return -EFAULT;

batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
|| __put_user(batchbuffer32.start, &batchbuffer->start)
|| __put_user(batchbuffer32.used, &batchbuffer->used)
|| __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
|| __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
|| __put_user(batchbuffer32.num_cliprects,
&batchbuffer->num_cliprects)
|| __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
&batchbuffer->cliprects))
return -EFAULT;

return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
(unsigned long)batchbuffer);
}

typedef struct _drm_i915_cmdbuffer32 {
u32 buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* mulitpass with multiple cliprects? */
u32 cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer32_t;

static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_cmdbuffer32_t cmdbuffer32;
drm_i915_cmdbuffer_t __user *cmdbuffer;

if (copy_from_user
(&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
return -EFAULT;

cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
|| __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
&cmdbuffer->buf)
|| __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
|| __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
|| __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
|| __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
|| __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
&cmdbuffer->cliprects))
return -EFAULT;

return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
(unsigned long)cmdbuffer);
}

typedef struct drm_i915_irq_emit32 {
u32 irq_seq;
} drm_i915_irq_emit32_t;

static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_irq_emit32_t req32;
drm_i915_irq_emit_t __user *request;

if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;

request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user((int __user *)(unsigned long)req32.irq_seq,
&request->irq_seq))
return -EFAULT;

return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
(unsigned long)request);
}
typedef struct drm_i915_getparam32 {
int param;
struct drm_i915_getparam32 {
s32 param;
/*
* We screwed up the generic ioctl struct here and used a variable-sized
* pointer. Use u32 in the compat struct to match the 32bit pointer
* userspace expects.
*/
u32 value;
} drm_i915_getparam32_t;
};

static int compat_i915_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_getparam32_t req32;
struct drm_i915_getparam32 req32;
drm_i915_getparam_t __user *request;

if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))

@@ -152,41 +65,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
(unsigned long)request);
}

typedef struct drm_i915_mem_alloc32 {
int region;
int alignment;
int size;
u32 region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc32_t;

static int compat_i915_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_mem_alloc32_t req32;
drm_i915_mem_alloc_t __user *request;

if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;

request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.region, &request->region)
|| __put_user(req32.alignment, &request->alignment)
|| __put_user(req32.size, &request->size)
|| __put_user((void __user *)(unsigned long)req32.region_offset,
&request->region_offset))
return -EFAULT;

return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
(unsigned long)request);
}

static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
[DRM_I915_ALLOC] = compat_i915_alloc
};

/**
@ -564,8 +564,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
|
|||
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
|
||||
const struct drm_display_mode *mode =
|
||||
&intel_crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
|
||||
|
||||
htotal = mode->crtc_htotal;
|
||||
hsync_start = mode->crtc_hsync_start;
|
||||
|
@ -620,7 +619,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
|
|||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *mode = &crtc->base.hwmode;
|
||||
enum pipe pipe = crtc->pipe;
|
||||
int position, vtotal;
|
||||
|
||||
|
@ -647,14 +646,14 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
|
||||
int position;
|
||||
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
|
||||
bool in_vbl = true;
|
||||
int ret = 0;
|
||||
unsigned long irqflags;
|
||||
|
||||
if (!intel_crtc->active) {
|
||||
if (WARN_ON(!mode->crtc_clock)) {
|
||||
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
|
||||
"pipe %c\n", pipe_name(pipe));
|
||||
return 0;
|
||||
|
@ -796,7 +795,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!crtc->state->enable) {
|
||||
if (!crtc->hwmode.crtc_clock) {
|
||||
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
@ -805,151 +804,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
|
|||
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
|
||||
vblank_time, flags,
|
||||
crtc,
|
||||
&to_intel_crtc(crtc)->config->base.adjusted_mode);
|
||||
}
|
||||
|
||||
static bool intel_hpd_irq_event(struct drm_device *dev,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
enum drm_connector_status old_status;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
|
||||
old_status = connector->status;
|
||||
|
||||
connector->status = connector->funcs->detect(connector, false);
|
||||
if (old_status == connector->status)
|
||||
return false;
|
||||
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
|
||||
connector->base.id,
|
||||
connector->name,
|
||||
drm_get_connector_status_name(old_status),
|
||||
drm_get_connector_status_name(connector->status));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void i915_digport_work_func(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, struct drm_i915_private, dig_port_work);
|
||||
u32 long_port_mask, short_port_mask;
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
int i;
|
||||
u32 old_bits = 0;
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
long_port_mask = dev_priv->long_hpd_port_mask;
|
||||
dev_priv->long_hpd_port_mask = 0;
|
||||
short_port_mask = dev_priv->short_hpd_port_mask;
|
||||
dev_priv->short_hpd_port_mask = 0;
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
for (i = 0; i < I915_MAX_PORTS; i++) {
|
||||
bool valid = false;
|
||||
bool long_hpd = false;
|
||||
intel_dig_port = dev_priv->hpd_irq_port[i];
|
||||
if (!intel_dig_port || !intel_dig_port->hpd_pulse)
|
||||
continue;
|
||||
|
||||
if (long_port_mask & (1 << i)) {
|
||||
valid = true;
|
||||
long_hpd = true;
|
||||
} else if (short_port_mask & (1 << i))
|
||||
valid = true;
|
||||
|
||||
if (valid) {
|
||||
enum irqreturn ret;
|
||||
|
||||
ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
|
||||
if (ret == IRQ_NONE) {
|
||||
/* fall back to old school hpd */
|
||||
old_bits |= (1 << intel_dig_port->base.hpd_pin);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (old_bits) {
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
dev_priv->hpd_event_bits |= old_bits;
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
schedule_work(&dev_priv->hotplug_work);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle hotplug events outside the interrupt handler proper.
|
||||
*/
|
||||
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
|
||||
|
||||
static void i915_hotplug_work_func(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, struct drm_i915_private, hotplug_work);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct intel_connector *intel_connector;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct drm_connector *connector;
|
||||
bool hpd_disabled = false;
|
||||
bool changed = false;
|
||||
u32 hpd_event_bits;
|
||||
|
||||
mutex_lock(&mode_config->mutex);
|
||||
DRM_DEBUG_KMS("running encoder hotplug functions\n");
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
||||
hpd_event_bits = dev_priv->hpd_event_bits;
|
||||
dev_priv->hpd_event_bits = 0;
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head) {
|
||||
intel_connector = to_intel_connector(connector);
|
||||
if (!intel_connector->encoder)
|
||||
continue;
|
||||
intel_encoder = intel_connector->encoder;
|
||||
if (intel_encoder->hpd_pin > HPD_NONE &&
|
||||
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
|
||||
connector->polled == DRM_CONNECTOR_POLL_HPD) {
|
||||
DRM_INFO("HPD interrupt storm detected on connector %s: "
|
||||
"switching from hotplug detection to polling\n",
|
||||
connector->name);
|
||||
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
|
||||
connector->polled = DRM_CONNECTOR_POLL_CONNECT
|
||||
| DRM_CONNECTOR_POLL_DISCONNECT;
|
||||
hpd_disabled = true;
|
||||
}
|
||||
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
|
||||
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
|
||||
connector->name, intel_encoder->hpd_pin);
|
||||
}
|
||||
}
|
||||
/* if there were no outputs to poll, poll was disabled,
|
||||
* therefore make sure it's enabled when disabling HPD on
|
||||
* some connectors */
|
||||
if (hpd_disabled) {
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
|
||||
msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
|
||||
}
|
||||
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head) {
|
||||
intel_connector = to_intel_connector(connector);
|
||||
if (!intel_connector->encoder)
|
||||
continue;
|
||||
intel_encoder = intel_connector->encoder;
|
||||
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
|
||||
if (intel_encoder->hot_plug)
|
||||
intel_encoder->hot_plug(intel_encoder);
|
||||
if (intel_hpd_irq_event(dev, connector))
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&mode_config->mutex);
|
||||
|
||||
if (changed)
|
||||
drm_kms_helper_hotplug_event(dev);
|
||||
&crtc->hwmode);
|
||||
}
|
||||
|
||||
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
|
||||
|
@ -1372,165 +1227,87 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
|
|||
return ret;
|
||||
}
|
||||
|
||||
#define HPD_STORM_DETECT_PERIOD 1000
|
||||
#define HPD_STORM_THRESHOLD 5
|
||||
|
||||
static int pch_port_to_hotplug_shift(enum port port)
|
||||
static bool pch_port_hotplug_long_detect(enum port port, u32 val)
|
||||
{
|
||||
switch (port) {
|
||||
case PORT_A:
|
||||
case PORT_E:
|
||||
default:
|
||||
return -1;
|
||||
case PORT_B:
|
||||
return 0;
|
||||
return val & PORTB_HOTPLUG_LONG_DETECT;
|
||||
case PORT_C:
|
||||
return 8;
|
||||
return val & PORTC_HOTPLUG_LONG_DETECT;
|
||||
case PORT_D:
|
||||
return 16;
|
||||
return val & PORTD_HOTPLUG_LONG_DETECT;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static int i915_port_to_hotplug_shift(enum port port)
|
||||
static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
|
||||
{
|
||||
switch (port) {
|
||||
case PORT_A:
|
||||
case PORT_E:
|
||||
default:
|
||||
return -1;
|
||||
case PORT_B:
|
||||
return 17;
|
||||
return val & PORTB_HOTPLUG_INT_LONG_PULSE;
|
||||
case PORT_C:
|
||||
return 19;
|
||||
return val & PORTC_HOTPLUG_INT_LONG_PULSE;
|
||||
case PORT_D:
|
||||
return 21;
|
||||
}
|
||||
}
|
||||
|
||||
static enum port get_port_from_pin(enum hpd_pin pin)
|
||||
{
|
||||
switch (pin) {
|
||||
case HPD_PORT_B:
|
||||
return PORT_B;
|
||||
case HPD_PORT_C:
|
||||
return PORT_C;
|
||||
case HPD_PORT_D:
|
||||
return PORT_D;
|
||||
return val & PORTD_HOTPLUG_INT_LONG_PULSE;
|
||||
default:
|
||||
return PORT_A; /* no hpd */
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_hpd_irq_handler(struct drm_device *dev,
|
||||
u32 hotplug_trigger,
|
||||
u32 dig_hotplug_reg,
|
||||
const u32 hpd[HPD_NUM_PINS])
|
||||
/* Get a bit mask of pins that have triggered, and which ones may be long. */
|
||||
static void pch_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
|
||||
u32 hotplug_trigger, u32 dig_hotplug_reg,
|
||||
const u32 hpd[HPD_NUM_PINS])
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
enum port port;
|
||||
bool storm_detected = false;
|
||||
bool queue_dig = false, queue_hp = false;
|
||||
u32 dig_shift;
|
||||
u32 dig_port_mask = 0;
|
||||
int i;
|
||||
|
||||
*pin_mask = 0;
|
||||
*long_mask = 0;
|
||||
|
||||
for_each_hpd_pin(i) {
|
||||
if ((hpd[i] & hotplug_trigger) == 0)
|
||||
continue;
|
||||
|
||||
*pin_mask |= BIT(i);
|
||||
|
||||
port = intel_hpd_pin_to_port(i);
|
||||
if (pch_port_hotplug_long_detect(port, dig_hotplug_reg))
|
||||
*long_mask |= BIT(i);
|
||||
}
|
||||
|
||||
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
|
||||
hotplug_trigger, dig_hotplug_reg, *pin_mask);
|
||||
|
||||
}
|
||||
|
||||
/* Get a bit mask of pins that have triggered, and which ones may be long. */
|
||||
static void i9xx_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
|
||||
u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS])
|
||||
{
|
||||
enum port port;
|
||||
int i;
|
||||
|
||||
*pin_mask = 0;
|
||||
*long_mask = 0;
|
||||
|
||||
if (!hotplug_trigger)
|
||||
return;
|
||||
|
||||
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
|
||||
hotplug_trigger, dig_hotplug_reg);
|
||||
|
||||
spin_lock(&dev_priv->irq_lock);
|
||||
for (i = 1; i < HPD_NUM_PINS; i++) {
|
||||
if (!(hpd[i] & hotplug_trigger))
|
||||
for_each_hpd_pin(i) {
|
||||
if ((hpd[i] & hotplug_trigger) == 0)
|
||||
continue;
|
||||
|
||||
port = get_port_from_pin(i);
|
||||
if (port && dev_priv->hpd_irq_port[port]) {
|
||||
bool long_hpd;
|
||||
*pin_mask |= BIT(i);
|
||||
|
||||
if (!HAS_GMCH_DISPLAY(dev_priv)) {
|
||||
dig_shift = pch_port_to_hotplug_shift(port);
|
||||
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
|
||||
} else {
|
||||
dig_shift = i915_port_to_hotplug_shift(port);
|
||||
long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
|
||||
}
|
||||
|
||||
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
|
||||
port_name(port),
|
||||
long_hpd ? "long" : "short");
|
||||
/* for long HPD pulses we want to have the digital queue happen,
|
||||
but we still want HPD storm detection to function. */
|
||||
if (long_hpd) {
|
||||
dev_priv->long_hpd_port_mask |= (1 << port);
|
||||
dig_port_mask |= hpd[i];
|
||||
} else {
|
||||
/* for short HPD just trigger the digital queue */
|
||||
dev_priv->short_hpd_port_mask |= (1 << port);
|
||||
hotplug_trigger &= ~hpd[i];
|
||||
}
|
||||
queue_dig = true;
|
||||
}
|
||||
port = intel_hpd_pin_to_port(i);
|
||||
if (i9xx_port_hotplug_long_detect(port, hotplug_trigger))
|
||||
*long_mask |= BIT(i);
|
||||
}
|
||||
|
||||
for (i = 1; i < HPD_NUM_PINS; i++) {
|
||||
if (hpd[i] & hotplug_trigger &&
|
||||
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
|
||||
/*
|
||||
* On GMCH platforms the interrupt mask bits only
|
||||
* prevent irq generation, not the setting of the
|
||||
* hotplug bits itself. So only WARN about unexpected
|
||||
* interrupts on saner platforms.
|
||||
*/
|
||||
WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
|
||||
"Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
|
||||
hotplug_trigger, i, hpd[i]);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!(hpd[i] & hotplug_trigger) ||
|
||||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
|
||||
continue;
|
||||
|
||||
if (!(dig_port_mask & hpd[i])) {
|
||||
dev_priv->hpd_event_bits |= (1 << i);
|
||||
queue_hp = true;
|
||||
}
|
||||
|
||||
if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
|
||||
dev_priv->hpd_stats[i].hpd_last_jiffies
|
||||
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
|
||||
dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
|
||||
dev_priv->hpd_stats[i].hpd_cnt = 0;
|
||||
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
|
||||
} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
|
||||
dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
|
||||
dev_priv->hpd_event_bits &= ~(1 << i);
|
||||
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
|
||||
storm_detected = true;
|
||||
} else {
|
||||
dev_priv->hpd_stats[i].hpd_cnt++;
|
||||
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
|
||||
dev_priv->hpd_stats[i].hpd_cnt);
|
||||
}
|
||||
}
|
||||
|
||||
if (storm_detected)
|
||||
dev_priv->display.hpd_irq_setup(dev);
|
||||
spin_unlock(&dev_priv->irq_lock);
|
||||
|
||||
/*
|
||||
* Our hotplug handler can grab modeset locks (by calling down into the
|
||||
* fb helpers). Hence it must not be run on our own dev-priv->wq work
|
||||
* queue for otherwise the flush_work in the pageflip code will
|
||||
* deadlock.
|
||||
*/
|
||||
if (queue_dig)
|
||||
queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
|
||||
if (queue_hp)
|
||||
schedule_work(&dev_priv->hotplug_work);
|
||||
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, pins 0x%08x\n",
|
||||
hotplug_trigger, *pin_mask);
|
||||
}
|
||||
|
||||
static void gmbus_irq_handler(struct drm_device *dev)
|
||||
|
@ -1755,28 +1532,31 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
|
||||
u32 pin_mask, long_mask;
|
||||
|
||||
if (hotplug_status) {
|
||||
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
|
||||
/*
|
||||
* Make sure hotplug status is cleared before we clear IIR, or else we
|
||||
* may miss hotplug events.
|
||||
*/
|
||||
POSTING_READ(PORT_HOTPLUG_STAT);
|
||||
if (!hotplug_status)
|
||||
return;
|
||||
|
||||
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
|
||||
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
|
||||
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
|
||||
/*
|
||||
* Make sure hotplug status is cleared before we clear IIR, or else we
|
||||
* may miss hotplug events.
|
||||
*/
|
||||
POSTING_READ(PORT_HOTPLUG_STAT);
|
||||
|
||||
intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
|
||||
} else {
|
||||
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
|
||||
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
|
||||
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
|
||||
|
||||
intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
|
||||
}
|
||||
i9xx_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, hpd_status_g4x);
|
||||
intel_hpd_irq_handler(dev, pin_mask, long_mask);
|
||||
|
||||
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
|
||||
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
|
||||
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
|
||||
dp_aux_irq_handler(dev);
|
||||
} else {
|
||||
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
|
||||
|
||||
i9xx_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, hpd_status_i915);
|
||||
intel_hpd_irq_handler(dev, pin_mask, long_mask);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1875,12 +1655,17 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int pipe;
|
||||
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
|
||||
u32 dig_hotplug_reg;
|
||||
|
||||
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
|
||||
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
|
||||
if (hotplug_trigger) {
|
||||
u32 dig_hotplug_reg, pin_mask, long_mask;
|
||||
|
||||
intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
|
||||
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
|
||||
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
|
||||
|
||||
pch_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
|
||||
dig_hotplug_reg, hpd_ibx);
|
||||
intel_hpd_irq_handler(dev, pin_mask, long_mask);
|
||||
}
|
||||
|
||||
if (pch_iir & SDE_AUDIO_POWER_MASK) {
|
||||
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
|
||||
|
@ -1972,12 +1757,16 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int pipe;
|
||||
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
|
||||
u32 dig_hotplug_reg;
|
||||
|
||||
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
|
||||
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
|
||||
if (hotplug_trigger) {
|
||||
u32 dig_hotplug_reg, pin_mask, long_mask;
|
||||
|
||||
intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
|
||||
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
|
||||
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
|
||||
pch_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
|
||||
dig_hotplug_reg, hpd_cpt);
|
||||
intel_hpd_irq_handler(dev, pin_mask, long_mask);
|
||||
}
|
||||
|
||||
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
|
||||
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
|
||||
|
@ -2176,8 +1965,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
|
|||
static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t hp_control;
|
||||
uint32_t hp_trigger;
|
||||
u32 hp_control, hp_trigger;
|
||||
u32 pin_mask, long_mask;
|
||||
|
||||
/* Get the status */
|
||||
hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
|
||||
|
@ -2189,20 +1978,11 @@ static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
|
|||
return;
|
||||
}
|
||||
|
||||
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
|
||||
hp_control & BXT_HOTPLUG_CTL_MASK);
|
||||
|
||||
/* Check for HPD storm and schedule bottom half */
|
||||
intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
|
||||
|
||||
/*
|
||||
* FIXME: Save the hot plug status for bottom half before
|
||||
* clearing the sticky status bits, else the status will be
|
||||
* lost.
|
||||
*/
|
||||
|
||||
/* Clear sticky bits in hpd status */
|
||||
I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
|
||||
|
||||
pch_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control, hpd_bxt);
|
||||
intel_hpd_irq_handler(dev, pin_mask, long_mask);
|
||||
}
|
||||
|
||||
static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
||||
|
@ -3203,12 +2983,12 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
|
|||
if (HAS_PCH_IBX(dev)) {
|
||||
hotplug_irqs = SDE_HOTPLUG_MASK;
|
||||
for_each_intel_encoder(dev, intel_encoder)
|
||||
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
|
||||
if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
|
||||
enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
|
||||
} else {
|
||||
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
|
||||
for_each_intel_encoder(dev, intel_encoder)
|
||||
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
|
||||
if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
|
||||
enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
|
||||
}
|
||||
|
||||
|
@ -3237,7 +3017,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
|
|||
|
||||
/* Now, enable HPD */
|
||||
for_each_intel_encoder(dev, intel_encoder) {
|
||||
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
|
||||
if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
|
||||
== HPD_ENABLED)
|
||||
hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
|
||||
}
|
||||
|
@ -4130,7 +3910,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
|
|||
/* Note HDMI and DP share hotplug bits */
|
||||
/* enable bits are the same for all generations */
|
||||
for_each_intel_encoder(dev, intel_encoder)
|
||||
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
|
||||
if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
|
||||
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
|
||||
/* Programming the CRT detection parameters tends
|
||||
to generate a spurious hotplug event about three
|
||||
|
@ -4270,46 +4050,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
|
|||
I915_WRITE(IIR, I915_READ(IIR));
|
||||
}
|
||||
|
||||
static void intel_hpd_irq_reenable_work(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv),
|
||||
hotplug_reenable_work.work);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
int i;
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
|
||||
struct drm_connector *connector;
|
||||
|
||||
if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
|
||||
continue;
|
||||
|
||||
dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
|
||||
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head) {
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
|
||||
if (intel_connector->encoder->hpd_pin == i) {
|
||||
if (connector->polled != intel_connector->polled)
|
||||
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
|
||||
connector->name);
|
||||
connector->polled = intel_connector->polled;
|
||||
if (!connector->polled)
|
||||
connector->polled = DRM_CONNECTOR_POLL_HPD;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (dev_priv->display.hpd_irq_setup)
|
||||
dev_priv->display.hpd_irq_setup(dev);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_irq_init - initializes irq support
|
||||
* @dev_priv: i915 device instance
|
||||
|
@ -4321,8 +4061,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
|
||||
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
|
||||
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
|
||||
intel_hpd_init_work(dev_priv);
|
||||
|
||||
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
|
||||
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
|
||||
|
||||
|
@ -4335,8 +4075,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
|
|||
|
||||
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
|
||||
i915_hangcheck_elapsed);
|
||||
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
|
||||
intel_hpd_irq_reenable_work);
|
||||
|
||||
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
|
||||
|
||||
|
@ -4421,46 +4159,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_hpd_init - initializes and enables hpd support
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This function enables the hotplug support. It requires that interrupts have
|
||||
* already been enabled with intel_irq_init_hw(). From this point on hotplug and
|
||||
* poll request can run concurrently to other code, so locking rules must be
|
||||
* obeyed.
|
||||
*
|
||||
* This is a separate step from interrupt enabling to simplify the locking rules
|
||||
* in the driver load and resume code.
|
||||
*/
|
||||
void intel_hpd_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct drm_connector *connector;
|
||||
int i;
|
||||
|
||||
for (i = 1; i < HPD_NUM_PINS; i++) {
|
||||
dev_priv->hpd_stats[i].hpd_cnt = 0;
|
||||
dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
|
||||
}
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head) {
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
connector->polled = intel_connector->polled;
|
||||
if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
|
||||
connector->polled = DRM_CONNECTOR_POLL_HPD;
|
||||
if (intel_connector->mst_port)
|
||||
connector->polled = DRM_CONNECTOR_POLL_HPD;
|
||||
}
|
||||
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked checks happy. */
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (dev_priv->display.hpd_irq_setup)
|
||||
dev_priv->display.hpd_irq_setup(dev);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_irq_install - enables the hardware interrupt
|
||||
* @dev_priv: i915 device instance
|
||||
|
|
|
@@ -28,7 +28,6 @@ struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
.semaphores = -1,
.lvds_downclock = 0,
.lvds_channel_mode = 0,
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,

@@ -52,13 +51,12 @@ struct i915_params i915 __read_mostly = {
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
.edp_vswing = 0,
};

module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");

module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);

@@ -84,11 +82,6 @@ MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");

module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");

module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "

@@ -104,7 +97,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");

module_param_named(reset, i915.reset, bool, 0600);
module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");

module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);

@@ -182,10 +175,6 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");

module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
MODULE_PARM_DESC(nuclear_pageflip,
"Force atomic modeset functionality; only planes work for now (default: false).");

/* WA to get away with the default setting in VBT for early platforms.Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
MODULE_PARM_DESC(edp_vswing,
@ -50,12 +50,17 @@
|
|||
|
||||
/* PCI config space */
|
||||
|
||||
#define HPLLCC 0xc0 /* 855 only */
|
||||
#define GC_CLOCK_CONTROL_MASK (0xf << 0)
|
||||
#define HPLLCC 0xc0 /* 85x only */
|
||||
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
|
||||
#define GC_CLOCK_133_200 (0 << 0)
|
||||
#define GC_CLOCK_100_200 (1 << 0)
|
||||
#define GC_CLOCK_100_133 (2 << 0)
|
||||
#define GC_CLOCK_166_250 (3 << 0)
|
||||
#define GC_CLOCK_133_266 (3 << 0)
|
||||
#define GC_CLOCK_133_200_2 (4 << 0)
|
||||
#define GC_CLOCK_133_266_2 (5 << 0)
|
||||
#define GC_CLOCK_166_266 (6 << 0)
|
||||
#define GC_CLOCK_166_250 (7 << 0)
|
||||
|
||||
#define GCFGC2 0xda
|
||||
#define GCFGC 0xf0 /* 915+ only */
|
||||
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
|
||||
|
@ -155,6 +160,7 @@
|
|||
#define GAM_ECOCHK 0x4090
|
||||
#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
|
||||
#define ECOCHK_SNB_BIT (1<<10)
|
||||
#define ECOCHK_DIS_TLB (1<<8)
|
||||
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
|
||||
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
|
||||
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
|
||||
|
@ -316,6 +322,8 @@
|
|||
#define MI_RESTORE_EXT_STATE_EN (1<<2)
|
||||
#define MI_FORCE_RESTORE (1<<1)
|
||||
#define MI_RESTORE_INHIBIT (1<<0)
|
||||
#define HSW_MI_RS_SAVE_STATE_EN (1<<3)
|
||||
#define HSW_MI_RS_RESTORE_STATE_EN (1<<2)
|
||||
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
|
||||
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
|
||||
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
|
||||
|
@ -347,6 +355,8 @@
|
|||
#define MI_INVALIDATE_BSD (1<<7)
|
||||
#define MI_FLUSH_DW_USE_GTT (1<<2)
|
||||
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
|
||||
#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
|
||||
#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
|
||||
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
|
||||
#define MI_BATCH_NON_SECURE (1)
|
||||
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
|
||||
|
@ -356,6 +366,7 @@
|
|||
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
|
||||
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
|
||||
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
|
||||
#define MI_BATCH_RESOURCE_STREAMER (1<<10)
|
||||
|
||||
#define MI_PREDICATE_SRC0 (0x2400)
|
||||
#define MI_PREDICATE_SRC1 (0x2408)
|
||||
|
@ -410,6 +421,7 @@
|
|||
#define DISPLAY_PLANE_A (0<<20)
|
||||
#define DISPLAY_PLANE_B (1<<20)
|
||||
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
|
||||
#define PIPE_CONTROL_FLUSH_L3 (1<<27)
|
||||
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
|
||||
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
|
||||
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
|
||||
|
@ -426,6 +438,7 @@
|
|||
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
|
||||
#define PIPE_CONTROL_NOTIFY (1<<8)
|
||||
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
|
||||
#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
|
||||
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
|
||||
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
|
||||
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
|
||||
|
@ -449,7 +462,6 @@
|
|||
#define MI_CLFLUSH MI_INSTR(0x27, 0)
|
||||
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
|
||||
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
|
||||
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
|
||||
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
|
||||
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
|
||||
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
|
||||
|
@ -1163,10 +1175,12 @@ enum skl_disp_power_wells {
|
|||
#define _PORT_PLL_EBB_0_A 0x162034
|
||||
#define _PORT_PLL_EBB_0_B 0x6C034
|
||||
#define _PORT_PLL_EBB_0_C 0x6C340
|
||||
#define PORT_PLL_P1_MASK (0x07 << 13)
|
||||
#define PORT_PLL_P1(x) ((x) << 13)
|
||||
#define PORT_PLL_P2_MASK (0x1f << 8)
|
||||
#define PORT_PLL_P2(x) ((x) << 8)
|
||||
#define PORT_PLL_P1_SHIFT 13
|
||||
#define PORT_PLL_P1_MASK (0x07 << PORT_PLL_P1_SHIFT)
|
||||
#define PORT_PLL_P1(x) ((x) << PORT_PLL_P1_SHIFT)
|
||||
#define PORT_PLL_P2_SHIFT 8
|
||||
#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
|
||||
#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
|
||||
#define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \
|
||||
_PORT_PLL_EBB_0_B, \
|
||||
_PORT_PLL_EBB_0_C)
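
As an aside, the reworked P1/P2 definitions above express the BXT port PLL dividers as explicit shift/mask pairs. The following is a minimal standalone sketch (not driver code) of how such a field would be unpacked from a register readout; the register value used here is made up purely for illustration.

#include <stdio.h>
#include <stdint.h>

#define PORT_PLL_P1_SHIFT 13
#define PORT_PLL_P1_MASK  (0x07 << PORT_PLL_P1_SHIFT)
#define PORT_PLL_P2_SHIFT 8
#define PORT_PLL_P2_MASK  (0x1f << PORT_PLL_P2_SHIFT)

int main(void)
{
	/* Hypothetical EBB_0 readout with P1 = 4 and P2 = 20 packed in. */
	uint32_t ebb0 = (4u << PORT_PLL_P1_SHIFT) | (20u << PORT_PLL_P2_SHIFT);

	printf("P1 = %u, P2 = %u\n",
	       (unsigned)((ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT),
	       (unsigned)((ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT));
	return 0;
}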
|
||||
|
@ -1186,8 +1200,9 @@ enum skl_disp_power_wells {
|
|||
/* PORT_PLL_0_A */
|
||||
#define PORT_PLL_M2_MASK 0xFF
|
||||
/* PORT_PLL_1_A */
|
||||
#define PORT_PLL_N_MASK (0x0F << 8)
|
||||
#define PORT_PLL_N(x) ((x) << 8)
|
||||
#define PORT_PLL_N_SHIFT 8
|
||||
#define PORT_PLL_N_MASK (0x0F << PORT_PLL_N_SHIFT)
|
||||
#define PORT_PLL_N(x) ((x) << PORT_PLL_N_SHIFT)
|
||||
/* PORT_PLL_2_A */
|
||||
#define PORT_PLL_M2_FRAC_MASK 0x3FFFFF
|
||||
/* PORT_PLL_3_A */
|
||||
|
@ -1201,9 +1216,11 @@ enum skl_disp_power_wells {
|
|||
/* PORT_PLL_8_A */
|
||||
#define PORT_PLL_TARGET_CNT_MASK 0x3FF
|
||||
/* PORT_PLL_9_A */
|
||||
#define PORT_PLL_LOCK_THRESHOLD_MASK 0xe
|
||||
#define PORT_PLL_LOCK_THRESHOLD_SHIFT 1
|
||||
#define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
|
||||
/* PORT_PLL_10_A */
|
||||
#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
|
||||
#define PORT_PLL_DCO_AMP_DEFAULT 15
|
||||
#define PORT_PLL_DCO_AMP_MASK 0x3c00
|
||||
#define PORT_PLL_DCO_AMP(x) (x<<10)
|
||||
#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
|
||||
|
@ -1377,6 +1394,18 @@ enum skl_disp_power_wells {
|
|||
_PORT_TX_DW14_LN0_C) + \
|
||||
_BXT_LANE_OFFSET(lane))
|
||||
|
||||
/* UAIMI scratch pad register 1 */
|
||||
#define UAIMI_SPR1 0x4F074
|
||||
/* SKL VccIO mask */
|
||||
#define SKL_VCCIO_MASK 0x1
|
||||
/* SKL balance leg register */
|
||||
#define DISPIO_CR_TX_BMU_CR0 0x6C00C
|
||||
/* I_boost values */
|
||||
#define BALANCE_LEG_SHIFT(port) (8+3*(port))
|
||||
#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
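
The two macros above describe a 3-bit per-port I_boost ("balance leg") field starting at bit 8 + 3*port. A small standalone sketch of updating one port's field in a register word follows; it assumes ports are numbered from 0, and the register and values are examples only, not driver code.

#include <stdio.h>
#include <stdint.h>

#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
#define BALANCE_LEG_MASK(port)  (7 << (8 + 3 * (port)))

/* Replace the 3-bit balance-leg field for one port in a register value. */
static uint32_t set_balance_leg(uint32_t reg, int port, uint32_t iboost)
{
	reg &= ~BALANCE_LEG_MASK(port);
	reg |= (iboost & 7) << BALANCE_LEG_SHIFT(port);
	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_balance_leg(reg, 0, 3); /* port 0 -> bits 10:8  */
	reg = set_balance_leg(reg, 2, 5); /* port 2 -> bits 16:14 */
	printf("0x%08x\n", (unsigned)reg);
	return 0;
}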
|
||||
/* Balance leg disable bits */
|
||||
#define BALANCE_LEG_DISABLE_SHIFT 23
|
||||
|
||||
/*
|
||||
* Fence registers
|
||||
*/
|
||||
|
@ -1456,6 +1485,9 @@ enum skl_disp_power_wells {
|
|||
#define RING_MAX_IDLE(base) ((base)+0x54)
|
||||
#define RING_HWS_PGA(base) ((base)+0x80)
|
||||
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
|
||||
#define RING_RESET_CTL(base) ((base)+0xd0)
|
||||
#define RESET_CTL_REQUEST_RESET (1 << 0)
|
||||
#define RESET_CTL_READY_TO_RESET (1 << 1)
|
||||
|
||||
#define HSW_GTT_CACHE_EN 0x4024
|
||||
#define GTT_CACHE_EN_ALL 0xF0007FFF
|
||||
|
@ -1946,6 +1978,9 @@ enum skl_disp_power_wells {
|
|||
#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
|
||||
#define FBC_TAG 0x03300
|
||||
|
||||
#define FBC_STATUS2 0x43214
|
||||
#define FBC_COMPRESSION_MASK 0x7ff
|
||||
|
||||
#define FBC_LL_SIZE (1536)
|
||||
|
||||
/* Framebuffer compression for GM45+ */
|
||||
|
@ -2116,7 +2151,7 @@ enum skl_disp_power_wells {
|
|||
#define DPLL_DVO_2X_MODE (1 << 30)
|
||||
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
|
||||
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
|
||||
#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
|
||||
#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
|
||||
#define DPLL_VGA_MODE_DIS (1 << 28)
|
||||
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
|
||||
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
|
||||
|
@ -2130,8 +2165,8 @@ enum skl_disp_power_wells {
|
|||
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
|
||||
#define DPLL_LOCK_VLV (1<<15)
|
||||
#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
|
||||
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
|
||||
#define DPLL_SSC_REF_CLOCK_CHV (1<<13)
|
||||
#define DPLL_INTEGRATED_REF_CLK_VLV (1<<13)
|
||||
#define DPLL_SSC_REF_CLK_CHV (1<<13)
|
||||
#define DPLL_PORTC_READY_MASK (0xf << 4)
|
||||
#define DPLL_PORTB_READY_MASK (0xf)
|
||||
|
||||
|
@ -2488,6 +2523,9 @@ enum skl_disp_power_wells {
|
|||
#define CLKCFG_MEM_800 (3 << 4)
|
||||
#define CLKCFG_MEM_MASK (7 << 4)
|
||||
|
||||
#define HPLLVCO (MCHBAR_MIRROR_BASE + 0xc38)
|
||||
#define HPLLVCO_MOBILE (MCHBAR_MIRROR_BASE + 0xc0f)
|
||||
|
||||
#define TSC1 0x11001
|
||||
#define TSE (1<<0)
|
||||
#define TR1 0x11006
|
||||
|
@ -2718,8 +2756,10 @@ enum skl_disp_power_wells {
|
|||
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
|
||||
|
||||
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
|
||||
#define BXT_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x7070)
|
||||
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
|
||||
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
|
||||
#define BXT_RP_STATE_CAP 0x138170
|
||||
|
||||
#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
|
||||
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
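
The two interval macros above appear to be fixed-point conversions from microseconds into counter ticks of roughly 1.28 us and 1.33 us: us / 1.28 equals us * 100 / 128, and us / (4/3) equals us * 3 / 4. A small standalone check of that arithmetic (the 10 ms input is an arbitrary example):

#include <stdio.h>

#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) /* us / 1.28   == us * 100 / 128 */
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)   /* us / (4/3)  == us * 3 / 4     */

int main(void)
{
	unsigned int us = 10000; /* 10 ms example value */

	printf("%u us -> %u ticks @1.28us (expected ~%.0f)\n",
	       us, INTERVAL_1_28_US(us), us / 1.28);
	printf("%u us -> %u ticks @1.33us (expected ~%.0f)\n",
	       us, INTERVAL_1_33_US(us), us / (4.0 / 3.0));
	return 0;
}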
|
||||
|
@ -2767,7 +2807,8 @@ enum skl_disp_power_wells {
|
|||
* valid. Now, docs explain in dwords what is in the context object. The full
|
||||
* size is 70720 bytes, however, the power context and execlist context will
|
||||
* never be saved (power context is stored elsewhere, and execlists don't work
|
||||
* on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
|
||||
* on HSW) - so the final size, including the extra state required for the
|
||||
* Resource Streamer, is 66944 bytes, which rounds to 17 pages.
|
||||
*/
|
||||
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
|
||||
/* Same as Haswell, but 72064 bytes now. */
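
A quick arithmetic check of the sizes quoted in the comments above, assuming 4 KiB pages: 66944 bytes does round up to 17 pages, and 72064 bytes would round up to 18. This is a standalone sketch, not driver code.

#include <stdio.h>

#define PAGE_SIZE 4096 /* assumption: 4 KiB pages */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	printf("HSW: %d bytes -> %d pages\n", 66944, DIV_ROUND_UP(66944, PAGE_SIZE)); /* 17 */
	printf("BDW: %d bytes -> %d pages\n", 72064, DIV_ROUND_UP(72064, PAGE_SIZE)); /* 18 */
	return 0;
}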
|
||||
|
@ -4398,9 +4439,32 @@ enum skl_disp_power_wells {
|
|||
#define DSPARB_BSTART_SHIFT 0
|
||||
#define DSPARB_BEND_SHIFT 9 /* on 855 */
|
||||
#define DSPARB_AEND_SHIFT 0
|
||||
|
||||
#define DSPARB_SPRITEA_SHIFT_VLV 0
|
||||
#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
|
||||
#define DSPARB_SPRITEB_SHIFT_VLV 8
|
||||
#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
|
||||
#define DSPARB_SPRITEC_SHIFT_VLV 16
|
||||
#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
|
||||
#define DSPARB_SPRITED_SHIFT_VLV 24
|
||||
#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
|
||||
#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
|
||||
#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
|
||||
#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
|
||||
#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
|
||||
#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
|
||||
#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
|
||||
#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
|
||||
#define DSPARB_SPRITED_HI_SHIFT_VLV 12
|
||||
#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
|
||||
#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
|
||||
#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
|
||||
#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
|
||||
#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
|
||||
#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
|
||||
#define DSPARB_SPRITEE_SHIFT_VLV 0
|
||||
#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
|
||||
#define DSPARB_SPRITEF_SHIFT_VLV 8
|
||||
#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
|
||||
|
||||
/* pnv/gen4/g4x/vlv/chv */
|
||||
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
|
||||
|
@ -5754,6 +5818,13 @@ enum skl_disp_power_wells {
|
|||
#define HSW_NDE_RSTWRN_OPT 0x46408
|
||||
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
|
||||
|
||||
#define SKL_DFSM 0x51000
|
||||
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
|
||||
#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
|
||||
#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
|
||||
#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
|
||||
#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
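
The SKL_DFSM definitions above look like a fused 2-bit field capping the maximum cdclk. The sketch below decodes that field into a frequency; the kHz mapping is an assumption taken from the macro names (675, 540, 450 and 337.5 MHz), and the readout value is an example, not driver code.

#include <stdio.h>
#include <stdint.h>

#define SKL_DFSM_CDCLK_LIMIT_MASK  (3 << 23)
#define SKL_DFSM_CDCLK_LIMIT_675   (0 << 23)
#define SKL_DFSM_CDCLK_LIMIT_540   (1 << 23)
#define SKL_DFSM_CDCLK_LIMIT_450   (2 << 23)
#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)

/* Map the fused cdclk limit field to a frequency in kHz. */
static int skl_dfsm_max_cdclk_khz(uint32_t dfsm)
{
	switch (dfsm & SKL_DFSM_CDCLK_LIMIT_MASK) {
	case SKL_DFSM_CDCLK_LIMIT_675:   return 675000;
	case SKL_DFSM_CDCLK_LIMIT_540:   return 540000;
	case SKL_DFSM_CDCLK_LIMIT_450:   return 450000;
	case SKL_DFSM_CDCLK_LIMIT_337_5: return 337500;
	}
	return 0; /* not reached: the 2-bit field covers all four cases */
}

int main(void)
{
	uint32_t dfsm = SKL_DFSM_CDCLK_LIMIT_540; /* example readout */

	printf("max cdclk: %d kHz\n", skl_dfsm_max_cdclk_khz(dfsm));
	return 0;
}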
|
||||
|
||||
#define FF_SLICE_CS_CHICKEN2 0x20e4
|
||||
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
|
||||
|
||||
|
@ -5791,6 +5862,7 @@ enum skl_disp_power_wells {
|
|||
|
||||
#define GEN8_L3SQCREG4 0xb118
|
||||
#define GEN8_LQSC_RO_PERF_DIS (1<<27)
|
||||
#define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21)
|
||||
|
||||
/* GEN8 chicken */
|
||||
#define HDC_CHICKEN0 0x7300
|
||||
|
@ -6047,6 +6119,9 @@ enum skl_disp_power_wells {
|
|||
#define _VIDEO_DIP_CTL_A 0xe0200
|
||||
#define _VIDEO_DIP_DATA_A 0xe0208
|
||||
#define _VIDEO_DIP_GCP_A 0xe0210
|
||||
#define GCP_COLOR_INDICATION (1 << 2)
|
||||
#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
|
||||
#define GCP_AV_MUTE (1 << 0)
|
||||
|
||||
#define _VIDEO_DIP_CTL_B 0xe1200
|
||||
#define _VIDEO_DIP_DATA_B 0xe1208
|
||||
|
@ -6186,6 +6261,7 @@ enum skl_disp_power_wells {
|
|||
#define _TRANSA_CHICKEN1 0xf0060
|
||||
#define _TRANSB_CHICKEN1 0xf1060
|
||||
#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
|
||||
#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
|
||||
#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
|
||||
#define _TRANSA_CHICKEN2 0xf0064
|
||||
#define _TRANSB_CHICKEN2 0xf1064
|
||||
|
@ -6370,6 +6446,8 @@ enum skl_disp_power_wells {
|
|||
#define PCH_PP_CONTROL 0xc7204
|
||||
#define PANEL_UNLOCK_REGS (0xabcd << 16)
|
||||
#define PANEL_UNLOCK_MASK (0xffff << 16)
|
||||
#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0)
|
||||
#define BXT_POWER_CYCLE_DELAY_SHIFT 4
|
||||
#define EDP_FORCE_VDD (1 << 3)
|
||||
#define EDP_BLC_ENABLE (1 << 2)
|
||||
#define PANEL_POWER_RESET (1 << 1)
|
||||
|
@ -6398,6 +6476,17 @@ enum skl_disp_power_wells {
|
|||
#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
|
||||
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
|
||||
|
||||
/* BXT PPS changes - 2nd set of PPS registers */
|
||||
#define _BXT_PP_STATUS2 0xc7300
|
||||
#define _BXT_PP_CONTROL2 0xc7304
|
||||
#define _BXT_PP_ON_DELAYS2 0xc7308
|
||||
#define _BXT_PP_OFF_DELAYS2 0xc730c
|
||||
|
||||
#define BXT_PP_STATUS(n) ((!n) ? PCH_PP_STATUS : _BXT_PP_STATUS2)
|
||||
#define BXT_PP_CONTROL(n) ((!n) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2)
|
||||
#define BXT_PP_ON_DELAYS(n) ((!n) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2)
|
||||
#define BXT_PP_OFF_DELAYS(n) ((!n) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2)
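
The BXT_PP_*(n) macros above pick between the original PCH panel-power-sequencer registers and the second Broxton set purely on whether the index is zero; because of the !n test, any non-zero index selects the second set. A minimal standalone illustration using the two control offsets defined above (not driver code):

#include <stdio.h>
#include <stdint.h>

/* Offsets copied from the definitions above. */
#define PCH_PP_CONTROL   0xc7204
#define _BXT_PP_CONTROL2 0xc7304

/* Mirror of BXT_PP_CONTROL(n): index 0 -> primary set, anything else -> 2nd set. */
static uint32_t bxt_pp_control(int n)
{
	return !n ? PCH_PP_CONTROL : _BXT_PP_CONTROL2;
}

int main(void)
{
	printf("PPS instance 0 control reg: 0x%x\n", (unsigned)bxt_pp_control(0));
	printf("PPS instance 1 control reg: 0x%x\n", (unsigned)bxt_pp_control(1));
	return 0;
}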
|
||||
|
||||
#define PCH_DP_B 0xe4100
|
||||
#define PCH_DPB_AUX_CH_CTL 0xe4110
|
||||
#define PCH_DPB_AUX_CH_DATA1 0xe4114
|
||||
|
@ -6698,6 +6787,7 @@ enum skl_disp_power_wells {
|
|||
#define GEN6_PCODE_READ_RC6VIDS 0x5
|
||||
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
|
||||
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
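
The encode/decode pair above maps a voltage in millivolts to the RC6 VID field in 5 mV steps above a 245 mV base. A tiny standalone round-trip check (the 450 mV input is an arbitrary example):

#include <stdio.h>

#define GEN6_ENCODE_RC6_VID(mv)   (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)

int main(void)
{
	int mv = 450; /* example voltage in millivolts */
	int vid = GEN6_ENCODE_RC6_VID(mv);

	/* Round-trips exactly because (450 - 245) is a multiple of 5. */
	printf("%d mV -> vid %d -> %d mV\n", mv, vid, GEN6_DECODE_RC6_VID(vid));
	return 0;
}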
|
||||
#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
|
||||
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
|
||||
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
|
||||
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
|
||||
|
@ -7163,6 +7253,7 @@ enum skl_disp_power_wells {
|
|||
#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
|
||||
#define LCPLL_CLK_FREQ_675_BDW (3<<26)
|
||||
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
|
||||
#define LCPLL_ROOT_CD_CLOCK_DISABLE (1<<24)
|
||||
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
|
||||
#define LCPLL_POWER_DOWN_ALLOW (1<<22)
|
||||
#define LCPLL_CD_SOURCE_FCLK (1<<21)
|
||||
|
@ -7265,12 +7356,6 @@ enum skl_disp_power_wells {
|
|||
#define DC_STATE_EN 0x45504
|
||||
#define DC_STATE_EN_UPTO_DC5 (1<<0)
|
||||
#define DC_STATE_EN_DC9 (1<<3)
|
||||
|
||||
/*
|
||||
* SKL DC
|
||||
*/
|
||||
#define DC_STATE_EN 0x45504
|
||||
#define DC_STATE_EN_UPTO_DC5 (1<<0)
|
||||
#define DC_STATE_EN_UPTO_DC6 (2<<0)
|
||||
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
|
||||
|
||||
|
@ -7822,4 +7907,13 @@ enum skl_disp_power_wells {
|
|||
#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
|
||||
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
|
||||
|
||||
/* MOCS (Memory Object Control State) registers */
|
||||
#define GEN9_LNCFCMOCS0 0xb020 /* L3 Cache Control base */
|
||||
|
||||
#define GEN9_GFX_MOCS_0 0xc800 /* Graphics MOCS base register*/
|
||||
#define GEN9_MFX0_MOCS_0 0xc900 /* Media 0 MOCS base register*/
|
||||
#define GEN9_MFX1_MOCS_0 0xca00 /* Media 1 MOCS base register*/
|
||||
#define GEN9_VEBOX_MOCS_0 0xcb00 /* Video MOCS base register*/
|
||||
#define GEN9_BLT_MOCS_0 0xcc00 /* Blitter MOCS base register*/
|
||||
|
||||
#endif /* _I915_REG_H_ */
|
||||
|
|
|
@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev)
|
|||
}
|
||||
|
||||
/* only restore FBC info on the platform that supports FBC */
|
||||
intel_fbc_disable(dev);
|
||||
intel_fbc_disable(dev_priv);
|
||||
|
||||
/* restore FBC interval */
|
||||
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
|
||||
|
|
|
@ -64,24 +64,16 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
|
|||
goto out;
|
||||
}
|
||||
|
||||
units = 0;
|
||||
div = 1000000ULL;
|
||||
|
||||
if (IS_CHERRYVIEW(dev)) {
|
||||
if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
|
||||
/* Special case for 320Mhz */
|
||||
if (czcount_30ns == 1) {
|
||||
div = 10000000ULL;
|
||||
units = 3125ULL;
|
||||
} else {
|
||||
/* chv counts are one less */
|
||||
czcount_30ns += 1;
|
||||
}
|
||||
div = 10000000ULL;
|
||||
units = 3125ULL;
|
||||
} else {
|
||||
czcount_30ns += 1;
|
||||
div = 1000000ULL;
|
||||
units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
|
||||
}
|
||||
|
||||
if (units == 0)
|
||||
units = DIV_ROUND_UP_ULL(30ULL * bias,
|
||||
(u64)czcount_30ns);
|
||||
|
||||
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
|
||||
units <<= 8;
|
||||
|
||||
|
|
|
@ -424,10 +424,10 @@ TRACE_EVENT(i915_gem_evict_vm,
|
|||
);
|
||||
|
||||
TRACE_EVENT(i915_gem_ring_sync_to,
|
||||
TP_PROTO(struct intel_engine_cs *from,
|
||||
struct intel_engine_cs *to,
|
||||
TP_PROTO(struct drm_i915_gem_request *to_req,
|
||||
struct intel_engine_cs *from,
|
||||
struct drm_i915_gem_request *req),
|
||||
TP_ARGS(from, to, req),
|
||||
TP_ARGS(to_req, from, req),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, dev)
|
||||
|
@ -439,7 +439,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
|
|||
TP_fast_assign(
|
||||
__entry->dev = from->dev->primary->index;
|
||||
__entry->sync_from = from->id;
|
||||
__entry->sync_to = to->id;
|
||||
__entry->sync_to = to_req->ring->id;
|
||||
__entry->seqno = i915_gem_request_get_seqno(req);
|
||||
),
|
||||
|
||||
|
@ -475,8 +475,8 @@ TRACE_EVENT(i915_gem_ring_dispatch,
|
|||
);
|
||||
|
||||
TRACE_EVENT(i915_gem_ring_flush,
|
||||
TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
|
||||
TP_ARGS(ring, invalidate, flush),
|
||||
TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
|
||||
TP_ARGS(req, invalidate, flush),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, dev)
|
||||
|
@ -486,8 +486,8 @@ TRACE_EVENT(i915_gem_ring_flush,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = ring->dev->primary->index;
|
||||
__entry->ring = ring->id;
|
||||
__entry->dev = req->ring->dev->primary->index;
|
||||
__entry->ring = req->ring->id;
|
||||
__entry->invalidate = invalidate;
|
||||
__entry->flush = flush;
|
||||
),
|
||||
|
|
|
@ -35,162 +35,6 @@
|
|||
#include <drm/drm_plane_helper.h>
|
||||
#include "intel_drv.h"
|
||||
|
||||
|
||||
/**
|
||||
* intel_atomic_check - validate state object
|
||||
* @dev: drm device
|
||||
* @state: state to validate
|
||||
*/
|
||||
int intel_atomic_check(struct drm_device *dev,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
int nplanes = dev->mode_config.num_total_plane;
|
||||
int ncrtcs = dev->mode_config.num_crtc;
|
||||
int nconnectors = dev->mode_config.num_connector;
|
||||
enum pipe nuclear_pipe = INVALID_PIPE;
|
||||
struct intel_crtc *nuclear_crtc = NULL;
|
||||
struct intel_crtc_state *crtc_state = NULL;
|
||||
int ret;
|
||||
int i;
|
||||
bool not_nuclear = false;
|
||||
|
||||
/*
|
||||
* FIXME: At the moment, we only support "nuclear pageflip" on a
|
||||
* single CRTC. Cross-crtc updates will be added later.
|
||||
*/
|
||||
for (i = 0; i < nplanes; i++) {
|
||||
struct intel_plane *plane = to_intel_plane(state->planes[i]);
|
||||
if (!plane)
|
||||
continue;
|
||||
|
||||
if (nuclear_pipe == INVALID_PIPE) {
|
||||
nuclear_pipe = plane->pipe;
|
||||
} else if (nuclear_pipe != plane->pipe) {
|
||||
DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: We only handle planes for now; make sure there are no CRTC's
|
||||
* or connectors involved.
|
||||
*/
|
||||
state->allow_modeset = false;
|
||||
for (i = 0; i < ncrtcs; i++) {
|
||||
struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
|
||||
if (crtc)
|
||||
memset(&crtc->atomic, 0, sizeof(crtc->atomic));
|
||||
if (crtc && crtc->pipe != nuclear_pipe)
|
||||
not_nuclear = true;
|
||||
if (crtc && crtc->pipe == nuclear_pipe) {
|
||||
nuclear_crtc = crtc;
|
||||
crtc_state = to_intel_crtc_state(state->crtc_states[i]);
|
||||
}
|
||||
}
|
||||
for (i = 0; i < nconnectors; i++)
|
||||
if (state->connectors[i] != NULL)
|
||||
not_nuclear = true;
|
||||
|
||||
if (not_nuclear) {
|
||||
DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = drm_atomic_helper_check_planes(dev, state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* FIXME: move to crtc atomic check function once it is ready */
|
||||
ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* intel_atomic_commit - commit validated state object
|
||||
* @dev: DRM device
|
||||
* @state: the top-level driver state object
|
||||
* @async: asynchronous commit
|
||||
*
|
||||
* This function commits a top-level state object that has been validated
|
||||
* with drm_atomic_helper_check().
|
||||
*
|
||||
* FIXME: Atomic modeset support for i915 is not yet complete. At the moment
|
||||
* we can only handle plane-related operations and do not yet support
|
||||
* asynchronous commit.
|
||||
*
|
||||
* RETURNS
|
||||
* Zero for success or -errno.
|
||||
*/
|
||||
int intel_atomic_commit(struct drm_device *dev,
|
||||
struct drm_atomic_state *state,
|
||||
bool async)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
if (async) {
|
||||
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = drm_atomic_helper_prepare_planes(dev, state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Point of no return */
|
||||
|
||||
/*
|
||||
* FIXME: The proper sequence here will eventually be:
|
||||
*
|
||||
* drm_atomic_helper_swap_state(dev, state)
|
||||
* drm_atomic_helper_commit_modeset_disables(dev, state);
|
||||
* drm_atomic_helper_commit_planes(dev, state);
|
||||
* drm_atomic_helper_commit_modeset_enables(dev, state);
|
||||
* drm_atomic_helper_wait_for_vblanks(dev, state);
|
||||
* drm_atomic_helper_cleanup_planes(dev, state);
|
||||
* drm_atomic_state_free(state);
|
||||
*
|
||||
* once we have full atomic modeset. For now, just manually update
|
||||
* plane states to avoid clobbering good states with dummy states
|
||||
* while nuclear pageflipping.
|
||||
*/
|
||||
for (i = 0; i < dev->mode_config.num_total_plane; i++) {
|
||||
struct drm_plane *plane = state->planes[i];
|
||||
|
||||
if (!plane)
|
||||
continue;
|
||||
|
||||
plane->state->state = state;
|
||||
swap(state->plane_states[i], plane->state);
|
||||
plane->state->state = NULL;
|
||||
}
|
||||
|
||||
/* swap crtc_scaler_state */
|
||||
for (i = 0; i < dev->mode_config.num_crtc; i++) {
|
||||
struct drm_crtc *crtc = state->crtcs[i];
|
||||
if (!crtc) {
|
||||
continue;
|
||||
}
|
||||
|
||||
to_intel_crtc(crtc)->config->scaler_state =
|
||||
to_intel_crtc_state(state->crtc_states[i])->scaler_state;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 9)
|
||||
skl_detach_scalers(to_intel_crtc(crtc));
|
||||
}
|
||||
|
||||
drm_atomic_helper_commit_planes(dev, state);
|
||||
drm_atomic_helper_wait_for_vblanks(dev, state);
|
||||
drm_atomic_helper_cleanup_planes(dev, state);
|
||||
drm_atomic_state_free(state);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_connector_atomic_get_property - fetch connector property value
|
||||
* @connector: connector to fetch property for
|
||||
|
@ -298,17 +142,12 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
|
|||
struct drm_plane *plane = NULL;
|
||||
struct intel_plane *intel_plane;
|
||||
struct intel_plane_state *plane_state = NULL;
|
||||
struct intel_crtc_scaler_state *scaler_state;
|
||||
struct drm_atomic_state *drm_state;
|
||||
struct intel_crtc_scaler_state *scaler_state =
|
||||
&crtc_state->scaler_state;
|
||||
struct drm_atomic_state *drm_state = crtc_state->base.state;
|
||||
int num_scalers_need;
|
||||
int i, j;
|
||||
|
||||
if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
|
||||
return 0;
|
||||
|
||||
scaler_state = &crtc_state->scaler_state;
|
||||
drm_state = crtc_state->base.state;
|
||||
|
||||
num_scalers_need = hweight32(scaler_state->scaler_users);
|
||||
DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
|
||||
crtc_state, num_scalers_need, intel_crtc->num_scalers,
|
||||
|
@ -336,17 +175,21 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
|
|||
/* walkthrough scaler_users bits and start assigning scalers */
|
||||
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
|
||||
int *scaler_id;
|
||||
const char *name;
|
||||
int idx;
|
||||
|
||||
/* skip if scaler not required */
|
||||
if (!(scaler_state->scaler_users & (1 << i)))
|
||||
continue;
|
||||
|
||||
if (i == SKL_CRTC_INDEX) {
|
||||
name = "CRTC";
|
||||
idx = intel_crtc->base.base.id;
|
||||
|
||||
/* panel fitter case: assign as a crtc scaler */
|
||||
scaler_id = &scaler_state->scaler_id;
|
||||
} else {
|
||||
if (!drm_state)
|
||||
continue;
|
||||
name = "PLANE";
|
||||
|
||||
/* plane scaler case: assign as a plane scaler */
|
||||
/* find the plane that set the bit as scaler_user */
|
||||
|
@ -365,9 +208,19 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
|
|||
plane->base.id);
|
||||
return PTR_ERR(state);
|
||||
}
|
||||
|
||||
/*
|
||||
* the plane is added after plane checks are run,
|
||||
* but since this plane is unchanged just do the
|
||||
* minimum required validation.
|
||||
*/
|
||||
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
|
||||
intel_crtc->atomic.wait_for_flips = true;
|
||||
crtc_state->base.planes_changed = true;
|
||||
}
|
||||
|
||||
intel_plane = to_intel_plane(plane);
|
||||
idx = plane->base.id;
|
||||
|
||||
/* plane on different crtc cannot be a scaler user of this crtc */
|
||||
if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
|
||||
|
@ -383,23 +236,16 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
|
|||
for (j = 0; j < intel_crtc->num_scalers; j++) {
|
||||
if (!scaler_state->scalers[j].in_use) {
|
||||
scaler_state->scalers[j].in_use = 1;
|
||||
*scaler_id = scaler_state->scalers[j].id;
|
||||
*scaler_id = j;
|
||||
DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
|
||||
intel_crtc->pipe,
|
||||
i == SKL_CRTC_INDEX ? scaler_state->scaler_id :
|
||||
plane_state->scaler_id,
|
||||
i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
|
||||
i == SKL_CRTC_INDEX ? intel_crtc->base.base.id :
|
||||
plane->base.id);
|
||||
intel_crtc->pipe, *scaler_id, name, idx);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (WARN_ON(*scaler_id < 0)) {
|
||||
DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n",
|
||||
i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
|
||||
i == SKL_CRTC_INDEX ? intel_crtc->base.base.id:plane->base.id);
|
||||
DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -421,3 +267,54 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
|
||||
struct intel_shared_dpll_config *shared_dpll)
|
||||
{
|
||||
enum intel_dpll_id i;
|
||||
|
||||
/* Copy shared dpll state */
|
||||
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
|
||||
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
|
||||
|
||||
shared_dpll[i] = pll->config;
|
||||
}
|
||||
}
|
||||
|
||||
struct intel_shared_dpll_config *
|
||||
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
|
||||
{
|
||||
struct intel_atomic_state *state = to_intel_atomic_state(s);
|
||||
|
||||
WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
|
||||
|
||||
if (!state->dpll_set) {
|
||||
state->dpll_set = true;
|
||||
|
||||
intel_atomic_duplicate_dpll_state(to_i915(s->dev),
|
||||
state->shared_dpll);
|
||||
}
|
||||
|
||||
return state->shared_dpll;
|
||||
}
|
||||
|
||||
struct drm_atomic_state *
|
||||
intel_atomic_state_alloc(struct drm_device *dev)
|
||||
{
|
||||
struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||||
|
||||
if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
|
||||
kfree(state);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return &state->base;
|
||||
}
|
||||
|
||||
void intel_atomic_state_clear(struct drm_atomic_state *s)
|
||||
{
|
||||
struct intel_atomic_state *state = to_intel_atomic_state(s);
|
||||
drm_atomic_state_default_clear(&state->base);
|
||||
state->dpll_set = false;
|
||||
}
|
||||
|
|
|
@ -56,6 +56,7 @@ intel_create_plane_state(struct drm_plane *plane)
|
|||
|
||||
state->base.plane = plane;
|
||||
state->base.rotation = BIT(DRM_ROTATE_0);
|
||||
state->ckey.flags = I915_SET_COLORKEY_NONE;
|
||||
|
||||
return state;
|
||||
}
|
||||
|
@ -114,8 +115,10 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
|
|||
struct intel_crtc_state *crtc_state;
|
||||
struct intel_plane *intel_plane = to_intel_plane(plane);
|
||||
struct intel_plane_state *intel_state = to_intel_plane_state(state);
|
||||
struct drm_crtc_state *drm_crtc_state;
|
||||
int ret;
|
||||
|
||||
crtc = crtc ? crtc : plane->crtc;
|
||||
crtc = crtc ? crtc : plane->state->crtc;
|
||||
intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
/*
|
||||
|
@ -127,16 +130,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
|
|||
if (!crtc)
|
||||
return 0;
|
||||
|
||||
/* FIXME: temporary hack necessary while we still use the plane update
|
||||
* helper. */
|
||||
if (state->state) {
|
||||
crtc_state =
|
||||
intel_atomic_get_crtc_state(state->state, intel_crtc);
|
||||
if (IS_ERR(crtc_state))
|
||||
return PTR_ERR(crtc_state);
|
||||
} else {
|
||||
crtc_state = intel_crtc->config;
|
||||
}
|
||||
drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
|
||||
if (WARN_ON(!drm_crtc_state))
|
||||
return -EINVAL;
|
||||
|
||||
crtc_state = to_intel_crtc_state(drm_crtc_state);
|
||||
|
||||
/*
|
||||
* The original src/dest coordinates are stored in state->base, but
|
||||
|
@ -160,20 +158,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
|
|||
intel_state->clip.y2 =
|
||||
crtc_state->base.active ? crtc_state->pipe_src_h : 0;
|
||||
|
||||
/*
|
||||
* Disabling a plane is always okay; we just need to update
|
||||
* fb tracking in a special way since cleanup_fb() won't
|
||||
* get called by the plane helpers.
|
||||
*/
|
||||
if (state->fb == NULL && plane->state->fb != NULL) {
|
||||
/*
|
||||
* 'prepare' is never called when plane is being disabled, so
|
||||
* we need to handle frontbuffer tracking as a special case
|
||||
*/
|
||||
intel_crtc->atomic.disabled_planes |=
|
||||
(1 << drm_plane_index(plane));
|
||||
}
|
||||
|
||||
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
|
||||
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
|
||||
state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
|
||||
|
@ -198,7 +182,12 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
|
|||
}
|
||||
}
|
||||
|
||||
return intel_plane->check_plane(plane, intel_state);
|
||||
intel_state->visible = false;
|
||||
ret = intel_plane->check_plane(plane, crtc_state, intel_state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return intel_plane_atomic_calc_changes(&crtc_state->base, state);
|
||||
}
|
||||
|
||||
static void intel_plane_atomic_update(struct drm_plane *plane,
|
||||
|
|
|
@ -41,7 +41,8 @@
|
|||
*
|
||||
* The disable sequences must be performed before disabling the transcoder or
|
||||
* port. The enable sequences may only be performed after enabling the
|
||||
* transcoder and port, and after completed link training.
|
||||
* transcoder and port, and after completed link training. Therefore the audio
|
||||
* enable/disable sequences are part of the modeset sequence.
|
||||
*
|
||||
* The codec and controller sequences could be done either parallel or serial,
|
||||
* but generally the ELDV/PD change in the codec sequence indicates to the audio
|
||||
|
|
|
@ -122,42 +122,6 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
|
|||
drm_mode_set_name(panel_fixed_mode);
|
||||
}
|
||||
|
||||
static bool
|
||||
lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
|
||||
const struct lvds_dvo_timing *b)
|
||||
{
|
||||
if (a->hactive_hi != b->hactive_hi ||
|
||||
a->hactive_lo != b->hactive_lo)
|
||||
return false;
|
||||
|
||||
if (a->hsync_off_hi != b->hsync_off_hi ||
|
||||
a->hsync_off_lo != b->hsync_off_lo)
|
||||
return false;
|
||||
|
||||
if (a->hsync_pulse_width != b->hsync_pulse_width)
|
||||
return false;
|
||||
|
||||
if (a->hblank_hi != b->hblank_hi ||
|
||||
a->hblank_lo != b->hblank_lo)
|
||||
return false;
|
||||
|
||||
if (a->vactive_hi != b->vactive_hi ||
|
||||
a->vactive_lo != b->vactive_lo)
|
||||
return false;
|
||||
|
||||
if (a->vsync_off != b->vsync_off)
|
||||
return false;
|
||||
|
||||
if (a->vsync_pulse_width != b->vsync_pulse_width)
|
||||
return false;
|
||||
|
||||
if (a->vblank_hi != b->vblank_hi ||
|
||||
a->vblank_lo != b->vblank_lo)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static const struct lvds_dvo_timing *
|
||||
get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
|
||||
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
|
||||
|
@ -213,7 +177,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
|
|||
const struct lvds_dvo_timing *panel_dvo_timing;
|
||||
const struct lvds_fp_timing *fp_timing;
|
||||
struct drm_display_mode *panel_fixed_mode;
|
||||
int i, downclock, drrs_mode;
|
||||
int drrs_mode;
|
||||
|
||||
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
|
||||
if (!lvds_options)
|
||||
|
@ -272,30 +236,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
|
|||
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
|
||||
drm_mode_debug_printmodeline(panel_fixed_mode);
|
||||
|
||||
/*
|
||||
* Iterate over the LVDS panel timing info to find the lowest clock
|
||||
* for the native resolution.
|
||||
*/
|
||||
downclock = panel_dvo_timing->clock;
|
||||
for (i = 0; i < 16; i++) {
|
||||
const struct lvds_dvo_timing *dvo_timing;
|
||||
|
||||
dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
|
||||
lvds_lfp_data_ptrs,
|
||||
i);
|
||||
if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
|
||||
dvo_timing->clock < downclock)
|
||||
downclock = dvo_timing->clock;
|
||||
}
|
||||
|
||||
if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
|
||||
dev_priv->lvds_downclock_avail = 1;
|
||||
dev_priv->lvds_downclock = downclock * 10;
|
||||
DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
|
||||
"Normal Clock %dKHz, downclock %dKHz\n",
|
||||
panel_fixed_mode->clock, 10*downclock);
|
||||
}
|
||||
|
||||
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
|
||||
lvds_lfp_data_ptrs,
|
||||
lvds_options->panel_type);
|
||||
|
|
|
@ -389,6 +389,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
|
|||
intel_csr_load_program(dev);
|
||||
fw_loaded = true;
|
||||
|
||||
DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
|
||||
out:
|
||||
if (fw_loaded)
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
|
@ -422,6 +423,8 @@ void intel_csr_ucode_init(struct drm_device *dev)
|
|||
return;
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
|
||||
|
||||
/*
|
||||
* Obtain a runtime pm reference, until CSR is loaded,
|
||||
* to avoid entering runtime-suspend.
|
||||
|
@ -459,7 +462,8 @@ void intel_csr_ucode_fini(struct drm_device *dev)
|
|||
|
||||
void assert_csr_loaded(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
|
||||
WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED,
|
||||
"CSR is not loaded.\n");
|
||||
WARN(!I915_READ(CSR_PROGRAM_BASE),
|
||||
"CSR program storage start is NULL\n");
|
||||
WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
|
||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -91,6 +91,8 @@ static const struct dp_link_dpll chv_dpll[] = {
|
|||
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
|
||||
};
|
||||
|
||||
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
|
||||
324000, 432000, 540000 };
|
||||
static const int skl_rates[] = { 162000, 216000, 270000,
|
||||
324000, 432000, 540000 };
|
||||
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
|
||||
|
@ -565,7 +567,9 @@ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
|
|||
{
|
||||
struct drm_device *dev = intel_dp_to_dev(intel_dp);
|
||||
|
||||
if (HAS_PCH_SPLIT(dev))
|
||||
if (IS_BROXTON(dev))
|
||||
return BXT_PP_CONTROL(0);
|
||||
else if (HAS_PCH_SPLIT(dev))
|
||||
return PCH_PP_CONTROL;
|
||||
else
|
||||
return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
|
||||
|
@ -575,7 +579,9 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
|
|||
{
|
||||
struct drm_device *dev = intel_dp_to_dev(intel_dp);
|
||||
|
||||
if (HAS_PCH_SPLIT(dev))
|
||||
if (IS_BROXTON(dev))
|
||||
return BXT_PP_STATUS(0);
|
||||
else if (HAS_PCH_SPLIT(dev))
|
||||
return PCH_PP_STATUS;
|
||||
else
|
||||
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
|
||||
|
@ -708,7 +714,8 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
|
|||
return 0;
|
||||
|
||||
if (intel_dig_port->port == PORT_A) {
|
||||
return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
|
||||
return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
|
||||
|
||||
} else {
|
||||
return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
|
||||
}
|
||||
|
@ -723,7 +730,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
|
|||
if (intel_dig_port->port == PORT_A) {
|
||||
if (index)
|
||||
return 0;
|
||||
return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
|
||||
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
|
||||
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
|
||||
/* Workaround for non-ULT HSW */
|
||||
switch (index) {
|
||||
|
@ -1172,7 +1179,10 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
|
|||
static int
|
||||
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
|
||||
{
|
||||
if (IS_SKYLAKE(dev)) {
|
||||
if (IS_BROXTON(dev)) {
|
||||
*source_rates = bxt_rates;
|
||||
return ARRAY_SIZE(bxt_rates);
|
||||
} else if (IS_SKYLAKE(dev)) {
|
||||
*source_rates = skl_rates;
|
||||
return ARRAY_SIZE(skl_rates);
|
||||
} else if (IS_CHERRYVIEW(dev)) {
|
||||
|
@ -1374,7 +1384,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
|||
|
||||
if (INTEL_INFO(dev)->gen >= 9) {
|
||||
int ret;
|
||||
ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
|
||||
ret = skl_update_scaler_crtc(pipe_config);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -1699,8 +1709,10 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
|
|||
lockdep_assert_held(&dev_priv->pps_mutex);
|
||||
|
||||
control = I915_READ(_pp_ctrl_reg(intel_dp));
|
||||
control &= ~PANEL_UNLOCK_MASK;
|
||||
control |= PANEL_UNLOCK_REGS;
|
||||
if (!IS_BROXTON(dev)) {
|
||||
control &= ~PANEL_UNLOCK_MASK;
|
||||
control |= PANEL_UNLOCK_REGS;
|
||||
}
|
||||
return control;
|
||||
}
|
||||
|
||||
|
@ -3414,92 +3426,6 @@ gen7_edp_signal_levels(uint8_t train_set)
|
|||
}
|
||||
}
|
||||
|
||||
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
|
||||
static uint32_t
|
||||
hsw_signal_levels(uint8_t train_set)
|
||||
{
|
||||
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
|
||||
DP_TRAIN_PRE_EMPHASIS_MASK);
|
||||
switch (signal_levels) {
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
|
||||
return DDI_BUF_TRANS_SELECT(0);
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
|
||||
return DDI_BUF_TRANS_SELECT(1);
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
|
||||
return DDI_BUF_TRANS_SELECT(2);
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
|
||||
return DDI_BUF_TRANS_SELECT(3);
|
||||
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
|
||||
return DDI_BUF_TRANS_SELECT(4);
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
|
||||
return DDI_BUF_TRANS_SELECT(5);
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
|
||||
return DDI_BUF_TRANS_SELECT(6);
|
||||
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
|
||||
return DDI_BUF_TRANS_SELECT(7);
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
|
||||
return DDI_BUF_TRANS_SELECT(8);
|
||||
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
|
||||
return DDI_BUF_TRANS_SELECT(9);
|
||||
default:
|
||||
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
|
||||
"0x%x\n", signal_levels);
|
||||
return DDI_BUF_TRANS_SELECT(0);
|
||||
}
|
||||
}
|
||||
|
||||
static void bxt_signal_levels(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
|
||||
enum port port = dport->port;
|
||||
struct drm_device *dev = dport->base.base.dev;
|
||||
struct intel_encoder *encoder = &dport->base;
|
||||
uint8_t train_set = intel_dp->train_set[0];
|
||||
uint32_t level = 0;
|
||||
|
||||
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
|
||||
DP_TRAIN_PRE_EMPHASIS_MASK);
|
||||
switch (signal_levels) {
|
||||
default:
|
||||
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
|
||||
level = 0;
|
||||
break;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
|
||||
level = 1;
|
||||
break;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
|
||||
level = 2;
|
||||
break;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
|
||||
level = 3;
|
||||
break;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
|
||||
level = 4;
|
||||
break;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
|
||||
level = 5;
|
||||
break;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
|
||||
level = 6;
|
||||
break;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
|
||||
level = 7;
|
||||
break;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
|
||||
level = 8;
|
||||
break;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
|
||||
level = 9;
|
||||
break;
|
||||
}
|
||||
|
||||
bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
|
||||
}
|
||||
|
||||
/* Properly updates "DP" with the correct signal levels. */
|
||||
static void
|
||||
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
|
||||
|
@ -3507,22 +3433,20 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
|
|||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
enum port port = intel_dig_port->port;
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
uint32_t signal_levels, mask;
|
||||
uint32_t signal_levels, mask = 0;
|
||||
uint8_t train_set = intel_dp->train_set[0];
|
||||
|
||||
if (IS_BROXTON(dev)) {
|
||||
signal_levels = 0;
|
||||
bxt_signal_levels(intel_dp);
|
||||
mask = 0;
|
||||
} else if (HAS_DDI(dev)) {
|
||||
signal_levels = hsw_signal_levels(train_set);
|
||||
mask = DDI_BUF_EMP_MASK;
|
||||
if (HAS_DDI(dev)) {
|
||||
signal_levels = ddi_signal_levels(intel_dp);
|
||||
|
||||
if (IS_BROXTON(dev))
|
||||
signal_levels = 0;
|
||||
else
|
||||
mask = DDI_BUF_EMP_MASK;
|
||||
} else if (IS_CHERRYVIEW(dev)) {
|
||||
signal_levels = chv_signal_levels(intel_dp);
|
||||
mask = 0;
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
signal_levels = vlv_signal_levels(intel_dp);
|
||||
mask = 0;
|
||||
} else if (IS_GEN7(dev) && port == PORT_A) {
|
||||
signal_levels = gen7_edp_signal_levels(train_set);
|
||||
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
|
||||
|
@ -4922,12 +4846,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
|
|||
.destroy = intel_dp_encoder_destroy,
|
||||
};
|
||||
|
||||
void
|
||||
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
enum irqreturn
|
||||
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
|
||||
{
|
||||
|
@ -5095,8 +5013,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct edp_power_seq cur, vbt, spec,
|
||||
*final = &intel_dp->pps_delays;
|
||||
u32 pp_on, pp_off, pp_div, pp;
|
||||
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
|
||||
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
|
||||
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
|
||||
|
||||
lockdep_assert_held(&dev_priv->pps_mutex);
|
||||
|
||||
|
@ -5104,7 +5022,16 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
|
|||
if (final->t11_t12 != 0)
|
||||
return;
|
||||
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
if (IS_BROXTON(dev)) {
|
||||
/*
|
||||
* TODO: BXT has 2 sets of PPS registers.
|
||||
* Correct Register for Broxton need to be identified
|
||||
* using VBT. hardcoding for now
|
||||
*/
|
||||
pp_ctrl_reg = BXT_PP_CONTROL(0);
|
||||
pp_on_reg = BXT_PP_ON_DELAYS(0);
|
||||
pp_off_reg = BXT_PP_OFF_DELAYS(0);
|
||||
} else if (HAS_PCH_SPLIT(dev)) {
|
||||
pp_ctrl_reg = PCH_PP_CONTROL;
|
||||
pp_on_reg = PCH_PP_ON_DELAYS;
|
||||
pp_off_reg = PCH_PP_OFF_DELAYS;
|
||||
|
@ -5120,12 +5047,14 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
|
|||
|
||||
/* Workaround: Need to write PP_CONTROL with the unlock key as
|
||||
* the very first thing. */
|
||||
pp = ironlake_get_pp_control(intel_dp);
|
||||
I915_WRITE(pp_ctrl_reg, pp);
|
||||
pp_ctl = ironlake_get_pp_control(intel_dp);
|
||||
|
||||
pp_on = I915_READ(pp_on_reg);
|
||||
pp_off = I915_READ(pp_off_reg);
|
||||
pp_div = I915_READ(pp_div_reg);
|
||||
if (!IS_BROXTON(dev)) {
|
||||
I915_WRITE(pp_ctrl_reg, pp_ctl);
|
||||
pp_div = I915_READ(pp_div_reg);
|
||||
}
|
||||
|
||||
/* Pull timing values out of registers */
|
||||
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
|
||||
|
@ -5140,8 +5069,17 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
|
|||
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
|
||||
PANEL_POWER_DOWN_DELAY_SHIFT;
|
||||
|
||||
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
|
||||
if (IS_BROXTON(dev)) {
|
||||
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
|
||||
BXT_POWER_CYCLE_DELAY_SHIFT;
|
||||
if (tmp > 0)
|
||||
cur.t11_t12 = (tmp - 1) * 1000;
|
||||
else
|
||||
cur.t11_t12 = 0;
|
||||
} else {
|
||||
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
|
||||
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
|
||||
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
|
||||
|
@ -5198,13 +5136,23 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 pp_on, pp_off, pp_div, port_sel = 0;
|
||||
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
|
||||
int pp_on_reg, pp_off_reg, pp_div_reg;
|
||||
int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
|
||||
enum port port = dp_to_dig_port(intel_dp)->port;
|
||||
const struct edp_power_seq *seq = &intel_dp->pps_delays;
|
||||
|
||||
lockdep_assert_held(&dev_priv->pps_mutex);
|
||||
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
if (IS_BROXTON(dev)) {
|
||||
/*
|
||||
* TODO: BXT has 2 sets of PPS registers.
|
||||
* Correct Register for Broxton need to be identified
|
||||
* using VBT. hardcoding for now
|
||||
*/
|
||||
pp_ctrl_reg = BXT_PP_CONTROL(0);
|
||||
pp_on_reg = BXT_PP_ON_DELAYS(0);
|
||||
pp_off_reg = BXT_PP_OFF_DELAYS(0);
|
||||
|
||||
} else if (HAS_PCH_SPLIT(dev)) {
|
||||
pp_on_reg = PCH_PP_ON_DELAYS;
|
||||
pp_off_reg = PCH_PP_OFF_DELAYS;
|
||||
pp_div_reg = PCH_PP_DIVISOR;
|
||||
|
@ -5230,9 +5178,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
|
|||
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
|
||||
/* Compute the divisor for the pp clock, simply match the Bspec
|
||||
* formula. */
|
||||
pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
|
||||
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
|
||||
<< PANEL_POWER_CYCLE_DELAY_SHIFT);
|
||||
if (IS_BROXTON(dev)) {
|
||||
pp_div = I915_READ(pp_ctrl_reg);
|
||||
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
|
||||
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
|
||||
<< BXT_POWER_CYCLE_DELAY_SHIFT);
|
||||
} else {
|
||||
pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
|
||||
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
|
||||
<< PANEL_POWER_CYCLE_DELAY_SHIFT);
|
||||
}
|
||||
|
||||
/* Haswell doesn't have any port selection bits for the panel
|
||||
* power sequencer any more. */
|
||||
|
@ -5249,11 +5204,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
|
|||
|
||||
I915_WRITE(pp_on_reg, pp_on);
|
||||
I915_WRITE(pp_off_reg, pp_off);
|
||||
I915_WRITE(pp_div_reg, pp_div);
|
||||
if (IS_BROXTON(dev))
|
||||
I915_WRITE(pp_ctrl_reg, pp_div);
|
||||
else
|
||||
I915_WRITE(pp_div_reg, pp_div);
|
||||
|
||||
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
|
||||
I915_READ(pp_on_reg),
|
||||
I915_READ(pp_off_reg),
|
||||
IS_BROXTON(dev) ?
|
||||
(I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
|
||||
I915_READ(pp_div_reg));
|
||||
}
|
||||
|
||||
|
@ -5458,13 +5418,12 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
|
|||
}
|
||||
|
||||
/**
|
||||
* intel_edp_drrs_invalidate - Invalidate DRRS
|
||||
* intel_edp_drrs_invalidate - Disable Idleness DRRS
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* When there is a disturbance on screen (due to cursor movement/time
|
||||
* update etc), DRRS needs to be invalidated, i.e. need to switch to
|
||||
* high RR.
|
||||
* This function gets called every time rendering on the given planes starts.
|
||||
* Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
|
||||
*
|
||||
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
|
||||
*/
|
||||
|
@ -5489,26 +5448,27 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
|
|||
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
|
||||
pipe = to_intel_crtc(crtc)->pipe;
|
||||
|
||||
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
|
||||
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
|
||||
dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
|
||||
|
||||
/* invalidate means busy screen hence upclock */
|
||||
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
|
||||
intel_dp_set_drrs_state(dev_priv->dev,
|
||||
dev_priv->drrs.dp->attached_connector->panel.
|
||||
fixed_mode->vrefresh);
|
||||
}
|
||||
|
||||
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
|
||||
|
||||
dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->drrs.mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_edp_drrs_flush - Flush DRRS
|
||||
* intel_edp_drrs_flush - Restart Idleness DRRS
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* When there is no movement on screen, DRRS work can be scheduled.
|
||||
* This DRRS work is responsible for setting relevant registers after a
|
||||
* timeout of 1 second.
|
||||
* This function gets called every time rendering on the given planes has
|
||||
* completed or flip on a crtc is completed. So DRRS should be upclocked
|
||||
* (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
|
||||
* if no other planes are dirty.
|
||||
*
|
||||
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
|
||||
*/
|
||||
|
@ -5532,10 +5492,21 @@ void intel_edp_drrs_flush(struct drm_device *dev,
|
|||
|
||||
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
|
||||
pipe = to_intel_crtc(crtc)->pipe;
|
||||
|
||||
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
|
||||
dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
|
||||
|
||||
if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
|
||||
!dev_priv->drrs.busy_frontbuffer_bits)
|
||||
/* flush means busy screen hence upclock */
|
||||
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
|
||||
intel_dp_set_drrs_state(dev_priv->dev,
|
||||
dev_priv->drrs.dp->attached_connector->panel.
|
||||
fixed_mode->vrefresh);
|
||||
|
||||
/*
|
||||
* flush also means no more activity hence schedule downclock, if all
|
||||
* other fbs are quiescent too
|
||||
*/
|
||||
if (!dev_priv->drrs.busy_frontbuffer_bits)
|
||||
schedule_delayed_work(&dev_priv->drrs.work,
|
||||
msecs_to_jiffies(1000));
|
||||
mutex_unlock(&dev_priv->drrs.mutex);
|
||||
|
@ -5939,10 +5910,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
|
|||
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
|
||||
}
|
||||
intel_encoder->cloneable = 0;
|
||||
intel_encoder->hot_plug = intel_dp_hot_plug;
|
||||
|
||||
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
|
||||
dev_priv->hpd_irq_port[port] = intel_dig_port;
|
||||
dev_priv->hotplug.irq_port[port] = intel_dig_port;
|
||||
|
||||
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
|
||||
drm_encoder_cleanup(encoder);
|
||||
|
@ -5958,7 +5928,7 @@ void intel_dp_mst_suspend(struct drm_device *dev)
|
|||
|
||||
/* disable MST */
|
||||
for (i = 0; i < I915_MAX_PORTS; i++) {
|
||||
struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
|
||||
struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
|
||||
if (!intel_dig_port)
|
||||
continue;
|
||||
|
||||
|
@ -5977,7 +5947,7 @@ void intel_dp_mst_resume(struct drm_device *dev)
|
|||
int i;
|
||||
|
||||
for (i = 0; i < I915_MAX_PORTS; i++) {
|
||||
struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
|
||||
struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
|
||||
if (!intel_dig_port)
|
||||
continue;
|
||||
if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
|
||||
|
|
|
@@ -130,11 +130,6 @@ struct intel_fbdev {

struct intel_encoder {
struct drm_encoder base;
/*
* The new crtc this encoder will be driven from. Only differs from
* base->crtc while a modeset is in progress.
*/
struct intel_crtc *new_crtc;

enum intel_output_type type;
unsigned int cloneable;

@@ -195,12 +190,6 @@ struct intel_connector {
*/
struct intel_encoder *encoder;

/*
* The new encoder this connector will be driven. Only differs from
* encoder while a modeset is in progress.
*/
struct intel_encoder *new_encoder;

/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);

@@ -241,6 +230,14 @@ typedef struct dpll {
int p;
} intel_clock_t;

struct intel_atomic_state {
struct drm_atomic_state base;

unsigned int cdclk;
bool dpll_set;
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
};

struct intel_plane_state {
struct drm_plane_state base;
struct drm_rect src;

@@ -256,7 +253,7 @@ struct intel_plane_state {
* plane requiring a scaler:
* - During check_plane, its bit is set in
* crtc_state->scaler_state.scaler_users by calling helper function
* update_scaler_users.
* update_scaler_plane.
* - scaler_id indicates the scaler it got assigned.
*
* plane doesn't require a scaler:

@@ -264,9 +261,11 @@ struct intel_plane_state {
* got disabled.
* - During check_plane, corresponding bit is reset in
* crtc_state->scaler_state.scaler_users by calling helper function
* update_scaler_users.
* update_scaler_plane.
*/
int scaler_id;

struct drm_intel_sprite_colorkey ckey;
};

struct intel_initial_plane_config {

@@ -286,7 +285,6 @@ struct intel_initial_plane_config {
#define SKL_MAX_DST_H 4096

struct intel_scaler {
int id;
int in_use;
uint32_t mode;
};

@@ -319,6 +317,9 @@ struct intel_crtc_scaler_state {
int scaler_id;
};

/* drm_mode->private_flags */
#define I915_MODE_FLAG_INHERITED 1

struct intel_crtc_state {
struct drm_crtc_state base;

@@ -331,7 +332,6 @@ struct intel_crtc_state {
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;

/* Pipe source size (ie. panel fitter input size)

@@ -447,6 +447,18 @@ struct intel_crtc_state {
int pbn;

struct intel_crtc_scaler_state scaler_state;

/* w/a for waiting 2 vblanks during crtc enable */
enum pipe hsw_workaround_pipe;
};

struct vlv_wm_state {
struct vlv_pipe_wm wm[3];
struct vlv_sr_wm sr[3];
uint8_t num_active_planes;
uint8_t num_levels;
uint8_t level;
bool cxsr;
};

struct intel_pipe_wm {

@@ -478,16 +490,13 @@ struct skl_pipe_wm {
* and thus can't be run with interrupts disabled.
*/
struct intel_crtc_atomic_commit {
/* vblank evasion */
bool evade;
unsigned start_vbl_count;

/* Sleepable operations to perform before commit */
bool wait_for_flips;
bool disable_fbc;
bool disable_ips;
bool disable_cxsr;
bool pre_disable_primary;
bool update_wm;
bool update_wm_pre, update_wm_post;
unsigned disabled_planes;

/* Sleepable operations to perform after commit */

@@ -527,9 +536,7 @@ struct intel_crtc {
uint32_t cursor_size;
uint32_t cursor_base;

struct intel_initial_plane_config plane_config;
struct intel_crtc_state *config;
bool new_enabled;

/* reset counter value when the last flip was submitted */
unsigned int reset_counter;

@@ -544,14 +551,19 @@ struct intel_crtc {
struct intel_pipe_wm active;
/* SKL wm values currently in use */
struct skl_pipe_wm skl_active;
/* allow CxSR on this pipe */
bool cxsr_allowed;
} wm;

int scanline_offset;

unsigned start_vbl_count;
struct intel_crtc_atomic_commit atomic;

/* scalers available on this crtc */
int num_scalers;

struct vlv_wm_state wm_state;
};

struct intel_plane_wm_parameters {

@@ -570,6 +582,7 @@ struct intel_plane_wm_parameters {
bool scaled;
u64 tiling;
unsigned int rotation;
uint16_t fifo_size;
};

struct intel_plane {

@@ -578,9 +591,7 @@ struct intel_plane {
enum pipe pipe;
bool can_scale;
int max_downscale;

/* FIXME convert to properties */
struct drm_intel_sprite_colorkey ckey;
uint32_t frontbuffer_bit;

/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here

@@ -603,8 +614,9 @@ struct intel_plane {
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc, bool force);
struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);

@@ -629,6 +641,7 @@ struct cxsr_latency {
unsigned long cursor_hpll_disable;
};

#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
@@ -940,43 +953,23 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
enum port port, int type);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);

/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
* synchronous plane updates which will happen on the next vblank and which will
* not get delayed by pending gpu rendering.
*
* Can be called without any locks held.
*/
static inline
void intel_frontbuffer_flip(struct drm_device *dev,
unsigned frontbuffer_bits)
{
intel_frontbuffer_flush(dev, frontbuffer_bits);
}

unsigned frontbuffer_bits);
unsigned int intel_fb_align_height(struct drm_device *dev,
unsigned int height,
uint32_t pixel_format,
uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);

void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
enum fb_op_origin origin);
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format);

@@ -994,8 +987,8 @@ int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_reset(struct intel_crtc *crtc);
int intel_display_suspend(struct drm_device *dev);
int intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);

@@ -1035,7 +1028,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined);
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,

@@ -1058,6 +1052,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);

unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,

@@ -1072,9 +1068,6 @@ intel_rotation_90_or_270(unsigned int rotation)
void intel_create_rotation_property(struct drm_device *dev,
struct intel_plane *plane);

bool intel_wm_need_update(struct drm_plane *plane,
struct drm_plane_state *state);

/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,

@@ -1084,7 +1077,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
void intel_put_shared_dpll(struct intel_crtc *crtc);

void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);

@@ -1104,7 +1096,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);

@@ -1114,7 +1107,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void broxton_init_cdclk(struct drm_device *dev);
void broxton_uninit_cdclk(struct drm_device *dev);
void broxton_set_cdclk(struct drm_device *dev, int frequency);
void broxton_ddi_phy_init(struct drm_device *dev);
void broxton_ddi_phy_uninit(struct drm_device *dev);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);

@@ -1130,6 +1122,8 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock);
int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);

bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);

@@ -1139,10 +1133,8 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
void skl_detach_scalers(struct intel_crtc *intel_crtc);
int skl_update_scaler_users(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
struct intel_plane_state *plane_state, int force_detach);

int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);

unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,

@@ -1238,15 +1230,18 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
#endif

/* intel_fbc.c */
bool intel_fbc_enabled(struct drm_device *dev);
void intel_fbc_update(struct drm_device *dev);
bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
void intel_fbc_update(struct drm_i915_private *dev_priv);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_disable(struct drm_device *dev);
void intel_fbc_disable(struct drm_i915_private *dev_priv);
void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);

/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);

@@ -1314,11 +1309,13 @@ void intel_backlight_unregister(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct drm_device *dev);
void intel_psr_single_frame_update(struct drm_device *dev);
void intel_psr_single_frame_update(struct drm_device *dev,
unsigned frontbuffer_bits);

/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);

@@ -1372,11 +1369,12 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
unsigned long submitted);
void intel_queue_rps_boost_for_request(struct drm_device *dev,
struct drm_i915_gem_request *req);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);

uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);

/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);

@@ -1384,10 +1382,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);

/* intel_sprite.c */
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
int intel_plane_restore(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
bool intel_pipe_update_start(struct intel_crtc *crtc,
void intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);

@@ -1395,11 +1392,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
void intel_tv_init(struct drm_device *dev);

/* intel_atomic.c */
int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async);
int intel_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,

@@ -1407,6 +1399,11 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_clear(struct drm_atomic_state *);
struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);

static inline struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
struct intel_crtc *crtc)
@@ -261,11 +261,6 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}

static void intel_dsi_hot_plug(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
}

static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{

@@ -418,12 +413,12 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
/* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled */
tmp = I915_READ(DPLL(pipe));
tmp |= DPLL_REFA_CLK_ENABLE_VLV;
tmp |= DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);

/* update the hw state for DPLL */
intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
DPLL_REFA_CLK_ENABLE_VLV;
intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;

tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;

@@ -1022,7 +1017,6 @@ void intel_dsi_init(struct drm_device *dev)
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);

/* XXX: very likely not all of these are needed */
intel_encoder->hot_plug = intel_dsi_hot_plug;
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -38,6 +38,27 @@
#define DSI_HFP_PACKET_EXTRA_SIZE 6
#define DSI_EOTP_PACKET_SIZE 4

static int dsi_pixel_format_bpp(int pixel_format)
{
int bpp;

switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}

return bpp;
}

struct dsi_mnp {
u32 dsi_pll_ctrl;
u32 dsi_pll_div;

@@ -46,8 +67,8 @@ struct dsi_mnp {
static const u32 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
71, 35 /* 91 - 92 */
106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */
71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
};

#ifdef DSI_CLK_FROM_RR

@@ -65,19 +86,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
u32 dsi_bit_clock_hz;
u32 dsi_clk;

switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
bpp = dsi_pixel_format_bpp(pixel_format);

hactive = mode->hdisplay;
vactive = mode->vdisplay;

@@ -137,21 +146,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
u32 dsi_clk_khz;
u32 bpp;

switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
u32 bpp = dsi_pixel_format_bpp(pixel_format);

/* DSI data rate = pixel clock * bits per pixel / lane count
pixel clock is converted from KHz to Hz */

@@ -162,11 +157,13 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)

#endif

static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct dsi_mnp *dsi_mnp, int target_dsi_clk)
{
unsigned int calc_m = 0, calc_p = 0;
unsigned int m, n = 1, p;
int ref_clk = 25000;
unsigned int m_min, m_max, p_min = 2, p_max = 6;
unsigned int m, n, p;
int ref_clk;
int delta = target_dsi_clk;
u32 m_seed;

@@ -176,8 +173,20 @@ static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
return -ECHRNG;
}

for (m = 62; m <= 92 && delta; m++) {
for (p = 2; p <= 6 && delta; p++) {
if (IS_CHERRYVIEW(dev_priv)) {
ref_clk = 100000;
n = 4;
m_min = 70;
m_max = 96;
} else {
ref_clk = 25000;
n = 1;
m_min = 62;
m_max = 92;
}

for (m = m_min; m <= m_max && delta; m++) {
for (p = p_min; p <= p_max && delta; p++) {
/*
* Find the optimal m and p divisors with minimal delta
* +/- the required clock

@@ -217,7 +226,7 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);

ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
if (ret) {
DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
return;

@@ -286,21 +295,7 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)

static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
{
int bpp;

switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
int bpp = dsi_pixel_format_bpp(pixel_format);

WARN(bpp != pipe_bpp,
"bpp match assertion failure (expected %d, current %d)\n",
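For orientation, the dsi_calc_mnp() hunks above first pick the reference clock, the fixed N divider and the M range per platform (CherryView vs. Valleyview) and then scan for the divisor pair whose output is closest to the requested DSI clock. Below is a minimal standalone sketch of that search; the helper name and the simplified integer rounding are illustrative assumptions, not the kernel code, which additionally encodes the result through lfsr_converts[] into the PLL registers.

/*
 * Sketch only: return the smallest error achievable for ref_clk * m / (n * p)
 * against the requested clock, reporting the best divisors found.
 */
static unsigned int pick_mnp(unsigned int target, unsigned int ref_clk,
			     unsigned int n, unsigned int m_min,
			     unsigned int m_max,
			     unsigned int *best_m, unsigned int *best_p)
{
	unsigned int m, p, delta = target;

	for (m = m_min; m <= m_max && delta; m++) {
		for (p = 2; p <= 6 && delta; p++) {
			unsigned int clk = ref_clk * m / (n * p);
			unsigned int d = clk > target ? clk - target
						      : target - clk;

			if (d < delta) {
				delta = d;
				*best_m = m;
				*best_p = p;
			}
		}
	}

	return delta; /* 0 means an exact match was found */
}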
@@ -41,9 +41,8 @@
#include "intel_drv.h"
#include "i915_drv.h"

static void i8xx_fbc_disable(struct drm_device *dev)
static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;

dev_priv->fbc.enabled = false;

@@ -65,13 +64,11 @@ static void i8xx_fbc_disable(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_fbc_enable(struct drm_crtc *crtc)
static void i8xx_fbc_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
u32 fbc_ctl;

@@ -84,7 +81,7 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
cfb_pitch = fb->pitches[0];

/* FBC_CTL wants 32B or 64B units */
if (IS_GEN2(dev))
if (IS_GEN2(dev_priv))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;

@@ -93,66 +90,61 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);

if (IS_GEN4(dev)) {
if (IS_GEN4(dev_priv)) {
u32 fbc_ctl2;

/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
}

/* enable it... */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);

DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;

return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_enable(struct drm_crtc *crtc)
static void g4x_fbc_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;

dev_priv->fbc.enabled = true;

dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);

/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

static void g4x_fbc_disable(struct drm_device *dev)
static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;

dev_priv->fbc.enabled = false;

@@ -167,10 +159,8 @@ static void g4x_fbc_disable(struct drm_device *dev)
}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;

return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

@@ -180,22 +170,21 @@ static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
POSTING_READ(MSG_FBC_REND_STATE);
}

static void ilk_fbc_enable(struct drm_crtc *crtc)
static void ilk_fbc_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;

dev_priv->fbc.enabled = true;

dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
threshold++;

switch (dev_priv->fbc.threshold) {
switch (threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;

@@ -208,28 +197,27 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
break;
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
if (IS_GEN5(dev_priv))
dpfc_ctl |= obj->fence_reg;

I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

if (IS_GEN6(dev)) {
if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
}

intel_fbc_nuke(dev_priv);

DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

static void ilk_fbc_disable(struct drm_device *dev)
static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;

dev_priv->fbc.enabled = false;

@@ -244,29 +232,29 @@ static void ilk_fbc_disable(struct drm_device *dev)
}
}

static bool ilk_fbc_enabled(struct drm_device *dev)
static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;

return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_enable(struct drm_crtc *crtc)
static void gen7_fbc_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;

dev_priv->fbc.enabled = true;

dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);

switch (dev_priv->fbc.threshold) {
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
threshold++;

switch (threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;

@@ -286,39 +274,37 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)

I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

if (IS_IVYBRIDGE(dev)) {
if (IS_IVYBRIDGE(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
HSW_FBCQ_DIS);
}

I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);

intel_fbc_nuke(dev_priv);

DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

/**
* intel_fbc_enabled - Is FBC enabled?
* @dev: the drm_device
* @dev_priv: i915 device instance
*
* This function is used to verify the current state of FBC.
* FIXME: This should be tracked in the plane config eventually
* instead of queried at runtime for most callers.
*/
bool intel_fbc_enabled(struct drm_device *dev)
bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;

return dev_priv->fbc.enabled;
}

@@ -327,31 +313,33 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct intel_fbc_work *work =
container_of(to_delayed_work(__work),
struct intel_fbc_work, work);
struct drm_device *dev = work->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;

mutex_lock(&dev->struct_mutex);
mutex_lock(&dev_priv->fbc.lock);
if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
if (work->crtc->primary->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc);
if (crtc_fb == work->fb) {
dev_priv->fbc.enable_fbc(work->crtc);

dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
dev_priv->fbc.crtc = work->crtc;
dev_priv->fbc.fb_id = crtc_fb->base.id;
dev_priv->fbc.y = work->crtc->base.y;
}

dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->fbc.lock);

kfree(work);
}

static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

if (dev_priv->fbc.fbc_work == NULL)
return;

@@ -373,26 +361,24 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
dev_priv->fbc.fbc_work = NULL;
}

static void intel_fbc_enable(struct drm_crtc *crtc)
static void intel_fbc_enable(struct intel_crtc *crtc)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

if (!dev_priv->display.enable_fbc)
return;
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

intel_fbc_cancel_work(dev_priv);

work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc);
dev_priv->fbc.enable_fbc(crtc);
return;
}

work->crtc = crtc;
work->fb = crtc->primary->fb;
work->fb = crtc->base.primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

dev_priv->fbc.fbc_work = work;
@@ -413,75 +399,274 @@ static void intel_fbc_enable(struct drm_crtc *crtc)
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

/**
* intel_fbc_disable - disable FBC
* @dev: the drm_device
*
* This function disables FBC.
*/
void intel_fbc_disable(struct drm_device *dev)
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

intel_fbc_cancel_work(dev_priv);

if (!dev_priv->display.disable_fbc)
return;

dev_priv->display.disable_fbc(dev);
dev_priv->fbc.disable_fbc(dev_priv);
dev_priv->fbc.crtc = NULL;
}

static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
/**
* intel_fbc_disable - disable FBC
* @dev_priv: i915 device instance
*
* This function disables FBC.
*/
void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
if (!dev_priv->fbc.enable_fbc)
return;

mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_disable(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}

/*
* intel_fbc_disable_crtc - disable FBC if it's associated with crtc
* @crtc: the CRTC
*
* This function disables FBC if it's associated with the provided CRTC.
*/
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

if (!dev_priv->fbc.enable_fbc)
return;

mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.crtc == crtc)
__intel_fbc_disable(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}

const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
{
switch (reason) {
case FBC_OK:
return "FBC enabled but currently disabled in hardware";
case FBC_UNSUPPORTED:
return "unsupported by this chipset";
case FBC_NO_OUTPUT:
return "no output";
case FBC_STOLEN_TOO_SMALL:
return "not enough stolen memory";
case FBC_UNSUPPORTED_MODE:
return "mode incompatible with compression";
case FBC_MODE_TOO_LARGE:
return "mode too large for compression";
case FBC_BAD_PLANE:
return "FBC unsupported on plane";
case FBC_NOT_TILED:
return "framebuffer not tiled or fenced";
case FBC_MULTIPLE_PIPES:
return "more than one pipe active";
case FBC_MODULE_PARAM:
return "disabled per module param";
case FBC_CHIP_DEFAULT:
return "disabled per chip default";
case FBC_ROTATION:
return "rotation unsupported";
case FBC_IN_DBG_MASTER:
return "Kernel debugger is active";
default:
MISSING_CASE(reason);
return "unknown reason";
}
}

static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return false;
return;

dev_priv->fbc.no_fbc_reason = reason;
return true;
DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
}

static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc = NULL, *tmp_crtc;
enum pipe pipe;
bool pipe_a_only = false, one_pipe_only = false;
bool pipe_a_only = false;

if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
pipe_a_only = true;
else if (INTEL_INFO(dev_priv)->gen <= 4)
one_pipe_only = true;

for_each_pipe(dev_priv, pipe) {
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];

if (intel_crtc_active(tmp_crtc) &&
to_intel_plane_state(tmp_crtc->primary->state)->visible) {
if (one_pipe_only && crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
return NULL;
}
to_intel_plane_state(tmp_crtc->primary->state)->visible)
crtc = tmp_crtc;
}

if (pipe_a_only)
break;
}

if (!crtc || crtc->primary->fb == NULL) {
if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
DRM_DEBUG_KMS("no output, disabling\n");
if (!crtc || crtc->primary->fb == NULL)
return NULL;
}

return crtc;
}

static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
int n_pipes = 0;
struct drm_crtc *crtc;

if (INTEL_INFO(dev_priv)->gen > 4)
return true;

for_each_pipe(dev_priv, pipe) {
crtc = dev_priv->pipe_to_crtc_mapping[pipe];

if (intel_crtc_active(crtc) &&
to_intel_plane_state(crtc->primary->state)->visible)
n_pipes++;
}

return (n_pipes < 2);
}

static int find_compression_threshold(struct drm_i915_private *dev_priv,
struct drm_mm_node *node,
int size,
int fb_cpp)
{
int compression_threshold = 1;
int ret;

/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
*
* The enable_fbc code will attempt to use one of our 2 compression
* thresholds, therefore, in that case, we only have 1 resort.
*/

/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
if (ret == 0)
return compression_threshold;

again:
/* HW's ability to limit the CFB is 1:4 */
if (compression_threshold > 4 ||
(fb_cpp == 2 && compression_threshold == 2))
return 0;

ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
goto again;
} else {
return compression_threshold;
}
}

static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
int fb_cpp)
{
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;

ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
else if (ret > 1) {
DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

}

dev_priv->fbc.threshold = ret;

if (INTEL_INFO(dev_priv)->gen >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev_priv)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;

ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
4096, 4096);
if (ret)
goto err_fb;

dev_priv->fbc.compressed_llb = compressed_llb;

I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}

dev_priv->fbc.uncompressed_size = size;

DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);

return 0;

err_fb:
kfree(compressed_llb);
i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc.uncompressed_size == 0)
return;

i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);

if (dev_priv->fbc.compressed_llb) {
i915_gem_stolen_remove_node(dev_priv,
dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
}

dev_priv->fbc.uncompressed_size = 0;
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
if (!dev_priv->fbc.enable_fbc)
return;

mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_cleanup_cfb(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}

static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
int fb_cpp)
{
if (size <= dev_priv->fbc.uncompressed_size)
return 0;

/* Release any current block */
__intel_fbc_cleanup_cfb(dev_priv);

return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
}

/**
* intel_fbc_update - enable/disable FBC as needed
* @dev: the drm_device
* __intel_fbc_update - enable/disable FBC as needed, unlocked
* @dev_priv: i915 device instance
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
@@ -498,9 +683,8 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
*
* We need to enable/disable FBC on a global basis.
*/
void intel_fbc_update(struct drm_device *dev)
static void __intel_fbc_update(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;

@@ -508,22 +692,19 @@ void intel_fbc_update(struct drm_device *dev)
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;

if (!HAS_FBC(dev))
return;
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

/* disable framebuffer compression in vGPU */
if (intel_vgpu_active(dev))
if (intel_vgpu_active(dev_priv->dev))
i915.enable_fbc = 0;

if (i915.enable_fbc < 0) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
goto out_disable;
}

if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
goto out_disable;
}

@@ -537,8 +718,15 @@ void intel_fbc_update(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
crtc = intel_fbc_find_crtc(dev_priv);
if (!crtc)
if (!crtc) {
set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
goto out_disable;
}

if (!multiple_pipes_ok(dev_priv)) {
set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
goto out_disable;
}

intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;

@@ -547,16 +735,14 @@ void intel_fbc_update(struct drm_device *dev)

if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
goto out_disable;
}

if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
max_width = 4096;
max_height = 4096;
} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {

@@ -565,14 +751,12 @@ void intel_fbc_update(struct drm_device *dev)
}
if (intel_crtc->config->pipe_src_w > max_width ||
intel_crtc->config->pipe_src_h > max_height) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
goto out_disable;
}
if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
DRM_DEBUG_KMS("plane not A, disabling compression\n");
set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
goto out_disable;
}

@@ -581,25 +765,24 @@ void intel_fbc_update(struct drm_device *dev)
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
goto out_disable;
}
if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
set_no_fbc_reason(dev_priv, FBC_ROTATION);
goto out_disable;
}

/* If the kernel debugger is active, always disable compression */
if (in_dbg_master())
if (in_dbg_master()) {
set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
goto out_disable;
}

if (i915_gem_stolen_setup_compression(dev, obj->base.size,
drm_format_plane_cpp(fb->pixel_format, 0))) {
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
drm_format_plane_cpp(fb->pixel_format, 0))) {
set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
goto out_disable;
}

@@ -613,7 +796,7 @@ void intel_fbc_update(struct drm_device *dev)
dev_priv->fbc.y == crtc->y)
return;

if (intel_fbc_enabled(dev)) {
if (intel_fbc_enabled(dev_priv)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did

@@ -638,58 +821,86 @@ void intel_fbc_update(struct drm_device *dev)
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
intel_fbc_disable(dev);
__intel_fbc_disable(dev_priv);
}

intel_fbc_enable(crtc);
intel_fbc_enable(intel_crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;

out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_enabled(dev)) {
if (intel_fbc_enabled(dev_priv)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_fbc_disable(dev);
__intel_fbc_disable(dev_priv);
}
i915_gem_stolen_cleanup_compression(dev);
__intel_fbc_cleanup_cfb(dev_priv);
}

/*
* intel_fbc_update - enable/disable FBC as needed
* @dev_priv: i915 device instance
*
* This function reevaluates the overall state and enables or disables FBC.
*/
void intel_fbc_update(struct drm_i915_private *dev_priv)
{
if (!dev_priv->fbc.enable_fbc)
return;

mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_update(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}

void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
struct drm_device *dev = dev_priv->dev;
unsigned int fbc_bits;

if (!dev_priv->fbc.enable_fbc)
return;

if (origin == ORIGIN_GTT)
return;

mutex_lock(&dev_priv->fbc.lock);

if (dev_priv->fbc.enabled)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
else if (dev_priv->fbc.fbc_work)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
dev_priv->fbc.fbc_work->crtc->pipe);
else
fbc_bits = dev_priv->fbc.possible_framebuffer_bits;

dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);

if (dev_priv->fbc.busy_bits)
intel_fbc_disable(dev);
__intel_fbc_disable(dev_priv);

mutex_unlock(&dev_priv->fbc.lock);
}

void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits)
{
struct drm_device *dev = dev_priv->dev;
if (!dev_priv->fbc.enable_fbc)
return;

mutex_lock(&dev_priv->fbc.lock);

if (!dev_priv->fbc.busy_bits)
return;
goto out;

dev_priv->fbc.busy_bits &= ~frontbuffer_bits;

if (!dev_priv->fbc.busy_bits)
intel_fbc_update(dev);
__intel_fbc_update(dev_priv);

out:
mutex_unlock(&dev_priv->fbc.lock);
}

/**

@@ -702,6 +913,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;

mutex_init(&dev_priv->fbc.lock);

if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;

@@ -717,25 +930,25 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
}

if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ilk_fbc_enabled;
dev_priv->display.enable_fbc = gen7_fbc_enable;
dev_priv->display.disable_fbc = ilk_fbc_disable;
dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
dev_priv->fbc.enable_fbc = gen7_fbc_enable;
dev_priv->fbc.disable_fbc = ilk_fbc_disable;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
dev_priv->display.fbc_enabled = ilk_fbc_enabled;
dev_priv->display.enable_fbc = ilk_fbc_enable;
dev_priv->display.disable_fbc = ilk_fbc_disable;
dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
dev_priv->fbc.enable_fbc = ilk_fbc_enable;
dev_priv->fbc.disable_fbc = ilk_fbc_disable;
} else if (IS_GM45(dev_priv)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_fbc_enable;
dev_priv->display.disable_fbc = g4x_fbc_disable;
dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
dev_priv->fbc.enable_fbc = g4x_fbc_enable;
dev_priv->fbc.disable_fbc = g4x_fbc_disable;
} else {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_fbc_enable;
dev_priv->display.disable_fbc = i8xx_fbc_disable;
dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
dev_priv->fbc.disable_fbc = i8xx_fbc_disable;

/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}

dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
}
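The reworked FBC code above follows one locking convention throughout: every exported intel_fbc_* entry point takes dev_priv->fbc.lock and then defers to a double-underscore helper that only asserts the lock, so internal callers can chain helpers without re-locking. A minimal sketch of the pattern; the function names here are hypothetical placeholders, not kernel symbols:

/* Exported wrapper: owns the FBC mutex around the unlocked worker. */
void example_fbc_op(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__example_fbc_op(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}

/* Unlocked worker: callers must already hold dev_priv->fbc.lock. */
static void __example_fbc_op(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	/* ... manipulate dev_priv->fbc state here ... */
}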
@ -63,8 +63,7 @@ static int intel_fbdev_set_par(struct fb_info *info)
|
|||
* now until we solve this for real.
|
||||
*/
|
||||
mutex_lock(&fb_helper->dev->struct_mutex);
|
||||
ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
|
||||
true);
|
||||
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
|
||||
mutex_unlock(&fb_helper->dev->struct_mutex);
|
||||
}
|
||||
|
||||
|
@ -89,7 +88,7 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
|
|||
* now until we solve this for real.
|
||||
*/
|
||||
mutex_lock(&fb_helper->dev->struct_mutex);
|
||||
intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
|
||||
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
|
||||
mutex_unlock(&fb_helper->dev->struct_mutex);
|
||||
}
|
||||
|
||||
|
@ -115,7 +114,7 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
|
|||
* now until we solve this for real.
|
||||
*/
|
||||
mutex_lock(&fb_helper->dev->struct_mutex);
|
||||
intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
|
||||
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
|
||||
mutex_unlock(&fb_helper->dev->struct_mutex);
|
||||
}
|
||||
|
||||
|
@@ -177,7 +176,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
|
|||
}
|
||||
|
||||
/* Flush everything out, we'll be doing GTT only from now on */
|
||||
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
|
||||
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to pin obj: %d\n", ret);
|
||||
goto out_fb;
|
||||
|
@@ -484,18 +483,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
|||
* IMPORTANT: We want to use the adjusted mode (i.e.
|
||||
* after the panel fitter upscaling) as the initial
|
||||
* config, not the input mode, which is what crtc->mode
|
||||
* usually contains. But since our current fastboot
|
||||
* usually contains. But since our current
|
||||
* code puts a mode derived from the post-pfit timings
|
||||
* into crtc->mode this works out correctly. We don't
|
||||
* use hwmode anywhere right now, so use it for this
|
||||
* since the fb helper layer wants a pointer to
|
||||
* something we own.
|
||||
* into crtc->mode this works out correctly.
|
||||
*/
|
||||
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
|
||||
connector->name);
|
||||
intel_mode_from_pipe_config(&encoder->crtc->hwmode,
|
||||
to_intel_crtc(encoder->crtc)->config);
|
||||
modes[i] = &encoder->crtc->hwmode;
|
||||
modes[i] = &encoder->crtc->mode;
|
||||
}
|
||||
crtcs[i] = new_crtc;
|
||||
|
||||
|
@@ -582,7 +576,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
|
|||
struct intel_framebuffer *fb = NULL;
|
||||
struct drm_crtc *crtc;
|
||||
struct intel_crtc *intel_crtc;
|
||||
struct intel_initial_plane_config *plane_config = NULL;
|
||||
unsigned int max_size = 0;
|
||||
|
||||
if (!i915.fastboot)
|
||||
|
@@ -590,20 +583,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
|
|||
|
||||
/* Find the largest fb */
|
||||
for_each_crtc(dev, crtc) {
|
||||
struct drm_i915_gem_object *obj =
|
||||
intel_fb_obj(crtc->primary->state->fb);
|
||||
intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
if (!intel_crtc->active || !crtc->primary->fb) {
|
||||
if (!intel_crtc->active || !obj) {
|
||||
DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
|
||||
pipe_name(intel_crtc->pipe));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (intel_crtc->plane_config.size > max_size) {
|
||||
if (obj->base.size > max_size) {
|
||||
DRM_DEBUG_KMS("found possible fb from plane %c\n",
|
||||
pipe_name(intel_crtc->pipe));
|
||||
plane_config = &intel_crtc->plane_config;
|
||||
fb = to_intel_framebuffer(crtc->primary->fb);
|
||||
max_size = plane_config->size;
|
||||
fb = to_intel_framebuffer(crtc->primary->state->fb);
|
||||
max_size = obj->base.size;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -638,7 +632,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
|
|||
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
|
||||
pipe_name(intel_crtc->pipe),
|
||||
cur_size, fb->base.pitches[0]);
|
||||
plane_config = NULL;
|
||||
fb = NULL;
|
||||
break;
|
||||
}
|
||||
|
@@ -659,7 +652,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
|
|||
DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
|
||||
pipe_name(intel_crtc->pipe),
|
||||
cur_size, max_size);
|
||||
plane_config = NULL;
|
||||
fb = NULL;
|
||||
break;
|
||||
}
|
||||
|
@@ -825,11 +817,20 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
|
|||
{
|
||||
int ret;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_fbdev *ifbdev = dev_priv->fbdev;
|
||||
struct drm_fb_helper *fb_helper;
|
||||
|
||||
if (!dev_priv->fbdev)
|
||||
if (!ifbdev)
|
||||
return;
|
||||
|
||||
ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
|
||||
if (ret)
|
||||
fb_helper = &ifbdev->helper;
|
||||
|
||||
ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
|
||||
if (ret) {
|
||||
DRM_DEBUG("failed to restore crtc mode\n");
|
||||
} else {
|
||||
mutex_lock(&fb_helper->dev->struct_mutex);
|
||||
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
|
||||
mutex_unlock(&fb_helper->dev->struct_mutex);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -65,84 +65,29 @@
|
|||
#include "intel_drv.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
static void intel_increase_pllclock(struct drm_device *dev,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int dpll_reg = DPLL(pipe);
|
||||
int dpll;
|
||||
|
||||
if (!HAS_GMCH_DISPLAY(dev))
|
||||
return;
|
||||
|
||||
if (!dev_priv->lvds_downclock_avail)
|
||||
return;
|
||||
|
||||
dpll = I915_READ(dpll_reg);
|
||||
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
|
||||
DRM_DEBUG_DRIVER("upclocking LVDS\n");
|
||||
|
||||
assert_panel_unlocked(dev_priv, pipe);
|
||||
|
||||
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
|
||||
I915_WRITE(dpll_reg, dpll);
|
||||
intel_wait_for_vblank(dev, pipe);
|
||||
|
||||
dpll = I915_READ(dpll_reg);
|
||||
if (dpll & DISPLAY_RATE_SELECT_FPA1)
|
||||
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_mark_fb_busy - mark given planes as busy
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: bits for the affected planes
|
||||
* @ring: optional ring for asynchronous commands
|
||||
*
|
||||
* This function gets called every time the screen contents change. It can be
|
||||
* used to keep e.g. the update rate at the nominal refresh rate with DRRS.
|
||||
*/
|
||||
static void intel_mark_fb_busy(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits,
|
||||
struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
enum pipe pipe;
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
|
||||
continue;
|
||||
|
||||
intel_increase_pllclock(dev, pipe);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_fb_obj_invalidate - invalidate frontbuffer object
|
||||
* @obj: GEM object to invalidate
|
||||
* @ring: set for asynchronous rendering
|
||||
* @origin: which operation caused the invalidation
|
||||
*
|
||||
* This function gets called every time rendering on the given object starts and
|
||||
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
|
||||
* be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
|
||||
* be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
|
||||
* until the rendering completes or a flip on this frontbuffer plane is
|
||||
* scheduled.
|
||||
*/
|
||||
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *ring,
|
||||
enum fb_op_origin origin)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
||||
if (!obj->frontbuffer_bits)
|
||||
return;
|
||||
|
||||
if (ring) {
|
||||
if (origin == ORIGIN_CS) {
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
dev_priv->fb_tracking.busy_bits
|
||||
|= obj->frontbuffer_bits;
|
||||
|
@@ -151,8 +96,6 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
|||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
}
|
||||
|
||||
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
|
||||
|
||||
intel_psr_invalidate(dev, obj->frontbuffer_bits);
|
||||
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
|
||||
intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
|
||||
|
@@ -162,6 +105,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
|||
* intel_frontbuffer_flush - flush frontbuffer
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
* @origin: which operation caused the flush
|
||||
*
|
||||
* This function gets called every time rendering on the given planes has
|
||||
* completed and frontbuffer caching can be started again. Flushes will get
|
||||
|
@@ -169,20 +113,22 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
|||
*
|
||||
* Can be called without any locks held.
|
||||
*/
|
||||
void intel_frontbuffer_flush(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
static void intel_frontbuffer_flush(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits,
|
||||
enum fb_op_origin origin)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
/* Delay flushing when rings are still busy.*/
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
|
||||
if (!frontbuffer_bits)
|
||||
return;
|
||||
|
||||
intel_edp_drrs_flush(dev, frontbuffer_bits);
|
||||
intel_psr_flush(dev, frontbuffer_bits);
|
||||
intel_psr_flush(dev, frontbuffer_bits, origin);
|
||||
intel_fbc_flush(dev_priv, frontbuffer_bits);
|
||||
}
|
||||
|
||||
|
@@ -190,16 +136,17 @@ void intel_frontbuffer_flush(struct drm_device *dev,
|
|||
* intel_fb_obj_flush - flush frontbuffer object
|
||||
* @obj: GEM object to flush
|
||||
* @retire: set when retiring asynchronous rendering
|
||||
* @origin: which operation caused the flush
|
||||
*
|
||||
* This function gets called every time rendering on the given object has
|
||||
* completed and frontbuffer caching can be started again. If @retire is true
|
||||
* then any delayed flushes will be unblocked.
|
||||
*/
|
||||
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
|
||||
bool retire)
|
||||
bool retire, enum fb_op_origin origin)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
unsigned frontbuffer_bits;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
@@ -218,7 +165,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
|
|||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
}
|
||||
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits);
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -236,7 +183,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
|
|||
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
|
||||
|
@@ -244,7 +191,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
|
|||
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
intel_psr_single_frame_update(dev);
|
||||
intel_psr_single_frame_update(dev, frontbuffer_bits);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -260,7 +207,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
|
|||
void intel_frontbuffer_flip_complete(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
/* Mask any cancelled flips. */
|
||||
|
@@ -268,5 +215,29 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
|
|||
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits);
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_frontbuffer_flip - synchronous frontbuffer flip
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* This function gets called after scheduling a flip on @obj. This is for
|
||||
* synchronous plane updates which will happen on the next vblank and which will
|
||||
* not get delayed by pending gpu rendering.
|
||||
*
|
||||
* Can be called without any locks held.
|
||||
*/
|
||||
void intel_frontbuffer_flip(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
/* Remove stale busy bits due to the old buffer. */
|
||||
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
|
||||
}
|
||||
|
|
|
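Aside from the patch itself, the busy_bits handling above is plain bitmask bookkeeping: rendering marks planes busy, a flip drops the stale bits, and a flush only reaches planes that are no longer busy. A standalone userspace sketch of that ordering, with invented plane bits, no locking, and names that are illustrative rather than the driver's:

#include <stdio.h>

/* Invented single-pipe plane bits, standing in for INTEL_FRONTBUFFER_*() */
#define FB_PRIMARY 0x1u
#define FB_CURSOR  0x2u

static unsigned int busy_bits;	/* planes with rendering outstanding */

static void invalidate(unsigned int bits) { busy_bits |= bits; }
static void flip(unsigned int bits) { busy_bits &= ~bits; }	/* stale busy bits are dropped */
static unsigned int flush(unsigned int bits) { return bits & ~busy_bits; }	/* still-busy planes stay blocked */

int main(void)
{
	invalidate(FB_PRIMARY | FB_CURSOR);	/* rendering starts on both planes */
	flip(FB_PRIMARY);			/* a flip clears the primary's busy bit */
	printf("flushable now: %#x\n", flush(FB_PRIMARY | FB_CURSOR));	/* -> 0x1 */
	return 0;
}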
@@ -174,10 +174,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
|
|||
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
|
||||
u32 val = I915_READ(VIDEO_DIP_CTL);
|
||||
|
||||
if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
|
||||
return val & VIDEO_DIP_ENABLE;
|
||||
if ((val & VIDEO_DIP_ENABLE) == 0)
|
||||
return false;
|
||||
|
||||
return false;
|
||||
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
|
||||
return false;
|
||||
|
||||
return val & (VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
|
||||
}
|
||||
|
||||
static void ibx_write_infoframe(struct drm_encoder *encoder,
|
||||
|
@@ -227,10 +231,15 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
|
|||
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
|
||||
u32 val = I915_READ(reg);
|
||||
|
||||
if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
|
||||
return val & VIDEO_DIP_ENABLE;
|
||||
if ((val & VIDEO_DIP_ENABLE) == 0)
|
||||
return false;
|
||||
|
||||
return false;
|
||||
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
|
||||
return false;
|
||||
|
||||
return val & (VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
|
||||
}
|
||||
|
||||
static void cpt_write_infoframe(struct drm_encoder *encoder,
|
||||
|
@@ -282,7 +291,12 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
|
|||
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
|
||||
u32 val = I915_READ(reg);
|
||||
|
||||
return val & VIDEO_DIP_ENABLE;
|
||||
if ((val & VIDEO_DIP_ENABLE) == 0)
|
||||
return false;
|
||||
|
||||
return val & (VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
|
||||
}
|
||||
|
||||
static void vlv_write_infoframe(struct drm_encoder *encoder,
|
||||
|
@@ -332,10 +346,15 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
|
|||
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
|
||||
u32 val = I915_READ(reg);
|
||||
|
||||
if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
|
||||
return val & VIDEO_DIP_ENABLE;
|
||||
if ((val & VIDEO_DIP_ENABLE) == 0)
|
||||
return false;
|
||||
|
||||
return false;
|
||||
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
|
||||
return false;
|
||||
|
||||
return val & (VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
|
||||
}
|
||||
|
||||
static void hsw_write_infoframe(struct drm_encoder *encoder,
|
||||
|
@@ -383,8 +402,9 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
|
|||
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
|
||||
u32 val = I915_READ(ctl_reg);
|
||||
|
||||
return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
|
||||
VIDEO_DIP_ENABLE_VS_HSW);
|
||||
return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
|
||||
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
|
||||
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -514,7 +534,13 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
|
|||
if (!enable) {
|
||||
if (!(val & VIDEO_DIP_ENABLE))
|
||||
return;
|
||||
val &= ~VIDEO_DIP_ENABLE;
|
||||
if (port != (val & VIDEO_DIP_PORT_MASK)) {
|
||||
DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
|
||||
(val & VIDEO_DIP_PORT_MASK) >> 29);
|
||||
return;
|
||||
}
|
||||
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
return;
|
||||
|
@@ -522,16 +548,17 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
|
|||
|
||||
if (port != (val & VIDEO_DIP_PORT_MASK)) {
|
||||
if (val & VIDEO_DIP_ENABLE) {
|
||||
val &= ~VIDEO_DIP_ENABLE;
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
|
||||
(val & VIDEO_DIP_PORT_MASK) >> 29);
|
||||
return;
|
||||
}
|
||||
val &= ~VIDEO_DIP_PORT_MASK;
|
||||
val |= port;
|
||||
}
|
||||
|
||||
val |= VIDEO_DIP_ENABLE;
|
||||
val &= ~VIDEO_DIP_ENABLE_VENDOR;
|
||||
val &= ~(VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
|
||||
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
|
@@ -541,6 +568,97 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
|
|||
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
|
||||
}
|
||||
|
||||
static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_connector *connector;
|
||||
|
||||
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
|
||||
|
||||
/*
|
||||
* HDMI cloning is only supported on g4x which doesn't
|
||||
* support deep color or GCP infoframes anyway so no
|
||||
* need to worry about multiple HDMI sinks here.
|
||||
*/
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
|
||||
if (connector->encoder == encoder)
|
||||
return connector->display_info.bpc > 8;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine if default_phase=1 can be indicated in the GCP infoframe.
|
||||
*
|
||||
* From HDMI specification 1.4a:
|
||||
* - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
|
||||
* - The first pixel following each Video Data Period shall have a pixel packing phase of 0
|
||||
* - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
|
||||
* - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
|
||||
* phase of 0
|
||||
*/
|
||||
static bool gcp_default_phase_possible(int pipe_bpp,
|
||||
const struct drm_display_mode *mode)
|
||||
{
|
||||
unsigned int pixels_per_group;
|
||||
|
||||
switch (pipe_bpp) {
|
||||
case 30:
|
||||
/* 4 pixels in 5 clocks */
|
||||
pixels_per_group = 4;
|
||||
break;
|
||||
case 36:
|
||||
/* 2 pixels in 3 clocks */
|
||||
pixels_per_group = 2;
|
||||
break;
|
||||
case 48:
|
||||
/* 1 pixel in 2 clocks */
|
||||
pixels_per_group = 1;
|
||||
break;
|
||||
default:
|
||||
/* phase information not relevant for 8bpc */
|
||||
return false;
|
||||
}
|
||||
|
||||
return mode->crtc_hdisplay % pixels_per_group == 0 &&
|
||||
mode->crtc_htotal % pixels_per_group == 0 &&
|
||||
mode->crtc_hblank_start % pixels_per_group == 0 &&
|
||||
mode->crtc_hblank_end % pixels_per_group == 0 &&
|
||||
mode->crtc_hsync_start % pixels_per_group == 0 &&
|
||||
mode->crtc_hsync_end % pixels_per_group == 0 &&
|
||||
((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
|
||||
mode->crtc_htotal/2 % pixels_per_group == 0);
|
||||
}
|
||||
|
||||
static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
|
||||
u32 reg, val = 0;
|
||||
|
||||
if (HAS_DDI(dev_priv))
|
||||
reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
|
||||
else if (IS_VALLEYVIEW(dev_priv))
|
||||
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
|
||||
else if (HAS_PCH_SPLIT(dev_priv->dev))
|
||||
reg = TVIDEO_DIP_GCP(crtc->pipe);
|
||||
else
|
||||
return false;
|
||||
|
||||
/* Indicate color depth whenever the sink supports deep color */
|
||||
if (hdmi_sink_is_deep_color(encoder))
|
||||
val |= GCP_COLOR_INDICATION;
|
||||
|
||||
/* Enable default_phase whenever the display mode is suitably aligned */
|
||||
if (gcp_default_phase_possible(crtc->config->pipe_bpp,
|
||||
&crtc->config->base.adjusted_mode))
|
||||
val |= GCP_DEFAULT_PHASE_ENABLE;
|
||||
|
||||
I915_WRITE(reg, val);
|
||||
|
||||
return val != 0;
|
||||
}
|
||||
|
||||
static void ibx_set_infoframes(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
|
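The divisibility rule that gcp_default_phase_possible() encodes above can be exercised on its own. A minimal userspace sketch, assuming 30 bpp (4 pixels per packing group) and standard 1080p60 horizontal timings; plain ints stand in for struct drm_display_mode:

#include <stdbool.h>
#include <stdio.h>

/* Same rule as gcp_default_phase_possible(): every horizontal timing value
 * must be a multiple of the pixel-packing group (4 pixels at 30 bpp). */
static bool default_phase_possible(int group, int hdisplay, int hblank_start,
				   int hsync_start, int hsync_end,
				   int hblank_end, int htotal)
{
	return hdisplay % group == 0 && htotal % group == 0 &&
	       hblank_start % group == 0 && hblank_end % group == 0 &&
	       hsync_start % group == 0 && hsync_end % group == 0;
}

int main(void)
{
	/* 1080p60 CEA horizontal timings: 1920 1920 2008 2052 2200 2200 */
	printf("%s\n", default_phase_possible(4, 1920, 1920, 2008, 2052, 2200, 2200) ?
	       "default_phase ok" : "default_phase not possible");
	return 0;
}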
@@ -561,25 +679,29 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
|
|||
if (!enable) {
|
||||
if (!(val & VIDEO_DIP_ENABLE))
|
||||
return;
|
||||
val &= ~VIDEO_DIP_ENABLE;
|
||||
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
return;
|
||||
}
|
||||
|
||||
if (port != (val & VIDEO_DIP_PORT_MASK)) {
|
||||
if (val & VIDEO_DIP_ENABLE) {
|
||||
val &= ~VIDEO_DIP_ENABLE;
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
}
|
||||
WARN(val & VIDEO_DIP_ENABLE,
|
||||
"DIP already enabled on port %c\n",
|
||||
(val & VIDEO_DIP_PORT_MASK) >> 29);
|
||||
val &= ~VIDEO_DIP_PORT_MASK;
|
||||
val |= port;
|
||||
}
|
||||
|
||||
val |= VIDEO_DIP_ENABLE;
|
||||
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_GCP);
|
||||
val &= ~(VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
|
||||
|
||||
if (intel_hdmi_set_gcp_infoframe(encoder))
|
||||
val |= VIDEO_DIP_ENABLE_GCP;
|
||||
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
|
@@ -607,7 +729,9 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
|
|||
if (!enable) {
|
||||
if (!(val & VIDEO_DIP_ENABLE))
|
||||
return;
|
||||
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
|
||||
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
return;
|
||||
|
@@ -616,7 +740,10 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
|
|||
/* Set both together, unset both together: see the spec. */
|
||||
val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
|
||||
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_GCP);
|
||||
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
|
||||
|
||||
if (intel_hdmi_set_gcp_infoframe(encoder))
|
||||
val |= VIDEO_DIP_ENABLE_GCP;
|
||||
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
|
@@ -646,25 +773,29 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
|
|||
if (!enable) {
|
||||
if (!(val & VIDEO_DIP_ENABLE))
|
||||
return;
|
||||
val &= ~VIDEO_DIP_ENABLE;
|
||||
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
return;
|
||||
}
|
||||
|
||||
if (port != (val & VIDEO_DIP_PORT_MASK)) {
|
||||
if (val & VIDEO_DIP_ENABLE) {
|
||||
val &= ~VIDEO_DIP_ENABLE;
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
}
|
||||
WARN(val & VIDEO_DIP_ENABLE,
|
||||
"DIP already enabled on port %c\n",
|
||||
(val & VIDEO_DIP_PORT_MASK) >> 29);
|
||||
val &= ~VIDEO_DIP_PORT_MASK;
|
||||
val |= port;
|
||||
}
|
||||
|
||||
val |= VIDEO_DIP_ENABLE;
|
||||
val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
|
||||
VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
|
||||
val &= ~(VIDEO_DIP_ENABLE_AVI |
|
||||
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
|
||||
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
|
||||
|
||||
if (intel_hdmi_set_gcp_infoframe(encoder))
|
||||
val |= VIDEO_DIP_ENABLE_GCP;
|
||||
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
|
@@ -686,14 +817,18 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
|
|||
|
||||
assert_hdmi_port_disabled(intel_hdmi);
|
||||
|
||||
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
|
||||
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
|
||||
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
|
||||
|
||||
if (!enable) {
|
||||
I915_WRITE(reg, 0);
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
return;
|
||||
}
|
||||
|
||||
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
|
||||
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
|
||||
if (intel_hdmi_set_gcp_infoframe(encoder))
|
||||
val |= VIDEO_DIP_ENABLE_GCP_HSW;
|
||||
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
|
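The set_infoframes hunks above all follow one read-modify-write pattern on the DIP control register: clear every per-infoframe enable bit, then turn back on only what is in use. A rough standalone sketch of that pattern, using invented bit values rather than the real VIDEO_DIP_* layout:

#include <stdio.h>

/* Invented bit positions, standing in for the VIDEO_DIP_ENABLE_* bits. */
#define DIP_ENABLE	(1u << 31)
#define DIP_ENABLE_AVI	(1u << 21)
#define DIP_ENABLE_SPD	(1u << 24)
#define DIP_ENABLE_GCP	(1u << 16)

static unsigned int set_infoframes(unsigned int val, int enable, int want_gcp)
{
	if (!enable)	/* turning the port off: clear everything at once */
		return val & ~(DIP_ENABLE | DIP_ENABLE_AVI |
			       DIP_ENABLE_SPD | DIP_ENABLE_GCP);

	val |= DIP_ENABLE;
	/* start from a clean slate, then re-enable only what is wanted */
	val &= ~(DIP_ENABLE_AVI | DIP_ENABLE_SPD | DIP_ENABLE_GCP);
	if (want_gcp)
		val |= DIP_ENABLE_GCP;
	return val;
}

int main(void)
{
	printf("%#x\n", set_infoframes(DIP_ENABLE_SPD, 1, 1));	/* SPD cleared, GCP set */
	return 0;
}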
@@ -808,58 +943,146 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
|
|||
else
|
||||
dotclock = pipe_config->port_clock;
|
||||
|
||||
if (pipe_config->pixel_multiplier)
|
||||
dotclock /= pipe_config->pixel_multiplier;
|
||||
|
||||
if (HAS_PCH_SPLIT(dev_priv->dev))
|
||||
ironlake_check_encoder_dotclock(pipe_config, dotclock);
|
||||
|
||||
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
|
||||
}
|
||||
|
||||
static void intel_enable_hdmi(struct intel_encoder *encoder)
|
||||
static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
|
||||
WARN_ON(!crtc->config->has_hdmi_sink);
|
||||
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
|
||||
pipe_name(crtc->pipe));
|
||||
intel_audio_codec_enable(encoder);
|
||||
}
|
||||
|
||||
static void g4x_enable_hdmi(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
u32 temp;
|
||||
u32 enable_bits = SDVO_ENABLE;
|
||||
|
||||
if (intel_crtc->config->has_audio)
|
||||
enable_bits |= SDVO_AUDIO_ENABLE;
|
||||
|
||||
temp = I915_READ(intel_hdmi->hdmi_reg);
|
||||
|
||||
/* HW workaround for IBX, we need to move the port to transcoder A
|
||||
* before disabling it, so restore the transcoder select bit here. */
|
||||
if (HAS_PCH_IBX(dev))
|
||||
enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
|
||||
|
||||
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
|
||||
* we do this anyway which shows more stable in testing.
|
||||
*/
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
}
|
||||
|
||||
temp |= enable_bits;
|
||||
temp |= SDVO_ENABLE;
|
||||
if (crtc->config->has_audio)
|
||||
temp |= SDVO_AUDIO_ENABLE;
|
||||
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
|
||||
/* HW workaround, need to write this twice for issue that may result
|
||||
* in first write getting masked.
|
||||
if (crtc->config->has_audio)
|
||||
intel_enable_hdmi_audio(encoder);
|
||||
}
|
||||
|
||||
static void ibx_enable_hdmi(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
u32 temp;
|
||||
|
||||
temp = I915_READ(intel_hdmi->hdmi_reg);
|
||||
|
||||
temp |= SDVO_ENABLE;
|
||||
if (crtc->config->has_audio)
|
||||
temp |= SDVO_AUDIO_ENABLE;
|
||||
|
||||
/*
|
||||
* HW workaround, need to write this twice for issue
|
||||
* that may result in first write getting masked.
|
||||
*/
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
|
||||
/*
|
||||
* HW workaround, need to toggle enable bit off and on
|
||||
* for 12bpc with pixel repeat.
|
||||
*
|
||||
* FIXME: BSpec says this should be done at the end of
|
||||
* of the modeset sequence, so not sure if this isn't too soon.
|
||||
*/
|
||||
if (crtc->config->pipe_bpp > 24 &&
|
||||
crtc->config->pixel_multiplier > 1) {
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
|
||||
/*
|
||||
* HW workaround, need to write this twice for issue
|
||||
* that may result in first write getting masked.
|
||||
*/
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
}
|
||||
|
||||
if (intel_crtc->config->has_audio) {
|
||||
WARN_ON(!intel_crtc->config->has_hdmi_sink);
|
||||
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
|
||||
pipe_name(intel_crtc->pipe));
|
||||
intel_audio_codec_enable(encoder);
|
||||
if (crtc->config->has_audio)
|
||||
intel_enable_hdmi_audio(encoder);
|
||||
}
|
||||
|
||||
static void cpt_enable_hdmi(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
u32 temp;
|
||||
|
||||
temp = I915_READ(intel_hdmi->hdmi_reg);
|
||||
|
||||
temp |= SDVO_ENABLE;
|
||||
if (crtc->config->has_audio)
|
||||
temp |= SDVO_AUDIO_ENABLE;
|
||||
|
||||
/*
|
||||
* WaEnableHDMI8bpcBefore12bpc:snb,ivb
|
||||
*
|
||||
* The procedure for 12bpc is as follows:
|
||||
* 1. disable HDMI clock gating
|
||||
* 2. enable HDMI with 8bpc
|
||||
* 3. enable HDMI with 12bpc
|
||||
* 4. enable HDMI clock gating
|
||||
*/
|
||||
|
||||
if (crtc->config->pipe_bpp > 24) {
|
||||
I915_WRITE(TRANS_CHICKEN1(pipe),
|
||||
I915_READ(TRANS_CHICKEN1(pipe)) |
|
||||
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
|
||||
|
||||
temp &= ~SDVO_COLOR_FORMAT_MASK;
|
||||
temp |= SDVO_COLOR_FORMAT_8bpc;
|
||||
}
|
||||
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
|
||||
if (crtc->config->pipe_bpp > 24) {
|
||||
temp &= ~SDVO_COLOR_FORMAT_MASK;
|
||||
temp |= HDMI_COLOR_FORMAT_12bpc;
|
||||
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
|
||||
I915_WRITE(TRANS_CHICKEN1(pipe),
|
||||
I915_READ(TRANS_CHICKEN1(pipe)) &
|
||||
~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
|
||||
}
|
||||
|
||||
if (crtc->config->has_audio)
|
||||
intel_enable_hdmi_audio(encoder);
|
||||
}
|
||||
|
||||
static void vlv_enable_hdmi(struct intel_encoder *encoder)
|
||||
|
@@ -901,6 +1124,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
|
|||
I915_WRITE(intel_hdmi->hdmi_reg, temp);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
}
|
||||
|
||||
intel_hdmi->set_infoframes(&encoder->base, false, NULL);
|
||||
}
|
||||
|
||||
static void g4x_disable_hdmi(struct intel_encoder *encoder)
|
||||
|
@@ -926,7 +1151,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder)
|
|||
intel_disable_hdmi(encoder);
|
||||
}
|
||||
|
||||
static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
|
||||
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
|
||||
{
|
||||
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
|
||||
|
||||
|
@@ -938,25 +1163,52 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
|
|||
return 225000;
|
||||
}
|
||||
|
||||
static enum drm_mode_status
|
||||
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
|
||||
int clock, bool respect_dvi_limit)
|
||||
{
|
||||
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
|
||||
|
||||
if (clock < 25000)
|
||||
return MODE_CLOCK_LOW;
|
||||
if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
/* BXT DPLL can't generate 223-240 MHz */
|
||||
if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
|
||||
return MODE_CLOCK_RANGE;
|
||||
|
||||
/* CHV DPLL can't generate 216-240 MHz */
|
||||
if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
|
||||
return MODE_CLOCK_RANGE;
|
||||
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
static enum drm_mode_status
|
||||
intel_hdmi_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
int clock = mode->clock;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
|
||||
clock *= 2;
|
||||
|
||||
if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
|
||||
true))
|
||||
return MODE_CLOCK_HIGH;
|
||||
if (clock < 20000)
|
||||
return MODE_CLOCK_LOW;
|
||||
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
|
||||
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
|
||||
enum drm_mode_status status;
|
||||
int clock;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
return MODE_NO_DBLESCAN;
|
||||
|
||||
return MODE_OK;
|
||||
clock = mode->clock;
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
|
||||
clock *= 2;
|
||||
|
||||
/* check if we can do 8bpc */
|
||||
status = hdmi_port_clock_valid(hdmi, clock, true);
|
||||
|
||||
/* if we can't do 8bpc we may still be able to do 12bpc */
|
||||
if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
|
||||
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
|
||||
|
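For reference (not part of the patch), the 12bpc path simply scales the 8bpc port clock by 3/2 and re-runs the same range check; a small standalone sketch using the 25 MHz floor and a 225 MHz ceiling as quoted in this hunk, treated here as illustrative limits:

#include <stdio.h>

/* 12bpc carries 1.5x the bits of 8bpc, so the port clock scales by 3/2
 * and is then checked against the same window (floor and platform max). */
static const char *port_clock_status(int clock_khz, int max_khz)
{
	if (clock_khz < 25000)
		return "MODE_CLOCK_LOW";
	if (clock_khz > max_khz)
		return "MODE_CLOCK_HIGH";
	return "MODE_OK";
}

int main(void)
{
	int clock_8bpc = 148500;		/* 1080p60 */
	int clock_12bpc = clock_8bpc * 3 / 2;	/* 222750 */

	printf("8bpc : %s\n", port_clock_status(clock_8bpc, 225000));
	printf("12bpc: %s\n", port_clock_status(clock_12bpc, 225000));
	return 0;
}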
@@ -997,8 +1249,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
|||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
|
||||
int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
|
||||
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
|
||||
int clock_12bpc = clock_8bpc * 3 / 2;
|
||||
int desired_bpp;
|
||||
|
||||
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
|
||||
|
@@ -1017,6 +1269,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
|||
|
||||
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
|
||||
pipe_config->pixel_multiplier = 2;
|
||||
clock_8bpc *= 2;
|
||||
clock_12bpc *= 2;
|
||||
}
|
||||
|
||||
if (intel_hdmi->color_range)
|
||||
|
@@ -1035,9 +1289,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
|||
* within limits.
|
||||
*/
|
||||
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
|
||||
clock_12bpc <= portclock_limit &&
|
||||
hdmi_12bpc_possible(pipe_config) &&
|
||||
0 /* FIXME 12bpc support totally broken */) {
|
||||
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
|
||||
hdmi_12bpc_possible(pipe_config)) {
|
||||
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
|
||||
desired_bpp = 12*3;
|
||||
|
||||
|
@@ -1046,6 +1299,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
|||
} else {
|
||||
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
|
||||
desired_bpp = 8*3;
|
||||
|
||||
pipe_config->port_clock = clock_8bpc;
|
||||
}
|
||||
|
||||
if (!pipe_config->bw_constrained) {
|
||||
|
@@ -1053,8 +1308,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
|||
pipe_config->pipe_bpp = desired_bpp;
|
||||
}
|
||||
|
||||
if (adjusted_mode->crtc_clock > portclock_limit) {
|
||||
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
|
||||
if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
|
||||
false) != MODE_OK) {
|
||||
DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@@ -1323,7 +1579,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
|
|||
intel_crtc->config->has_hdmi_sink,
|
||||
adjusted_mode);
|
||||
|
||||
intel_enable_hdmi(encoder);
|
||||
g4x_enable_hdmi(encoder);
|
||||
|
||||
vlv_wait_port_ready(dev_priv, dport, 0x0);
|
||||
}
|
||||
|
@@ -1640,7 +1896,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
|
|||
intel_crtc->config->has_hdmi_sink,
|
||||
adjusted_mode);
|
||||
|
||||
intel_enable_hdmi(encoder);
|
||||
g4x_enable_hdmi(encoder);
|
||||
|
||||
vlv_wait_port_ready(dev_priv, dport, 0x0);
|
||||
}
|
||||
|
@@ -1827,7 +2083,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
|
|||
intel_encoder->post_disable = vlv_hdmi_post_disable;
|
||||
} else {
|
||||
intel_encoder->pre_enable = intel_hdmi_pre_enable;
|
||||
intel_encoder->enable = intel_enable_hdmi;
|
||||
if (HAS_PCH_CPT(dev))
|
||||
intel_encoder->enable = cpt_enable_hdmi;
|
||||
else if (HAS_PCH_IBX(dev))
|
||||
intel_encoder->enable = ibx_enable_hdmi;
|
||||
else
|
||||
intel_encoder->enable = g4x_enable_hdmi;
|
||||
}
|
||||
|
||||
intel_encoder->type = INTEL_OUTPUT_HDMI;
|
||||
|
|
|
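The intel_hdmi_init() hunk above replaces the single enable hook with per-PCH variants picked once at init time. A tiny standalone sketch of that dispatch pattern, with a made-up platform enum standing in for the HAS_PCH_* checks:

#include <stdio.h>

enum pch_type { PCH_NONE, PCH_IBX, PCH_CPT };	/* illustrative stand-in */

static void g4x_enable(void) { puts("g4x enable path"); }
static void ibx_enable(void) { puts("ibx enable path"); }
static void cpt_enable(void) { puts("cpt enable path"); }

/* Pick the enable hook once, mirroring the if/else ladder in the hunk above. */
static void (*pick_enable_hook(enum pch_type pch))(void)
{
	if (pch == PCH_CPT)
		return cpt_enable;
	else if (pch == PCH_IBX)
		return ibx_enable;
	else
		return g4x_enable;
}

int main(void)
{
	void (*enable)(void) = pick_enable_hook(PCH_IBX);

	enable();	/* -> "ibx enable path" */
	return 0;
}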
@@ -0,0 +1,499 @@
|
|||
/*
|
||||
* Copyright © 2015 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/i915_drm.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "intel_drv.h"
|
||||
|
||||
/**
|
||||
* DOC: Hotplug
|
||||
*
|
||||
* Simply put, hotplug occurs when a display is connected to or disconnected
|
||||
* from the system. However, there may be adapters and docking stations and
|
||||
* Display Port short pulses and MST devices involved, complicating matters.
|
||||
*
|
||||
* Hotplug in i915 is handled in many different levels of abstraction.
|
||||
*
|
||||
* The platform dependent interrupt handling code in i915_irq.c enables,
|
||||
* disables, and does preliminary handling of the interrupts. The interrupt
|
||||
* handlers gather the hotplug detect (HPD) information from relevant registers
|
||||
* into a platform independent mask of hotplug pins that have fired.
|
||||
*
|
||||
* The platform independent interrupt handler intel_hpd_irq_handler() in
|
||||
* intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
|
||||
* further processing to appropriate bottom halves (Display Port specific and
|
||||
* regular hotplug).
|
||||
*
|
||||
* The Display Port work function i915_digport_work_func() calls into
|
||||
* intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
|
||||
* pulses, with failures and non-MST long pulses triggering regular hotplug
|
||||
* processing on the connector.
|
||||
*
|
||||
* The regular hotplug work function i915_hotplug_work_func() calls connector
|
||||
* detect hooks, and, if connector status changes, triggers sending of hotplug
|
||||
* uevent to userspace via drm_kms_helper_hotplug_event().
|
||||
*
|
||||
* Finally, the userspace is responsible for triggering a modeset upon receiving
|
||||
* the hotplug uevent, disabling or enabling the crtc as needed.
|
||||
*
|
||||
* The hotplug interrupt storm detection and mitigation code keeps track of the
|
||||
* number of interrupts per hotplug pin per a period of time, and if the number
|
||||
* of interrupts exceeds a certain threshold, the interrupt is disabled for a
|
||||
 * while before being re-enabled. The intention is to mitigate issues arising
|
||||
* from broken hardware triggering massive amounts of interrupts and grinding
|
||||
* the system to a halt.
|
||||
*
|
||||
 * The current implementation expects that a hotplug interrupt storm will not be
 * seen when a DisplayPort sink is connected. Hence, on platforms whose DP
 * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
 * performed (it was never expected to be disabled in the first place ;) ).
 * This is specific to DP sinks handled by that routine; any other display,
 * such as HDMI or DVI, enabled on the same port will get the proper logic,
 * since it goes through i915_hotplug_work_func(), where this is handled.
|
||||
*/
|
||||
|
||||
enum port intel_hpd_pin_to_port(enum hpd_pin pin)
|
||||
{
|
||||
switch (pin) {
|
||||
case HPD_PORT_B:
|
||||
return PORT_B;
|
||||
case HPD_PORT_C:
|
||||
return PORT_C;
|
||||
case HPD_PORT_D:
|
||||
return PORT_D;
|
||||
default:
|
||||
return PORT_A; /* no hpd */
|
||||
}
|
||||
}
|
||||
|
||||
#define HPD_STORM_DETECT_PERIOD 1000
|
||||
#define HPD_STORM_THRESHOLD 5
|
||||
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
|
||||
|
||||
/**
|
||||
* intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
|
||||
* @dev_priv: private driver data pointer
|
||||
* @pin: the pin to gather stats on
|
||||
*
|
||||
* Gather stats about HPD irqs from the specified @pin, and detect irq
|
||||
* storms. Only the pin specific stats and state are changed, the caller is
|
||||
* responsible for further action.
|
||||
*
|
||||
* @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
|
||||
* otherwise it's considered an irq storm, and the irq state is set to
|
||||
* @HPD_MARK_DISABLED.
|
||||
*
|
||||
* Return true if an irq storm was detected on @pin.
|
||||
*/
|
||||
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
|
||||
enum hpd_pin pin)
|
||||
{
|
||||
unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
|
||||
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
|
||||
bool storm = false;
|
||||
|
||||
if (!time_in_range(jiffies, start, end)) {
|
||||
dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
|
||||
dev_priv->hotplug.stats[pin].count = 0;
|
||||
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
|
||||
} else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
|
||||
dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
|
||||
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
|
||||
storm = true;
|
||||
} else {
|
||||
dev_priv->hotplug.stats[pin].count++;
|
||||
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
|
||||
dev_priv->hotplug.stats[pin].count);
|
||||
}
|
||||
|
||||
return storm;
|
||||
}
|
||||
|
||||
static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct intel_connector *intel_connector;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct drm_connector *connector;
|
||||
enum hpd_pin pin;
|
||||
bool hpd_disabled = false;
|
||||
|
||||
assert_spin_locked(&dev_priv->irq_lock);
|
||||
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head) {
|
||||
if (connector->polled != DRM_CONNECTOR_POLL_HPD)
|
||||
continue;
|
||||
|
||||
intel_connector = to_intel_connector(connector);
|
||||
intel_encoder = intel_connector->encoder;
|
||||
if (!intel_encoder)
|
||||
continue;
|
||||
|
||||
pin = intel_encoder->hpd_pin;
|
||||
if (pin == HPD_NONE ||
|
||||
dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
|
||||
continue;
|
||||
|
||||
DRM_INFO("HPD interrupt storm detected on connector %s: "
|
||||
"switching from hotplug detection to polling\n",
|
||||
connector->name);
|
||||
|
||||
dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
|
||||
connector->polled = DRM_CONNECTOR_POLL_CONNECT
|
||||
| DRM_CONNECTOR_POLL_DISCONNECT;
|
||||
hpd_disabled = true;
|
||||
}
|
||||
|
||||
/* Enable polling and queue hotplug re-enabling. */
|
||||
if (hpd_disabled) {
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
|
||||
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv),
|
||||
hotplug.reenable_work.work);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
int i;
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
for_each_hpd_pin(i) {
|
||||
struct drm_connector *connector;
|
||||
|
||||
if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
|
||||
continue;
|
||||
|
||||
dev_priv->hotplug.stats[i].state = HPD_ENABLED;
|
||||
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head) {
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
|
||||
if (intel_connector->encoder->hpd_pin == i) {
|
||||
if (connector->polled != intel_connector->polled)
|
||||
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
|
||||
connector->name);
|
||||
connector->polled = intel_connector->polled;
|
||||
if (!connector->polled)
|
||||
connector->polled = DRM_CONNECTOR_POLL_HPD;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (dev_priv->display.hpd_irq_setup)
|
||||
dev_priv->display.hpd_irq_setup(dev);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
}
|
||||
|
||||
static bool intel_hpd_irq_event(struct drm_device *dev,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
enum drm_connector_status old_status;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
|
||||
old_status = connector->status;
|
||||
|
||||
connector->status = connector->funcs->detect(connector, false);
|
||||
if (old_status == connector->status)
|
||||
return false;
|
||||
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
|
||||
connector->base.id,
|
||||
connector->name,
|
||||
drm_get_connector_status_name(old_status),
|
||||
drm_get_connector_status_name(connector->status));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void i915_digport_work_func(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, struct drm_i915_private, hotplug.dig_port_work);
|
||||
u32 long_port_mask, short_port_mask;
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
int i;
|
||||
u32 old_bits = 0;
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
long_port_mask = dev_priv->hotplug.long_port_mask;
|
||||
dev_priv->hotplug.long_port_mask = 0;
|
||||
short_port_mask = dev_priv->hotplug.short_port_mask;
|
||||
dev_priv->hotplug.short_port_mask = 0;
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
for (i = 0; i < I915_MAX_PORTS; i++) {
|
||||
bool valid = false;
|
||||
bool long_hpd = false;
|
||||
intel_dig_port = dev_priv->hotplug.irq_port[i];
|
||||
if (!intel_dig_port || !intel_dig_port->hpd_pulse)
|
||||
continue;
|
||||
|
||||
if (long_port_mask & (1 << i)) {
|
||||
valid = true;
|
||||
long_hpd = true;
|
||||
} else if (short_port_mask & (1 << i))
|
||||
valid = true;
|
||||
|
||||
if (valid) {
|
||||
enum irqreturn ret;
|
||||
|
||||
ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
|
||||
if (ret == IRQ_NONE) {
|
||||
/* fall back to old school hpd */
|
||||
old_bits |= (1 << intel_dig_port->base.hpd_pin);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (old_bits) {
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
dev_priv->hotplug.event_bits |= old_bits;
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
schedule_work(&dev_priv->hotplug.hotplug_work);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle hotplug events outside the interrupt handler proper.
|
||||
*/
|
||||
static void i915_hotplug_work_func(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, struct drm_i915_private, hotplug.hotplug_work);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct intel_connector *intel_connector;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct drm_connector *connector;
|
||||
bool changed = false;
|
||||
u32 hpd_event_bits;
|
||||
|
||||
mutex_lock(&mode_config->mutex);
|
||||
DRM_DEBUG_KMS("running encoder hotplug functions\n");
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
||||
hpd_event_bits = dev_priv->hotplug.event_bits;
|
||||
dev_priv->hotplug.event_bits = 0;
|
||||
|
||||
/* Disable hotplug on connectors that hit an irq storm. */
|
||||
intel_hpd_irq_storm_disable(dev_priv);
|
||||
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head) {
|
||||
intel_connector = to_intel_connector(connector);
|
||||
if (!intel_connector->encoder)
|
||||
continue;
|
||||
intel_encoder = intel_connector->encoder;
|
||||
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
|
||||
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
|
||||
connector->name, intel_encoder->hpd_pin);
|
||||
if (intel_encoder->hot_plug)
|
||||
intel_encoder->hot_plug(intel_encoder);
|
||||
if (intel_hpd_irq_event(dev, connector))
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&mode_config->mutex);
|
||||
|
||||
if (changed)
|
||||
drm_kms_helper_hotplug_event(dev);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* intel_hpd_irq_handler - main hotplug irq handler
|
||||
* @dev: drm device
|
||||
* @pin_mask: a mask of hpd pins that have triggered the irq
|
||||
* @long_mask: a mask of hpd pins that may be long hpd pulses
|
||||
*
|
||||
* This is the main hotplug irq handler for all platforms. The platform specific
|
||||
* irq handlers call the platform specific hotplug irq handlers, which read and
|
||||
* decode the appropriate registers into bitmasks about hpd pins that have
|
||||
* triggered (@pin_mask), and which of those pins may be long pulses
|
||||
* (@long_mask). The @long_mask is ignored if the port corresponding to the pin
|
||||
* is not a digital port.
|
||||
*
|
||||
* Here, we do hotplug irq storm detection and mitigation, and pass further
|
||||
* processing to appropriate bottom halves.
|
||||
*/
|
||||
void intel_hpd_irq_handler(struct drm_device *dev,
|
||||
u32 pin_mask, u32 long_mask)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
enum port port;
|
||||
bool storm_detected = false;
|
||||
bool queue_dig = false, queue_hp = false;
|
||||
bool is_dig_port;
|
||||
|
||||
if (!pin_mask)
|
||||
return;
|
||||
|
||||
spin_lock(&dev_priv->irq_lock);
|
||||
for_each_hpd_pin(i) {
|
||||
if (!(BIT(i) & pin_mask))
|
||||
continue;
|
||||
|
||||
port = intel_hpd_pin_to_port(i);
|
||||
is_dig_port = port && dev_priv->hotplug.irq_port[port];
|
||||
|
||||
if (is_dig_port) {
|
||||
bool long_hpd = long_mask & BIT(i);
|
||||
|
||||
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
|
||||
long_hpd ? "long" : "short");
|
||||
/*
|
||||
* For long HPD pulses we want to have the digital queue happen,
|
||||
* but we still want HPD storm detection to function.
|
||||
*/
|
||||
queue_dig = true;
|
||||
if (long_hpd) {
|
||||
dev_priv->hotplug.long_port_mask |= (1 << port);
|
||||
} else {
|
||||
/* for short HPD just trigger the digital queue */
|
||||
dev_priv->hotplug.short_port_mask |= (1 << port);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
|
||||
/*
|
||||
* On GMCH platforms the interrupt mask bits only
|
||||
* prevent irq generation, not the setting of the
|
||||
* hotplug bits itself. So only WARN about unexpected
|
||||
* interrupts on saner platforms.
|
||||
*/
|
||||
WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
|
||||
"Received HPD interrupt on pin %d although disabled\n", i);
|
||||
continue;
|
		}

		if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
			continue;

		if (!is_dig_port) {
			dev_priv->hotplug.event_bits |= BIT(i);
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, i)) {
			dev_priv->hotplug.event_bits &= ~BIT(i);
			storm_detected = true;
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug.hotplug_work);
}

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll request can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for_each_hpd_pin(i) {
		dev_priv->hotplug.stats[i].count = 0;
		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->hotplug.long_port_mask = 0;
	dev_priv->hotplug.short_port_mask = 0;
	dev_priv->hotplug.event_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
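For orientation, below is a minimal sketch of how these entry points are meant to be sequenced on driver load and teardown, assuming the usual i915 load/unload path; the example_* wrappers are invented for illustration and are not part of this patch.

/* Sketch only: ordering implied by the kerneldoc above, not a verbatim
 * copy of the i915 load path; error handling omitted. */
static int example_hpd_bringup(struct drm_i915_private *dev_priv)
{
	/* Work items must exist before any hotplug interrupt can fire. */
	intel_hpd_init_work(dev_priv);

	/* ... install interrupts here, before hotplug is enabled ... */

	/* Only then enable hotplug: from this point on HPD handling and
	 * polling may run concurrently with the rest of the driver. */
	intel_hpd_init(dev_priv);
	return 0;
}

static void example_hpd_teardown(struct drm_i915_private *dev_priv)
{
	/* Flush and cancel all hotplug work before interrupts go away. */
	intel_hpd_cancel_work(dev_priv);
}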
|
File diff suppressed because it is too large
|
@ -32,18 +32,19 @@
|
|||
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
|
||||
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
|
||||
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
|
||||
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
|
||||
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
|
||||
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
|
||||
|
||||
/* Logical Rings */
|
||||
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
|
||||
struct intel_context *ctx);
|
||||
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
|
||||
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
|
||||
void intel_logical_ring_stop(struct intel_engine_cs *ring);
|
||||
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
|
||||
int intel_logical_rings_init(struct drm_device *dev);
|
||||
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
|
||||
|
||||
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
|
||||
struct intel_context *ctx);
|
||||
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
|
||||
/**
|
||||
* intel_logical_ring_advance() - advance the ringbuffer tail
|
||||
* @ringbuf: Ringbuffer to advance.
|
||||
|
@ -70,20 +71,16 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
|
|||
void intel_lr_context_free(struct intel_context *ctx);
|
||||
int intel_lr_context_deferred_create(struct intel_context *ctx,
|
||||
struct intel_engine_cs *ring);
|
||||
void intel_lr_context_unpin(struct intel_engine_cs *ring,
|
||||
struct intel_context *ctx);
|
||||
void intel_lr_context_unpin(struct drm_i915_gem_request *req);
|
||||
void intel_lr_context_reset(struct drm_device *dev,
|
||||
struct intel_context *ctx);
|
||||
|
||||
/* Execlists */
|
||||
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
|
||||
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
|
||||
struct intel_engine_cs *ring,
|
||||
struct intel_context *ctx,
|
||||
struct i915_execbuffer_params;
|
||||
int intel_execlists_submission(struct i915_execbuffer_params *params,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct list_head *vmas,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
u64 exec_start, u32 dispatch_flags);
|
||||
struct list_head *vmas);
|
||||
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
|
||||
|
||||
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
|
||||
|
|
|
@ -239,8 +239,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
|
|||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
|
||||
struct intel_connector *intel_connector =
|
||||
&lvds_encoder->attached_connector->base;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 ctl_reg, stat_reg;
|
||||
|
||||
|
@ -252,8 +250,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
|
|||
stat_reg = PP_STATUS;
|
||||
}
|
||||
|
||||
intel_panel_disable_backlight(intel_connector);
|
||||
|
||||
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
|
||||
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
|
||||
DRM_ERROR("timed out waiting for panel to power off\n");
|
||||
|
@ -262,6 +258,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
|
|||
POSTING_READ(lvds_encoder->reg);
|
||||
}
|
||||
|
||||
static void gmch_disable_lvds(struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
|
||||
struct intel_connector *intel_connector =
|
||||
&lvds_encoder->attached_connector->base;
|
||||
|
||||
intel_panel_disable_backlight(intel_connector);
|
||||
|
||||
intel_disable_lvds(encoder);
|
||||
}
|
||||
|
||||
static void pch_disable_lvds(struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
|
||||
struct intel_connector *intel_connector =
|
||||
&lvds_encoder->attached_connector->base;
|
||||
|
||||
intel_panel_disable_backlight(intel_connector);
|
||||
}
|
||||
|
||||
static void pch_post_disable_lvds(struct intel_encoder *encoder)
|
||||
{
|
||||
intel_disable_lvds(encoder);
|
||||
}
|
||||
|
||||
static enum drm_mode_status
|
||||
intel_lvds_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
|
@ -452,7 +473,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
|
|||
*/
|
||||
if (!HAS_PCH_SPLIT(dev)) {
|
||||
drm_modeset_lock_all(dev);
|
||||
intel_modeset_setup_hw_state(dev, true);
|
||||
intel_display_resume(dev);
|
||||
drm_modeset_unlock_all(dev);
|
||||
}
|
||||
|
||||
|
@ -942,12 +963,6 @@ void intel_lvds_init(struct drm_device *dev)
|
|||
if (dmi_check_system(intel_no_lvds))
|
||||
return;
|
||||
|
||||
pin = GMBUS_PIN_PANEL;
|
||||
if (!lvds_is_present_in_vbt(dev, &pin)) {
|
||||
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
|
||||
return;
|
||||
|
@ -957,6 +972,16 @@ void intel_lvds_init(struct drm_device *dev)
|
|||
}
|
||||
}
|
||||
|
||||
pin = GMBUS_PIN_PANEL;
|
||||
if (!lvds_is_present_in_vbt(dev, &pin)) {
|
||||
u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS;
|
||||
if ((I915_READ(reg) & LVDS_PORT_EN) == 0) {
|
||||
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
|
||||
return;
|
||||
}
|
||||
DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
|
||||
}
|
||||
|
||||
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
|
||||
if (!lvds_encoder)
|
||||
return;
|
||||
|
@ -988,7 +1013,12 @@ void intel_lvds_init(struct drm_device *dev)
|
|||
intel_encoder->enable = intel_enable_lvds;
|
||||
intel_encoder->pre_enable = intel_pre_enable_lvds;
|
||||
intel_encoder->compute_config = intel_lvds_compute_config;
|
||||
intel_encoder->disable = intel_disable_lvds;
|
||||
if (HAS_PCH_SPLIT(dev_priv)) {
|
||||
intel_encoder->disable = pch_disable_lvds;
|
||||
intel_encoder->post_disable = pch_post_disable_lvds;
|
||||
} else {
|
||||
intel_encoder->disable = gmch_disable_lvds;
|
||||
}
|
||||
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
|
||||
intel_encoder->get_config = intel_lvds_get_config;
|
||||
intel_connector->get_hw_state = intel_connector_get_hw_state;
|
||||
|
@ -1068,24 +1098,8 @@ void intel_lvds_init(struct drm_device *dev)
|
|||
drm_mode_debug_printmodeline(scan);
|
||||
|
||||
fixed_mode = drm_mode_duplicate(dev, scan);
|
||||
if (fixed_mode) {
|
||||
downclock_mode =
|
||||
intel_find_panel_downclock(dev,
|
||||
fixed_mode, connector);
|
||||
if (downclock_mode != NULL &&
|
||||
i915.lvds_downclock) {
|
||||
/* We found the downclock for LVDS. */
|
||||
dev_priv->lvds_downclock_avail = true;
|
||||
dev_priv->lvds_downclock =
|
||||
downclock_mode->clock;
|
||||
DRM_DEBUG_KMS("LVDS downclock is found"
|
||||
" in EDID. Normal clock %dKhz, "
|
||||
"downclock %dKhz\n",
|
||||
fixed_mode->clock,
|
||||
dev_priv->lvds_downclock);
|
||||
}
|
||||
if (fixed_mode)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,335 @@
|
|||
/*
|
||||
* Copyright (c) 2015 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
 * Software is furnished to do so, subject to the following conditions:
 *
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "intel_mocs.h"
|
||||
#include "intel_lrc.h"
|
||||
#include "intel_ringbuffer.h"
|
||||
|
||||
/* structures required */
|
||||
struct drm_i915_mocs_entry {
|
||||
u32 control_value;
|
||||
u16 l3cc_value;
|
||||
};
|
||||
|
||||
struct drm_i915_mocs_table {
|
||||
u32 size;
|
||||
const struct drm_i915_mocs_entry *table;
|
||||
};
|
||||
|
||||
/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
|
||||
#define LE_CACHEABILITY(value) ((value) << 0)
|
||||
#define LE_TGT_CACHE(value) ((value) << 2)
|
||||
#define LE_LRUM(value) ((value) << 4)
|
||||
#define LE_AOM(value) ((value) << 6)
|
||||
#define LE_RSC(value) ((value) << 7)
|
||||
#define LE_SCC(value) ((value) << 8)
|
||||
#define LE_PFM(value) ((value) << 11)
|
||||
#define LE_SCF(value) ((value) << 14)
|
||||
|
||||
/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
|
||||
#define L3_ESC(value) ((value) << 0)
|
||||
#define L3_SCC(value) ((value) << 1)
|
||||
#define L3_CACHEABILITY(value) ((value) << 4)
|
||||
|
||||
/* Helper defines */
|
||||
#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */
|
||||
|
||||
/* (e)LLC caching options */
|
||||
#define LE_PAGETABLE 0
|
||||
#define LE_UC 1
|
||||
#define LE_WT 2
|
||||
#define LE_WB 3
|
||||
|
||||
/* L3 caching options */
|
||||
#define L3_DIRECT 0
|
||||
#define L3_UC 1
|
||||
#define L3_RESERVED 2
|
||||
#define L3_WB 3
|
||||
|
||||
/* Target cache */
|
||||
#define ELLC 0
|
||||
#define LLC 1
|
||||
#define LLC_ELLC 2
|
||||
|
||||
/*
|
||||
* MOCS tables
|
||||
*
|
||||
* These are the MOCS tables that are programmed across all the rings.
|
||||
* The control value is programmed to all the rings that support the
|
||||
* MOCS registers. While the l3cc_values are only programmed to the
|
||||
* LNCFCMOCS0 - LNCFCMOCS32 registers.
|
||||
*
|
||||
* These tables are intended to be kept reasonably consistent across
|
||||
* platforms. However some of the fields are not applicable to all of
|
||||
* them.
|
||||
*
|
||||
* Entries not part of the following tables are undefined as far as
|
||||
* userspace is concerned and shouldn't be relied upon. For the time
|
||||
* being they will be implicitly initialized to the strictest caching
|
||||
* configuration (uncached) to guarantee forwards compatibility with
|
||||
* userspace programs written against more recent kernels providing
|
||||
* additional MOCS entries.
|
||||
*
|
||||
* NOTE: These tables MUST start with being uncached and the length
|
||||
* MUST be less than 63 as the last two registers are reserved
|
||||
* by the hardware. These tables are part of the kernel ABI and
|
||||
* may only be updated incrementally by adding entries at the
|
||||
* end.
|
||||
*/
|
||||
static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
|
||||
/* { 0x00000009, 0x0010 } */
|
||||
{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
|
||||
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
|
||||
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
|
||||
/* { 0x00000038, 0x0030 } */
|
||||
{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
|
||||
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
|
||||
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
|
||||
/* { 0x0000003b, 0x0030 } */
|
||||
{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
|
||||
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
|
||||
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
|
||||
};
|
||||
|
||||
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
|
||||
static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
|
||||
/* { 0x00000009, 0x0010 } */
|
||||
{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
|
||||
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
|
||||
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
|
||||
/* { 0x00000038, 0x0030 } */
|
||||
{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
|
||||
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
|
||||
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
|
||||
/* { 0x0000003b, 0x0030 } */
|
||||
{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
|
||||
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
|
||||
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
|
||||
};
|
||||
|
||||
/**
|
||||
* get_mocs_settings()
|
||||
* @dev: DRM device.
|
||||
* @table: Output table that will be made to point at appropriate
|
||||
* MOCS values for the device.
|
||||
*
|
||||
* This function will return the values of the MOCS table that needs to
|
||||
* be programmed for the platform. It will return the values that need
|
||||
* to be programmed and if they need to be programmed.
|
||||
*
|
||||
* Return: true if there are applicable MOCS settings for the device.
|
||||
*/
|
||||
static bool get_mocs_settings(struct drm_device *dev,
|
||||
struct drm_i915_mocs_table *table)
|
||||
{
|
||||
bool result = false;
|
||||
|
||||
if (IS_SKYLAKE(dev)) {
|
||||
table->size = ARRAY_SIZE(skylake_mocs_table);
|
||||
table->table = skylake_mocs_table;
|
||||
result = true;
|
||||
} else if (IS_BROXTON(dev)) {
|
||||
table->size = ARRAY_SIZE(broxton_mocs_table);
|
||||
table->table = broxton_mocs_table;
|
||||
result = true;
|
||||
} else {
|
||||
WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
|
||||
"Platform that should have a MOCS table does not.\n");
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* emit_mocs_control_table() - emit the mocs control table
|
||||
* @req: Request to set up the MOCS table for.
|
||||
* @table: The values to program into the control regs.
|
||||
* @reg_base: The base for the engine that needs to be programmed.
|
||||
*
|
||||
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
|
||||
* given table starting at the given address.
|
||||
*
|
||||
* Return: 0 on success, otherwise the error status.
|
||||
*/
|
||||
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
|
||||
const struct drm_i915_mocs_table *table,
|
||||
u32 reg_base)
|
||||
{
|
||||
struct intel_ringbuffer *ringbuf = req->ringbuf;
|
||||
unsigned int index;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
|
||||
return -ENODEV;
|
||||
|
||||
ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
|
||||
if (ret) {
|
||||
DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
|
||||
|
||||
for (index = 0; index < table->size; index++) {
|
||||
intel_logical_ring_emit(ringbuf, reg_base + index * 4);
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
table->table[index].control_value);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ok, now set the unused entries to uncached. These entries
|
||||
* are officially undefined and no contract for the contents
|
||||
* and settings is given for these entries.
|
||||
*
|
||||
* Entry 0 in the table is uncached - so we are just writing
|
||||
 * that value to all the unused entries.
|
||||
*/
|
||||
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
|
||||
intel_logical_ring_emit(ringbuf, reg_base + index * 4);
|
||||
intel_logical_ring_emit(ringbuf, table->table[0].control_value);
|
||||
}
|
||||
|
||||
intel_logical_ring_emit(ringbuf, MI_NOOP);
|
||||
intel_logical_ring_advance(ringbuf);
|
||||
|
||||
return 0;
|
||||
}
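As a side note on the space reservation above: the 2 + 2 * GEN9_NUM_MOCS_ENTRIES dwords break down as one MI_LOAD_REGISTER_IMM header, an (offset, value) pair per entry, and the trailing MI_NOOP. A small illustrative macro follows; the EXAMPLE_ name is invented and not part of this patch.

/* Dword budget for one control-table emission, mirroring the
 * intel_logical_ring_begin() reservation above; illustration only. */
#define EXAMPLE_MOCS_LRI_DWORDS(n) \
	(1 /* MI_LOAD_REGISTER_IMM header */ + \
	 2 * (n) /* one offset/value pair per entry */ + \
	 1 /* trailing MI_NOOP */)
/* EXAMPLE_MOCS_LRI_DWORDS(GEN9_NUM_MOCS_ENTRIES) == 2 + 2 * 62 == 126 */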
|
||||
|
||||
/**
|
||||
* emit_mocs_l3cc_table() - emit the mocs control table
|
||||
* @req: Request to set up the MOCS table for.
|
||||
* @table: The values to program into the control regs.
|
||||
*
|
||||
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
|
||||
* given table starting at the given address. This register set is
|
||||
* programmed in pairs.
|
||||
*
|
||||
* Return: 0 on success, otherwise the error status.
|
||||
*/
|
||||
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
|
||||
const struct drm_i915_mocs_table *table)
|
||||
{
|
||||
struct intel_ringbuffer *ringbuf = req->ringbuf;
|
||||
unsigned int count;
|
||||
unsigned int i;
|
||||
u32 value;
|
||||
u32 filler = (table->table[0].l3cc_value & 0xffff) |
|
||||
((table->table[0].l3cc_value & 0xffff) << 16);
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
|
||||
return -ENODEV;
|
||||
|
||||
ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
|
||||
if (ret) {
|
||||
DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
|
||||
|
||||
for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
|
||||
value = (table->table[count].l3cc_value & 0xffff) |
|
||||
((table->table[count + 1].l3cc_value & 0xffff) << 16);
|
||||
|
||||
intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
|
||||
intel_logical_ring_emit(ringbuf, value);
|
||||
}
|
||||
|
||||
if (table->size & 0x01) {
|
||||
/* Odd table size - 1 left over */
|
||||
value = (table->table[count].l3cc_value & 0xffff) |
|
||||
((table->table[0].l3cc_value & 0xffff) << 16);
|
||||
} else
|
||||
value = filler;
|
||||
|
||||
/*
|
||||
* Now set the rest of the table to uncached - use entry 0 as
|
||||
* this will be uncached. Leave the last pair uninitialised as
|
||||
* they are reserved by the hardware.
|
||||
*/
|
||||
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
|
||||
intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
|
||||
intel_logical_ring_emit(ringbuf, value);
|
||||
|
||||
value = filler;
|
||||
}
|
||||
|
||||
intel_logical_ring_emit(ringbuf, MI_NOOP);
|
||||
intel_logical_ring_advance(ringbuf);
|
||||
|
||||
return 0;
|
||||
}
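To make the pairing explicit: each LNCFCMOCS register holds two 16-bit l3cc entries, with the even-indexed entry in the low half. A stand-alone sketch of the packing done in the loop above; the example_ helper is invented for illustration.

/* Pack two consecutive 16-bit l3cc values into one 32-bit LNCFCMOCS word,
 * entry 2*i in the low half and entry 2*i + 1 in the high half -- the same
 * arithmetic as the loop above, shown in isolation. */
static u32 example_pack_l3cc_pair(u16 low_entry, u16 high_entry)
{
	return (u32)(low_entry & 0xffff) | ((u32)(high_entry & 0xffff) << 16);
}
/* e.g. pairing 0x0010 and 0x0030 yields 0x00300010 */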
|
||||
|
||||
/**
|
||||
* intel_rcs_context_init_mocs() - program the MOCS register.
|
||||
* @req: Request to set up the MOCS tables for.
|
||||
*
|
||||
* This function will emit a batch buffer with the values required for
|
||||
* programming the MOCS register values for all the currently supported
|
||||
* rings.
|
||||
*
|
||||
* These registers are partially stored in the RCS context, so they are
|
||||
* emitted at the same time so that when a context is created these registers
|
||||
* are set up. These registers have to be emitted into the start of the
|
||||
* context as setting the ELSP will re-init some of these registers back
|
||||
* to the hw values.
|
||||
*
|
||||
* Return: 0 on success, otherwise the error status.
|
||||
*/
|
||||
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct drm_i915_mocs_table t;
|
||||
int ret;
|
||||
|
||||
if (get_mocs_settings(req->ring->dev, &t)) {
|
||||
/* Program the control registers */
|
||||
ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Now program the l3cc registers */
|
||||
ret = emit_mocs_l3cc_table(req, &t);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
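As a usage sketch only: the intended caller lives in the execlists render-context initialisation path, which is not part of this hunk. The example_ function below is an assumption made for illustration, not the actual i915 call site.

static int example_render_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	/* ... per-context workaround batch is emitted first ... */

	ret = intel_rcs_context_init_mocs(req);
	if (ret)
		return ret;

	/* ... golden render state / null state follows ... */
	return 0;
}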
|
|
@@ -0,0 +1,57 @@
/*
 * Copyright (c) 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef INTEL_MOCS_H
#define INTEL_MOCS_H

/**
 * DOC: Memory Objects Control State (MOCS)
 *
 * Motivation:
 * In previous Gens the MOCS setting was a value set by userland as part of
 * the batch. In Gen9 this has changed to a single table (per ring) that all
 * batches now reference by index instead of programming the MOCS directly.
 *
 * The one wrinkle in this is that only PART of the MOCS tables are included
 * in context (the GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 - LNCFCMOCS32
 * registers). The rest are not (the settings for the other rings).
 *
 * This table needs to be set at system start-up because of the way the table
 * interacts with the contexts and the GmmLib interface.
 *
 * Implementation:
 *
 * The tables (one per supported platform) are defined in intel_mocs.c
 * and are programmed in the first batch after the context is loaded
 * (with the hardware workarounds). This will then let the usual
 * context handling keep the MOCS in step.
 */

#include <drm/drmP.h>
#include "i915_drv.h"

int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);

#endif
|
|
@ -25,8 +25,6 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <acpi/video.h>
|
||||
|
||||
|
@ -53,6 +51,7 @@
|
|||
#define MBOX_ACPI (1<<0)
|
||||
#define MBOX_SWSCI (1<<1)
|
||||
#define MBOX_ASLE (1<<2)
|
||||
#define MBOX_ASLE_EXT (1<<4)
|
||||
|
||||
struct opregion_header {
|
||||
u8 signature[16];
|
||||
|
@ -62,7 +61,10 @@ struct opregion_header {
|
|||
u8 vbios_ver[16];
|
||||
u8 driver_ver[16];
|
||||
u32 mboxes;
|
||||
u8 reserved[164];
|
||||
u32 driver_model;
|
||||
u32 pcon;
|
||||
u8 dver[32];
|
||||
u8 rsvd[124];
|
||||
} __packed;
|
||||
|
||||
/* OpRegion mailbox #1: public ACPI methods */
|
||||
|
@ -84,7 +86,9 @@ struct opregion_acpi {
|
|||
u32 evts; /* ASL supported events */
|
||||
u32 cnot; /* current OS notification */
|
||||
u32 nrdy; /* driver status */
|
||||
u8 rsvd2[60];
|
||||
u32 did2[7]; /* extended supported display devices ID list */
|
||||
u32 cpd2[7]; /* extended attached display devices list */
|
||||
u8 rsvd2[4];
|
||||
} __packed;
|
||||
|
||||
/* OpRegion mailbox #2: SWSCI */
|
||||
|
@ -113,7 +117,10 @@ struct opregion_asle {
|
|||
u32 pcft; /* power conservation features */
|
||||
u32 srot; /* supported rotation angles */
|
||||
u32 iuer; /* IUER events */
|
||||
u8 rsvd[86];
|
||||
u64 fdss;
|
||||
u32 fdsp;
|
||||
u32 stat;
|
||||
u8 rsvd[70];
|
||||
} __packed;
|
||||
|
||||
/* Driver readiness indicator */
|
||||
|
@ -611,6 +618,38 @@ static struct notifier_block intel_opregion_notifier = {
|
|||
* (version 3)
|
||||
*/
|
||||
|
||||
static u32 get_did(struct intel_opregion *opregion, int i)
|
||||
{
|
||||
u32 did;
|
||||
|
||||
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
|
||||
did = ioread32(&opregion->acpi->didl[i]);
|
||||
} else {
|
||||
i -= ARRAY_SIZE(opregion->acpi->didl);
|
||||
|
||||
if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
|
||||
return 0;
|
||||
|
||||
did = ioread32(&opregion->acpi->did2[i]);
|
||||
}
|
||||
|
||||
return did;
|
||||
}
|
||||
|
||||
static void set_did(struct intel_opregion *opregion, int i, u32 val)
|
||||
{
|
||||
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
|
||||
iowrite32(val, &opregion->acpi->didl[i]);
|
||||
} else {
|
||||
i -= ARRAY_SIZE(opregion->acpi->didl);
|
||||
|
||||
if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
|
||||
return;
|
||||
|
||||
iowrite32(val, &opregion->acpi->did2[i]);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_didl_outputs(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
@ -620,7 +659,7 @@ static void intel_didl_outputs(struct drm_device *dev)
|
|||
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
|
||||
unsigned long long device_id;
|
||||
acpi_status status;
|
||||
u32 temp;
|
||||
u32 temp, max_outputs;
|
||||
int i = 0;
|
||||
|
||||
handle = ACPI_HANDLE(&dev->pdev->dev);
|
||||
|
@ -639,41 +678,50 @@ static void intel_didl_outputs(struct drm_device *dev)
|
|||
}
|
||||
|
||||
if (!acpi_video_bus) {
|
||||
pr_warn("No ACPI video bus found\n");
|
||||
DRM_ERROR("No ACPI video bus found\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* In theory, did2, the extended didl, gets added at opregion version
|
||||
* 3.0. In practice, however, we're supposed to set it for earlier
|
||||
* versions as well, since a BIOS that doesn't understand did2 should
|
||||
* not look at it anyway. Use a variable so we can tweak this if a need
|
||||
* arises later.
|
||||
*/
|
||||
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
|
||||
ARRAY_SIZE(opregion->acpi->did2);
|
||||
|
||||
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
|
||||
if (i >= 8) {
|
||||
dev_dbg(&dev->pdev->dev,
|
||||
"More than 8 outputs detected via ACPI\n");
|
||||
if (i >= max_outputs) {
|
||||
DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
|
||||
max_outputs);
|
||||
return;
|
||||
}
|
||||
status =
|
||||
acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
|
||||
NULL, &device_id);
|
||||
status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
|
||||
NULL, &device_id);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
if (!device_id)
|
||||
goto blind_set;
|
||||
iowrite32((u32)(device_id & 0x0f0f),
|
||||
&opregion->acpi->didl[i]);
|
||||
i++;
|
||||
set_did(opregion, i++, (u32)(device_id & 0x0f0f));
|
||||
}
|
||||
}
|
||||
|
||||
end:
|
||||
/* If fewer than 8 outputs, the list must be null terminated */
|
||||
if (i < 8)
|
||||
iowrite32(0, &opregion->acpi->didl[i]);
|
||||
DRM_DEBUG_KMS("%d outputs detected\n", i);
|
||||
|
||||
/* If fewer than max outputs, the list must be null terminated */
|
||||
if (i < max_outputs)
|
||||
set_did(opregion, i, 0);
|
||||
return;
|
||||
|
||||
blind_set:
|
||||
i = 0;
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
int output_type = ACPI_OTHER_OUTPUT;
|
||||
if (i >= 8) {
|
||||
dev_dbg(&dev->pdev->dev,
|
||||
"More than 8 outputs in connector list\n");
|
||||
if (i >= max_outputs) {
|
||||
DRM_DEBUG_KMS("More than %u outputs in connector list\n",
|
||||
max_outputs);
|
||||
return;
|
||||
}
|
||||
switch (connector->connector_type) {
|
||||
|
@ -698,9 +746,8 @@ static void intel_didl_outputs(struct drm_device *dev)
|
|||
output_type = ACPI_LVDS_OUTPUT;
|
||||
break;
|
||||
}
|
||||
temp = ioread32(&opregion->acpi->didl[i]);
|
||||
iowrite32(temp | (1<<31) | output_type | i,
|
||||
&opregion->acpi->didl[i]);
|
||||
temp = get_did(opregion, i);
|
||||
set_did(opregion, i, temp | (1 << 31) | output_type | i);
|
||||
i++;
|
||||
}
|
||||
goto end;
|
||||
|
@ -720,7 +767,7 @@ static void intel_setup_cadls(struct drm_device *dev)
|
|||
* display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
|
||||
* there are less than eight devices. */
|
||||
do {
|
||||
disp_id = ioread32(&opregion->acpi->didl[i]);
|
||||
disp_id = get_did(opregion, i);
|
||||
iowrite32(disp_id, &opregion->acpi->cadl[i]);
|
||||
} while (++i < 8 && disp_id != 0);
|
||||
}
|
||||
|
@ -852,6 +899,11 @@ int intel_opregion_setup(struct drm_device *dev)
|
|||
char buf[sizeof(OPREGION_SIGNATURE)];
|
||||
int err = 0;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
|
||||
BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
|
||||
BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
|
||||
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
|
||||
|
||||
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
|
||||
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
|
||||
if (asls == 0) {
|
||||
|
|
|
@ -210,19 +210,14 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
|
|||
}
|
||||
|
||||
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
|
||||
struct drm_i915_gem_request *req,
|
||||
void (*tail)(struct intel_overlay *))
|
||||
{
|
||||
struct drm_device *dev = overlay->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
|
||||
int ret;
|
||||
|
||||
WARN_ON(overlay->last_flip_req);
|
||||
i915_gem_request_assign(&overlay->last_flip_req,
|
||||
ring->outstanding_lazy_request);
|
||||
ret = i915_add_request(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
i915_gem_request_assign(&overlay->last_flip_req, req);
|
||||
i915_add_request(req);
|
||||
|
||||
overlay->flip_tail = tail;
|
||||
ret = i915_wait_request(overlay->last_flip_req);
|
||||
|
@ -239,15 +234,22 @@ static int intel_overlay_on(struct intel_overlay *overlay)
|
|||
struct drm_device *dev = overlay->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
|
||||
struct drm_i915_gem_request *req;
|
||||
int ret;
|
||||
|
||||
WARN_ON(overlay->active);
|
||||
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret) {
|
||||
i915_gem_request_cancel(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
overlay->active = true;
|
||||
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
|
||||
|
@ -256,7 +258,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
|
|||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return intel_overlay_do_wait_request(overlay, NULL);
|
||||
return intel_overlay_do_wait_request(overlay, req, NULL);
|
||||
}
|
||||
|
||||
/* overlay needs to be enabled in OCMD reg */
|
||||
|
@ -266,6 +268,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
|
|||
struct drm_device *dev = overlay->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
|
||||
struct drm_i915_gem_request *req;
|
||||
u32 flip_addr = overlay->flip_addr;
|
||||
u32 tmp;
|
||||
int ret;
|
||||
|
@ -280,18 +283,25 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
|
|||
if (tmp & (1 << 17))
|
||||
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = intel_ring_begin(req, 2);
|
||||
if (ret) {
|
||||
i915_gem_request_cancel(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
|
||||
intel_ring_emit(ring, flip_addr);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
WARN_ON(overlay->last_flip_req);
|
||||
i915_gem_request_assign(&overlay->last_flip_req,
|
||||
ring->outstanding_lazy_request);
|
||||
return i915_add_request(ring);
|
||||
i915_gem_request_assign(&overlay->last_flip_req, req);
|
||||
i915_add_request(req);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
|
||||
|
@ -327,6 +337,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
|
|||
struct drm_device *dev = overlay->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
|
||||
struct drm_i915_gem_request *req;
|
||||
u32 flip_addr = overlay->flip_addr;
|
||||
int ret;
|
||||
|
||||
|
@ -338,10 +349,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
|
|||
* of the hw. Do it in both cases */
|
||||
flip_addr |= OFC_UPDATE;
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = intel_ring_begin(req, 6);
|
||||
if (ret) {
|
||||
i915_gem_request_cancel(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* wait for overlay to go idle */
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
|
||||
intel_ring_emit(ring, flip_addr);
|
||||
|
@ -360,7 +377,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
|
|||
}
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
|
||||
return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
|
||||
}
|
||||
|
||||
/* recover from an interruption due to a signal
|
||||
|
@ -404,15 +421,23 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
|
|||
|
||||
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
|
||||
/* synchronous slowpath */
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
struct drm_i915_gem_request *req;
|
||||
|
||||
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = intel_ring_begin(req, 2);
|
||||
if (ret) {
|
||||
i915_gem_request_cancel(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
ret = intel_overlay_do_wait_request(overlay,
|
||||
ret = intel_overlay_do_wait_request(overlay, req,
|
||||
intel_overlay_release_old_vid_tail);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -724,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
|
|||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
|
||||
ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
|
||||
&i915_ggtt_view_normal);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
|
File diff suppressed because it is too large
|
@ -254,10 +254,13 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
|
|||
uint32_t max_sleep_time = 0x1f;
|
||||
/* Lately it was identified that depending on panel idle frame count
|
||||
* calculated at HW can be off by 1. So let's use what came
|
||||
* from VBT + 1 and at minimum 2 to be on the safe side.
|
||||
* from VBT + 1.
|
||||
* There are also other cases where panel demands at least 4
|
||||
* but VBT is not being set. To cover these 2 cases lets use
|
||||
* at least 5 when VBT isn't set to be on the safest side.
|
||||
*/
|
||||
uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
|
||||
dev_priv->vbt.psr.idle_frames + 1 : 2;
|
||||
dev_priv->vbt.psr.idle_frames + 1 : 5;
|
||||
uint32_t val = 0x0;
|
||||
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
|
||||
|
||||
|
@ -400,7 +403,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
|
|||
|
||||
/* Avoid continuous PSR exit by masking memup and hpd */
|
||||
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
|
||||
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
|
||||
EDP_PSR_DEBUG_MASK_HPD);
|
||||
|
||||
/* Enable PSR on the panel */
|
||||
hsw_psr_enable_sink(intel_dp);
|
||||
|
@ -596,13 +599,15 @@ static void intel_psr_exit(struct drm_device *dev)
|
|||
/**
|
||||
* intel_psr_single_frame_update - Single Frame Update
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* Some platforms support a single frame update feature that is used to
|
||||
* send and update only one frame on Remote Frame Buffer.
|
||||
* So far it is only implemented for Valleyview and Cherryview because
|
||||
* hardware requires this to be done before a page flip.
|
||||
*/
|
||||
void intel_psr_single_frame_update(struct drm_device *dev)
|
||||
void intel_psr_single_frame_update(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc;
|
||||
|
@ -624,14 +629,16 @@ void intel_psr_single_frame_update(struct drm_device *dev)
|
|||
|
||||
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
|
||||
pipe = to_intel_crtc(crtc)->pipe;
|
||||
val = I915_READ(VLV_PSRCTL(pipe));
|
||||
|
||||
/*
|
||||
* We need to set this bit before writing registers for a flip.
|
||||
* This bit will be self-clear when it gets to the PSR active state.
|
||||
*/
|
||||
I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
|
||||
if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
|
||||
val = I915_READ(VLV_PSRCTL(pipe));
|
||||
|
||||
/*
|
||||
* We need to set this bit before writing registers for a flip.
|
||||
* This bit will be self-clear when it gets to the PSR active state.
|
||||
*/
|
||||
I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
|
||||
}
|
||||
mutex_unlock(&dev_priv->psr.lock);
|
||||
}
|
||||
|
||||
|
@ -648,7 +655,7 @@ void intel_psr_single_frame_update(struct drm_device *dev)
|
|||
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
|
||||
*/
|
||||
void intel_psr_invalidate(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc;
|
||||
|
@ -663,11 +670,12 @@ void intel_psr_invalidate(struct drm_device *dev,
|
|||
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
|
||||
pipe = to_intel_crtc(crtc)->pipe;
|
||||
|
||||
intel_psr_exit(dev);
|
||||
|
||||
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
|
||||
|
||||
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
|
||||
|
||||
if (frontbuffer_bits)
|
||||
intel_psr_exit(dev);
|
||||
|
||||
mutex_unlock(&dev_priv->psr.lock);
|
||||
}
|
||||
|
||||
|
@ -675,6 +683,7 @@ void intel_psr_invalidate(struct drm_device *dev,
|
|||
* intel_psr_flush - Flush PSR
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
* @origin: which operation caused the flush
|
||||
*
|
||||
* Since the hardware frontbuffer tracking has gaps we need to integrate
|
||||
* with the software frontbuffer tracking. This function gets called every
|
||||
|
@ -684,7 +693,7 @@ void intel_psr_invalidate(struct drm_device *dev,
|
|||
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
|
||||
*/
|
||||
void intel_psr_flush(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
unsigned frontbuffer_bits, enum fb_op_origin origin)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc;
|
||||
|
@ -698,26 +707,29 @@ void intel_psr_flush(struct drm_device *dev,
|
|||
|
||||
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
|
||||
pipe = to_intel_crtc(crtc)->pipe;
|
||||
|
||||
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
|
||||
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
|
||||
|
||||
/*
|
||||
* On Haswell sprite plane updates don't result in a psr invalidating
|
||||
* signal in the hardware. Which means we need to manually fake this in
|
||||
* software for all flushes, not just when we've seen a preceding
|
||||
* invalidation through frontbuffer rendering.
|
||||
*/
|
||||
if (IS_HASWELL(dev) &&
|
||||
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
|
||||
intel_psr_exit(dev);
|
||||
|
||||
/*
|
||||
* On Valleyview and Cherryview we don't use hardware tracking so
|
||||
* any plane updates or cursor moves don't result in a PSR
|
||||
* invalidating. Which means we need to manually fake this in
|
||||
* software for all flushes, not just when we've seen a preceding
|
||||
* invalidation through frontbuffer rendering. */
|
||||
if (!HAS_DDI(dev))
|
||||
intel_psr_exit(dev);
|
||||
if (HAS_DDI(dev)) {
|
||||
/*
|
||||
* By definition every flush should mean invalidate + flush,
|
||||
* however on core platforms let's minimize the
|
||||
* disable/re-enable so we can avoid the invalidate when flip
|
||||
* originated the flush.
|
||||
*/
|
||||
if (frontbuffer_bits && origin != ORIGIN_FLIP)
|
||||
intel_psr_exit(dev);
|
||||
} else {
|
||||
/*
|
||||
* On Valleyview and Cherryview we don't use hardware tracking
|
||||
* so any plane updates or cursor moves don't result in a PSR
|
||||
* invalidating. Which means we need to manually fake this in
|
||||
* software for all flushes.
|
||||
*/
|
||||
if (frontbuffer_bits)
|
||||
intel_psr_exit(dev);
|
||||
}
|
||||
|
||||
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
|
||||
schedule_delayed_work(&dev_priv->psr.work,
|
||||
|
|
|
@ -81,7 +81,7 @@ bool intel_ring_stopped(struct intel_engine_cs *ring)
|
|||
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
|
||||
}
|
||||
|
||||
void __intel_ring_advance(struct intel_engine_cs *ring)
|
||||
static void __intel_ring_advance(struct intel_engine_cs *ring)
|
||||
{
|
||||
struct intel_ringbuffer *ringbuf = ring->buffer;
|
||||
ringbuf->tail &= ringbuf->size - 1;
|
||||
|
@ -91,10 +91,11 @@ void __intel_ring_advance(struct intel_engine_cs *ring)
|
|||
}
|
||||
|
||||
static int
|
||||
gen2_render_ring_flush(struct intel_engine_cs *ring,
|
||||
gen2_render_ring_flush(struct drm_i915_gem_request *req,
|
||||
u32 invalidate_domains,
|
||||
u32 flush_domains)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
u32 cmd;
|
||||
int ret;
|
||||
|
||||
|
@ -105,7 +106,7 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
|
|||
if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
|
||||
cmd |= MI_READ_FLUSH;
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
ret = intel_ring_begin(req, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -117,10 +118,11 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
static int
|
||||
gen4_render_ring_flush(struct intel_engine_cs *ring,
|
||||
gen4_render_ring_flush(struct drm_i915_gem_request *req,
|
||||
u32 invalidate_domains,
|
||||
u32 flush_domains)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
struct drm_device *dev = ring->dev;
|
||||
u32 cmd;
|
||||
int ret;
|
||||
|
@ -163,7 +165,7 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
|
|||
(IS_G4X(dev) || IS_GEN5(dev)))
|
||||
cmd |= MI_INVALIDATE_ISP;
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
ret = intel_ring_begin(req, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -212,13 +214,13 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
|
|||
* really our business. That leaves only stall at scoreboard.
|
||||
*/
|
||||
static int
|
||||
intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
|
||||
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
|
||||
int ret;
|
||||
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
ret = intel_ring_begin(req, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -231,7 +233,7 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
|
|||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
ret = intel_ring_begin(req, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -247,15 +249,16 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
|
|||
}
|
||||
|
||||
static int
|
||||
gen6_render_ring_flush(struct intel_engine_cs *ring,
|
||||
u32 invalidate_domains, u32 flush_domains)
|
||||
gen6_render_ring_flush(struct drm_i915_gem_request *req,
|
||||
u32 invalidate_domains, u32 flush_domains)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
u32 flags = 0;
|
||||
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
|
||||
int ret;
|
||||
|
||||
/* Force SNB workarounds for PIPE_CONTROL flushes */
|
||||
ret = intel_emit_post_sync_nonzero_flush(ring);
|
||||
ret = intel_emit_post_sync_nonzero_flush(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -285,7 +288,7 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
|
|||
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
|
||||
}
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -299,11 +302,12 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
static int
|
||||
gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
|
||||
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -318,9 +322,10 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
|
|||
}
|
||||
|
||||
static int
|
||||
gen7_render_ring_flush(struct intel_engine_cs *ring,
|
||||
gen7_render_ring_flush(struct drm_i915_gem_request *req,
|
||||
u32 invalidate_domains, u32 flush_domains)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
u32 flags = 0;
|
||||
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
|
||||
int ret;
|
||||
|
@ -362,10 +367,10 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
|
|||
/* Workaround: we must issue a pipe_control with CS-stall bit
|
||||
* set before a pipe_control command that has the state cache
|
||||
* invalidate bit set. */
|
||||
gen7_render_ring_cs_stall_wa(ring);
|
||||
gen7_render_ring_cs_stall_wa(req);
|
||||
}
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -379,12 +384,13 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
static int
|
||||
gen8_emit_pipe_control(struct intel_engine_cs *ring,
|
||||
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
|
||||
u32 flags, u32 scratch_addr)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
ret = intel_ring_begin(req, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -400,11 +406,11 @@ gen8_emit_pipe_control(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
static int
|
||||
gen8_render_ring_flush(struct intel_engine_cs *ring,
|
||||
gen8_render_ring_flush(struct drm_i915_gem_request *req,
|
||||
u32 invalidate_domains, u32 flush_domains)
|
||||
{
|
||||
u32 flags = 0;
|
||||
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
|
||||
u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
|
||||
int ret;
|
||||
|
||||
flags |= PIPE_CONTROL_CS_STALL;
|
||||
|
@ -424,7 +430,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
|
|||
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
|
||||
|
||||
/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
|
||||
ret = gen8_emit_pipe_control(ring,
|
||||
ret = gen8_emit_pipe_control(req,
|
||||
PIPE_CONTROL_CS_STALL |
|
||||
PIPE_CONTROL_STALL_AT_SCOREBOARD,
|
||||
0);
|
||||
|
@ -432,7 +438,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
|
|||
return ret;
|
||||
}
|
||||
|
||||
return gen8_emit_pipe_control(ring, flags, scratch_addr);
|
||||
return gen8_emit_pipe_control(req, flags, scratch_addr);
|
||||
}
|
||||
|
||||
static void ring_write_tail(struct intel_engine_cs *ring,
|
||||
|
@ -703,10 +709,10 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
|
||||
struct intel_context *ctx)
|
||||
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
|
||||
{
|
||||
int ret, i;
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_workarounds *w = &dev_priv->workarounds;
|
||||
|
@ -715,11 +721,11 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
|
|||
return 0;
|
||||
|
||||
ring->gpu_caches_dirty = true;
|
||||
ret = intel_ring_flush_all_caches(ring);
|
||||
ret = intel_ring_flush_all_caches(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = intel_ring_begin(ring, (w->count * 2 + 2));
|
||||
ret = intel_ring_begin(req, (w->count * 2 + 2));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -733,7 +739,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
|
|||
intel_ring_advance(ring);
|
||||
|
||||
ring->gpu_caches_dirty = true;
|
||||
ret = intel_ring_flush_all_caches(ring);
|
||||
ret = intel_ring_flush_all_caches(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -742,16 +748,15 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
|
||||
struct intel_context *ctx)
|
||||
static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_workarounds_emit(ring, ctx);
|
||||
ret = intel_ring_workarounds_emit(req);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
ret = i915_gem_render_state_init(ring);
|
||||
ret = i915_gem_render_state_init(req);
|
||||
if (ret)
|
||||
DRM_ERROR("init render state: %d\n", ret);
|
||||
|
||||
|
@ -800,6 +805,11 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
|
|||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
|
||||
|
||||
/* WaDisableAsyncFlipPerfMode:bdw */
|
||||
WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
|
||||
|
||||
/* WaDisablePartialInstShootdown:bdw */
|
||||
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
|
||||
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
|
||||
|
@ -861,6 +871,11 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
|
|||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
|
||||
|
||||
/* WaDisableAsyncFlipPerfMode:chv */
|
||||
WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
|
||||
|
||||
/* WaDisablePartialInstShootdown:chv */
|
||||
/* WaDisableThreadStallDopClockGating:chv */
|
||||
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
|
||||
|
@ -931,8 +946,11 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
|
|||
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
|
||||
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
|
||||
GEN9_RHWO_OPTIMIZATION_DISABLE);
|
||||
WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
|
||||
DISABLE_PIXEL_MASK_CAMMING);
|
||||
/*
|
||||
* WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
|
||||
* but we do that in per ctx batchbuffer as there is an issue
|
||||
* with this register not getting restored on ctx restore
|
||||
*/
|
||||
}
|
||||
|
||||
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
|
||||
|
@ -1041,6 +1059,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
|
|||
HDC_FORCE_NON_COHERENT);
|
||||
}
|
||||
|
||||
if (INTEL_REVID(dev) == SKL_REVID_C0 ||
|
||||
INTEL_REVID(dev) == SKL_REVID_D0)
|
||||
/* WaBarrierPerformanceFixDisable:skl */
|
||||
WA_SET_BIT_MASKED(HDC_CHICKEN0,
|
||||
HDC_FENCE_DEST_SLM_DISABLE |
|
||||
HDC_BARRIER_PERFORMANCE_DISABLE);
|
||||
|
||||
return skl_tune_iz_hashing(ring);
|
||||
}
|
||||
|
||||
|
@ -1105,9 +1130,9 @@ static int init_render_ring(struct intel_engine_cs *ring)
|
|||
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
|
||||
* programmed to '1' on all products.
|
||||
*
|
||||
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
|
||||
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
|
||||
*/
|
||||
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
|
||||
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
|
||||
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
|
||||
|
||||
/* Required for the hardware to program scanline values for waiting */
|
||||
|
@ -1132,7 +1157,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
|
|||
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 6)
|
||||
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
|
||||
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
|
||||
|
||||
if (HAS_L3_DPF(dev))
|
||||
|
@ -1155,10 +1180,11 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
|
|||
intel_fini_pipe_control(ring);
|
||||
}
|
||||
|
||||
static int gen8_rcs_signal(struct intel_engine_cs *signaller,
|
||||
static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
|
||||
unsigned int num_dwords)
|
||||
{
|
||||
#define MBOX_UPDATE_DWORDS 8
|
||||
struct intel_engine_cs *signaller = signaller_req->ring;
|
||||
struct drm_device *dev = signaller->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *waiter;
|
||||
|
@ -1168,7 +1194,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
|
|||
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
|
||||
#undef MBOX_UPDATE_DWORDS
|
||||
|
||||
ret = intel_ring_begin(signaller, num_dwords);
|
||||
ret = intel_ring_begin(signaller_req, num_dwords);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1178,8 +1204,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
|
|||
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
|
||||
continue;
|
||||
|
||||
seqno = i915_gem_request_get_seqno(
|
||||
signaller->outstanding_lazy_request);
|
||||
seqno = i915_gem_request_get_seqno(signaller_req);
|
||||
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
|
||||
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
|
||||
PIPE_CONTROL_QW_WRITE |
|
||||
|
@ -1196,10 +1221,11 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int gen8_xcs_signal(struct intel_engine_cs *signaller,
|
||||
static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
|
||||
unsigned int num_dwords)
|
||||
{
|
||||
#define MBOX_UPDATE_DWORDS 6
|
||||
struct intel_engine_cs *signaller = signaller_req->ring;
|
||||
struct drm_device *dev = signaller->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *waiter;
|
||||
|
@ -1209,7 +1235,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
|
|||
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
|
||||
#undef MBOX_UPDATE_DWORDS
|
||||
|
||||
ret = intel_ring_begin(signaller, num_dwords);
|
||||
ret = intel_ring_begin(signaller_req, num_dwords);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1219,8 +1245,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
|
|||
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
|
||||
continue;
|
||||
|
||||
seqno = i915_gem_request_get_seqno(
|
||||
signaller->outstanding_lazy_request);
|
||||
seqno = i915_gem_request_get_seqno(signaller_req);
|
||||
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
|
||||
MI_FLUSH_DW_OP_STOREDW);
|
||||
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
|
||||
|
@ -1235,9 +1260,10 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int gen6_signal(struct intel_engine_cs *signaller,
|
||||
static int gen6_signal(struct drm_i915_gem_request *signaller_req,
|
||||
unsigned int num_dwords)
|
||||
{
|
||||
struct intel_engine_cs *signaller = signaller_req->ring;
|
||||
struct drm_device *dev = signaller->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *useless;
|
||||
|
@ -1248,15 +1274,14 @@ static int gen6_signal(struct intel_engine_cs *signaller,
|
|||
num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
|
||||
#undef MBOX_UPDATE_DWORDS
|
||||
|
||||
ret = intel_ring_begin(signaller, num_dwords);
|
||||
ret = intel_ring_begin(signaller_req, num_dwords);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for_each_ring(useless, dev_priv, i) {
|
||||
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
|
||||
if (mbox_reg != GEN6_NOSYNC) {
|
||||
u32 seqno = i915_gem_request_get_seqno(
|
||||
signaller->outstanding_lazy_request);
|
||||
u32 seqno = i915_gem_request_get_seqno(signaller_req);
|
||||
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit(signaller, mbox_reg);
|
||||
intel_ring_emit(signaller, seqno);
|
||||
|
@@ -1272,30 +1297,29 @@ static int gen6_signal(struct intel_engine_cs *signaller,

/**
* gen6_add_request - Update the semaphore mailbox registers
*
* @ring - ring that is adding a request
* @seqno - return seqno stuck into the ring
*
* @request - request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
static int
gen6_add_request(struct intel_engine_cs *ring)
gen6_add_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
int ret;

if (ring->semaphore.signal)
ret = ring->semaphore.signal(ring, 4);
ret = ring->semaphore.signal(req, 4);
else
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);

if (ret)
return ret;

intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring,
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
||||
|
||||
|
@ -1318,14 +1342,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
|
|||
*/
|
||||
|
||||
static int
|
||||
gen8_ring_sync(struct intel_engine_cs *waiter,
|
||||
gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
|
||||
struct intel_engine_cs *signaller,
|
||||
u32 seqno)
|
||||
{
|
||||
struct intel_engine_cs *waiter = waiter_req->ring;
|
||||
struct drm_i915_private *dev_priv = waiter->dev->dev_private;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(waiter, 4);
|
||||
ret = intel_ring_begin(waiter_req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1343,10 +1368,11 @@ gen8_ring_sync(struct intel_engine_cs *waiter,
|
|||
}
|
||||
|
||||
static int
|
||||
gen6_ring_sync(struct intel_engine_cs *waiter,
|
||||
gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
|
||||
struct intel_engine_cs *signaller,
|
||||
u32 seqno)
|
||||
{
|
||||
struct intel_engine_cs *waiter = waiter_req->ring;
|
||||
u32 dw1 = MI_SEMAPHORE_MBOX |
|
||||
MI_SEMAPHORE_COMPARE |
|
||||
MI_SEMAPHORE_REGISTER;
|
||||
|
@ -1361,7 +1387,7 @@ gen6_ring_sync(struct intel_engine_cs *waiter,
|
|||
|
||||
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
|
||||
|
||||
ret = intel_ring_begin(waiter, 4);
|
||||
ret = intel_ring_begin(waiter_req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1392,8 +1418,9 @@ do { \
|
|||
} while (0)
|
||||
|
||||
static int
|
||||
pc_render_add_request(struct intel_engine_cs *ring)
|
||||
pc_render_add_request(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
|
||||
int ret;
|
||||
|
||||
|
@ -1405,7 +1432,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
|
|||
* incoherence by flushing the 6 PIPE_NOTIFY buffers out to
|
||||
* memory before requesting an interrupt.
|
||||
*/
|
||||
ret = intel_ring_begin(ring, 32);
|
||||
ret = intel_ring_begin(req, 32);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1413,8 +1440,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
|
|||
PIPE_CONTROL_WRITE_FLUSH |
|
||||
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
|
||||
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
|
||||
intel_ring_emit(ring,
|
||||
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
|
||||
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
|
||||
intel_ring_emit(ring, 0);
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
|
||||
|
@ -1433,8 +1459,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
|
|||
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
|
||||
PIPE_CONTROL_NOTIFY);
|
||||
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
|
||||
intel_ring_emit(ring,
|
||||
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
|
||||
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
|
||||
intel_ring_emit(ring, 0);
|
||||
__intel_ring_advance(ring);
|
||||
|
||||
|
@ -1585,13 +1610,14 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
|
|||
}
|
||||
|
||||
static int
|
||||
bsd_ring_flush(struct intel_engine_cs *ring,
|
||||
bsd_ring_flush(struct drm_i915_gem_request *req,
|
||||
u32 invalidate_domains,
|
||||
u32 flush_domains)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
ret = intel_ring_begin(req, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1602,18 +1628,18 @@ bsd_ring_flush(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
static int
|
||||
i9xx_add_request(struct intel_engine_cs *ring)
|
||||
i9xx_add_request(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
|
||||
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
|
||||
intel_ring_emit(ring,
|
||||
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
|
||||
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
|
||||
intel_ring_emit(ring, MI_USER_INTERRUPT);
|
||||
__intel_ring_advance(ring);
|
||||
|
||||
|
@ -1745,13 +1771,14 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
|
|||
}
|
||||
|
||||
static int
|
||||
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
|
||||
i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
|
||||
u64 offset, u32 length,
|
||||
unsigned dispatch_flags)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
ret = intel_ring_begin(req, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1771,14 +1798,15 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
|
|||
#define I830_TLB_ENTRIES (2)
|
||||
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
|
||||
static int
|
||||
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
|
||||
i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
|
||||
u64 offset, u32 len,
|
||||
unsigned dispatch_flags)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
u32 cs_offset = ring->scratch.gtt_offset;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
ret = intel_ring_begin(req, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1795,7 +1823,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
|
|||
if (len > I830_BATCH_LIMIT)
|
||||
return -ENOSPC;
|
||||
|
||||
ret = intel_ring_begin(ring, 6 + 2);
|
||||
ret = intel_ring_begin(req, 6 + 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1818,7 +1846,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
|
|||
offset = cs_offset;
|
||||
}
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1833,13 +1861,14 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
static int
|
||||
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
|
||||
i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
|
||||
u64 offset, u32 len,
|
||||
unsigned dispatch_flags)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
ret = intel_ring_begin(req, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -2082,7 +2111,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
|
|||
|
||||
intel_unpin_ringbuffer_obj(ringbuf);
|
||||
intel_destroy_ringbuffer_obj(ringbuf);
|
||||
i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
|
||||
|
||||
if (ring->cleanup)
|
||||
ring->cleanup(ring);
|
||||
|
@ -2106,6 +2134,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
|
|||
if (intel_ring_space(ringbuf) >= n)
|
||||
return 0;
|
||||
|
||||
/* The whole point of reserving space is to not wait! */
|
||||
WARN_ON(ringbuf->reserved_in_use);
|
||||
|
||||
list_for_each_entry(request, &ring->request_list, list) {
|
||||
space = __intel_ring_space(request->postfix, ringbuf->tail,
|
||||
ringbuf->size);
|
||||
|
@ -2124,18 +2155,11 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
|
||||
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
|
||||
{
|
||||
uint32_t __iomem *virt;
|
||||
struct intel_ringbuffer *ringbuf = ring->buffer;
|
||||
int rem = ringbuf->size - ringbuf->tail;
|
||||
|
||||
if (ringbuf->space < rem) {
|
||||
int ret = ring_wait_for_space(ring, rem);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
virt = ringbuf->virtual_start + ringbuf->tail;
|
||||
rem /= 4;
|
||||
while (rem--)
|
||||
|
@ -2143,21 +2167,11 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
|
|||
|
||||
ringbuf->tail = 0;
|
||||
intel_ring_update_space(ringbuf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int intel_ring_idle(struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_i915_gem_request *req;
|
||||
int ret;
|
||||
|
||||
/* We need to add any requests required to flush the objects and ring */
|
||||
if (ring->outstanding_lazy_request) {
|
||||
ret = i915_add_request(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Wait upon the last request to be completed */
|
||||
if (list_empty(&ring->request_list))
|
||||
|
@@ -2180,33 +2194,126 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
return 0;
}

static int __intel_ring_prepare(struct intel_engine_cs *ring,
int bytes)
int intel_ring_reserve_space(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
/*
* The first call merely notes the reserve request and is common for
* all back ends. The subsequent localised _begin() call actually
* ensures that the reservation is available. Without the begin, if
* the request creator immediately submitted the request without
* adding any commands to it then there might not actually be
* sufficient room for the submission commands.
*/
intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);

if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
return intel_ring_begin(request, 0);
}

void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
{
WARN_ON(ringbuf->reserved_size);
WARN_ON(ringbuf->reserved_in_use);

ringbuf->reserved_size = size;
}

void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
{
WARN_ON(ringbuf->reserved_in_use);

ringbuf->reserved_size = 0;
ringbuf->reserved_in_use = false;
}

void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
{
WARN_ON(ringbuf->reserved_in_use);

ringbuf->reserved_in_use = true;
ringbuf->reserved_tail = ringbuf->tail;
}

void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
{
WARN_ON(!ringbuf->reserved_in_use);
if (ringbuf->tail > ringbuf->reserved_tail) {
WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
"request reserved size too small: %d vs %d!\n",
ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
} else {
/*
* The ring was wrapped while the reserved space was in use.
* That means that some unknown amount of the ring tail was
* no-op filled and skipped. Thus simply adding the ring size
* to the tail and doing the above space check will not work.
* Rather than attempt to track how much tail was skipped,
* it is much simpler to say that also skipping the sanity
* check every once in a while is not a big issue.
*/
}

if (unlikely(ringbuf->space < bytes)) {
ret = ring_wait_for_space(ring, bytes);
ringbuf->reserved_size = 0;
ringbuf->reserved_in_use = false;
}

static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
int remain_usable = ringbuf->effective_size - ringbuf->tail;
int remain_actual = ringbuf->size - ringbuf->tail;
int ret, total_bytes, wait_bytes = 0;
bool need_wrap = false;

if (ringbuf->reserved_in_use)
total_bytes = bytes;
else
total_bytes = bytes + ringbuf->reserved_size;

if (unlikely(bytes > remain_usable)) {
/*
* Not enough space for the basic request. So need to flush
* out the remainder and then wait for base + reserved.
*/
wait_bytes = remain_actual + total_bytes;
need_wrap = true;
} else {
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
* falls off the end. So only need to to wait for the
* reserved size after flushing out the remainder.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
}
}

if (wait_bytes) {
ret = ring_wait_for_space(ring, wait_bytes);
if (unlikely(ret))
return ret;

if (need_wrap)
__wrap_ring_buffer(ringbuf);
}

return 0;
}
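The new __intel_ring_prepare() above decides how many bytes to wait for and whether the ring has to wrap before both the caller's bytes and the reserved tail fit. A stand-alone sketch of that decision, with plain integers in place of the real intel_ringbuffer (illustrative only, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    struct ring_sketch {
        int size, effective_size, tail, space;
        int reserved_size;
        bool reserved_in_use;
    };

    /* Mirrors the wait/wrap decision in the hunk above. */
    static void prepare(const struct ring_sketch *r, int bytes,
                        int *wait_bytes, bool *need_wrap)
    {
        int remain_usable = r->effective_size - r->tail;
        int remain_actual = r->size - r->tail;
        int total = r->reserved_in_use ? bytes : bytes + r->reserved_size;

        *wait_bytes = 0;
        *need_wrap = false;

        if (bytes > remain_usable) {
            /* request itself does not fit before the wrap point */
            *wait_bytes = remain_actual + total;
            *need_wrap = true;
        } else if (total > remain_usable) {
            /* request fits, but the reserved tail would fall off the end */
            *wait_bytes = remain_actual + r->reserved_size;
            *need_wrap = true;
        } else if (total > r->space) {
            *wait_bytes = total;
        }
    }

    int main(void)
    {
        struct ring_sketch r = { 4096, 4096 - 8, 4000, 500, 160, false };
        int wait;
        bool wrap;

        prepare(&r, 64, &wait, &wrap);
        printf("wait=%d wrap=%d\n", wait, wrap);
        return 0;
    }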
|
||||
|
||||
int intel_ring_begin(struct intel_engine_cs *ring,
int intel_ring_begin(struct drm_i915_gem_request *req,
int num_dwords)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *ring;
struct drm_i915_private *dev_priv;
int ret;

WARN_ON(req == NULL);
ring = req->ring;
dev_priv = ring->dev->dev_private;

ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)

@@ -2216,18 +2323,14 @@ int intel_ring_begin(struct intel_engine_cs *ring,
if (ret)
return ret;

/* Preallocate the olr before touching the ring */
ret = i915_gem_request_alloc(ring, ring->default_context);
if (ret)
return ret;

ring->buffer->space -= num_dwords * sizeof(uint32_t);
return 0;
}
|
||||
/* Align the ring tail to a cacheline boundary */
|
||||
int intel_ring_cacheline_align(struct intel_engine_cs *ring)
|
||||
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
|
||||
int ret;
|
||||
|
||||
|
@ -2235,7 +2338,7 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
|
|||
return 0;
|
||||
|
||||
num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
|
||||
ret = intel_ring_begin(ring, num_dwords);
|
||||
ret = intel_ring_begin(req, num_dwords);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -2252,8 +2355,6 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
|
|||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
BUG_ON(ring->outstanding_lazy_request);
|
||||
|
||||
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
|
||||
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
|
||||
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
|
||||
|
@ -2298,13 +2399,14 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
|
|||
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
|
||||
}
|
||||
|
||||
static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
|
||||
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
|
||||
u32 invalidate, u32 flush)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
uint32_t cmd;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -2342,20 +2444,23 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
static int
|
||||
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
|
||||
gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
|
||||
u64 offset, u32 len,
|
||||
unsigned dispatch_flags)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
bool ppgtt = USES_PPGTT(ring->dev) &&
|
||||
!(dispatch_flags & I915_DISPATCH_SECURE);
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* FIXME(BDW): Address space and security selectors. */
|
||||
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
|
||||
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
|
||||
(dispatch_flags & I915_DISPATCH_RS ?
|
||||
MI_BATCH_RESOURCE_STREAMER : 0));
|
||||
intel_ring_emit(ring, lower_32_bits(offset));
|
||||
intel_ring_emit(ring, upper_32_bits(offset));
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
|
@ -2365,20 +2470,23 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
static int
|
||||
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
|
||||
hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
|
||||
u64 offset, u32 len,
|
||||
unsigned dispatch_flags)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
ret = intel_ring_begin(req, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring,
|
||||
MI_BATCH_BUFFER_START |
|
||||
(dispatch_flags & I915_DISPATCH_SECURE ?
|
||||
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
|
||||
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
|
||||
(dispatch_flags & I915_DISPATCH_RS ?
|
||||
MI_BATCH_RESOURCE_STREAMER : 0));
|
||||
/* bit0-7 is the length on GEN6+ */
|
||||
intel_ring_emit(ring, offset);
|
||||
intel_ring_advance(ring);
|
||||
|
@ -2387,13 +2495,14 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
|
|||
}
|
||||
|
||||
static int
|
||||
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
|
||||
gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
|
||||
u64 offset, u32 len,
|
||||
unsigned dispatch_flags)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
ret = intel_ring_begin(req, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -2410,14 +2519,15 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
|
|||
|
||||
/* Blitter support (SandyBridge+) */
|
||||
|
||||
static int gen6_ring_flush(struct intel_engine_cs *ring,
|
||||
static int gen6_ring_flush(struct drm_i915_gem_request *req,
|
||||
u32 invalidate, u32 flush)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
struct drm_device *dev = ring->dev;
|
||||
uint32_t cmd;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -2818,26 +2928,28 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
|
|||
}
|
||||
|
||||
int
|
||||
intel_ring_flush_all_caches(struct intel_engine_cs *ring)
|
||||
intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
if (!ring->gpu_caches_dirty)
|
||||
return 0;
|
||||
|
||||
ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
|
||||
ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
|
||||
trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
|
||||
|
||||
ring->gpu_caches_dirty = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
|
||||
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
uint32_t flush_domains;
|
||||
int ret;
|
||||
|
||||
|
@ -2845,11 +2957,11 @@ intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
|
|||
if (ring->gpu_caches_dirty)
|
||||
flush_domains = I915_GEM_GPU_DOMAINS;
|
||||
|
||||
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
|
||||
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
|
||||
trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
|
||||
|
||||
ring->gpu_caches_dirty = false;
|
||||
return 0;
|
||||
|
|
|
@@ -12,6 +12,7 @@
* workarounds!
*/
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"

@@ -105,6 +106,9 @@ struct intel_ringbuffer {
int space;
int size;
int effective_size;
int reserved_size;
int reserved_tail;
bool reserved_in_use;

/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU

@@ -120,6 +124,25 @@ struct intel_ringbuffer {
struct intel_context;
struct drm_i915_reg_descriptor;

/*
* we use a single page to load ctx workarounds so all of these
* values are referred in terms of dwords
*
* struct i915_wa_ctx_bb:
* offset: specifies batch starting position, also helpful in case
* if we want to have multiple batches at different offsets based on
* some criteria. It is not a requirement at the moment but provides
* an option for future use.
* size: size of the batch in DWORDS
*/
struct i915_ctx_workarounds {
struct i915_wa_ctx_bb {
u32 offset;
u32 size;
} indirect_ctx, per_ctx;
struct drm_i915_gem_object *obj;
};

struct intel_engine_cs {
const char *name;
enum intel_ring_id {

@@ -143,6 +166,7 @@ struct intel_engine_cs {
struct i915_gem_batch_pool batch_pool;

struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;

unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
|
||||
|
@ -152,15 +176,14 @@ struct intel_engine_cs {
|
|||
|
||||
int (*init_hw)(struct intel_engine_cs *ring);
|
||||
|
||||
int (*init_context)(struct intel_engine_cs *ring,
|
||||
struct intel_context *ctx);
|
||||
int (*init_context)(struct drm_i915_gem_request *req);
|
||||
|
||||
void (*write_tail)(struct intel_engine_cs *ring,
|
||||
u32 value);
|
||||
int __must_check (*flush)(struct intel_engine_cs *ring,
|
||||
int __must_check (*flush)(struct drm_i915_gem_request *req,
|
||||
u32 invalidate_domains,
|
||||
u32 flush_domains);
|
||||
int (*add_request)(struct intel_engine_cs *ring);
|
||||
int (*add_request)(struct drm_i915_gem_request *req);
|
||||
/* Some chipsets are not quite as coherent as advertised and need
|
||||
* an expensive kick to force a true read of the up-to-date seqno.
|
||||
* However, the up-to-date seqno is not always required and the last
|
||||
|
@ -171,11 +194,12 @@ struct intel_engine_cs {
|
|||
bool lazy_coherency);
|
||||
void (*set_seqno)(struct intel_engine_cs *ring,
|
||||
u32 seqno);
|
||||
int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
|
||||
int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
|
||||
u64 offset, u32 length,
|
||||
unsigned dispatch_flags);
|
||||
#define I915_DISPATCH_SECURE 0x1
|
||||
#define I915_DISPATCH_PINNED 0x2
|
||||
#define I915_DISPATCH_RS 0x4
|
||||
void (*cleanup)(struct intel_engine_cs *ring);
|
||||
|
||||
/* GEN8 signal/wait table - never trust comments!
|
||||
|
@ -229,10 +253,10 @@ struct intel_engine_cs {
|
|||
};
|
||||
|
||||
/* AKA wait() */
|
||||
int (*sync_to)(struct intel_engine_cs *ring,
|
||||
struct intel_engine_cs *to,
|
||||
int (*sync_to)(struct drm_i915_gem_request *to_req,
|
||||
struct intel_engine_cs *from,
|
||||
u32 seqno);
|
||||
int (*signal)(struct intel_engine_cs *signaller,
|
||||
int (*signal)(struct drm_i915_gem_request *signaller_req,
|
||||
/* num_dwords needed by caller */
|
||||
unsigned int num_dwords);
|
||||
} semaphore;
|
||||
|
@ -243,14 +267,11 @@ struct intel_engine_cs {
|
|||
struct list_head execlist_retired_req_list;
|
||||
u8 next_context_status_buffer;
|
||||
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
|
||||
int (*emit_request)(struct intel_ringbuffer *ringbuf,
|
||||
struct drm_i915_gem_request *request);
|
||||
int (*emit_flush)(struct intel_ringbuffer *ringbuf,
|
||||
struct intel_context *ctx,
|
||||
int (*emit_request)(struct drm_i915_gem_request *request);
|
||||
int (*emit_flush)(struct drm_i915_gem_request *request,
|
||||
u32 invalidate_domains,
|
||||
u32 flush_domains);
|
||||
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
|
||||
struct intel_context *ctx,
|
||||
int (*emit_bb_start)(struct drm_i915_gem_request *req,
|
||||
u64 offset, unsigned dispatch_flags);
|
||||
|
||||
/**
|
||||
|
@ -271,10 +292,6 @@ struct intel_engine_cs {
|
|||
*/
|
||||
struct list_head request_list;
|
||||
|
||||
/**
|
||||
* Do we have some not yet emitted requests outstanding?
|
||||
*/
|
||||
struct drm_i915_gem_request *outstanding_lazy_request;
|
||||
/**
|
||||
* Seqno of request most recently submitted to request_list.
|
||||
* Used exclusively by hang checker to avoid grabbing lock while
|
||||
|
@ -408,8 +425,8 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
|
|||
|
||||
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
|
||||
|
||||
int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
|
||||
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
|
||||
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
|
||||
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
|
||||
static inline void intel_ring_emit(struct intel_engine_cs *ring,
|
||||
u32 data)
|
||||
{
|
||||
|
@ -426,12 +443,11 @@ int __intel_ring_space(int head, int tail, int size);
|
|||
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
|
||||
int intel_ring_space(struct intel_ringbuffer *ringbuf);
|
||||
bool intel_ring_stopped(struct intel_engine_cs *ring);
|
||||
void __intel_ring_advance(struct intel_engine_cs *ring);
|
||||
|
||||
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
|
||||
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
|
||||
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
|
||||
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
|
||||
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
|
||||
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
|
||||
|
||||
void intel_fini_pipe_control(struct intel_engine_cs *ring);
|
||||
int intel_init_pipe_control(struct intel_engine_cs *ring);
|
||||
|
@@ -451,11 +467,29 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
return ringbuf->tail;
}

static inline struct drm_i915_gem_request *
intel_ring_get_request(struct intel_engine_cs *ring)
{
BUG_ON(ring->outstanding_lazy_request == NULL);
return ring->outstanding_lazy_request;
}
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
* is ILK at 136 words. Reserving too much is better than reserving too little
* as that allows for corner cases that might have been missed. So the figure
* has been rounded up to 160 words.
*/
#define MIN_SPACE_FOR_ADD_REQUEST 160

/*
* Reserve space in the ring to guarantee that the i915_add_request() call
* will always have sufficient room to do its stuff. The request creation
* code calls this automatically.
*/
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);

/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);

#endif /* _INTEL_RINGBUFFER_H_ */
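The declarations above describe a small state machine: reserve at request creation, use when i915_add_request() starts emitting, end (or cancel) afterwards. A stand-alone sketch of that calling pattern, with the driver types replaced by a plain struct (illustrative only):

    #include <assert.h>
    #include <stdbool.h>

    struct rb_sketch {
        int tail, reserved_size, reserved_tail;
        bool reserved_in_use;
    };

    static void space_reserve(struct rb_sketch *rb, int size)
    {
        assert(!rb->reserved_size && !rb->reserved_in_use);
        rb->reserved_size = size;
    }

    static void space_use(struct rb_sketch *rb)
    {
        assert(!rb->reserved_in_use);
        rb->reserved_in_use = true;
        rb->reserved_tail = rb->tail;
    }

    static void space_end(struct rb_sketch *rb)
    {
        assert(rb->reserved_in_use);
        rb->reserved_size = 0;
        rb->reserved_in_use = false;
    }

    int main(void)
    {
        struct rb_sketch rb = { 0 };

        space_reserve(&rb, 160);   /* at request creation */
        rb.tail += 64;             /* caller emits its own commands */
        space_use(&rb);            /* add-request code starts */
        rb.tail += 40;             /* emits flush + seqno write */
        space_end(&rb);            /* reservation released */
        return 0;
    }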
||||
|
|
|
@ -835,12 +835,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
|
|||
return enabled;
|
||||
}
|
||||
|
||||
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
|
||||
|
||||
vlv_set_power_well(dev_priv, power_well, true);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
valleyview_enable_display_irqs(dev_priv);
|
||||
|
@ -858,18 +854,33 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
|
|||
i915_redisable_vga_power_on(dev_priv->dev);
|
||||
}
|
||||
|
||||
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
valleyview_disable_display_irqs(dev_priv);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
vlv_power_sequencer_reset(dev_priv);
|
||||
}
|
||||
|
||||
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
|
||||
|
||||
vlv_set_power_well(dev_priv, power_well, true);
|
||||
|
||||
vlv_display_power_well_init(dev_priv);
|
||||
}
|
||||
|
||||
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
valleyview_disable_display_irqs(dev_priv);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
vlv_display_power_well_deinit(dev_priv);
|
||||
|
||||
vlv_set_power_well(dev_priv, power_well, false);
|
||||
|
||||
vlv_power_sequencer_reset(dev_priv);
|
||||
}
|
||||
|
||||
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
|
@ -882,8 +893,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
|
|||
* display and the reference clock for VGA
|
||||
* hotplug / manual detection.
|
||||
*/
|
||||
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
|
||||
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
|
||||
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
|
||||
DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
|
||||
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
|
||||
|
||||
vlv_set_power_well(dev_priv, power_well, true);
|
||||
|
@ -933,14 +944,14 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
|
|||
*/
|
||||
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
|
||||
phy = DPIO_PHY0;
|
||||
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
|
||||
DPLL_REFA_CLK_ENABLE_VLV);
|
||||
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
|
||||
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
|
||||
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
|
||||
DPLL_REF_CLK_ENABLE_VLV);
|
||||
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
|
||||
DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
|
||||
} else {
|
||||
phy = DPIO_PHY1;
|
||||
I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
|
||||
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
|
||||
I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
|
||||
DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
|
||||
}
|
||||
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
|
||||
vlv_set_power_well(dev_priv, power_well, true);
|
||||
|
@ -1042,53 +1053,29 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
|
|||
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
WARN_ON_ONCE(power_well->data != PIPE_A);
|
||||
|
||||
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
|
||||
}
|
||||
|
||||
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
WARN_ON_ONCE(power_well->data != PIPE_A &&
|
||||
power_well->data != PIPE_B &&
|
||||
power_well->data != PIPE_C);
|
||||
WARN_ON_ONCE(power_well->data != PIPE_A);
|
||||
|
||||
chv_set_pipe_power_well(dev_priv, power_well, true);
|
||||
|
||||
if (power_well->data == PIPE_A) {
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
valleyview_enable_display_irqs(dev_priv);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
/*
|
||||
* During driver initialization/resume we can avoid restoring the
|
||||
* part of the HW/SW state that will be inited anyway explicitly.
|
||||
*/
|
||||
if (dev_priv->power_domains.initializing)
|
||||
return;
|
||||
|
||||
intel_hpd_init(dev_priv);
|
||||
|
||||
i915_redisable_vga_power_on(dev_priv->dev);
|
||||
}
|
||||
vlv_display_power_well_init(dev_priv);
|
||||
}
|
||||
|
||||
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
WARN_ON_ONCE(power_well->data != PIPE_A &&
|
||||
power_well->data != PIPE_B &&
|
||||
power_well->data != PIPE_C);
|
||||
WARN_ON_ONCE(power_well->data != PIPE_A);
|
||||
|
||||
if (power_well->data == PIPE_A) {
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
valleyview_disable_display_irqs(dev_priv);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
vlv_display_power_well_deinit(dev_priv);
|
||||
|
||||
chv_set_pipe_power_well(dev_priv, power_well, false);
|
||||
|
||||
if (power_well->data == PIPE_A)
|
||||
vlv_power_sequencer_reset(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -75,10 +75,8 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays. The value written to @start_vbl_count should be
* supplied to intel_pipe_update_end() for error checking.
*
* Return: true if the call was successful
*/
bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;

@@ -96,13 +94,14 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
min = vblank_start - usecs_to_scanlines(mode, 100);
max = vblank_start - 1;

local_irq_disable();
*start_vbl_count = 0;

if (min <= 0 || max <= 0)
return false;
return;

if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
return false;

local_irq_disable();
return;

trace_i915_pipe_update_start(crtc, min, max);

@@ -138,8 +137,6 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);

trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);

return true;
}

/**

@@ -161,7 +158,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)

local_irq_enable();

if (start_vbl_count != end_vbl_count)
if (start_vbl_count && start_vbl_count != end_vbl_count)
DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
pipe_name(pipe), start_vbl_count, end_vbl_count);
}
||||
|
@ -182,7 +179,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
|
|||
const int plane = intel_plane->plane + 1;
|
||||
u32 plane_ctl, stride_div, stride;
|
||||
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
|
||||
const struct drm_intel_sprite_colorkey *key =
|
||||
&to_intel_plane_state(drm_plane->state)->ckey;
|
||||
unsigned long surf_addr;
|
||||
u32 tile_height, plane_offset, plane_size;
|
||||
unsigned int rotation;
|
||||
|
@ -272,7 +270,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
|
|||
}
|
||||
|
||||
static void
|
||||
skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
|
||||
skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = dplane->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
@ -344,7 +342,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
|
|||
u32 sprctl;
|
||||
unsigned long sprsurf_offset, linear_offset;
|
||||
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
|
||||
const struct drm_intel_sprite_colorkey *key =
|
||||
&to_intel_plane_state(dplane->state)->ckey;
|
||||
|
||||
sprctl = SP_ENABLE;
|
||||
|
||||
|
@ -400,10 +399,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
|
|||
if (obj->tiling_mode != I915_TILING_NONE)
|
||||
sprctl |= SP_TILED;
|
||||
|
||||
intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
|
||||
pixel_size, true,
|
||||
src_w != crtc_w || src_h != crtc_h);
|
||||
|
||||
/* Sizes are 0 based */
|
||||
src_w--;
|
||||
src_h--;
|
||||
|
@ -411,7 +406,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
|
|||
crtc_h--;
|
||||
|
||||
linear_offset = y * fb->pitches[0] + x * pixel_size;
|
||||
sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
|
||||
sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
|
||||
&x, &y,
|
||||
obj->tiling_mode,
|
||||
pixel_size,
|
||||
fb->pitches[0]);
|
||||
|
@ -455,7 +451,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
|
|||
}
|
||||
|
||||
static void
|
||||
vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
|
||||
vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = dplane->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
@ -467,8 +463,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
|
|||
|
||||
I915_WRITE(SPSURF(pipe, plane), 0);
|
||||
POSTING_READ(SPSURF(pipe, plane));
|
||||
|
||||
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -487,7 +481,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
u32 sprctl, sprscale = 0;
|
||||
unsigned long sprsurf_offset, linear_offset;
|
||||
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
|
||||
const struct drm_intel_sprite_colorkey *key =
|
||||
&to_intel_plane_state(plane->state)->ckey;
|
||||
|
||||
sprctl = SPRITE_ENABLE;
|
||||
|
||||
|
@ -546,7 +541,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
|
||||
linear_offset = y * fb->pitches[0] + x * pixel_size;
|
||||
sprsurf_offset =
|
||||
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
|
||||
intel_gen4_compute_page_offset(dev_priv,
|
||||
&x, &y, obj->tiling_mode,
|
||||
pixel_size, fb->pitches[0]);
|
||||
linear_offset -= sprsurf_offset;
|
||||
|
||||
|
@ -595,7 +591,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
}
|
||||
|
||||
static void
|
||||
ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
|
||||
ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = plane->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
@ -627,7 +623,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
unsigned long dvssurf_offset, linear_offset;
|
||||
u32 dvscntr, dvsscale;
|
||||
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
|
||||
const struct drm_intel_sprite_colorkey *key =
|
||||
&to_intel_plane_state(plane->state)->ckey;
|
||||
|
||||
dvscntr = DVS_ENABLE;
|
||||
|
||||
|
@ -682,7 +679,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
|
||||
linear_offset = y * fb->pitches[0] + x * pixel_size;
|
||||
dvssurf_offset =
|
||||
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
|
||||
intel_gen4_compute_page_offset(dev_priv,
|
||||
&x, &y, obj->tiling_mode,
|
||||
pixel_size, fb->pitches[0]);
|
||||
linear_offset -= dvssurf_offset;
|
||||
|
||||
|
@ -722,7 +720,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
}
|
||||
|
||||
static void
|
||||
ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
|
||||
ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = plane->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
@ -739,11 +737,12 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
|
|||
|
||||
static int
|
||||
intel_check_sprite_plane(struct drm_plane *plane,
|
||||
struct intel_crtc_state *crtc_state,
|
||||
struct intel_plane_state *state)
|
||||
{
|
||||
struct drm_device *dev = plane->dev;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
|
||||
struct intel_crtc_state *crtc_state;
|
||||
struct drm_crtc *crtc = state->base.crtc;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_plane *intel_plane = to_intel_plane(plane);
|
||||
struct drm_framebuffer *fb = state->base.fb;
|
||||
int crtc_x, crtc_y;
|
||||
|
@ -756,15 +755,10 @@ intel_check_sprite_plane(struct drm_plane *plane,
|
|||
int max_scale, min_scale;
|
||||
bool can_scale;
|
||||
int pixel_size;
|
||||
int ret;
|
||||
|
||||
intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
|
||||
crtc_state = state->base.state ?
|
||||
intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
|
||||
|
||||
if (!fb) {
|
||||
state->visible = false;
|
||||
goto finish;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Don't modify another pipe's plane */
|
||||
|
@ -782,7 +776,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
|
|||
/* setup can_scale, min_scale, max_scale */
|
||||
if (INTEL_INFO(dev)->gen >= 9) {
|
||||
/* use scaler when colorkey is not required */
|
||||
if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) {
|
||||
if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
|
||||
can_scale = 1;
|
||||
min_scale = 1;
|
||||
max_scale = skl_max_scale(intel_crtc, crtc_state);
|
||||
|
@ -802,7 +796,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
|
|||
* coordinates and sizes. We probably need some way to decide whether
|
||||
* more strict checking should be done instead.
|
||||
*/
|
||||
|
||||
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
|
||||
state->base.rotation);
|
||||
|
||||
|
@ -812,7 +805,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
|
|||
vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
|
||||
BUG_ON(vscale < 0);
|
||||
|
||||
state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
|
||||
state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
|
||||
|
||||
crtc_x = dst->x1;
|
||||
crtc_y = dst->y1;
|
||||
|
@ -917,36 +910,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
|
|||
dst->y1 = crtc_y;
|
||||
dst->y2 = crtc_y + crtc_h;
|
||||
|
||||
finish:
|
||||
/*
|
||||
* If the sprite is completely covering the primary plane,
|
||||
* we can disable the primary and save power.
|
||||
*/
|
||||
if (intel_crtc->active) {
|
||||
intel_crtc->atomic.fb_bits |=
|
||||
INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
|
||||
|
||||
if (intel_wm_need_update(plane, &state->base))
|
||||
intel_crtc->atomic.update_wm = true;
|
||||
|
||||
if (!state->visible) {
|
||||
/*
|
||||
* Avoid underruns when disabling the sprite.
|
||||
* FIXME remove once watermark updates are done properly.
|
||||
*/
|
||||
intel_crtc->atomic.wait_vblank = true;
|
||||
intel_crtc->atomic.update_sprite_watermarks |=
|
||||
(1 << drm_plane_index(plane));
|
||||
}
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 9) {
|
||||
ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
|
||||
state, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -955,34 +918,27 @@ intel_commit_sprite_plane(struct drm_plane *plane,
|
|||
struct intel_plane_state *state)
|
||||
{
|
||||
struct drm_crtc *crtc = state->base.crtc;
|
||||
struct intel_crtc *intel_crtc;
|
||||
struct intel_plane *intel_plane = to_intel_plane(plane);
|
||||
struct drm_framebuffer *fb = state->base.fb;
|
||||
int crtc_x, crtc_y;
|
||||
unsigned int crtc_w, crtc_h;
|
||||
uint32_t src_x, src_y, src_w, src_h;
|
||||
|
||||
crtc = crtc ? crtc : plane->crtc;
|
||||
intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
plane->fb = fb;
|
||||
|
||||
if (intel_crtc->active) {
|
||||
if (state->visible) {
|
||||
crtc_x = state->dst.x1;
|
||||
crtc_y = state->dst.y1;
|
||||
crtc_w = drm_rect_width(&state->dst);
|
||||
crtc_h = drm_rect_height(&state->dst);
|
||||
src_x = state->src.x1 >> 16;
|
||||
src_y = state->src.y1 >> 16;
|
||||
src_w = drm_rect_width(&state->src) >> 16;
|
||||
src_h = drm_rect_height(&state->src) >> 16;
|
||||
intel_plane->update_plane(plane, crtc, fb,
|
||||
crtc_x, crtc_y, crtc_w, crtc_h,
|
||||
src_x, src_y, src_w, src_h);
|
||||
} else {
|
||||
intel_plane->disable_plane(plane, crtc, false);
|
||||
}
|
||||
if (!crtc->state->active)
|
||||
return;
|
||||
|
||||
if (state->visible) {
|
||||
intel_plane->update_plane(plane, crtc, fb,
|
||||
state->dst.x1, state->dst.y1,
|
||||
drm_rect_width(&state->dst),
|
||||
drm_rect_height(&state->dst),
|
||||
state->src.x1 >> 16,
|
||||
state->src.y1 >> 16,
|
||||
drm_rect_width(&state->src) >> 16,
|
||||
drm_rect_height(&state->src) >> 16);
|
||||
} else {
|
||||
intel_plane->disable_plane(plane, crtc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -991,7 +947,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
|
|||
{
|
||||
struct drm_intel_sprite_colorkey *set = data;
|
||||
struct drm_plane *plane;
|
||||
struct intel_plane *intel_plane;
|
||||
struct drm_plane_state *plane_state;
|
||||
struct drm_atomic_state *state;
|
||||
struct drm_modeset_acquire_ctx ctx;
|
||||
int ret = 0;
|
||||
|
||||
/* Make sure we don't try to enable both src & dest simultaneously */
|
||||
|
@ -1002,52 +960,43 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
|
|||
set->flags & I915_SET_COLORKEY_DESTINATION)
|
||||
return -EINVAL;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
|
||||
plane = drm_plane_find(dev, set->plane_id);
|
||||
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
|
||||
ret = -ENOENT;
|
||||
goto out_unlock;
|
||||
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
|
||||
return -ENOENT;
|
||||
|
||||
drm_modeset_acquire_init(&ctx, 0);
|
||||
|
||||
state = drm_atomic_state_alloc(plane->dev);
|
||||
if (!state) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
state->acquire_ctx = &ctx;
|
||||
|
||||
intel_plane = to_intel_plane(plane);
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 9) {
|
||||
/* plane scaling and colorkey are mutually exclusive */
|
||||
if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
|
||||
DRM_ERROR("colorkey not allowed with scaler\n");
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
while (1) {
|
||||
plane_state = drm_atomic_get_plane_state(state, plane);
|
||||
ret = PTR_ERR_OR_ZERO(plane_state);
|
||||
if (!ret) {
|
||||
to_intel_plane_state(plane_state)->ckey = *set;
|
||||
ret = drm_atomic_commit(state);
|
||||
}
|
||||
|
||||
if (ret != -EDEADLK)
|
||||
break;
|
||||
|
||||
drm_atomic_state_clear(state);
|
||||
drm_modeset_backoff(&ctx);
|
||||
}
|
||||
|
||||
intel_plane->ckey = *set;
|
||||
if (ret)
|
||||
drm_atomic_state_free(state);
|
||||
|
||||
/*
|
||||
* The only way this could fail would be due to
|
||||
* the current plane state being unsupportable already,
|
||||
* and we dont't consider that an error for the
|
||||
* colorkey ioctl. So just ignore any error.
|
||||
*/
|
||||
intel_plane_restore(plane);
|
||||
|
||||
out_unlock:
|
||||
drm_modeset_unlock_all(dev);
|
||||
out:
|
||||
drm_modeset_drop_locks(&ctx);
|
||||
drm_modeset_acquire_fini(&ctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int intel_plane_restore(struct drm_plane *plane)
|
||||
{
|
||||
if (!plane->crtc || !plane->state->fb)
|
||||
return 0;
|
||||
|
||||
return drm_plane_helper_update(plane, plane->crtc, plane->state->fb,
|
||||
plane->state->crtc_x, plane->state->crtc_y,
|
||||
plane->state->crtc_w, plane->state->crtc_h,
|
||||
plane->state->src_x, plane->state->src_y,
|
||||
plane->state->src_w, plane->state->src_h);
|
||||
}
|
||||
|
||||
static const uint32_t ilk_plane_formats[] = {
|
||||
DRM_FORMAT_XRGB8888,
|
||||
DRM_FORMAT_YUYV,
|
||||
|
@ -1172,9 +1121,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
|
|||
|
||||
intel_plane->pipe = pipe;
|
||||
intel_plane->plane = plane;
|
||||
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe);
|
||||
intel_plane->check_plane = intel_check_sprite_plane;
|
||||
intel_plane->commit_plane = intel_commit_sprite_plane;
|
||||
intel_plane->ckey.flags = I915_SET_COLORKEY_NONE;
|
||||
possible_crtcs = (1 << pipe);
|
||||
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
|
||||
&intel_plane_funcs,
|
||||
|
@ -1189,6 +1138,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
|
|||
|
||||
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
|
||||
|
||||
out:
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
@@ -1455,20 +1455,80 @@ static int gen6_do_reset(struct drm_device *dev)
return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
const u32 reg,
const u32 mask,
const u32 value,
const unsigned long timeout_ms)
{
return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

static int gen8_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int i;

for_each_ring(engine, dev_priv, i) {
I915_WRITE(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

if (wait_for_register(dev_priv,
RING_RESET_CTL(engine->mmio_base),
RESET_CTL_READY_TO_RESET,
RESET_CTL_READY_TO_RESET,
700)) {
DRM_ERROR("%s: reset request timeout\n", engine->name);
goto not_ready;
}
}

return gen6_do_reset(dev);

not_ready:
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

return -EIO;
}

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
if (!i915.reset)
return NULL;

if (INTEL_INFO(dev)->gen >= 8)
return gen8_do_reset;
else if (INTEL_INFO(dev)->gen >= 6)
return gen6_do_reset;
else if (IS_GEN5(dev))
return ironlake_do_reset;
else if (IS_G4X(dev))
return g4x_do_reset;
else if (IS_G33(dev))
return g33_do_reset;
else if (INTEL_INFO(dev)->gen >= 3)
return i915_do_reset;
else
return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen >= 6)
return gen6_do_reset(dev);
else if (IS_GEN5(dev))
return ironlake_do_reset(dev);
else if (IS_G4X(dev))
return g4x_do_reset(dev);
else if (IS_G33(dev))
return g33_do_reset(dev);
else if (INTEL_INFO(dev)->gen >= 3)
return i915_do_reset(dev);
else
int (*reset)(struct drm_device *);

reset = intel_get_gpu_reset(dev);
if (reset == NULL)
return -ENODEV;

return reset(dev);
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
return intel_get_gpu_reset(dev) != NULL;
}

void intel_uncore_check_errors(struct drm_device *dev)
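wait_for_register() above is a thin wrapper around the driver's wait_for() macro: poll a register until a masked value appears or a timeout expires. A stand-alone sketch of the same idea in plain C (illustrative only; the real code reads hardware via I915_READ and times out in milliseconds, the stub below just limits the number of polls):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stub standing in for I915_READ(); pretends the engine reports
     * ready-to-reset on the third poll. */
    static uint32_t read_reg(uint32_t reg)
    {
        static int polls;
        (void)reg;
        return ++polls >= 3 ? 0x2 : 0x0; /* 0x2 ~ READY_TO_RESET bit, example value */
    }

    static bool wait_for_value(uint32_t reg, uint32_t mask, uint32_t value,
                               int max_polls)
    {
        while (max_polls-- > 0) {
            if ((read_reg(reg) & mask) == value)
                return true;
        }
        return false; /* the driver version returns -ETIMEDOUT-style failure */
    }

    int main(void)
    {
        printf("ready=%d\n", wait_for_value(0x0, 0x2, 0x2, 10));
        return 0;
    }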
|
@@ -3,8 +3,8 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H

void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, unsigned long *mappable_end);
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, u64 *mappable_end);

int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
|
@@ -354,9 +354,15 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_REVISION 32
#define I915_PARAM_SUBSLICE_TOTAL 33
#define I915_PARAM_EU_TOTAL 34
#define I915_PARAM_HAS_GPU_RESET 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36

typedef struct drm_i915_getparam {
int param;
s32 param;
/*
* WARNING: Using pointers instead of fixed-size u64 means we need to write
* compat32 code. Don't repeat this mistake.
*/
int __user *value;
} drm_i915_getparam_t;

@@ -764,7 +770,12 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_BSD_RING1 (1<<13)
#define I915_EXEC_BSD_RING2 (2<<13)

#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
/** Tell the kernel that the batchbuffer is processed by
* the resource streamer.
*/
#define I915_EXEC_RESOURCE_STREAMER (1<<15)

#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)

#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \

@@ -1106,6 +1117,7 @@ struct drm_i915_gem_context_param {
__u32 size;
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
__u64 value;
};
||||
|
|
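The new I915_PARAM_HAS_RESOURCE_STREAMER getparam and the I915_EXEC_RESOURCE_STREAMER execbuffer flag are intended to be probed and used from userspace roughly as in the fragment below. This is a sketch only, not part of the commit: it assumes the usual libdrm helpers (drmIoctl, DRM_IOCTL_I915_GETPARAM from xf86drm.h/i915_drm.h), and error handling is trimmed.

    #include <sys/ioctl.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    static int has_resource_streamer(int fd)
    {
        int value = 0;
        struct drm_i915_getparam gp = {
            .param = I915_PARAM_HAS_RESOURCE_STREAMER,
            .value = &value,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
            return 0;       /* old kernel: parameter unknown */
        return value;
    }

    /* Later, when submitting a batch that uses the resource streamer:
     *     execbuf.flags |= I915_EXEC_RESOURCE_STREAMER;
     */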