mirror of https://gitee.com/openkylin/linux.git
Merge tag 'drm-intel-next-2012-07-06' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
New pull for -next. Highlights:
- rc6/turbo support for hsw (Eugeni)
- improved handling of corner cases in the reset code - gpu reset handling should be rock-solid now
- support for fb offset > 4096 pixels on gen4+ (yeah, you need some fairly big screens to hit that)
- the "Flush Me Harder" patch to fix the gen6+ fallout from disabling the flushing_list
- no more /dev/agpgart on gen6+!
- HAS_PCH_xxx improvements from Paulo
- a few minor bits & pieces all over, most of it in the hsw code

* tag 'drm-intel-next-2012-07-06' of git://people.freedesktop.org/~danvet/drm-intel: (40 commits)
  drm/i915: program FDI_RX TP and FDI delays
  drm/i915: introduce for_each_encoder_on_crtc
  drm/i915: adjust framebuffer base address on gen4+
  drm/i915: introduce crtc->dspaddr_offset
  drm/i915: Reject page flips with changed format/offset/pitch
  drm/i915: Zero initialize mode_cmd
  drm/i915: don't return a spurious -EIO from intel_ring_begin
  drm/i915: properly SIGBUS on I/O errors
  drm/i915: don't hang userspace when the gpu reset is stuck
  drm/i915: non-interruptible sleeps can't handle -EAGAIN
  drm/i915: don't trylock in the gpu reset code
  drm/i915: fix PIPE_DDI_PORT_MASK
  drm/i915: prevent bogus intel_update_fbc notifications
  drm/i915: re-initialize DDI buffer translations after resume
  drm/i915: don't ironlake_init_pch_refclk() on LPT
  drm/i915: get rid of dev_priv->info->has_pch_split
  drm/i915: add PCH_NONE to enum intel_pch
  drm/i915: prefer wide & slow to fast & narrow in DP configs
  drm/i915: fix up ilk rc6 disabling confusion
  drm/i915: move force wake support into intel_pm
  ...
commit 12f0e670ff
drivers/char/agp/intel-agp.c
@@ -903,17 +903,6 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
-	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
-	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
-	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
-	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
-	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
-	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
-	ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
-	ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
-	ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
-	ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
-	ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
 	{ }
 };
drivers/gpu/drm/i915/i915_dma.c
@@ -1414,7 +1414,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	if (!ap)
 		return;
 
-	ap->ranges[0].base = dev_priv->dev->agp->base;
+	ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
 	ap->ranges[0].size =
 		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 	primary =
@@ -1547,7 +1547,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_mtrrfree;
 	}
 
+	/* This must be called before any calls to HAS_PCH_* */
+	intel_detect_pch(dev);
+
 	intel_irq_init(dev);
+	intel_gt_init(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
@@ -1580,7 +1584,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);
 
-	spin_lock_init(&dev_priv->gt_lock);
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	spin_lock_init(&dev_priv->rps_lock);
@@ -1599,8 +1602,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	/* Start out suspended */
 	dev_priv->mm.suspended = 1;
 
-	intel_detect_pch(dev);
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev);
 		if (ret < 0) {
drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "intel_drv.h"
 
 #include <linux/console.h>
@@ -215,7 +216,6 @@ static const struct intel_device_info intel_ironlake_d_info = {
 	.gen = 5,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
-	.has_pch_split = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
@@ -223,7 +223,6 @@ static const struct intel_device_info intel_ironlake_m_info = {
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
 	.has_bsd_ring = 1,
-	.has_pch_split = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
@@ -232,7 +231,6 @@ static const struct intel_device_info intel_sandybridge_d_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
-	.has_pch_split = 1,
 	.has_force_wake = 1,
 };
 
@@ -243,7 +241,6 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
-	.has_pch_split = 1,
 	.has_force_wake = 1,
 };
 
@@ -253,7 +250,6 @@ static const struct intel_device_info intel_ivybridge_d_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
-	.has_pch_split = 1,
 	.has_force_wake = 1,
 };
 
@@ -264,7 +260,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
-	.has_pch_split = 1,
 	.has_force_wake = 1,
 };
 
@@ -292,7 +287,6 @@ static const struct intel_device_info intel_haswell_d_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
-	.has_pch_split = 1,
 	.has_force_wake = 1,
 };
 
@@ -302,7 +296,6 @@ static const struct intel_device_info intel_haswell_m_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
-	.has_pch_split = 1,
 	.has_force_wake = 1,
 };
 
@@ -432,135 +425,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	return 1;
 }
 
-void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
-	int count;
-
-	count = 0;
-	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
-		udelay(10);
-
-	I915_WRITE_NOTRACE(FORCEWAKE, 1);
-	POSTING_READ(FORCEWAKE);
-
-	count = 0;
-	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
-		udelay(10);
-}
-
-void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
-{
-	int count;
-
-	count = 0;
-	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
-		udelay(10);
-
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
-	POSTING_READ(FORCEWAKE_MT);
-
-	count = 0;
-	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
-		udelay(10);
-}
-
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-	if (dev_priv->forcewake_count++ == 0)
-		dev_priv->display.force_wake_get(dev_priv);
-	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
-	u32 gtfifodbg;
-	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
-	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
-	     "MMIO read or write has been dropped %x\n", gtfifodbg))
-		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
-}
-
-void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE_NOTRACE(FORCEWAKE, 0);
-	/* The below doubles as a POSTING_READ */
-	gen6_gt_check_fifodbg(dev_priv);
-}
-
-void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
-	/* The below doubles as a POSTING_READ */
-	gen6_gt_check_fifodbg(dev_priv);
-}
-
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-	if (--dev_priv->forcewake_count == 0)
-		dev_priv->display.force_wake_put(dev_priv);
-	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
-{
-	int ret = 0;
-
-	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
-		int loop = 500;
-		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
-			udelay(10);
-			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-		}
-		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
-			++ret;
-		dev_priv->gt_fifo_count = fifo;
-	}
-	dev_priv->gt_fifo_count--;
-
-	return ret;
-}
-
-void vlv_force_wake_get(struct drm_i915_private *dev_priv)
-{
-	int count;
-
-	count = 0;
-
-	/* Already awake? */
-	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
-		return;
-
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
-	POSTING_READ(FORCEWAKE_VLV);
-
-	count = 0;
-	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
-		udelay(10);
-}
-
-void vlv_force_wake_put(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
-	/* FIXME: confirm VLV behavior with Punit folks */
-	POSTING_READ(FORCEWAKE_VLV);
-}
-
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
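Note: the comment above gen6_gt_force_wake_get() (moved verbatim into intel_pm.c by this series) describes the intended usage pattern. A minimal sketch of a sequence that must keep the GT awake between reads; the helper function name is illustrative, but both registers appear elsewhere in this diff:

    /* Sketch: keep the GT awake across a multi-register read sequence.
     * get/put are refcounted under dev_priv->gt_lock, so nesting is safe. */
    static void sample_rps_status(struct drm_i915_private *dev_priv,
                                  u32 *perf, u32 *cap)
    {
            gen6_gt_force_wake_get(dev_priv);       /* count 0 -> 1 wakes the GT */

            *perf = I915_READ(GEN6_GT_PERF_STATUS);
            *cap = I915_READ(GEN6_RP_STATE_CAP);    /* both sampled while awake */

            gen6_gt_force_wake_put(dev_priv);       /* count 1 -> 0 allows power-down */
    }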
@@ -640,7 +504,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (HAS_PCH_SPLIT(dev))
+		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 			ironlake_init_pch_refclk(dev);
 
 		mutex_lock(&dev->struct_mutex);
@@ -797,9 +661,9 @@ static int gen6_do_reset(struct drm_device *dev)
 
 	/* If reset with a user forcewake, try to restore, otherwise turn it off */
 	if (dev_priv->forcewake_count)
-		dev_priv->display.force_wake_get(dev_priv);
+		dev_priv->gt.force_wake_get(dev_priv);
 	else
-		dev_priv->display.force_wake_put(dev_priv);
+		dev_priv->gt.force_wake_put(dev_priv);
 
 	/* Restore fifo count */
 	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
@@ -866,8 +730,7 @@ int i915_reset(struct drm_device *dev)
 	if (!i915_try_reset)
 		return 0;
 
-	if (!mutex_trylock(&dev->struct_mutex))
-		return -EBUSY;
+	mutex_lock(&dev->struct_mutex);
 
 	i915_gem_reset(dev);
 
@@ -930,10 +793,12 @@ int i915_reset(struct drm_device *dev)
 	return 0;
 }
 
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	struct intel_device_info *intel_info =
+		(struct intel_device_info *) ent->driver_data;
+
 	/* Only bind to function 0 of the device. Early generations
 	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
@@ -942,6 +807,18 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (PCI_FUNC(pdev->devfn))
 		return -ENODEV;
 
+	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
+	 * implementation for gen3 (and only gen3) that used legacy drm maps
+	 * (gasp!) to share buffers between X and the client. Hence we need to
+	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
+	if (intel_info->gen != 3) {
+		driver.driver_features &=
+			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
+	} else if (!intel_agp_enabled) {
+		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
+		return -ENODEV;
+	}
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -1102,11 +979,6 @@ static struct pci_driver i915_pci_driver = {
 
 static int __init i915_init(void)
 {
-	if (!intel_agp_enabled) {
-		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
-		return -ENODEV;
-	}
-
 	driver.num_ioctls = i915_max_ioctl;
 
 	/*
@@ -1239,10 +1111,10 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 		unsigned long irqflags; \
 		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
 		if (dev_priv->forcewake_count == 0) \
-			dev_priv->display.force_wake_get(dev_priv); \
+			dev_priv->gt.force_wake_get(dev_priv); \
 		val = read##y(dev_priv->regs + reg); \
 		if (dev_priv->forcewake_count == 0) \
-			dev_priv->display.force_wake_put(dev_priv); \
+			dev_priv->gt.force_wake_put(dev_priv); \
 		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
 	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
 		val = read##y(dev_priv->regs + reg + 0x180000); \
drivers/gpu/drm/i915/i915_drv.h
@@ -79,6 +79,10 @@ enum port {
 
 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+		if ((intel_encoder)->base.crtc == (__crtc))
+
 struct intel_pch_pll {
 	int refcount; /* count of number of CRTCs sharing this PLL */
 	int active; /* count of number of active CRTCs (i.e. DPMS on) */
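Note: the new iterator folds the common "walk the encoder list, skip encoders on other crtcs" pattern into one macro; since it expands to a loop header plus filter, it takes a body like any for loop. A minimal usage sketch, modelled on the intel_pipe_has_type() conversion later in this diff (the helper name is illustrative):

    static bool crtc_has_lvds(struct drm_device *dev, struct drm_crtc *crtc)
    {
            struct intel_encoder *encoder;

            /* replaces list_for_each_entry() + explicit crtc check */
            for_each_encoder_on_crtc(dev, crtc, encoder)
                    if (encoder->type == INTEL_OUTPUT_LVDS)
                            return true;

            return false;
    }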
@@ -262,8 +266,6 @@ struct drm_i915_display_funcs {
 			  struct drm_i915_gem_object *obj);
 	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    int x, int y);
-	void (*force_wake_get)(struct drm_i915_private *dev_priv);
-	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -271,6 +273,11 @@ struct drm_i915_display_funcs {
 	/* pll clock increase/decrease */
 };
 
+struct drm_i915_gt_funcs {
+	void (*force_wake_get)(struct drm_i915_private *dev_priv);
+	void (*force_wake_put)(struct drm_i915_private *dev_priv);
+};
+
 struct intel_device_info {
 	u8 gen;
 	u8 is_mobile:1;
@@ -285,7 +292,6 @@ struct intel_device_info {
 	u8 is_crestline:1;
 	u8 is_ivybridge:1;
 	u8 is_valleyview:1;
-	u8 has_pch_split:1;
 	u8 has_force_wake:1;
 	u8 is_haswell:1;
 	u8 has_fbc:1;
@@ -333,6 +339,7 @@ enum no_fbc_reason {
 };
 
 enum intel_pch {
+	PCH_NONE = 0,	/* No PCH present */
 	PCH_IBX,	/* Ibexpeak PCH */
 	PCH_CPT,	/* Cougarpoint PCH */
 	PCH_LPT,	/* Lynxpoint PCH */
@@ -362,6 +369,8 @@ typedef struct drm_i915_private {
 	int relative_constants_mode;
 
 	void __iomem *regs;
+
+	struct drm_i915_gt_funcs gt;
 	/** gt_fifo_count and the subsequent register write are synchronized
 	 * with dev->struct_mutex. */
 	unsigned gt_fifo_count;
@@ -1115,13 +1124,13 @@ struct drm_i915_file_private {
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
-#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
 
@@ -1200,6 +1209,7 @@ void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_gt_init(struct drm_device *dev);
 
 void i915_error_state_free(struct kref *error_ref);
 
@@ -1330,6 +1340,8 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+				      bool interruptible);
 
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1510,20 +1522,12 @@ extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void ironlake_init_pch_refclk(struct drm_device *dev);
-extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
 
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
-extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
-
-extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
-extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
 
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
drivers/gpu/drm/i915/i915_gem.c
@@ -96,9 +96,18 @@ i915_gem_wait_for_error(struct drm_device *dev)
 	if (!atomic_read(&dev_priv->mm.wedged))
 		return 0;
 
-	ret = wait_for_completion_interruptible(x);
-	if (ret)
+	/*
+	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+	 * userspace. If it takes that long something really bad is going on and
+	 * we should simply try to bail out and fail as gracefully as possible.
+	 */
+	ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+	if (ret == 0) {
+		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
+		return -EIO;
+	} else if (ret < 0) {
 		return ret;
+	}
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
 		/* GPU is hung, bump the completion count to account for
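Note: the new error handling leans on the return convention of the kernel's timeout-completion API, which the hunk above exercises but does not spell out; a compact sketch of that contract:

    /* wait_for_completion_interruptible_timeout() returns:
     *   0   -> the timeout elapsed (here: reset stuck, report -EIO)
     *   < 0 -> interrupted by a signal (passed straight back to userspace)
     *   > 0 -> completed; value is the jiffies left of the timeout budget
     */
    long ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
    if (ret == 0)
            return -EIO;    /* the reset never signalled completion */
    if (ret < 0)
            return ret;     /* let userspace restart the syscall */
    /* fall through: reset completed within the 10 second budget */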
@@ -1132,6 +1141,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 out:
 	switch (ret) {
 	case -EIO:
+		/* If this -EIO is due to a gpu hang, give the reset code a
+		 * chance to clean up the mess. Otherwise return the proper
+		 * SIGBUS. */
+		if (!atomic_read(&dev_priv->mm.wedged))
+			return VM_FAULT_SIGBUS;
 	case -EAGAIN:
 		/* Give the error handler a chance to run and move the
 		 * objects off the GPU active list. Next time we service the
@@ -1863,11 +1877,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+		     bool interruptible)
 {
 	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
 		struct completion *x = &dev_priv->error_completion;
 		bool recovery_complete;
@@ -1878,7 +1891,16 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv)
 		recovery_complete = x->done > 0;
 		spin_unlock_irqrestore(&x->wait.lock, flags);
 
-		return recovery_complete ? -EIO : -EAGAIN;
+		/* Non-interruptible callers can't handle -EAGAIN, hence return
+		 * -EIO unconditionally for these. */
+		if (!interruptible)
+			return -EIO;
+
+		/* Recovery complete, but still wedged means reset failure. */
+		if (recovery_complete)
+			return -EIO;
+
+		return -EAGAIN;
 	}
 
 	return 0;
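Note: i915_gem_check_wedge() now distinguishes callers by whether they can back off, which is the point of the "non-interruptible sleeps can't handle -EAGAIN" patch. A minimal sketch of the resulting contract at a call site (the wrapper name is illustrative; i915_wait_seqno and mm.interruptible come from this diff):

    /* Sketch: -EAGAIN tells an interruptible caller to drop struct_mutex,
     * let the reset handler run, and retry. Non-interruptible paths only
     * ever see -EIO, because they have no way to restart themselves. */
    static int wait_rendering(struct drm_i915_private *dev_priv,
                              struct intel_ring_buffer *ring, u32 seqno)
    {
            int ret;

            ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
            if (ret == -EAGAIN)
                    return ret;     /* caller unlocks, reset runs, then retries */
            if (ret)
                    return ret;     /* -EIO: wedged for good, give up */

            return i915_wait_seqno(ring, seqno);
    }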
@@ -1932,6 +1954,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	unsigned long timeout_jiffies;
 	long end;
 	bool wait_forever = true;
+	int ret;
 
 	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
 		return 0;
@@ -1963,8 +1986,9 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
 						 timeout_jiffies);
 
-		if (atomic_read(&dev_priv->mm.wedged))
-			end = -EAGAIN;
+		ret = i915_gem_check_wedge(dev_priv, interruptible);
+		if (ret)
+			end = ret;
 	} while (end == 0 && wait_forever);
 
 	getrawmonotonic(&now);
@@ -2004,7 +2028,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 
 	BUG_ON(seqno == 0);
 
-	ret = i915_gem_check_wedge(dev_priv);
+	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
drivers/gpu/drm/i915/i915_gem_context.c
@@ -136,37 +136,36 @@ static void do_destroy(struct i915_hw_context *ctx)
 	kfree(ctx);
 }
 
-static int
+static struct i915_hw_context *
 create_hw_context(struct drm_device *dev,
-		  struct drm_i915_file_private *file_priv,
-		  struct i915_hw_context **ctx_out)
+		  struct drm_i915_file_private *file_priv)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_context *ctx;
 	int ret, id;
 
-	*ctx_out = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
-	if (*ctx_out == NULL)
-		return -ENOMEM;
+	ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
+	if (ctx == NULL)
+		return ERR_PTR(-ENOMEM);
 
-	(*ctx_out)->obj = i915_gem_alloc_object(dev,
-						dev_priv->hw_context_size);
-	if ((*ctx_out)->obj == NULL) {
-		kfree(*ctx_out);
+	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+	if (ctx->obj == NULL) {
+		kfree(ctx);
 		DRM_DEBUG_DRIVER("Context object allocated failed\n");
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	/* The ring associated with the context object is handled by the normal
	 * object tracking code. We give an initial ring value simple to pass an
	 * assertion in the context switch code.
	 */
-	(*ctx_out)->ring = &dev_priv->ring[RCS];
+	ctx->ring = &dev_priv->ring[RCS];
 
 	/* Default context will never have a file_priv */
 	if (file_priv == NULL)
-		return 0;
+		return ctx;
 
-	(*ctx_out)->file_priv = file_priv;
+	ctx->file_priv = file_priv;
 
 again:
 	if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
@@ -175,21 +174,21 @@ create_hw_context(struct drm_device *dev,
 		goto err_out;
 	}
 
-	ret = idr_get_new_above(&file_priv->context_idr, *ctx_out,
+	ret = idr_get_new_above(&file_priv->context_idr, ctx,
 				DEFAULT_CONTEXT_ID + 1, &id);
 	if (ret == 0)
-		(*ctx_out)->id = id;
+		ctx->id = id;
 
 	if (ret == -EAGAIN)
 		goto again;
 	else if (ret)
 		goto err_out;
 
-	return 0;
+	return ctx;
 
 err_out:
-	do_destroy(*ctx_out);
-	return ret;
+	do_destroy(ctx);
+	return ERR_PTR(ret);
 }
 
 static inline bool is_default_context(struct i915_hw_context *ctx)
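Note: create_hw_context() now follows the kernel's ERR_PTR convention instead of an integer return plus out-parameter, which is what makes the PTR_RET() one-liner in the ioctl below possible. A minimal sketch of how a caller consumes such a return value:

    /* Sketch: ERR_PTR()/IS_ERR()/PTR_ERR() encode a negative errno inside
     * an invalid pointer value, so a single return carries both the object
     * and the failure reason. */
    struct i915_hw_context *ctx;

    ctx = create_hw_context(dev, file_priv);
    if (IS_ERR(ctx))
            return PTR_ERR(ctx);    /* unpack the errno, e.g. -ENOMEM */

    /* ctx is a valid pointer from here on */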
@@ -209,10 +208,9 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 
 	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	ret = create_hw_context(dev_priv->dev, NULL,
-				&dev_priv->ring[RCS].default_context);
-	if (ret)
-		return ret;
+	ctx = create_hw_context(dev_priv->dev, NULL);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
 
 	/* We may need to do things with the shrinker which require us to
 	 * immediately switch back to the default context. This can cause a
@@ -220,7 +218,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 	 * may not be available. To avoid this we always pin the
 	 * default context.
 	 */
-	ctx = dev_priv->ring[RCS].default_context;
+	dev_priv->ring[RCS].default_context = ctx;
 	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
 	if (ret) {
 		do_destroy(ctx);
@@ -496,13 +494,13 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	ret = create_hw_context(dev, file_priv, &ctx);
+	ctx = create_hw_context(dev, file_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	args->ctx_id = ctx->id;
 	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
-	return ret;
+	return PTR_RET(ctx);
 }
 
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
drivers/gpu/drm/i915/i915_reg.h
@@ -1458,6 +1458,10 @@
 #define DDRMPLL1		0X12c20
 #define PEG_BAND_GAP_DATA	0x14d68
 
+#define GEN6_GT_THREAD_STATUS_REG 0x13805c
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
+
 #define GEN6_GT_PERF_STATUS	0x145948
 #define GEN6_RP_STATE_LIMITS	0x145994
 #define GEN6_RP_STATE_CAP	0x145998
@@ -2975,13 +2979,14 @@
 #define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
 #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
 #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+#define DSPLINOFF(plane) DSPADDR(plane)
 
 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK	(0xfffff000)
 #define I915_LO_DISPBASE(val)	(val & ~DISP_BASEADDR_MASK)
 #define I915_HI_DISPBASE(val)	(val & DISP_BASEADDR_MASK)
 #define I915_MODIFY_DISPBASE(reg, gfx_addr) \
-	(I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
+	(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
 
 /* VBIOS flags */
 #define SWF00			0x71410
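Note: the I915_MODIFY_DISPBASE change is the usual macro-hygiene fix, relevant now that callers pass composite address expressions. A sketch of why the parentheses matter (the argument shown is made up):

    /* '|' binds tighter than '?:', so with the old unparenthesized body an
     * argument like 'flip ? new_base : old_base' expanded to
     *
     *     flip ? new_base : (old_base | I915_LO_DISPBASE(I915_READ(reg)))
     *
     * quietly dropping the preserved low bits from one arm. With (gfx_addr)
     * in the body the whole conditional is evaluated first:
     *
     *     (flip ? new_base : old_base) | I915_LO_DISPBASE(I915_READ(reg))
     */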
@@ -3849,6 +3854,9 @@
 #define _FDI_RXA_TUSIZE2	0xf0038
 #define _FDI_RXB_TUSIZE1	0xf1030
 #define _FDI_RXB_TUSIZE2	0xf1038
+#define  FDI_RX_TP1_TO_TP2_48	(2<<20)
+#define  FDI_RX_TP1_TO_TP2_64	(3<<20)
+#define  FDI_RX_FDI_DELAY_90	(0x90<<0)
 #define FDI_RX_MISC(pipe)	_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 #define FDI_RX_TUSIZE1(pipe)	_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
 #define FDI_RX_TUSIZE2(pipe)	_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -4067,6 +4075,7 @@
 #define FORCEWAKE			0xA18C
 #define FORCEWAKE_VLV			0x1300b0
 #define FORCEWAKE_ACK_VLV		0x1300b4
+#define FORCEWAKE_ACK_HSW		0x130044
 #define FORCEWAKE_ACK			0x130090
 #define FORCEWAKE_MT			0xa188 /* multi-threaded */
 #define FORCEWAKE_MT_ACK		0x130040
@@ -4127,6 +4136,7 @@
 #define   GEN6_RP_UP_IDLE_MIN			(0x1<<3)
 #define   GEN6_RP_UP_BUSY_AVG			(0x2<<3)
 #define   GEN6_RP_UP_BUSY_CONT			(0x4<<3)
+#define   GEN7_RP_DOWN_IDLE_AVG			(0x2<<0)
 #define   GEN6_RP_DOWN_IDLE_CONT		(0x1<<0)
 #define GEN6_RP_UP_THRESHOLD			0xA02C
 #define GEN6_RP_DOWN_THRESHOLD			0xA030
@@ -4277,7 +4287,7 @@
 					   PIPE_DDI_FUNC_CTL_B)
 #define  PIPE_DDI_FUNC_ENABLE		(1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define  PIPE_DDI_PORT_MASK		(0xf<<28)
+#define  PIPE_DDI_PORT_MASK		(7<<28)
 #define  PIPE_DDI_SELECT_PORT(x)	((x)<<28)
 #define  PIPE_DDI_MODE_SELECT_HDMI	(0<<24)
 #define  PIPE_DDI_MODE_SELECT_DVI	(1<<24)
@@ -4435,7 +4445,7 @@
 #define PIPE_WM_LINETIME_B		0x45274
 #define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
 					   PIPE_WM_LINETIME_A, \
-					   PIPE_WM_LINETIME_A)
+					   PIPE_WM_LINETIME_B)
 #define   PIPE_WM_LINETIME_MASK		(0x1ff)
 #define   PIPE_WM_LINETIME_TIME(x)	((x))
 #define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
@@ -4447,4 +4457,9 @@
 #define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
 #define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
 
+#define WM_DBG				0x45280
+#define  WM_DBG_DISALLOW_MULTIPLE_LP	(1<<0)
+#define  WM_DBG_DISALLOW_MAXFIFO	(1<<1)
+#define  WM_DBG_DISALLOW_SPRITE		(1<<2)
+
 #endif /* _I915_REG_H_ */
drivers/gpu/drm/i915/i915_suspend.c
@@ -825,10 +825,7 @@ int i915_save_state(struct drm_device *dev)
 		dev_priv->saveIMR = I915_READ(IMR);
 	}
 
-	if (IS_IRONLAKE_M(dev))
-		ironlake_disable_drps(dev);
-	if (INTEL_INFO(dev)->gen >= 6)
-		gen6_disable_rps(dev);
+	intel_disable_gt_powersave(dev);
 
 	/* Cache mode state */
 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
drivers/gpu/drm/i915/intel_ddi.c
@@ -170,6 +170,15 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 
 	udelay(600);
 
+	/* We need to program FDI_RX_MISC with the default TP1 to TP2
+	 * values before enabling the receiver, and configure the delay
+	 * for the FDI timing generator to 90h. Luckily, all the other
+	 * bits are supposed to be zeroed, so we can write those values
+	 * directly.
+	 */
+	I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
+				      FDI_RX_FDI_DELAY_90);
+
 	/* Enable CPU FDI Receiver with auto-training */
 	reg = FDI_RX_CTL(pipe);
 	I915_WRITE(reg,
drivers/gpu/drm/i915/intel_display.c
@@ -627,11 +627,10 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 
-	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
-		if (encoder->base.crtc == crtc && encoder->type == type)
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->type == type)
 			return true;
 
 	return false;
@@ -1973,6 +1972,22 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin(obj);
 }
 
+/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
+ * is assumed to be a power-of-two. */
+static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
+							unsigned int bpp,
+							unsigned int pitch)
+{
+	int tile_rows, tiles;
+
+	tile_rows = *y / 8;
+	*y %= 8;
+	tiles = *x / (512/bpp);
+	*x %= 512/bpp;
+
+	return tile_rows * pitch * 8 + tiles * 4096;
+}
+
 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			     int x, int y)
 {
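Note: an X tile is 512 bytes wide by 8 rows, i.e. 4 KiB, which is where the constants in gen4_compute_dspaddr_offset_xtiled() come from. A worked example with made-up values:

    /* 32bpp -> bpp = 4 bytes, pitch = 8192 bytes, x = 200 pixels, y = 100 lines
     *
     *   tile_rows = 100 / 8       = 12;  y becomes 100 % 8   = 4
     *   tiles     = 200 / (512/4) = 1;   x becomes 200 % 128 = 72
     *
     *   offset = 12 * 8192 * 8 + 1 * 4096 = 786432 + 4096 = 790528
     *
     * i.e. the surface base address moves by a whole number of 4 KiB tiles,
     * and the remaining (72, 4) intra-tile offset goes into DSPTILEOFF. */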
@@ -1982,7 +1997,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
 	int plane = intel_crtc->plane;
-	unsigned long Start, Offset;
+	unsigned long linear_offset;
 	u32 dspcntr;
 	u32 reg;
 
@@ -2029,18 +2044,28 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	I915_WRITE(reg, dspcntr);
 
-	Start = obj->gtt_offset;
-	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 
-	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-		      Start, Offset, x, y, fb->pitches[0]);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		intel_crtc->dspaddr_offset =
+			gen4_compute_dspaddr_offset_xtiled(&x, &y,
+							   fb->bits_per_pixel / 8,
+							   fb->pitches[0]);
+		linear_offset -= intel_crtc->dspaddr_offset;
+	} else {
+		intel_crtc->dspaddr_offset = linear_offset;
+	}
+
+	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
+		I915_MODIFY_DISPBASE(DSPSURF(plane),
+				     obj->gtt_offset + intel_crtc->dspaddr_offset);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
-		I915_WRITE(DSPADDR(plane), Offset);
+		I915_WRITE(DSPLINOFF(plane), linear_offset);
 	} else
-		I915_WRITE(DSPADDR(plane), Start + Offset);
+		I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
 	POSTING_READ(reg);
 
 	return 0;
@@ -2055,7 +2080,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
 	int plane = intel_crtc->plane;
-	unsigned long Start, Offset;
+	unsigned long linear_offset;
 	u32 dspcntr;
 	u32 reg;
 
@@ -2110,15 +2135,20 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 
 	I915_WRITE(reg, dspcntr);
 
-	Start = obj->gtt_offset;
-	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+	intel_crtc->dspaddr_offset =
+		gen4_compute_dspaddr_offset_xtiled(&x, &y,
+						   fb->bits_per_pixel / 8,
+						   fb->pitches[0]);
+	linear_offset -= intel_crtc->dspaddr_offset;
 
-	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-		      Start, Offset, x, y, fb->pitches[0]);
+	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
-	I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
+	I915_MODIFY_DISPBASE(DSPSURF(plane),
+			     obj->gtt_offset + intel_crtc->dspaddr_offset);
 	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
-	I915_WRITE(DSPADDR(plane), Offset);
+	I915_WRITE(DSPLINOFF(plane), linear_offset);
 	POSTING_READ(reg);
 
 	return 0;
@@ -2805,16 +2835,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 
 	/*
 	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
 	 * must be driven by its own crtc; no sharing is possible.
 	 */
-	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
-		if (encoder->base.crtc != crtc)
-			continue;
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
 
 		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
 		 * CPU handles all others */
@@ -3703,16 +3730,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_encoder *encoder;
 	struct drm_connector *connector;
+	struct intel_encoder *intel_encoder;
 	unsigned int display_bpc = UINT_MAX, bpc;
 
 	/* Walk the encoders & connectors on this crtc, get min bpc */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
-		if (encoder->crtc != crtc)
-			continue;
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 
 		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
 			unsigned int lvds_bpc;
@@ -3744,7 +3767,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 		/* Not one of the known troublemakers, check the EDID */
 		list_for_each_entry(connector, &dev->mode_config.connector_list,
 				    head) {
-			if (connector->encoder != encoder)
+			if (connector->encoder != &intel_encoder->base)
 				continue;
 
 			/* Don't use an invalid EDID bpc value */
@@ -4213,15 +4236,11 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	u32 dspcntr, pipeconf, vsyncshift;
 	bool ok, has_reduced_clock = false, is_sdvo = false;
 	bool is_lvds = false, is_tv = false, is_dp = false;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	int ret;
 
-	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
-		if (encoder->base.crtc != crtc)
-			continue;
-
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		switch (encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -4524,15 +4543,11 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *encoder;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *edp_encoder = NULL;
 	int num_connectors = 0;
 	bool is_lvds = false;
 
-	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
-		if (encoder->base.crtc != crtc)
-			continue;
-
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		switch (encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -4569,7 +4584,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
 	bool ok, has_reduced_clock = false, is_sdvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder, *edp_encoder = NULL;
 	const intel_limit_t *limit;
 	int ret;
@@ -4580,10 +4594,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	bool dither;
 	bool is_cpu_edp = false, is_pch_edp = false;
 
-	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
-		if (encoder->base.crtc != crtc)
-			continue;
-
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		switch (encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -6194,7 +6205,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	unsigned long offset;
 	u32 flip_mask;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
@@ -6203,9 +6213,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	/* Offset into the new buffer for cases of shared fbs between CRTCs */
-	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
-
 	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		goto err_unpin;
@@ -6222,7 +6229,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, obj->gtt_offset + offset);
+	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
 	intel_ring_advance(ring);
 	return 0;
@@ -6240,7 +6247,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	unsigned long offset;
 	u32 flip_mask;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
@@ -6249,9 +6255,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	/* Offset into the new buffer for cases of shared fbs between CRTCs */
-	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
-
 	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		goto err_unpin;
@@ -6265,7 +6268,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, obj->gtt_offset + offset);
+	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, MI_NOOP);
 
 	intel_ring_advance(ring);
@@ -6303,7 +6306,9 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
+	intel_ring_emit(ring,
+			(obj->gtt_offset + intel_crtc->dspaddr_offset) |
+			obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -6343,7 +6348,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(ring, obj->gtt_offset);
+	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -6406,7 +6411,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(ring, (obj->gtt_offset));
+	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, (MI_NOOP));
 	intel_ring_advance(ring);
 	return 0;
@@ -6438,6 +6443,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	int ret;
 
+	/* Can't change pixel format via MI display flips. */
+	if (fb->pixel_format != crtc->fb->pixel_format)
+		return -EINVAL;
+
+	/*
+	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
+	 * Note that pitch changes could also affect these register.
+	 */
+	if (INTEL_INFO(dev)->gen > 3 &&
+	    (fb->offsets[0] != crtc->fb->offsets[0] ||
+	     fb->pitches[0] != crtc->fb->pitches[0]))
+		return -EINVAL;
+
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
 		return -ENOMEM;
@@ -6859,7 +6877,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	drm_helper_disable_unused_functions(dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 		ironlake_init_pch_refclk(dev);
 }
 
@@ -7013,9 +7031,6 @@ static void intel_init_display(struct drm_device *dev)
 			dev_priv->display.write_eld = ironlake_write_eld;
 		} else
 			dev_priv->display.update_wm = NULL;
-	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.force_wake_get = vlv_force_wake_get;
-		dev_priv->display.force_wake_put = vlv_force_wake_put;
 	} else if (IS_G4X(dev)) {
 		dev_priv->display.write_eld = g4x_write_eld;
 	}
@@ -7172,20 +7187,13 @@ static void ivb_pch_pwm_override(struct drm_device *dev)
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
+	intel_prepare_ddi(dev);
+
 	intel_init_clock_gating(dev);
 
-	if (IS_IRONLAKE_M(dev)) {
-		ironlake_enable_drps(dev);
-		ironlake_enable_rc6(dev);
-		intel_init_emon(dev);
-	}
-
-	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
-		gen6_enable_rps(dev_priv);
-		gen6_update_ring_freq(dev_priv);
-	}
+	mutex_lock(&dev->struct_mutex);
+	intel_enable_gt_powersave(dev);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (IS_IVYBRIDGE(dev))
 		ivb_pch_pwm_override(dev);
@@ -7210,8 +7218,6 @@ void intel_modeset_init(struct drm_device *dev)
 
 	intel_init_pm(dev);
 
-	intel_prepare_ddi(dev);
-
 	intel_init_display(dev);
 
 	if (IS_GEN2(dev)) {
@@ -7277,13 +7283,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
 	intel_disable_fbc(dev);
 
-	if (IS_IRONLAKE_M(dev))
-		ironlake_disable_drps(dev);
-	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
-		gen6_disable_rps(dev);
+	intel_disable_gt_powersave(dev);
 
-	if (IS_IRONLAKE_M(dev))
-		ironlake_disable_rc6(dev);
+	ironlake_teardown_rc6(dev);
 
 	if (IS_VALLEYVIEW(dev))
 		vlv_init_dpio(dev);
drivers/gpu/drm/i915/intel_dp.c
@@ -733,8 +733,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
 	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
 
-	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-		for (clock = 0; clock <= max_clock; clock++) {
+	for (clock = 0; clock <= max_clock; clock++) {
+		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
 			if (mode_rate <= link_avail) {
@@ -793,8 +793,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_encoder *encoder;
+	struct intel_encoder *encoder;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int lane_count = 4;
@@ -804,13 +803,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	/*
 	 * Find the lane count in the intel_encoder private
 	 */
-	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-		struct intel_dp *intel_dp;
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-		if (encoder->crtc != crtc)
-			continue;
-
-		intel_dp = enc_to_intel_dp(encoder);
 		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
 		    intel_dp->base.type == INTEL_OUTPUT_EDP)
 		{
@@ -2404,16 +2399,11 @@ int
 intel_trans_dp_port_sel(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_encoder *encoder;
+	struct intel_encoder *encoder;
 
-	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-		struct intel_dp *intel_dp;
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-		if (encoder->crtc != crtc)
-			continue;
-
-		intel_dp = enc_to_intel_dp(encoder);
 		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
 		    intel_dp->base.type == INTEL_OUTPUT_EDP)
 			return intel_dp->output_reg;
drivers/gpu/drm/i915/intel_drv.h
@@ -177,6 +177,11 @@ struct intel_crtc {
 	struct intel_unpin_work *unpin_work;
 	int fdi_lanes;
 
+	/* Display surface base address adjustement for pageflips. Note that on
+	 * gen4+ this only adjusts up to a tile, offsets within a tile are
+	 * handled in the hw itself (with the TILEOFF register). */
+	unsigned long dspaddr_offset;
+
 	struct drm_i915_gem_object *cursor_bo;
 	uint32_t cursor_addr;
 	int16_t cursor_x, cursor_y;
@@ -425,9 +430,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
 extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void ironlake_disable_rc6(struct drm_device *dev);
-extern void ironlake_enable_drps(struct drm_device *dev);
-extern void ironlake_disable_drps(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_i915_gem_object *obj,
@@ -494,10 +496,10 @@ extern void intel_update_fbc(struct drm_device *dev);
 extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 extern void intel_gpu_ips_teardown(void);
 
-extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-extern void gen6_disable_rps(struct drm_device *dev);
-extern void intel_init_emon(struct drm_device *dev);
+extern void intel_enable_gt_powersave(struct drm_device *dev);
+extern void intel_disable_gt_powersave(struct drm_device *dev);
+extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
+extern void ironlake_teardown_rc6(struct drm_device *dev);
 
 extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
 extern void intel_ddi_mode_set(struct drm_encoder *encoder,
drivers/gpu/drm/i915/intel_fb.c
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
-	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct drm_i915_gem_object *obj;
 	struct device *device = &dev->pdev->dev;
 	int size, ret;
drivers/gpu/drm/i915/intel_lvds.c
@@ -236,7 +236,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-	struct drm_encoder *tmp_encoder;
+	struct intel_encoder *tmp_encoder;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
 	int pipe;
 
@@ -247,8 +247,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	}
 
 	/* Should never happen!! */
-	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
-		if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
+	for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) {
+		if (&tmp_encoder->base != encoder) {
 			DRM_ERROR("Can't enable LVDS and another "
 				  "encoder on the same pipe\n");
 			return false;
@ -387,8 +387,6 @@ void intel_update_fbc(struct drm_device *dev)
|
|||
struct drm_i915_gem_object *obj;
|
||||
int enable_fbc;
|
||||
|
||||
DRM_DEBUG_KMS("\n");
|
||||
|
||||
if (!i915_powersave)
|
||||
return;
|
||||
|
||||
|
@ -2184,7 +2182,7 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
|
|||
return true;
|
||||
}
|
||||
|
||||
void ironlake_enable_drps(struct drm_device *dev)
|
||||
static void ironlake_enable_drps(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 rgvmodectl = I915_READ(MEMMODECTL);
|
||||
|
@ -2248,7 +2246,7 @@ void ironlake_enable_drps(struct drm_device *dev)
|
|||
getrawmonotonic(&dev_priv->last_time2);
|
||||
}
|
||||
|
||||
void ironlake_disable_drps(struct drm_device *dev)
|
||||
static void ironlake_disable_drps(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u16 rgvswctl = I915_READ16(MEMSWCTL);
|
||||
|
@ -2301,10 +2299,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
|
|||
dev_priv->cur_delay = val;
|
||||
}
|
||||
|
||||
void gen6_disable_rps(struct drm_device *dev)
|
||||
static void gen6_disable_rps(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
I915_WRITE(GEN6_RC_CONTROL, 0);
|
||||
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
|
||||
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
|
||||
I915_WRITE(GEN6_PMIER, 0);
|
||||
|
@ -2334,9 +2333,11 @@ int intel_enable_rc6(const struct drm_device *dev)
|
|||
if (INTEL_INFO(dev)->gen == 5)
|
||||
return 0;
|
||||
|
||||
/* Sorry Haswell, no RC6 for you for now. */
|
||||
/* On Haswell, only RC6 is available. So let's enable it by default to
|
||||
* provide better testing and coverage since the beginning.
|
||||
*/
|
||||
if (IS_HASWELL(dev))
|
||||
return 0;
|
||||
return INTEL_RC6_ENABLE;
|
||||
|
||||
/*
|
||||
* Disable rc6 on Sandybridge
|
||||
|
@ -2349,8 +2350,9 @@ int intel_enable_rc6(const struct drm_device *dev)
|
|||
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
|
||||
}
|
||||
|
||||
void gen6_enable_rps(struct drm_i915_private *dev_priv)
|
||||
static void gen6_enable_rps(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
u32 rp_state_cap;
|
||||
u32 gt_perf_status;
|
||||
|
@ -2359,6 +2361,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
|
|||
int rc6_mode;
|
||||
int i;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
||||
/* Here begins a magic sequence of register writes to enable
|
||||
* auto-downclocking.
|
||||
*
|
||||
|
@ -2366,7 +2370,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
|
|||
* userspace...
|
||||
*/
|
||||
I915_WRITE(GEN6_RC_STATE, 0);
|
||||
mutex_lock(&dev_priv->dev->struct_mutex);
|
||||
|
||||
/* Clear the DBG now so we don't confuse earlier errors */
|
||||
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
|
||||
|
@@ -2402,20 +2405,24 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
 	/* Check if we are enabling RC6 */
 	rc6_mode = intel_enable_rc6(dev_priv->dev);
 	if (rc6_mode & INTEL_RC6_ENABLE)
 		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
 
-	if (rc6_mode & INTEL_RC6p_ENABLE)
-		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+	/* We don't use those on Haswell */
+	if (!IS_HASWELL(dev)) {
+		if (rc6_mode & INTEL_RC6p_ENABLE)
+			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
 
-	if (rc6_mode & INTEL_RC6pp_ENABLE)
-		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+		if (rc6_mode & INTEL_RC6pp_ENABLE)
+			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+	}
 
 	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-		 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
-		 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
-		 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+			(rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+			(rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+			(rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
 
 	I915_WRITE(GEN6_RC_CONTROL,
 		   rc6_mask |
@@ -2433,10 +2440,19 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
 		   dev_priv->max_delay << 24 |
 		   dev_priv->min_delay << 16);
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
-	I915_WRITE(GEN6_RP_UP_EI, 100000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+
+	if (IS_HASWELL(dev)) {
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+		I915_WRITE(GEN6_RP_UP_EI, 66000);
+		I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+	} else {
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
+		I915_WRITE(GEN6_RP_UP_EI, 100000);
+		I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+	}
+
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
@@ -2444,7 +2460,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 		   GEN6_RP_MEDIA_IS_GFX |
 		   GEN6_RP_ENABLE |
 		   GEN6_RP_UP_BUSY_AVG |
-		   GEN6_RP_DOWN_IDLE_CONT);
+		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
 	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 		     500))
@@ -2491,15 +2507,17 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
 	gen6_gt_force_wake_put(dev_priv);
-	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+static void gen6_update_ring_freq(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int min_freq = 15;
 	int gpu_freq, ia_freq, max_ia_freq;
 	int scaling_factor = 180;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	max_ia_freq = cpufreq_quick_get_max(0);
 	/*
 	 * Default to measured freq if none found, PCU will ensure we don't go
@@ -2511,8 +2529,6 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 	/* Convert from kHz to MHz */
 	max_ia_freq /= 1000;
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
-
 	/*
 	 * For each potential GPU frequency, load a ring frequency we'd like
 	 * to use for memory access. We do this by specifying the IA frequency
@@ -2543,11 +2559,9 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 			continue;
 		}
 	}
-
-	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-static void ironlake_teardown_rc6(struct drm_device *dev)
+void ironlake_teardown_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -2564,7 +2578,7 @@ static void ironlake_teardown_rc6(struct drm_device *dev)
 	}
 }
 
-void ironlake_disable_rc6(struct drm_device *dev)
+static void ironlake_disable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -2580,8 +2594,6 @@ void ironlake_disable_rc6(struct drm_device *dev)
 		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 		POSTING_READ(RSTDBYCTL);
 	}
-
-	ironlake_teardown_rc6(dev);
 }
 
 static int ironlake_setup_rc6(struct drm_device *dev)
@@ -2603,7 +2615,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
 	return 0;
 }
 
-void ironlake_enable_rc6(struct drm_device *dev)
+static void ironlake_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
@@ -2615,12 +2627,11 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	if (!intel_enable_rc6(dev))
 		return;
 
-	mutex_lock(&dev->struct_mutex);
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	ret = ironlake_setup_rc6(dev);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
+	if (ret)
 		return;
-	}
 
 	/*
 	 * GPU can automatically power down the render unit if given a page
@@ -2629,7 +2640,6 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	ret = intel_ring_begin(ring, 6);
 	if (ret) {
 		ironlake_teardown_rc6(dev);
-		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
 
@@ -2654,13 +2664,11 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	if (ret) {
 		DRM_ERROR("failed to enable ironlake power power savings\n");
 		ironlake_teardown_rc6(dev);
-		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
 
 	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -3156,8 +3164,7 @@ void intel_gpu_ips_teardown(void)
 	i915_mch_dev = NULL;
 	spin_unlock(&mchdev_lock);
 }
-
-void intel_init_emon(struct drm_device *dev)
+static void intel_init_emon(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 lcfuse;
@@ -3228,6 +3235,28 @@ void intel_init_emon(struct drm_device *dev)
 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+void intel_disable_gt_powersave(struct drm_device *dev)
+{
+	if (IS_IRONLAKE_M(dev)) {
+		ironlake_disable_drps(dev);
+		ironlake_disable_rc6(dev);
+	} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+		gen6_disable_rps(dev);
+	}
+}
+
+void intel_enable_gt_powersave(struct drm_device *dev)
+{
+	if (IS_IRONLAKE_M(dev)) {
+		ironlake_enable_drps(dev);
+		ironlake_enable_rc6(dev);
+		intel_init_emon(dev);
+	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+		gen6_enable_rps(dev);
+		gen6_update_ring_freq(dev);
+	}
+}
+
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3386,6 +3415,68 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
 }
 
+static void haswell_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+	 * This implements the WaDisableRCZUnitClockGating workaround.
+	 */
+	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+	I915_WRITE(IVB_CHICKEN3,
+		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+	I915_WRITE(GEN7_L3CNTLREG1,
+		   GEN7_WA_FOR_GEN7_L3_CONTROL);
+	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+		   GEN7_WA_L3_CHICKEN_MODE);
+
+	/* This is required by WaCatErrorRejectionIssue */
+	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+	for_each_pipe(pipe) {
+		I915_WRITE(DSPCNTR(pipe),
+			   I915_READ(DSPCNTR(pipe)) |
+			   DISPPLANE_TRICKLE_FEED_DISABLE);
+		intel_flush_display_plane(dev_priv, pipe);
+	}
+
+	gen7_setup_fixed_func_scheduler(dev_priv);
+
+	/* WaDisable4x2SubspanOptimization */
+	I915_WRITE(CACHE_MODE_1,
+		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+	/* XXX: This is a workaround for early silicon revisions and should be
+	 * removed later.
+	 */
+	I915_WRITE(WM_DBG,
+		   I915_READ(WM_DBG) |
+		   WM_DBG_DISALLOW_MULTIPLE_LP |
+		   WM_DBG_DISALLOW_SPRITE |
+		   WM_DBG_DISALLOW_MAXFIFO);
+
+}
+
 static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3748,34 +3839,6 @@ void intel_init_pm(struct drm_device *dev)
 
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
-		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
-		/* IVB configs may use multi-threaded forcewake */
-		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-			u32 ecobus;
-
-			/* A small trick here - if the bios hasn't configured MT forcewake,
-			 * and if the device is in RC6, then force_wake_mt_get will not wake
-			 * the device and the ECOBUS read will return zero. Which will be
-			 * (correctly) interpreted by the test below as MT forcewake being
-			 * disabled.
-			 */
-			mutex_lock(&dev->struct_mutex);
-			__gen6_gt_force_wake_mt_get(dev_priv);
-			ecobus = I915_READ_NOTRACE(ECOBUS);
-			__gen6_gt_force_wake_mt_put(dev_priv);
-			mutex_unlock(&dev->struct_mutex);
-
-			if (ecobus & FORCEWAKE_MT_ENABLE) {
-				DRM_DEBUG_KMS("Using MT version of forcewake\n");
-				dev_priv->display.force_wake_get =
-					__gen6_gt_force_wake_mt_get;
-				dev_priv->display.force_wake_put =
-					__gen6_gt_force_wake_mt_put;
-			}
-		}
-
 		if (HAS_PCH_IBX(dev))
 			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
 		else if (HAS_PCH_CPT(dev))
@@ -3823,7 +3886,7 @@ void intel_init_pm(struct drm_device *dev)
 				 "Disable CxSR\n");
 			dev_priv->display.update_wm = NULL;
 		}
-		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
 		dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 	} else
 		dev_priv->display.update_wm = NULL;
@@ -3831,8 +3894,6 @@ void intel_init_pm(struct drm_device *dev)
 		dev_priv->display.update_wm = valleyview_update_wm;
 		dev_priv->display.init_clock_gating =
 			valleyview_init_clock_gating;
-		dev_priv->display.force_wake_get = vlv_force_wake_get;
-		dev_priv->display.force_wake_put = vlv_force_wake_put;
 	} else if (IS_PINEVIEW(dev)) {
 		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
 					    dev_priv->is_ddr3,
@@ -3885,3 +3946,194 @@ void intel_init_pm(struct drm_device *dev)
 	intel_init_power_wells(dev);
 }
 
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+	u32 gt_thread_status_mask;
+
+	if (IS_HASWELL(dev_priv->dev))
+		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+	else
+		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+	/* w/a for a sporadic read returning 0 by waiting for the GT
+	 * thread to wake up.
+	 */
+	if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+		DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	u32 forcewake_ack;
+
+	if (IS_HASWELL(dev_priv->dev))
+		forcewake_ack = FORCEWAKE_ACK_HSW;
+	else
+		forcewake_ack = FORCEWAKE_ACK;
+
+	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
+		DRM_ERROR("Force wake wait timed out\n");
+
+	I915_WRITE_NOTRACE(FORCEWAKE, 1);
+
+	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
+		DRM_ERROR("Force wake wait timed out\n");
+
+	__gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+	u32 forcewake_ack;
+
+	if (IS_HASWELL(dev_priv->dev))
+		forcewake_ack = FORCEWAKE_ACK_HSW;
+	else
+		forcewake_ack = FORCEWAKE_MT_ACK;
+
+	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
+		DRM_ERROR("Force wake wait timed out\n");
+
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
+
+	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
+		DRM_ERROR("Force wake wait timed out\n");
+
+	__gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+	if (dev_priv->forcewake_count++ == 0)
+		dev_priv->gt.force_wake_get(dev_priv);
+	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
+void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+	u32 gtfifodbg;
+	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+	     "MMIO read or write has been dropped %x\n", gtfifodbg))
+		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+	/* The below doubles as a POSTING_READ */
+	gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
+	/* The below doubles as a POSTING_READ */
+	gen6_gt_check_fifodbg(dev_priv);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+	if (--dev_priv->forcewake_count == 0)
+		dev_priv->gt.force_wake_put(dev_priv);
+	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int ret = 0;
+
+	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+		int loop = 500;
+		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+			udelay(10);
+			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		}
+		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+			++ret;
+		dev_priv->gt_fifo_count = fifo;
+	}
+	dev_priv->gt_fifo_count--;
+
+	return ret;
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	/* Already awake? */
+	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
+		return;
+
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
+	POSTING_READ(FORCEWAKE_VLV);
+
+	if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500))
+		DRM_ERROR("Force wake wait timed out\n");
+
+	__gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
+	/* FIXME: confirm VLV behavior with Punit folks */
+	POSTING_READ(FORCEWAKE_VLV);
+}
+
+void intel_gt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	spin_lock_init(&dev_priv->gt_lock);
+
+	if (IS_VALLEYVIEW(dev)) {
+		dev_priv->gt.force_wake_get = vlv_force_wake_get;
+		dev_priv->gt.force_wake_put = vlv_force_wake_put;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+
+		/* IVB configs may use multi-threaded forcewake */
+		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+			u32 ecobus;
+
+			/* A small trick here - if the bios hasn't configured
+			 * MT forcewake, and if the device is in RC6, then
+			 * force_wake_mt_get will not wake the device and the
+			 * ECOBUS read will return zero. Which will be
+			 * (correctly) interpreted by the test below as MT
+			 * forcewake being disabled.
+			 */
+			mutex_lock(&dev->struct_mutex);
+			__gen6_gt_force_wake_mt_get(dev_priv);
+			ecobus = I915_READ_NOTRACE(ECOBUS);
+			__gen6_gt_force_wake_mt_put(dev_priv);
+			mutex_unlock(&dev->struct_mutex);
+
+			if (ecobus & FORCEWAKE_MT_ENABLE) {
+				DRM_DEBUG_KMS("Using MT version of forcewake\n");
+				dev_priv->gt.force_wake_get =
+					__gen6_gt_force_wake_mt_get;
+				dev_priv->gt.force_wake_put =
+					__gen6_gt_force_wake_mt_put;
+			}
+		}
+	}
+}
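
The comment above gen6_gt_force_wake_get() spells out the usage contract: forcewake is normally taken implicitly by each register read, but a sequence that must keep the GT awake brackets itself with an explicit get/put pair. A minimal sketch of that pattern (the wrapper function is hypothetical; the two registers are ones gen6_enable_rps() also reads):

        /* Hypothetical caller: hold one forcewake reference across several
         * dependent MMIO reads instead of waking the GT for each one.
         */
        static void example_read_rp_state(struct drm_i915_private *dev_priv,
                                          u32 *cap, u32 *status)
        {
                gen6_gt_force_wake_get(dev_priv);       /* count 0 -> 1 wakes the GT */

                *cap = I915_READ(GEN6_RP_STATE_CAP);
                *status = I915_READ(GEN6_GT_PERF_STATUS);

                gen6_gt_force_wake_put(dev_priv);       /* count 1 -> 0 lets it sleep */
        }
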
@@ -219,7 +219,9 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	intel_emit_post_sync_nonzero_flush(ring);
+	ret = intel_emit_post_sync_nonzero_flush(ring);
+	if (ret)
+		return ret;
 
 	/* Just flush everything. Experiments have shown that reducing the
 	 * number of bits based on the write domains has little performance
@@ -233,6 +235,12 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
 	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
 	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+	/*
+	 * Ensure that any following seqno writes only happen when the render
+	 * cache is indeed flushed (but only if the caller actually wants that).
+	 */
+	if (flush_domains)
+		flags |= PIPE_CONTROL_CS_STALL;
 
 	ret = intel_ring_begin(ring, 6);
 	if (ret)
@@ -1109,20 +1117,9 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 
 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	bool was_interruptible;
 	int ret;
 
-	/* XXX As we have not yet audited all the paths to check that
-	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
-	 * allow us to be interruptible by a signal.
-	 */
-	was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
-
 	ret = i915_wait_seqno(ring, seqno);
-
-	dev_priv->mm.interruptible = was_interruptible;
 	if (!ret)
 		i915_gem_retire_requests_ring(ring);
 
@@ -1220,8 +1217,10 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 		}
 
 		msleep(1);
-		if (atomic_read(&dev_priv->mm.wedged))
-			return -EAGAIN;
+
+		ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+		if (ret)
+			return ret;
 	} while (!time_after(jiffies, end));
 	trace_i915_ring_wait_end(ring);
 	return -EBUSY;
@@ -1230,12 +1229,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	int n = 4*num_dwords;
 	int ret;
 
-	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
-		return -EIO;
+	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+	if (ret)
+		return ret;
 
 	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
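
Both ring-wait hunks above funnel wedged-GPU handling through i915_gem_check_wedge(), which separates "reset still in progress" from "terminally wedged". Its body is not shown in this diff; the sketch below captures the semantics being relied on, assuming the error_completion scheme i915 used at the time (the function name and internals here are recalled/assumed, not quoted from the diff):

        /* Sketch: 0 when not wedged; -EAGAIN while a reset is in progress
         * (so interruptible waiters can back off and retry); -EIO when the
         * GPU is dead or the caller cannot cope with -EAGAIN.
         */
        static int example_check_wedge(struct drm_i915_private *dev_priv,
                                       bool interruptible)
        {
                if (atomic_read(&dev_priv->mm.wedged)) {
                        /* The reset handler completes error_completion when
                         * recovery is done; still being wedged after that
                         * means the reset itself failed.
                         */
                        bool recovery_complete =
                                completion_done(&dev_priv->error_completion);

                        /* Non-interruptible sleeps can't handle -EAGAIN. */
                        if (!interruptible)
                                return -EIO;

                        if (recovery_complete)
                                return -EIO;

                        return -EAGAIN;
                }

                return 0;
        }
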
@@ -56,6 +56,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 	sprctl &= ~SPRITE_PIXFORMAT_MASK;
 	sprctl &= ~SPRITE_RGB_ORDER_RGBX;
 	sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+	sprctl &= ~SPRITE_TILED;
 
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_XBGR8888:
@@ -84,7 +85,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 		break;
 	default:
 		DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
-		sprctl |= DVS_FORMAT_RGBX888;
+		sprctl |= SPRITE_FORMAT_RGBX888;
 		pixel_size = 4;
 		break;
 	}
@@ -690,6 +691,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
 		break;
 
 	default:
+		kfree(intel_plane);
 		return -ENODEV;
 	}
 
@@ -704,4 +706,3 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
 
 	return ret;
 }
-

@@ -895,20 +895,16 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 			  struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_mode_config *drm_config = &dev->mode_config;
 	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
-	struct drm_encoder *other_encoder;
+	struct intel_encoder *other_encoder;
 
 	if (!tv_mode)
 		return false;
 
-	/* FIXME: lock encoder list */
-	list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
-		if (other_encoder != encoder &&
-		    other_encoder->crtc == encoder->crtc)
+	for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder)
+		if (&other_encoder->base != encoder)
 			return false;
-	}
 
 	adjusted_mode->clock = tv_mode->clock;
 	return true;
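
This hunk is the first use shown here of the for_each_encoder_on_crtc() helper introduced in this pull. Its definition is not part of the hunk; the sketch below is consistent with the usage above but is an assumed shape, recalled rather than quoted:

        /* Iterate all intel_encoders currently attached to @__crtc. */
        #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
                list_for_each_entry(intel_encoder, \
                                    &(dev)->mode_config.encoder_list, \
                                    base.head) \
                        if ((intel_encoder)->base.crtc == (__crtc))

Note how the iteration variable switched from struct drm_encoder * to struct intel_encoder *, which is why the comparison in the hunk is now against &other_encoder->base.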