Merge tag 'drm-intel-fixes-2013-09-06' of git://people.freedesktop.org/~danvet/drm-intel into drm-fixes

- Early stolen mem reservation from Jesse in the x86 boot code, acked by
  Ingo and hpa. This was ready much earlier, but somehow I thought it'd go
  in through the x86 trees, hence the late arrival. It prevents the pci
  resource code from planting mmio bars in the middle of stolen mem, and
  other ugliness (a condensed sketch follows this list).
- vgaarb improvements from Alex Williamson plus the fix from Ville for the
  vgacon->fbcon smooth transition "feature".
- Render-ring pageflips on ivb/hsw to avoid the stalls that ring switching
  causes when flips are queued only on the blitter (Chris).
- Deadlock fixes around our flush_workqueue usage, which crept back in -
  lockdep isn't clever enough to catch these :(
- Shrinker recursion fix from Chris - this is the thing that blew up the
  vma patches from Ben that I've taken out of 3.12.
- Fixup for the relocation refactoring. Also an igt testcase to make sure
  we don't break this again.
- Pile of smaller fixups all over, shortlog has full details.
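
For reference, the stolen-mem quirk boils down to the following condensed sketch (assembled from the arch/x86/kernel/early-quirks.c hunks below; the real code dispatches through a pci_device_id table to pick the per-generation size function, which is omitted here):

	/* Condensed sketch, not the verbatim kernel code: read the stolen
	 * base from the BDSM register (PCI config reg 0x5c), decode the
	 * stolen size from the GMCH graphics control word, and reserve the
	 * range in the e820 map so the PCI resource code can never place
	 * MMIO bars on top of it. */
	static void __init intel_graphics_stolen(int num, int slot, int func)
	{
		u32 start = intel_stolen_base(num, slot, func);
		size_t size = gen6_stolen_size(num, slot, func);

		if (size && start) {
			e820_add_region(start, size, E820_RESERVED);
			sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map),
					  &e820.nr_map);
		}
	}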

* tag 'drm-intel-fixes-2013-09-06' of git://people.freedesktop.org/~danvet/drm-intel: (29 commits)
  drm/i915: Delay disabling of VGA memory until vgacon->fbcon handoff is done
  drm/i915: try not to lose backlight CBLV precision
  drm/i915: Confine page flips to BCS on Valleyview
  drm/i915: Skip stolen region initialisation if none is reserved
  drm/i915: fix gpu hang vs. flip stall deadlocks
  drm/i915: Hold an object reference whilst we shrink it
  drm/i915: fix i9xx_crtc_clock_get for multiplied pixels
  drm/i915: handle sdvo input pixel multiplier correctly again
  drm/i915: fix hpd work vs. flush_work in the pageflip code deadlock
  drm/i915: fix up the relocate_entry refactoring
  drm/i915: Fix pipe config warnings when dealing with LVDS fixed mode
  drm/i915: Don't call sg_free_table() if sg_alloc_table() fails
  i915: Update VGA arbiter support for newer devices
  vgaarb: Fix VGA decodes changes
  vgaarb: Don't disable resources that are not owned
  drm/i915: Pin pages whilst mapping the dma-buf
  drm/i915: enable trickle feed on Haswell
  x86: add early quirk for reserving Intel graphics stolen memory v5
  drm/i915: split PCI IDs out into i915_drm.h v4
  i915_gem: Convert kmem_cache_alloc(...GFP_ZERO) to kmem_cache_zalloc
  ...
Committed by Dave Airlie on 2013-09-10 12:36:55 +10:00 (commit 48016851c8).
29 changed files with 774 additions and 334 deletions.

--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/pci_ids.h>
+#include <drm/i915_drm.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
 #include <asm/io_apic.h>
@@ -216,6 +217,157 @@ static void __init intel_remapping_check(int num, int slot, int func)
 }
 
+/*
+ * Systems with Intel graphics controllers set aside memory exclusively
+ * for gfx driver use. This memory is not marked in the E820 as reserved
+ * or as RAM, and so is subject to overlap from E820 manipulation later
+ * in the boot process. On some systems, MMIO space is allocated on top,
+ * despite the efforts of the "RAM buffer" approach, which simply rounds
+ * memory boundaries up to 64M to try to catch space that may decode
+ * as RAM and so is not suitable for MMIO.
+ *
+ * And yes, so far on current devices the base addr is always under 4G.
+ */
+static u32 __init intel_stolen_base(int num, int slot, int func)
+{
+	u32 base;
+
+	/*
+	 * For the PCI IDs in this quirk, the stolen base is always
+	 * in 0x5c, aka the BDSM register (yes that's really what
+	 * it's called).
+	 */
+	base = read_pci_config(num, slot, func, 0x5c);
+	base &= ~((1<<20) - 1);
+
+	return base;
+}
+
+#define KB(x)	((x) * 1024)
+#define MB(x)	(KB (KB (x)))
+#define GB(x)	(MB (KB (x)))
+
+static size_t __init gen3_stolen_size(int num, int slot, int func)
+{
+	size_t stolen_size;
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+
+	switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+	case I855_GMCH_GMS_STOLEN_1M:
+		stolen_size = MB(1);
+		break;
+	case I855_GMCH_GMS_STOLEN_4M:
+		stolen_size = MB(4);
+		break;
+	case I855_GMCH_GMS_STOLEN_8M:
+		stolen_size = MB(8);
+		break;
+	case I855_GMCH_GMS_STOLEN_16M:
+		stolen_size = MB(16);
+		break;
+	case I855_GMCH_GMS_STOLEN_32M:
+		stolen_size = MB(32);
+		break;
+	case I915_GMCH_GMS_STOLEN_48M:
+		stolen_size = MB(48);
+		break;
+	case I915_GMCH_GMS_STOLEN_64M:
+		stolen_size = MB(64);
+		break;
+	case G33_GMCH_GMS_STOLEN_128M:
+		stolen_size = MB(128);
+		break;
+	case G33_GMCH_GMS_STOLEN_256M:
+		stolen_size = MB(256);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_96M:
+		stolen_size = MB(96);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_160M:
+		stolen_size = MB(160);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_224M:
+		stolen_size = MB(224);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_352M:
+		stolen_size = MB(352);
+		break;
+	default:
+		stolen_size = 0;
+		break;
+	}
+
+	return stolen_size;
+}
+
+static size_t __init gen6_stolen_size(int num, int slot, int func)
+{
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+	gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+	return gmch_ctrl << 25; /* 32 MB units */
+}
+
+typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
+static struct pci_device_id intel_stolen_ids[] __initdata = {
+	INTEL_I915G_IDS(gen3_stolen_size),
+	INTEL_I915GM_IDS(gen3_stolen_size),
+	INTEL_I945G_IDS(gen3_stolen_size),
+	INTEL_I945GM_IDS(gen3_stolen_size),
+	INTEL_VLV_M_IDS(gen3_stolen_size),
+	INTEL_VLV_D_IDS(gen3_stolen_size),
+	INTEL_PINEVIEW_IDS(gen3_stolen_size),
+	INTEL_I965G_IDS(gen3_stolen_size),
+	INTEL_G33_IDS(gen3_stolen_size),
+	INTEL_I965GM_IDS(gen3_stolen_size),
+	INTEL_GM45_IDS(gen3_stolen_size),
+	INTEL_G45_IDS(gen3_stolen_size),
+	INTEL_IRONLAKE_D_IDS(gen3_stolen_size),
+	INTEL_IRONLAKE_M_IDS(gen3_stolen_size),
+	INTEL_SNB_D_IDS(gen6_stolen_size),
+	INTEL_SNB_M_IDS(gen6_stolen_size),
+	INTEL_IVB_M_IDS(gen6_stolen_size),
+	INTEL_IVB_D_IDS(gen6_stolen_size),
+	INTEL_HSW_D_IDS(gen6_stolen_size),
+	INTEL_HSW_M_IDS(gen6_stolen_size),
+};
+
+static void __init intel_graphics_stolen(int num, int slot, int func)
+{
+	size_t size;
+	int i;
+	u32 start;
+	u16 device, subvendor, subdevice;
+
+	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
+	subvendor = read_pci_config_16(num, slot, func,
+				       PCI_SUBSYSTEM_VENDOR_ID);
+	subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
+
+	for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
+		if (intel_stolen_ids[i].device == device) {
+			stolen_size_fn stolen_size =
+				(stolen_size_fn)intel_stolen_ids[i].driver_data;
+			size = stolen_size(num, slot, func);
+			start = intel_stolen_base(num, slot, func);
+			if (size && start) {
+				/* Mark this space as reserved */
+				e820_add_region(start, size, E820_RESERVED);
+				sanitize_e820_map(e820.map,
+						  ARRAY_SIZE(e820.map),
+						  &e820.nr_map);
+			}
+			return;
+		}
+	}
+}
+
 #define QFLAG_APPLY_ONCE	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -251,6 +403,8 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
 	{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
+	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
 	{}
 };
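
As a worked example of the gen6+ decode above (the register value is hypothetical, not read from real hardware): with SNB_GMCH_GMS_SHIFT = 3 and SNB_GMCH_GMS_MASK = 0x1f, a raw control word of 0x0110 carries a GMS field of 2 and therefore 64 MB of stolen memory:

	u16 gmch_ctrl = 0x0110;             /* hypothetical SNB_GMCH_CTRL word */
	u16 gms = (gmch_ctrl >> 3) & 0x1f;  /* GMS field: (0x0110 >> 3) & 0x1f = 2 */
	size_t stolen = (size_t)gms << 25;  /* 2 * 32 MB = 64 MB */

Note also the trick in intel_stolen_ids[] above: pci_device_id.driver_data carries the per-generation size function, so the match loop just casts it back to a stolen_size_fn.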

--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -857,7 +857,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		u32 rpstat, cagf;
+		u32 rpstat, cagf, reqf;
 		u32 rpupei, rpcurup, rpprevup;
 		u32 rpdownei, rpcurdown, rpprevdown;
 		int max_freq;
@@ -869,6 +869,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 
 		gen6_gt_force_wake_get(dev_priv);
 
+		reqf = I915_READ(GEN6_RPNSWREQ);
+		reqf &= ~GEN6_TURBO_DISABLE;
+		if (IS_HASWELL(dev))
+			reqf >>= 24;
+		else
+			reqf >>= 25;
+		reqf *= GT_FREQUENCY_MULTIPLIER;
+
 		rpstat = I915_READ(GEN6_RPSTAT1);
 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
 		rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -893,6 +901,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
+		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 			   GEN6_CURICONT_MASK);

--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,9 +1290,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 * then we do not take part in VGA arbitration and the
 	 * vga_client_register() fails with -ENODEV.
 	 */
-	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-	if (ret && ret != -ENODEV)
-		goto out;
+	if (!HAS_PCH_SPLIT(dev)) {
+		ret = vga_client_register(dev->pdev, dev, NULL,
+					  i915_vga_set_decode);
+		if (ret && ret != -ENODEV)
+			goto out;
+	}
 
 	intel_register_dsm_handler();
@@ -1348,6 +1351,12 @@
 	 */
 	intel_fbdev_initial_config(dev);
 
+	/*
+	 * Must do this after fbcon init so that
+	 * vgacon_save_screen() works during the handover.
+	 */
+	i915_disable_vga_mem(dev);
+
 	/* Only enable hotplug handling once the fbdev is fully set up. */
 	dev_priv->enable_hotplug_processing = true;

--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -157,25 +157,6 @@
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
-#define INTEL_VGA_DEVICE(id, info) {		\
-	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
-	.class_mask = 0xff0000,			\
-	.vendor = 0x8086,			\
-	.device = id,				\
-	.subvendor = PCI_ANY_ID,		\
-	.subdevice = PCI_ANY_ID,		\
-	.driver_data = (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) {		\
-	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
-	.class_mask = 0xff0000,			\
-	.vendor = 0x8086,			\
-	.device = 0x16a,			\
-	.subvendor = 0x152d,			\
-	.subdevice = 0x8990,			\
-	.driver_data = (unsigned long) info }
-
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -350,118 +331,41 @@ static const struct intel_device_info intel_haswell_m_info = {
 	.has_vebox_ring = 1,
 };
 
+/*
+ * Make sure any device matches here are from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+	INTEL_I830_IDS(&intel_i830_info),	\
+	INTEL_I845G_IDS(&intel_845g_info),	\
+	INTEL_I85X_IDS(&intel_i85x_info),	\
+	INTEL_I865G_IDS(&intel_i865g_info),	\
+	INTEL_I915G_IDS(&intel_i915g_info),	\
+	INTEL_I915GM_IDS(&intel_i915gm_info),	\
+	INTEL_I945G_IDS(&intel_i945g_info),	\
+	INTEL_I945GM_IDS(&intel_i945gm_info),	\
+	INTEL_I965G_IDS(&intel_i965g_info),	\
+	INTEL_G33_IDS(&intel_g33_info),		\
+	INTEL_I965GM_IDS(&intel_i965gm_info),	\
+	INTEL_GM45_IDS(&intel_gm45_info),	\
+	INTEL_G45_IDS(&intel_g45_info),		\
+	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
+	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
+	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
+	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
+	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
+	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
+	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
+	INTEL_HSW_D_IDS(&intel_haswell_d_info),	\
+	INTEL_HSW_M_IDS(&intel_haswell_m_info),	\
+	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
+	INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+
 static const struct pci_device_id pciidlist[] = {		/* aka */
-	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
-	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
-	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
-	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
-	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
-	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
-	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
-	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
-	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
-	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
-	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
-	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
-	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
-	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
-	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
-	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
-	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
-	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
-	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
-	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
-	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
-	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
-	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
-	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
-	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
-	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
-	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
-	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
-	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
-	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
-	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
-	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
-	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
-	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
-	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
-	INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
-	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
-	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
-	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
-	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
-	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
-	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
-	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
-	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
-	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
-	INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
-	INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
-	INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
-	INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
-	INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
-	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
-	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
-	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
-	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
-	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
-	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
-	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
-	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
-	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
-	INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
-	INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
-	INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
-	INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
-	INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
-	INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
-	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
-	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
-	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
-	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
-	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
-	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
-	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
-	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
-	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
-	INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
-	INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
-	INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
-	INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
-	INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
-	INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
-	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
-	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
-	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
-	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
-	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
-	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
-	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
-	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
-	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
-	INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
-	INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
-	INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
-	INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
-	INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
-	INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
-	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
+	INTEL_PCI_IDS,
 	{0, 0, 0}
 };

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1236,6 +1236,13 @@ typedef struct drm_i915_private {
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 
+	/**
+	 * wq - Driver workqueue for GEM.
+	 *
+	 * NOTE: Work items scheduled here are not allowed to grab any modeset
+	 * locks, for otherwise the flushing done in the pageflip code will
+	 * result in deadlocks.
+	 */
 	struct workqueue_struct *wq;
 
 	/* Display functions */
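
A minimal sketch of the deadlock this NOTE (and the schedule_work() conversions in the i915_irq.c hunks further down) guards against; the lock and the work function here are illustrative names, not the driver's actual symbols:

	/* Thread A: pageflip path, holds a modeset lock and flushes the queue. */
	mutex_lock(&modeset_lock);
	flush_workqueue(dev_priv->wq);     /* waits for every queued work item */

	/* Work item previously queued on dev_priv->wq. */
	static void hotplug_work_fn(struct work_struct *work)
	{
		mutex_lock(&modeset_lock); /* blocks on thread A: ABBA deadlock */
		/* ... reprobe outputs, touch modeset state ... */
		mutex_unlock(&modeset_lock);
	}

flush_workqueue() cannot return until hotplug_work_fn() completes, and hotplug_work_fn() cannot complete until thread A drops the lock; moving such items off dev_priv->wq via schedule_work() breaks the cycle.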

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -212,7 +212,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -1695,6 +1695,7 @@ static long
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		  bool purgeable_only)
 {
+	struct list_head still_bound_list;
 	struct drm_i915_gem_object *obj, *next;
 	long count = 0;
@@ -1709,23 +1710,55 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		}
 	}
 
-	list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
-				 global_list) {
+	/*
+	 * As we may completely rewrite the bound list whilst unbinding
+	 * (due to retiring requests) we have to strictly process only
+	 * one element of the list at the time, and recheck the list
+	 * on every iteration.
+	 */
+	INIT_LIST_HEAD(&still_bound_list);
+	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
 		struct i915_vma *vma, *v;
 
+		obj = list_first_entry(&dev_priv->mm.bound_list,
+				       typeof(*obj), global_list);
+		list_move_tail(&obj->global_list, &still_bound_list);
+
 		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
 			continue;
 
+		/*
+		 * Hold a reference whilst we unbind this object, as we may
+		 * end up waiting for and retiring requests. This might
+		 * release the final reference (held by the active list)
+		 * and result in the object being freed from under us.
+		 * in this object being freed.
+		 *
+		 * Note 1: Shrinking the bound list is special since only active
+		 * (and hence bound objects) can contain such limbo objects, so
+		 * we don't need special tricks for shrinking the unbound list.
+		 * The only other place where we have to be careful with active
+		 * objects suddenly disappearing due to retiring requests is the
+		 * eviction code.
+		 *
+		 * Note 2: Even though the bound list doesn't hold a reference
+		 * to the object we can safely grab one here: The final object
+		 * unreferencing and the bound_list are both protected by the
+		 * dev->struct_mutex and so we won't ever be able to observe an
+		 * object on the bound_list with a reference count equals 0.
+		 */
+		drm_gem_object_reference(&obj->base);
+
 		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
 			if (i915_vma_unbind(vma))
 				break;
 
-		if (!i915_gem_object_put_pages(obj)) {
+		if (i915_gem_object_put_pages(obj) == 0)
 			count += obj->base.size >> PAGE_SHIFT;
-			if (count >= target)
-				return count;
-		}
+
+		drm_gem_object_unreference(&obj->base);
 	}
+	list_splice(&still_bound_list, &dev_priv->mm.bound_list);
 
 	return count;
 }
@@ -1774,7 +1807,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	page_count = obj->base.size / PAGE_SIZE;
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
-		sg_free_table(st);
 		kfree(st);
 		return -ENOMEM;
 	}

--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 
 	ret = i915_mutex_lock_interruptible(obj->base.dev);
 	if (ret)
-		return ERR_PTR(ret);
+		goto err;
 
 	ret = i915_gem_object_get_pages(obj);
-	if (ret) {
-		st = ERR_PTR(ret);
-		goto out;
-	}
+	if (ret)
+		goto err_unlock;
+
+	i915_gem_object_pin_pages(obj);
 
 	/* Copy sg so that we make an independent mapping */
 	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (st == NULL) {
-		st = ERR_PTR(-ENOMEM);
-		goto out;
+		ret = -ENOMEM;
+		goto err_unpin;
 	}
 
 	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
-	if (ret) {
-		kfree(st);
-		st = ERR_PTR(ret);
-		goto out;
-	}
+	if (ret)
+		goto err_free;
 
 	src = obj->pages->sgl;
 	dst = st->sgl;
@@ -73,17 +70,23 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	}
 
 	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-		sg_free_table(st);
-		kfree(st);
-		st = ERR_PTR(-ENOMEM);
-		goto out;
+		ret =-ENOMEM;
+		goto err_free_sg;
 	}
 
-	i915_gem_object_pin_pages(obj);
-
-out:
 	mutex_unlock(&obj->base.dev->struct_mutex);
 	return st;
+
+err_free_sg:
+	sg_free_table(st);
+err_free:
+	kfree(st);
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,

--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -310,6 +310,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	else
 		ret = relocate_entry_gtt(obj, reloc);
 
+	if (ret)
+		return ret;
+
 	/* and update the user's relocation entry */
 	reloc->presumed_offset = target_offset;

--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int bios_reserved = 0;
 
+	if (dev_priv->gtt.stolen_size == 0)
+		return 0;
+
 	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
 	if (dev_priv->mm.stolen_base == 0)
 		return 0;

--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -641,7 +641,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 		if (WARN_ON(ring->id != RCS))
 			return NULL;
 
-		obj = ring->private;
+		obj = ring->scratch.obj;
 		if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
 		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
 			return i915_error_object_create(dev_priv, obj);

--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1027,8 +1027,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock(&dev_priv->irq_lock);
 
-	queue_work(dev_priv->wq,
-		   &dev_priv->hotplug_work);
+	/*
+	 * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
+	 * queue for otherwise the flush_work in the pageflip code will
+	 * deadlock.
+	 */
+	schedule_work(&dev_priv->hotplug_work);
 }
 
 static void gmbus_irq_handler(struct drm_device *dev)
@@ -1655,7 +1660,13 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 			wake_up_all(&ring->irq_queue);
 	}
 
-	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+	/*
+	 * Our reset work can grab modeset locks (since it needs to reset the
+	 * state of outstanding pagelips). Hence it must not be run on our own
+	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
+	 * code will deadlock.
+	 */
+	schedule_work(&dev_priv->gpu_error.work);
 }
 
 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2027,7 +2038,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
 	for_each_ring(ring, dev_priv, i) {
 		if (ring->hangcheck.score > FIRE) {
-			DRM_ERROR("%s on %s\n",
+			DRM_INFO("%s on %s\n",
 				  stuck[i] ? "stuck" : "no progress",
 				  ring->name);
 			rings_hung++;

--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -33,21 +33,6 @@
 #define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
 #define _MASKED_BIT_DISABLE(a)	((a) << 16)
 
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga rbiter.
- */
-#define INTEL_GMCH_CTRL		0x52
-#define INTEL_GMCH_VGA_DISABLE  (1 << 1)
-#define SNB_GMCH_CTRL		0x50
-#define SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
-#define SNB_GMCH_GGMS_MASK	0x3
-#define SNB_GMCH_GMS_SHIFT      3 /* Graphics Mode Select */
-#define SNB_GMCH_GMS_MASK       0x1f
-
 /* PCI config space */
 
 #define HPLLCC	0xc0 /* 855 only */
@@ -245,6 +230,7 @@
  *   address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
 #define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
 #define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
 #define   MI_INVALIDATE_TLB		(1<<18)
@@ -693,6 +679,23 @@
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
 
 #define DERRMR		0x44050
+#define   DERRMR_PIPEA_SCANLINE		(1<<0)
+#define   DERRMR_PIPEA_PRI_FLIP_DONE	(1<<1)
+#define   DERRMR_PIPEA_SPR_FLIP_DONE	(1<<2)
+#define   DERRMR_PIPEA_VBLANK		(1<<3)
+#define   DERRMR_PIPEA_HBLANK		(1<<5)
+#define   DERRMR_PIPEB_SCANLINE		(1<<8)
+#define   DERRMR_PIPEB_PRI_FLIP_DONE	(1<<9)
+#define   DERRMR_PIPEB_SPR_FLIP_DONE	(1<<10)
+#define   DERRMR_PIPEB_VBLANK		(1<<11)
+#define   DERRMR_PIPEB_HBLANK		(1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define   DERRMR_PIPEC_SCANLINE		(1<<14)
+#define   DERRMR_PIPEC_PRI_FLIP_DONE	(1<<15)
+#define   DERRMR_PIPEC_SPR_FLIP_DONE	(1<<20)
+#define   DERRMR_PIPEC_VBLANK		(1<<21)
+#define   DERRMR_PIPEC_HBLANK		(1<<22)
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior.  The top 16 bits of each are
@@ -3310,6 +3313,7 @@
 #define   MCURSOR_PIPE_A	0x00
 #define   MCURSOR_PIPE_B	(1 << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
+#define   CURSOR_TRICKLE_FEED_DISABLE	(1 << 14)
 #define _CURABASE		(dev_priv->info->display_mmio_offset + 0x70084)
 #define _CURAPOS		(dev_priv->info->display_mmio_offset + 0x70088)
 #define   CURSOR_POS_MASK       0x007FF

--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -224,6 +224,18 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			vlv_gpu_freq(dev_priv->mem_freq,
+				     dev_priv->rps.rpe_delay));
+}
+
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
@@ -366,6 +378,7 @@ static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
 
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
 static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
@@ -409,6 +422,14 @@ static const struct attribute *gen6_attrs[] = {
 	NULL,
 };
 
+static const struct attribute *vlv_attrs[] = {
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_vlv_rpe_freq_mhz.attr,
+	NULL,
+};
+
 static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr, char *buf,
 				loff_t off, size_t count)
@@ -492,11 +513,13 @@ void i915_setup_sysfs(struct drm_device *dev)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
+	ret = 0;
+	if (IS_VALLEYVIEW(dev))
+		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+	else if (INTEL_INFO(dev)->gen >= 6)
 		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
-		if (ret)
-			DRM_ERROR("gen6 sysfs setup failed\n");
-	}
+	if (ret)
+		DRM_ERROR("RPS sysfs setup failed\n");
 
 	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
 				    &error_state_attr);
@@ -507,6 +530,9 @@ void i915_setup_sysfs(struct drm_device *dev)
 void i915_teardown_sysfs(struct drm_device *dev)
 {
 	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
+	if (IS_VALLEYVIEW(dev))
+		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+	else
 	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM

--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -688,7 +688,7 @@ static void intel_crt_reset(struct drm_connector *connector)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_attached_crt(connector);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (INTEL_INFO(dev)->gen >= 5) {
 		u32 adpa;
 
 		adpa = I915_READ(crt->adpa_reg);

--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2077,7 +2077,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 	else
 		dspcntr &= ~DISPPLANE_TILED;
 
-	/* must disable */
-	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+	if (IS_HASWELL(dev))
+		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
+	else
+		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
 	I915_WRITE(reg, dspcntr);
@@ -6762,8 +6764,10 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
 		cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 		cntl |= CURSOR_MODE_DISABLE;
 	}
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev)) {
 		cntl |= CURSOR_PIPE_CSC_ENABLE;
+		cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
+	}
 	I915_WRITE(CURCNTR_IVB(pipe), cntl);
 
 	intel_crtc->cursor_visible = visible;
@@ -7309,8 +7313,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 		}
 	}
 
-	pipe_config->adjusted_mode.clock = clock.dot *
-		pipe_config->pixel_multiplier;
+	pipe_config->adjusted_mode.clock = clock.dot;
 }
 
 static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
@@ -7828,12 +7831,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	return ret;
 }
 
-/*
- * On gen7 we currently use the blit ring because (in early silicon at least)
- * the render ring doesn't give us interrpts for page flip completion, which
- * means clients will hang after the first flip is queued. Fortunately the
- * blit ring generates interrupts properly, so use it instead.
- */
 static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
@@ -7842,9 +7839,13 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	struct intel_ring_buffer *ring;
 	uint32_t plane_bit = 0;
-	int ret;
+	int len, ret;
+
+	ring = obj->ring;
+	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
+		ring = &dev_priv->ring[BCS];
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
@@ -7866,10 +7867,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		goto err_unpin;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	len = 4;
+	if (ring->id == RCS)
+		len += 6;
+
+	ret = intel_ring_begin(ring, len);
 	if (ret)
 		goto err_unpin;
 
+	/* Unmask the flip-done completion message. Note that the bspec says that
+	 * we should do this for both the BCS and RCS, and that we must not unmask
+	 * more than one flip event at any time (or ensure that one flip message
+	 * can be sent by waiting for flip-done prior to queueing new flips).
+	 * Experimentation says that BCS works despite DERRMR masking all
+	 * flip-done completion events and that unmasking all planes at once
+	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
+	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
+	 */
+	if (ring->id == RCS) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+					DERRMR_PIPEB_PRI_FLIP_DONE |
+					DERRMR_PIPEC_PRI_FLIP_DONE));
+		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+	}
+
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -10022,6 +10047,33 @@ static void i915_disable_vga(struct drm_device *dev)
 	POSTING_READ(vga_reg);
 }
 
+static void i915_enable_vga_mem(struct drm_device *dev)
+{
+	/* Enable VGA memory on Intel HD */
+	if (HAS_PCH_SPLIT(dev)) {
+		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+						   VGA_RSRC_LEGACY_MEM |
+						   VGA_RSRC_NORMAL_IO |
+						   VGA_RSRC_NORMAL_MEM);
+		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	}
+}
+
+void i915_disable_vga_mem(struct drm_device *dev)
+{
+	/* Disable VGA memory on Intel HD */
+	if (HAS_PCH_SPLIT(dev)) {
+		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+						   VGA_RSRC_NORMAL_IO |
+						   VGA_RSRC_NORMAL_MEM);
+		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	}
+}
+
 void intel_modeset_init_hw(struct drm_device *dev)
 {
 	intel_init_power_well(dev);
@@ -10300,6 +10352,7 @@ void i915_redisable_vga(struct drm_device *dev)
 	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 		i915_disable_vga(dev);
+		i915_disable_vga_mem(dev);
 	}
 }
@@ -10513,6 +10566,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
 	intel_disable_fbc(dev);
 
+	i915_enable_vga_mem(dev);
+
 	intel_disable_gt_powersave(dev);
 
 	ironlake_teardown_rc6(dev);

--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -551,7 +551,7 @@ extern int intel_panel_init(struct intel_panel *panel,
 			    struct drm_display_mode *fixed_mode);
 extern void intel_panel_fini(struct intel_panel *panel);
 
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
 				    struct intel_crtc_config *pipe_config,
@@ -792,5 +792,6 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
 extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+extern void i915_disable_vga_mem(struct drm_device *dev);
 
 #endif /* __INTEL_DRV_H__ */

--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -128,8 +128,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	struct drm_display_mode *fixed_mode =
-		lvds_encoder->attached_connector->base.panel.fixed_mode;
+	const struct drm_display_mode *adjusted_mode =
+		&crtc->config.adjusted_mode;
 	int pipe = crtc->pipe;
 	u32 temp;
@@ -183,9 +183,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 		temp &= ~LVDS_ENABLE_DITHER;
 	}
 	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-	if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 		temp |= LVDS_HSYNC_POLARITY;
-	if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 		temp |= LVDS_VSYNC_POLARITY;
 
 	I915_WRITE(lvds_encoder->reg, temp);

--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -173,7 +173,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 		return ASLE_BACKLIGHT_FAILED;
 
 	intel_panel_set_backlight(dev, bclp, 255);
-	iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
+	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
 	return 0;
 }
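
The CBLV precision fix above matters most at the bottom of the range, where the old truncating conversion could report a lit backlight as 0%; a quick worked comparison:

	u32 bclp = 1;                                 /* lowest non-zero request */
	u32 old_cblv = (bclp * 0x64) / 0xff;          /* 100 / 255 truncates to 0 */
	u32 new_cblv = DIV_ROUND_UP(bclp * 100, 255); /* rounds up to 1 */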

--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -36,20 +36,12 @@
 #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
 
 void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
 {
-	adjusted_mode->hdisplay = fixed_mode->hdisplay;
-	adjusted_mode->hsync_start = fixed_mode->hsync_start;
-	adjusted_mode->hsync_end = fixed_mode->hsync_end;
-	adjusted_mode->htotal = fixed_mode->htotal;
-
-	adjusted_mode->vdisplay = fixed_mode->vdisplay;
-	adjusted_mode->vsync_start = fixed_mode->vsync_start;
-	adjusted_mode->vsync_end = fixed_mode->vsync_end;
-	adjusted_mode->vtotal = fixed_mode->vtotal;
-
-	adjusted_mode->clock = fixed_mode->clock;
+	drm_mode_copy(adjusted_mode, fixed_mode);
+
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
 }
 
 /* adjusted_mode has been preset to be the panel's fixed mode */

--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3447,14 +3447,24 @@ int intel_enable_rc6(const struct drm_device *dev)
 static void gen6_enable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 enabled_intrs;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON(dev_priv->rps.pm_iir);
 	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
 	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 	spin_unlock_irq(&dev_priv->irq_lock);
+
 	/* only unmask PM interrupts we need. Mask all others. */
-	I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+	enabled_intrs = GEN6_PM_RPS_EVENTS;
+
+	/* IVB and SNB hard hangs on looping batchbuffer
+	 * if GEN6_PM_UP_EI_EXPIRED is masked.
+	 */
+	if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+		enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+	I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
 }
 
 static void gen6_enable_rps(struct drm_device *dev)
@@ -4950,8 +4960,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-	g4x_disable_trickle_feed(dev);
-
 	/* WaVSRefCountFullforceMissDisable:hsw */
 	gen7_setup_fixed_func_scheduler(dev_priv);

--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-	struct drm_i915_gem_object *obj;
-	volatile u32 *cpu_page;
-	u32 gtt_offset;
-};
-
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
 	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
 static int
 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
@@ -213,8 +202,7 @@
 		  u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/*
@@ -481,68 +468,43 @@
 static int
 init_pipe_control(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc;
-	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->private)
+	if (ring->scratch.obj)
 		return 0;
 
-	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
-	if (!pc)
-		return -ENOMEM;
-
-	obj = i915_gem_alloc_object(ring->dev, 4096);
-	if (obj == NULL) {
+	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (ring->scratch.obj == NULL) {
 		DRM_ERROR("Failed to allocate seqno page\n");
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
 	if (ret)
 		goto err_unref;
 
-	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
-	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
-	if (pc->cpu_page == NULL) {
+	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+	if (ring->scratch.cpu_page == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
 	}
 
 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 ring->name, pc->gtt_offset);
-
-	pc->obj = obj;
-	ring->private = pc;
+			 ring->name, ring->scratch.gtt_offset);
 
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin(ring->scratch.obj);
 err_unref:
-	drm_gem_object_unreference(&obj->base);
+	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
-	kfree(pc);
 	return ret;
 }
 
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
-	struct pipe_control *pc = ring->private;
-	struct drm_i915_gem_object *obj;
-
-	obj = pc->obj;
-
-	kunmap(sg_page(obj->pages->sgl));
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-
-	kfree(pc);
-}
-
 static int init_render_ring(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 
-	if (!ring->private)
+	if (ring->scratch.obj == NULL)
 		return;
 
-	if (HAS_BROKEN_CS_TLB(dev))
-		drm_gem_object_unreference(to_gem_object(ring->private));
+	if (INTEL_INFO(dev)->gen >= 5) {
+		kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_unpin(ring->scratch.obj);
+	}
 
-	if (INTEL_INFO(dev)->gen >= 5)
-		cleanup_pipe_control(ring);
-
-	ring->private = NULL;
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
 }
 
 static void
@@ -742,8 +704,7 @@
 static int
 pc_render_add_request(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
@@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct pipe_control *pc = ring->private;
-	return pc->cpu_page[0];
+	return ring->scratch.cpu_page[0];
 }
 
 static void
 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	struct pipe_control *pc = ring->private;
-	pc->cpu_page[0] = seqno;
+	ring->scratch.cpu_page[0] = seqno;
 }
 
 static bool
@@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
 	} else {
-		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
+		u32 cs_offset = ring->scratch.gtt_offset;
 
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
@@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 			return ret;
 		}
 
-		ring->private = obj;
+		ring->scratch.obj = obj;
+		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	}
 
 	return intel_init_ring_buffer(dev, ring);

--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@ struct intel_ring_buffer {
 
 	struct intel_ring_hangcheck hangcheck;
 
-	void *private;
+	struct {
+		struct drm_i915_gem_object *obj;
+		u32 gtt_offset;
+		volatile u32 *cpu_page;
+	} scratch;
 };
 
 static inline bool

View File

@@ -1151,11 +1151,10 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = intel_encoder->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
 	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+		&crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &crtc->config.requested_mode;
 	struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
 	u32 sdvox;
 	struct intel_sdvo_in_out_map in_out;
@@ -1213,13 +1212,15 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 	 * adjusted_mode.
 	 */
 	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+	input_dtd.part1.clock /= crtc->config.pixel_multiplier;
+
 	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
 		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
 	if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
 		DRM_INFO("Setting input timings on %s failed\n",
 			 SDVO_NAME(intel_sdvo));
 
-	switch (intel_crtc->config.pixel_multiplier) {
+	switch (crtc->config.pixel_multiplier) {
 	default:
 		WARN(1, "unknown pixel mutlipler specified\n");
 	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1252,9 +1253,9 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 	}
 
 	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
-		sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+		sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
 	else
-		sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+		sdvox |= SDVO_PIPE_SEL(crtc->pipe);
 
 	if (intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
@@ -1264,7 +1265,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
 		/* done in crtc_mode_set as it lives inside the dpll register */
 	} else {
-		sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+		sdvox |= (crtc->config.pixel_multiplier - 1)
 			<< SDVO_PORT_MULTIPLY_SHIFT;
 	}
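
The added division is the substance of the sdvo input pixel multiplier fix: adjusted_mode->clock already carries the pixel multiplier for the pipe, so the DTD programmed as the encoder's input timing has to be divided back down to the true dotclock. A worked example with assumed numbers (illustrative only; the multiply itself happens earlier, during mode fixup):

    /* assume a 25000 kHz TV mode with pixel_multiplier == 4 */
    adjusted_mode->clock = mode->clock * crtc->config.pixel_multiplier; /* pipe runs at 100000 kHz */

    intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
    input_dtd.part1.clock /= crtc->config.pixel_multiplier;            /* back to 25000 kHz */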


@@ -260,8 +260,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SPRITE_TILED;
 
-	/* must disable */
-	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+	if (IS_HASWELL(dev))
+		sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+	else
+		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
 	sprctl |= SPRITE_ENABLE;
 
 	if (IS_HASWELL(dev))


@@ -261,7 +261,7 @@ void intel_uncore_init(struct drm_device *dev)
 	}
 }
 
-void intel_uncore_sanitize(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -272,6 +272,11 @@ void intel_uncore_sanitize(struct drm_device *dev)
 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 			__gen6_gt_force_wake_mt_reset(dev_priv);
 	}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+	intel_uncore_forcewake_reset(dev);
 
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
 	intel_disable_gt_powersave(dev);
@@ -549,6 +554,8 @@ static int gen6_do_reset(struct drm_device *dev)
 	/* Spin waiting for the device to ack the reset request */
 	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
 
+	intel_uncore_forcewake_reset(dev);
+
 	/* If reset with a user forcewake, try to restore, otherwise turn it off */
 	if (dev_priv->uncore.forcewake_count)
 		dev_priv->uncore.funcs.force_wake_get(dev_priv);
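
With the helper split out, the gpu-reset path can clear stale forcewake state before restoring any user-held reference, and sanitize shares the same code. The resulting function, reassembled from the hunks above for readability:

    void intel_uncore_sanitize(struct drm_device *dev)
    {
        intel_uncore_forcewake_reset(dev);

        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
    }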


@@ -257,9 +257,9 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
 		if (!conflict->bridge_has_one_vga) {
 			vga_irq_set_state(conflict, false);
 			flags |= PCI_VGA_STATE_CHANGE_DECODES;
-			if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+			if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
 				pci_bits |= PCI_COMMAND_MEMORY;
-			if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+			if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
 				pci_bits |= PCI_COMMAND_IO;
 		}
@@ -267,11 +267,11 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
 			flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
 		pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
-		conflict->owns &= ~lwants;
+		conflict->owns &= ~match;
 
 		/* If he also owned non-legacy, that is no longer the case */
-		if (lwants & VGA_RSRC_LEGACY_MEM)
+		if (match & VGA_RSRC_LEGACY_MEM)
 			conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
-		if (lwants & VGA_RSRC_LEGACY_IO)
+		if (match & VGA_RSRC_LEGACY_IO)
 			conflict->owns &= ~VGA_RSRC_NORMAL_IO;
 	}
@@ -644,10 +644,12 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
 static inline void vga_update_device_decodes(struct vga_device *vgadev,
 					     int new_decodes)
 {
-	int old_decodes;
-	struct vga_device *new_vgadev, *conflict;
+	int old_decodes, decodes_removed, decodes_unlocked;
 
 	old_decodes = vgadev->decodes;
+	decodes_removed = ~new_decodes & old_decodes;
+	decodes_unlocked = vgadev->locks & decodes_removed;
+	vgadev->owns &= ~decodes_removed;
 	vgadev->decodes = new_decodes;
 
 	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
 		vga_iostate_to_str(vgadev->decodes),
 		vga_iostate_to_str(vgadev->owns));
 
-	/* if we own the decodes we should move them along to
-	   another card */
-	if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
-		/* set us to own nothing */
-		vgadev->owns &= ~old_decodes;
-		list_for_each_entry(new_vgadev, &vga_list, list) {
-			if ((new_vgadev != vgadev) &&
-			    (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
-				pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
-				conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				if (!conflict)
-					__vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				break;
-			}
-		}
+	/* if we removed locked decodes, lock count goes to zero, and release */
+	if (decodes_unlocked) {
+		if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
+			vgadev->io_lock_cnt = 0;
+		if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
+			vgadev->mem_lock_cnt = 0;
+		__vga_put(vgadev, decodes_unlocked);
 	}
 
 	/* change decodes counter */
-	if (old_decodes != new_decodes) {
-		if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
-			vga_decode_count++;
-		else
-			vga_decode_count--;
-	}
+	if (old_decodes & VGA_RSRC_LEGACY_MASK &&
+	    !(new_decodes & VGA_RSRC_LEGACY_MASK))
+		vga_decode_count--;
+	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
+	    new_decodes & VGA_RSRC_LEGACY_MASK)
+		vga_decode_count++;
 
 	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
 }
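
The new mask bookkeeping above is easiest to follow with concrete values; a short worked fragment under assumed inputs (it reuses the function's vgadev and is illustrative only):

    /* the device used to decode legacy MEM+IO; userspace now requests IO only */
    int old_decodes = VGA_RSRC_LEGACY_MEM | VGA_RSRC_LEGACY_IO;
    int new_decodes = VGA_RSRC_LEGACY_IO;

    int decodes_removed  = ~new_decodes & old_decodes;      /* == VGA_RSRC_LEGACY_MEM */
    int decodes_unlocked = vgadev->locks & decodes_removed; /* MEM, if currently locked */

    vgadev->owns &= ~decodes_removed;  /* ownership of the removed range is dropped, */
                                       /* and any lock on it is force-released via   */
                                       /* __vga_put(vgadev, decodes_unlocked)        */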


@@ -26,6 +26,7 @@
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
 
+#include <drm/i915_pciids.h>
 #include <uapi/drm/i915_drm.h>
 
 /* For use by IPS driver */
@@ -34,4 +35,37 @@ extern bool i915_gpu_raise(void);
 extern bool i915_gpu_lower(void);
 extern bool i915_gpu_busy(void);
 extern bool i915_gpu_turbo_disable(void);
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
+ */
+#define INTEL_GMCH_CTRL			0x52
+#define INTEL_GMCH_VGA_DISABLE		(1 << 1)
+#define SNB_GMCH_CTRL			0x50
+#define SNB_GMCH_GGMS_SHIFT		8	/* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK		0x3
+#define SNB_GMCH_GMS_SHIFT		3	/* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK		0x1f
+#define I830_GMCH_CTRL			0x52
+#define I855_GMCH_GMS_MASK		0xF0
+#define I855_GMCH_GMS_STOLEN_0M		0x0
+#define I855_GMCH_GMS_STOLEN_1M		(0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M		(0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M		(0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M	(0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M	(0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M	(0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M	(0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
+
 #endif /* _I915_DRM_H_ */
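
For context, the SNB fields above are consumed by shifting and masking the 16-bit GMCH control word. A minimal sketch of the decode, assuming the gen6 convention that each GMS step is 32 MB (the helper name and the step size are assumptions here, not part of this header):

    /* hypothetical helper: stolen-memory size in bytes from SNB_GMCH_CTRL */
    static inline unsigned int snb_stolen_size(u16 snb_gmch_ctl)
    {
        snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;  /* Graphics Mode Select field */
        snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
        return snb_gmch_ctl << 25;            /* value * 32 MB */
    }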

include/drm/i915_pciids.h (new file, 211 lines)

@@ -0,0 +1,211 @@
/*
* Copyright 2013 Intel Corporation
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _I915_PCIIDS_H
#define _I915_PCIIDS_H
/*
* A pci_device_id struct {
* __u32 vendor, device;
* __u32 subvendor, subdevice;
* __u32 class, class_mask;
* kernel_ulong_t driver_data;
* };
* Don't use C99 here because "class" is reserved and we want to
* give userspace flexibility.
*/
#define INTEL_VGA_DEVICE(id, info) { \
0x8086, id, \
~0, ~0, \
0x030000, 0xff0000, \
(unsigned long) info }
#define INTEL_QUANTA_VGA_DEVICE(info) { \
0x8086, 0x16a, \
0x152d, 0x8990, \
0x030000, 0xff0000, \
(unsigned long) info }
#define INTEL_I830_IDS(info) \
INTEL_VGA_DEVICE(0x3577, info)
#define INTEL_I845G_IDS(info) \
INTEL_VGA_DEVICE(0x2562, info)
#define INTEL_I85X_IDS(info) \
INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
INTEL_VGA_DEVICE(0x358e, info)
#define INTEL_I865G_IDS(info) \
INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
#define INTEL_I915G_IDS(info) \
INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */
#define INTEL_I915GM_IDS(info) \
INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
#define INTEL_I945G_IDS(info) \
INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
#define INTEL_I945GM_IDS(info) \
INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */
#define INTEL_I965G_IDS(info) \
INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */
#define INTEL_G33_IDS(info) \
INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */
#define INTEL_I965GM_IDS(info) \
INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */
#define INTEL_GM45_IDS(info) \
INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
#define INTEL_G45_IDS(info) \
INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
#define INTEL_PINEVIEW_IDS(info) \
INTEL_VGA_DEVICE(0xa001, info), \
INTEL_VGA_DEVICE(0xa011, info)
#define INTEL_IRONLAKE_D_IDS(info) \
INTEL_VGA_DEVICE(0x0042, info)
#define INTEL_IRONLAKE_M_IDS(info) \
INTEL_VGA_DEVICE(0x0046, info)
#define INTEL_SNB_D_IDS(info) \
INTEL_VGA_DEVICE(0x0102, info), \
INTEL_VGA_DEVICE(0x0112, info), \
INTEL_VGA_DEVICE(0x0122, info), \
INTEL_VGA_DEVICE(0x010A, info)
#define INTEL_SNB_M_IDS(info) \
INTEL_VGA_DEVICE(0x0106, info), \
INTEL_VGA_DEVICE(0x0116, info), \
INTEL_VGA_DEVICE(0x0126, info)
#define INTEL_IVB_M_IDS(info) \
INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
#define INTEL_IVB_D_IDS(info) \
INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
#define INTEL_IVB_Q_IDS(info) \
INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
#define INTEL_HSW_D_IDS(info) \
INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ \
#define INTEL_HSW_M_IDS(info) \
INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
#define INTEL_VLV_M_IDS(info) \
INTEL_VGA_DEVICE(0x0f30, info), \
INTEL_VGA_DEVICE(0x0f31, info), \
INTEL_VGA_DEVICE(0x0f32, info), \
INTEL_VGA_DEVICE(0x0f33, info), \
INTEL_VGA_DEVICE(0x0157, info)
#define INTEL_VLV_D_IDS(info) \
INTEL_VGA_DEVICE(0x0155, info)
#endif /* _I915_PCIIDS_H */
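
A driver consumes these macros by expanding them into its pci_device_id table, one INTEL_VGA_DEVICE entry per ID. A minimal sketch (the table name and the driver_data payload are hypothetical; only the macros come from the header above):

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <drm/i915_pciids.h>

    static const int snb_gen = 6;   /* stand-in for a real driver-info struct */

    static const struct pci_device_id example_pciidlist[] = {
        INTEL_SNB_D_IDS(&snb_gen),  /* expands to one entry per desktop SNB id */
        INTEL_SNB_M_IDS(&snb_gen),  /* and one per mobile id */
        { 0, 0, 0 }
    };
    MODULE_DEVICE_TABLE(pci, example_pciidlist);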


@@ -65,8 +65,15 @@ struct pci_dev;
  * out of the arbitration process (and can be safe to take
  * interrupts at any time.
  */
+#if defined(CONFIG_VGA_ARB)
 extern void vga_set_legacy_decoding(struct pci_dev *pdev,
 				    unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+					   unsigned int decodes)
+{
+}
+#endif
 
 /**
  * vga_get - acquire & locks VGA resources