mirror of https://gitee.com/openkylin/linux.git
Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel
* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (23 commits)
  drm/i915: remove full registers dump debug
  drm/i915: Add DP dpll limit on ironlake and use existing DPLL search function
  drm/i915: Select the correct BPC for LVDS on Ironlake
  drm/i915: Make the BPC in FDI rx/transcoder be consistent with that in pipeconf on Ironlake
  drm/i915: Enable/disable the dithering for LVDS based on VBT setting
  drm/i915: Permit pinning whilst the device is 'suspended'
  drm/i915: Hold struct mutex whilst pinning power context bo.
  drm/i915: fix unused var
  drm/i915: Storage class should be before const qualifier
  drm/i915: remove render reclock support
  drm/i915: Fix RC6 suspend/resume
  drm/i915: execbuf2 support
  drm/i915: Reload hangcheck timer too for Ironlake
  drm/i915: only enable hotplug for detected outputs
  drm/i915: Track whether cursor needs physical address in intel_device_info
  drm/i915: Implement IS_* macros using static tables
  drm/i915: Move PCI IDs into i915 driver
  drm/i915: Update LVDS connector status when receiving ACPI LID event
  drm/i915: Add MALATA PC-81005 to ACPI LID quirk list
  drm/i915: implement new pm ops for i915
  ...
commit 635b3c9d55
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
mem = kmap_atomic(pages[page], KM_USER0);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
kunmap_atomic(pages[page], KM_USER0);
kunmap_atomic(mem, KM_USER0);
}
}
@@ -386,34 +386,6 @@ static int i915_error_state(struct seq_file *m, void *unused)
return 0;
}

static int i915_registers_info(struct seq_file *m, void *data) {
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t reg;

#define DUMP_RANGE(start, end) \
for (reg=start; reg < end; reg += 4) \
seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));

DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
DUMP_RANGE(0x73000, 0x73fff); /* performance counters */

return 0;
}

static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}

static struct drm_info_list i915_debugfs_list[] = {
{"i915_regs", i915_registers_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
@@ -813,9 +813,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PAGEFLIPPING:
value = 1;
break;
case I915_PARAM_HAS_EXECBUF2:
/* depends on GEM */
value = dev_priv->has_gem;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
return -EINVAL;
}
@@ -1117,7 +1121,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *compressed_llb;
unsigned long cfb_base, ll_base;
unsigned long cfb_base;
unsigned long ll_base = 0;

/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1205,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
0xff000000;

if (IS_MOBILE(dev) || IS_I9XX(dev))
dev_priv->cursor_needs_physical = true;
else
dev_priv->cursor_needs_physical = false;

if (IS_I965G(dev) || IS_G33(dev))
dev_priv->cursor_needs_physical = false;

/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1254,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto destroy_ringbuffer;

intel_modeset_init(dev);

ret = drm_irq_install(dev);
if (ret)
goto destroy_ringbuffer;

@@ -1271,8 +1270,6 @@ static int i915_load_modeset_init(struct drm_device *dev,

I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

intel_modeset_init(dev);

drm_helper_initial_config(dev);

return 0;
@@ -1360,7 +1357,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;

/* i915 has 4 more counters */

@@ -1376,8 +1373,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
dev_priv->info = (struct intel_device_info *) flags;

/* Add register map (needed for suspend/resume) */
mmio_bar = IS_I9XX(dev) ? 0 : 1;
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);

@@ -1652,6 +1651,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
@@ -33,7 +33,6 @@
#include "i915_drm.h"
#include "i915_drv.h"

#include "drm_pciids.h"
#include <linux/console.h>
#include "drm_crtc_helper.h"
@@ -48,8 +47,124 @@ module_param_named(powersave, i915_powersave, int, 0400);

static struct drm_driver driver;

static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_CLASS_DISPLAY_VGA << 8, \
.class_mask = 0xffff00, \
.vendor = 0x8086, \
.device = id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }

const static struct intel_device_info intel_i830_info = {
.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
};

const static struct intel_device_info intel_845g_info = {
.is_i8xx = 1,
};

const static struct intel_device_info intel_i85x_info = {
.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
};

const static struct intel_device_info intel_i865g_info = {
.is_i8xx = 1,
};

const static struct intel_device_info intel_i915g_info = {
.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i915gm_info = {
.is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
.cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i945g_info = {
.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i945gm_info = {
.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
};

const static struct intel_device_info intel_i965g_info = {
.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
};

const static struct intel_device_info intel_i965gm_info = {
.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
.has_hotplug = 1,
};

const static struct intel_device_info intel_g33_info = {
.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_hotplug = 1,
};

const static struct intel_device_info intel_g45_info = {
.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};

const static struct intel_device_info intel_gm45_info = {
.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};

const static struct intel_device_info intel_pineview_info = {
.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};

const static struct intel_device_info intel_ironlake_d_info = {
.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};

const static struct intel_device_info intel_ironlake_m_info = {
.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
.need_gfx_hws = 1, .has_rc6 = 1,
.has_hotplug = 1,
};

const static struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
{0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
@@ -284,6 +399,52 @@ i915_pci_resume(struct pci_dev *pdev)
return i915_resume(dev);
}

static int
i915_pm_suspend(struct device *dev)
{
return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
}

static int
i915_pm_resume(struct device *dev)
{
return i915_pci_resume(to_pci_dev(dev));
}

static int
i915_pm_freeze(struct device *dev)
{
return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
}

static int
i915_pm_thaw(struct device *dev)
{
/* thaw during hibernate, do nothing! */
return 0;
}

static int
i915_pm_poweroff(struct device *dev)
{
return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
}

static int
i915_pm_restore(struct device *dev)
{
return i915_pci_resume(to_pci_dev(dev));
}

const struct dev_pm_ops i915_pm_ops = {
.suspend = i915_pm_suspend,
.resume = i915_pm_resume,
.freeze = i915_pm_freeze,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
.restore = i915_pm_restore,
};

static struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
@@ -303,8 +464,6 @@ static struct drm_driver driver = {
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,

@@ -344,10 +503,7 @@ static struct drm_driver driver = {
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
#ifdef CONFIG_PM
.resume = i915_pci_resume,
.suspend = i915_pci_suspend,
#endif
.driver.pm = &i915_pm_ops,
},

.name = DRIVER_NAME,
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {

struct intel_overlay;

struct intel_device_info {
u8 is_mobile : 1;
u8 is_i8xx : 1;
u8 is_i915g : 1;
u8 is_i9xx : 1;
u8 is_i945gm : 1;
u8 is_i965g : 1;
u8 is_i965gm : 1;
u8 is_g33 : 1;
u8 need_gfx_hws : 1;
u8 is_g4x : 1;
u8 is_pineview : 1;
u8 is_ironlake : 1;
u8 has_fbc : 1;
u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
u8 cursor_needs_physical : 1;
};

typedef struct drm_i915_private {
struct drm_device *dev;

const struct intel_device_info *info;

int has_gem;

void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
int hangcheck_count;
uint32_t last_acthd;

bool cursor_needs_physical;

struct drm_mm vram;

unsigned long cfb_size;

@@ -287,8 +307,6 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 saveRENDERSTANDBY;
u32 savePWRCTXA;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;

@@ -561,6 +579,7 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
} drm_i915_private_t;

/** driver private structure attached to each drm_gem_object */
@@ -794,6 +813,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,

@@ -860,6 +881,9 @@ void i915_gem_shrinker_exit(void);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
int tiling_mode);
bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1006,33 @@ extern void g4x_disable_fbc(struct drm_device *dev);
extern int i915_wrap_ring(struct drm_device * dev);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);

#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))

#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
(dev)->pci_device == 0x27AE)
#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
(dev)->pci_device == 0x2982 || \
(dev)->pci_device == 0x2992 || \
(dev)->pci_device == 0x29A2 || \
(dev)->pci_device == 0x2A02 || \
(dev)->pci_device == 0x2A12 || \
(dev)->pci_device == 0x2A42 || \
(dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
(dev)->pci_device == 0x2E42 || \
(dev)->pci_device == 0x0042 || \
(dev)->pci_device == 0x0046)

#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
(dev)->pci_device == 0x2A12)

#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)

#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
(dev)->pci_device == 0x2E42 || \
IS_GM45(dev))

#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))

#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
(dev)->pci_device == 0x29D2 || \
(IS_PINEVIEW(dev)))
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)

#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
IS_IRONLAKE(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))

#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
IS_IRONLAKE(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1054,17 +1044,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
(IS_I9XX(dev) || IS_GM45(dev)) && \
!IS_PINEVIEW(dev) && \
!IS_IRONLAKE(dev))
#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)

#define PRIMARY_RINGBUFFER_SIZE (128*1024)
@@ -2021,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
/* blow away mappings if mapped through GTT */
i915_gem_release_mmap(obj);

if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);

/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will

@@ -2039,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)

BUG_ON(obj_priv->active);

/* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);

if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);

@@ -2581,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
bool retry_alloc = false;
int ret;

if (dev_priv->mm.suspended)
return -EBUSY;

if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
@@ -3198,7 +3196,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_file *file_priv,
struct drm_i915_gem_exec_object *entry,
struct drm_i915_gem_exec_object2 *entry,
struct drm_i915_gem_relocation_entry *relocs)
{
struct drm_device *dev = obj->dev;

@@ -3206,12 +3204,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
void __iomem *reloc_page;
bool need_fence;

need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj_priv->tiling_mode != I915_TILING_NONE;

/* Check fence reg constraints and rebind if necessary */
if (need_fence && !i915_obj_fenceable(dev, obj))
i915_gem_object_unbind(obj);

/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
if (ret)
return ret;

/*
* Pre-965 chips need a fence register set up in order to
* properly handle blits to/from tiled surfaces.
*/
if (need_fence) {
ret = i915_gem_object_get_fence_reg(obj);
if (ret != 0) {
if (ret != -EBUSY && ret != -ERESTARTSYS)
DRM_ERROR("Failure to install fence: %d\n",
ret);
i915_gem_object_unpin(obj);
return ret;
}
}

entry->offset = obj_priv->gtt_offset;

/* Apply the relocations, using the GTT aperture to avoid cache
@@ -3373,7 +3394,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
*/
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
struct drm_i915_gem_execbuffer *exec,
struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset)
{

@@ -3463,7 +3484,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
}

static int
i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry **relocs)
{

@@ -3478,8 +3499,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}

*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
if (*relocs == NULL)
if (*relocs == NULL) {
DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
return -ENOMEM;
}

for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;

@@ -3503,7 +3526,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}

static int
i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry *relocs)
{

@@ -3536,7 +3559,7 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
}

static int
i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
uint64_t exec_offset)
{
uint32_t exec_start, exec_len;
@@ -3589,18 +3612,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
}

int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec_list)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_relocation_entry *relocs;
int ret, ret2, i, pinned = 0;
int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
int pin_tries, flips;

@@ -3614,25 +3637,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
if (exec_list == NULL || object_list == NULL) {
DRM_ERROR("Failed to allocate exec or object list "
"for %d buffers\n",
if (object_list == NULL) {
DRM_ERROR("Failed to allocate object list for %d buffers\n",
args->buffer_count);
ret = -ENOMEM;
goto pre_mutex_err;
}
ret = copy_from_user(exec_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_ERROR("copy %d exec entries failed %d\n",
args->buffer_count, ret);
goto pre_mutex_err;
}

if (args->num_cliprects != 0) {
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3884,20 +3895,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,

mutex_unlock(&dev->struct_mutex);

if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_ERROR("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}

/* Copy the updated relocations out regardless of current error
* state. Failure to update the relocs would mean that the next
* time userland calls execbuf, it would do so with presumed offset
@@ -3914,12 +3911,158 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,

pre_mutex_err:
drm_free_large(object_list);
drm_free_large(exec_list);
kfree(cliprects);

return ret;
}

/*
* Legacy execbuffer just creates an exec2 list from the original exec object
* list array and passes it to the real function.
*/
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;

#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}

/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -ENOMEM;
}
ret = copy_from_user(exec_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_ERROR("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -EFAULT;
}

for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].handle = exec_list[i].handle;
exec2_list[i].relocation_count = exec_list[i].relocation_count;
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
if (!IS_I965G(dev))
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
}

exec2.buffers_ptr = args->buffers_ptr;
exec2.buffer_count = args->buffer_count;
exec2.batch_start_offset = args->batch_start_offset;
exec2.batch_len = args->batch_len;
exec2.DR1 = args->DR1;
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = 0;

ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_ERROR("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
} else {
DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
}

drm_free_large(exec_list);
drm_free_large(exec2_list);
return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;

#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

if (args->buffer_count < 1) {
DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}

exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec2_list == NULL) {
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_ERROR("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
}

ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_ERROR("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}

drm_free_large(exec2_list);
return ret;
}

int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
@@ -3933,19 +4076,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (ret)
return ret;
}
/*
* Pre-965 chips need a fence register set up in order to
* properly handle tiled surfaces.
*/
if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj);
if (ret != 0) {
if (ret != -EBUSY && ret != -ERESTARTSYS)
DRM_ERROR("Failure to install fence: %d\n",
ret);
return ret;
}
}

obj_priv->pin_count++;

/* If the object is not active and not pending a flush,
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)

/**
* Returns the size of the fence for a tiled object of the given size.
* Returns whether an object is currently fenceable. If not, it may need
* to be unbound and have its pitch adjusted.
*/
static int
i915_get_fence_size(struct drm_device *dev, int size)
bool
i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
{
int i;
int start;
struct drm_i915_gem_object *obj_priv = obj->driver_private;

if (IS_I965G(dev)) {
/* The 965 can have fences at any page boundary. */
return ALIGN(size, 4096);
if (obj->size & 4095)
return false;
return true;
} else if (IS_I9XX(dev)) {
if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
return false;
} else {
/* Align the size to a power of two greater than the smallest
* fence size.
*/
if (IS_I9XX(dev))
start = 1024 * 1024;
else
start = 512 * 1024;

for (i = start; i < size; i <<= 1)
;

return i;
if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
return false;
}

/* Power of two sized... */
if (obj->size & (obj->size - 1))
return false;

/* Objects must be size aligned as well */
if (obj_priv->gtt_offset & (obj->size - 1))
return false;
return true;
}

/* Check pitch constriants for all chips & tiling formats */
static bool
bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;

@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (stride & (stride - 1))
return false;

/* We don't 0handle the aperture area covered by the fence being bigger
* than the object size.
*/
if (i915_get_fence_size(dev, size) != size)
return false;

return true;
}
@@ -313,6 +313,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}

if (de_iir & DE_GSE)

@@ -1084,6 +1086,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
(void) I915_READ(IER);
}

/*
* Must be called after intel_modeset_init or hotplug interrupts won't be
* enabled correctly.
*/
int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

@@ -1106,19 +1112,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

/* Leave other bits alone */
hotplug_en |= HOTPLUG_EN_MASK;
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
hotplug_en |= CRT_HOTPLUG_INT_EN;
/* Ignore TV since it's buggy */

I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
SDVOB_HOTPLUG_INT_STATUS;
if (IS_G4X(dev)) {
dev_priv->hotplug_supported_mask |=
HDMIB_HOTPLUG_INT_STATUS |
HDMIC_HOTPLUG_INT_STATUS |
HDMID_HOTPLUG_INT_STATUS;
}
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
@@ -879,13 +879,6 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
HDMIC_HOTPLUG_INT_EN | \
HDMID_HOTPLUG_INT_EN | \
SDVOB_HOTPLUG_INT_EN | \
SDVOC_HOTPLUG_INT_EN | \
CRT_HOTPLUG_INT_EN)

#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)

@@ -982,6 +975,8 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*

@@ -1751,6 +1746,8 @@

/* Display & cursor control */

/* dithering flag on Ironlake */
#define PIPE_ENABLE_DITHER (1 << 4)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev)

pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);

/* Render Standby */
if (I915_HAS_RC6(dev)) {
dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
}

/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);

@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev)

pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);

/* Render Standby */
if (I915_HAS_RC6(dev)) {
I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
}

/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);

@@ -548,4 +548,6 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);

drm_sysfs_connector_add(connector);

dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
@@ -262,6 +262,14 @@ struct intel_limit {
#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */

#define IRONLAKE_P_DISPLAY_PORT_MIN 10
#define IRONLAKE_P_DISPLAY_PORT_MAX 20
#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
#define IRONLAKE_P1_DISPLAY_PORT_MAX 2

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);

@@ -271,9 +279,6 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -496,7 +501,7 @@ static const intel_limit_t intel_limits_ironlake_sdvo = {
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
.p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
.find_pll = intel_ironlake_find_best_PLL,
.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_lvds = {

@@ -511,7 +516,30 @@ static const intel_limit_t intel_limits_ironlake_lvds = {
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_P2_LVDS_SLOW,
.p2_fast = IRONLAKE_P2_LVDS_FAST },
.find_pll = intel_ironlake_find_best_PLL,
.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
.dot = { .min = IRONLAKE_DOT_MIN,
.max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN,
.max = IRONLAKE_VCO_MAX},
.n = { .min = IRONLAKE_N_MIN,
.max = IRONLAKE_N_MAX },
.m = { .min = IRONLAKE_M_MIN,
.max = IRONLAKE_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN,
.max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN,
.max = IRONLAKE_M2_MAX },
.p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
.max = IRONLAKE_P_DISPLAY_PORT_MAX },
.p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
.max = IRONLAKE_P1_DISPLAY_PORT_MAX},
.p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
.p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
.p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
.find_pll = intel_find_pll_ironlake_dp,
};

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)

@@ -519,6 +547,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_ironlake_lvds;
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
HAS_eDP)
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_sdvo;
@@ -791,7 +822,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
found = false;

if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
int lvds_reg;

if (IS_IRONLAKE(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else

@@ -839,6 +876,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;

/* return directly when it is eDP */
if (HAS_eDP)
return true;

if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
@@ -857,68 +899,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}

static bool
intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int err_most = 47;
int err_min = 10000;

/* eDP has only 2 clock choice, no n/m/p setting */
if (HAS_eDP)
return true;

if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
return intel_find_pll_ironlake_dp(limit, crtc, target,
refclk, best_clock);

if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}

memset(best_clock, 0, sizeof(*best_clock));
for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
/* based on hardware requriment prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
/* based on hardware requirment prefere larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
clock.m2 >= limit->m2.min; clock.m2--) {
int this_err;

intel_clock(dev, refclk, &clock);
if (!intel_PLL_is_valid(crtc, &clock))
continue;
this_err = abs((10000 - (target*10000/clock.dot)));
if (this_err < err_most) {
*best_clock = clock;
/* found on first matching */
goto out;
} else if (this_err < err_min) {
*best_clock = clock;
err_min = this_err;
}
}
}
}
}
out:
return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -1493,6 +1473,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
u32 temp;
int tries = 5, j, n;
u32 pipe_bpc;

temp = I915_READ(pipeconf_reg);
pipe_bpc = temp & PIPE_BPC_MASK;

/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.

@@ -1524,6 +1508,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)

/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
/*
* make the BPC in FDI Rx be consistent with that in
* pipeconf reg.
*/
temp &= ~(0x7 << 16);
temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
FDI_SEL_PCDCLK |
FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */

@@ -1666,6 +1656,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)

/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
/*
* make the BPC in transcoder be consistent with
* that in pipeconf reg.
*/
temp &= ~PIPE_BPC_MASK;
temp |= pipe_bpc;
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);

@@ -1745,6 +1741,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(fdi_tx_reg);

temp = I915_READ(fdi_rx_reg);
/* BPC in FDI rx is consistent with that in pipeconf */
temp &= ~(0x07 << 16);
temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
I915_READ(fdi_rx_reg);

@@ -1789,7 +1788,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}

temp = I915_READ(transconf_reg);
/* BPC in transcoder is consistent with that in pipeconf */
temp &= ~PIPE_BPC_MASK;
temp |= pipe_bpc;
I915_WRITE(transconf_reg, temp);
I915_READ(transconf_reg);
udelay(100);

/* disable PCH DPLL */
@@ -2448,7 +2452,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
* A value of 5us seems to be a good balance; safe for very low end
* platforms but not overly aggressive on lower latency configs.
*/
const static int latency_ns = 5000;
static const int latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{

@@ -2559,7 +2563,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
const static int sr_latency_ns = 12000;
static const int sr_latency_ns = 12000;

sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);

@@ -2598,7 +2602,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
const static int sr_latency_ns = 12000;
static const int sr_latency_ns = 12000;

sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);

@@ -2667,7 +2671,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (HAS_FW_BLC(dev) && sr_hdisplay &&
(!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
const static int sr_latency_ns = 6000;
static const int sr_latency_ns = 6000;

sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2969,6 +2973,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,

/* determine panel color depth */
temp = I915_READ(pipeconf_reg);
temp &= ~PIPE_BPC_MASK;
if (is_lvds) {
int lvds_reg = I915_READ(PCH_LVDS);
/* the BPC will be 6 if it is 18-bit LVDS panel */
if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
} else
temp |= PIPE_8BPC;
I915_WRITE(pipeconf_reg, temp);
I915_READ(pipeconf_reg);

switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:

@@ -3195,7 +3211,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/

/* set the dithering flag */
if (IS_I965G(dev)) {
if (dev_priv->lvds_dither) {
if (IS_IRONLAKE(dev))
pipeconf |= PIPE_ENABLE_DITHER;
else
lvds |= LVDS_ENABLE_DITHER;
} else {
if (IS_IRONLAKE(dev))
pipeconf &= ~PIPE_ENABLE_DITHER;
else
lvds &= ~LVDS_ENABLE_DITHER;
}
}
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
@@ -3385,7 +3414,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,

/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->cursor_needs_physical) {
if (!dev_priv->info->cursor_needs_physical) {
ret = i915_gem_object_pin(bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");

@@ -3420,7 +3449,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
I915_WRITE(base, addr);

if (intel_crtc->cursor_bo) {
if (dev_priv->cursor_needs_physical) {
if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != bo)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
@@ -3779,125 +3808,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
    queue_work(dev_priv->wq, &dev_priv->idle_work);
}

void intel_increase_renderclock(struct drm_device *dev, bool schedule)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (IS_IRONLAKE(dev))
        return;

    if (!dev_priv->render_reclock_avail) {
        DRM_DEBUG_DRIVER("not reclocking render clock\n");
        return;
    }

    /* Restore render clock frequency to original value */
    if (IS_G4X(dev) || IS_I9XX(dev))
        pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
    else if (IS_I85X(dev))
        pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
    DRM_DEBUG_DRIVER("increasing render clock frequency\n");

    /* Schedule downclock */
    if (schedule)
        mod_timer(&dev_priv->idle_timer, jiffies +
              msecs_to_jiffies(GPU_IDLE_TIMEOUT));
}

void intel_decrease_renderclock(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (IS_IRONLAKE(dev))
        return;

    if (!dev_priv->render_reclock_avail) {
        DRM_DEBUG_DRIVER("not reclocking render clock\n");
        return;
    }

    if (IS_G4X(dev)) {
        u16 gcfgc;

        /* Adjust render clock... */
        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

        /* Down to minimum... */
        gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
        gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;

        pci_write_config_word(dev->pdev, GCFGC, gcfgc);
    } else if (IS_I965G(dev)) {
        u16 gcfgc;

        /* Adjust render clock... */
        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

        /* Down to minimum... */
        gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
        gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;

        pci_write_config_word(dev->pdev, GCFGC, gcfgc);
    } else if (IS_I945G(dev) || IS_I945GM(dev)) {
        u16 gcfgc;

        /* Adjust render clock... */
        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

        /* Down to minimum... */
        gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
        gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;

        pci_write_config_word(dev->pdev, GCFGC, gcfgc);
    } else if (IS_I915G(dev)) {
        u16 gcfgc;

        /* Adjust render clock... */
        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

        /* Down to minimum... */
        gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
        gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;

        pci_write_config_word(dev->pdev, GCFGC, gcfgc);
    } else if (IS_I85X(dev)) {
        u16 hpllcc;

        /* Adjust render clock... */
        pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);

        /* Up to maximum... */
        hpllcc &= ~GC_CLOCK_CONTROL_MASK;
        hpllcc |= GC_CLOCK_133_200;

        pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
    }
    DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
}

/* Note that no increase function is needed for this - increase_renderclock()
 * will also rewrite these bits
 */
void intel_decrease_displayclock(struct drm_device *dev)
{
    if (IS_IRONLAKE(dev))
        return;

    if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
        IS_I915GM(dev)) {
        u16 gcfgc;

        /* Adjust render clock... */
        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

        /* Down to minimum... */
        gcfgc &= ~0xf0;
        gcfgc |= 0x80;

        pci_write_config_word(dev->pdev, GCFGC, gcfgc);
    }
}

#define CRTC_IDLE_TIMEOUT 1000 /* ms */

static void intel_crtc_idle_timer(unsigned long arg)

@@ -4011,12 +3921,6 @@ static void intel_idle_update(struct work_struct *work)

    mutex_lock(&dev->struct_mutex);

    /* GPU isn't processing, downclock it. */
    if (!dev_priv->busy) {
        intel_decrease_renderclock(dev);
        intel_decrease_displayclock(dev);
    }

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        /* Skip inactive CRTCs */
        if (!crtc->fb)

@@ -4050,13 +3954,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
    if (!drm_core_check_feature(dev, DRIVER_MODESET))
        return;

-    if (!dev_priv->busy) {
+    if (!dev_priv->busy)
        dev_priv->busy = true;
-        intel_increase_renderclock(dev, true);
-    } else {
+    else
        mod_timer(&dev_priv->idle_timer, jiffies +
              msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-    }

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        if (!crtc->fb)
@@ -4400,29 +4302,43 @@ static void intel_setup_outputs(struct drm_device *dev)
        bool found = false;

        if (I915_READ(SDVOB) & SDVO_DETECTED) {
+            DRM_DEBUG_KMS("probing SDVOB\n");
            found = intel_sdvo_init(dev, SDVOB);
-            if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+            if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                intel_hdmi_init(dev, SDVOB);
+            }

-            if (!found && SUPPORTS_INTEGRATED_DP(dev))
+            if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+                DRM_DEBUG_KMS("probing DP_B\n");
                intel_dp_init(dev, DP_B);
+            }
        }

        /* Before G4X SDVOC doesn't have its own detect register */

-        if (I915_READ(SDVOB) & SDVO_DETECTED)
+        if (I915_READ(SDVOB) & SDVO_DETECTED) {
+            DRM_DEBUG_KMS("probing SDVOC\n");
            found = intel_sdvo_init(dev, SDVOC);
+        }

        if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

-            if (SUPPORTS_INTEGRATED_HDMI(dev))
+            if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                intel_hdmi_init(dev, SDVOC);
-            if (SUPPORTS_INTEGRATED_DP(dev))
+            }
+            if (SUPPORTS_INTEGRATED_DP(dev)) {
+                DRM_DEBUG_KMS("probing DP_C\n");
                intel_dp_init(dev, DP_C);
+            }
        }

-        if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+        if (SUPPORTS_INTEGRATED_DP(dev) &&
+            (I915_READ(DP_D) & DP_DETECTED)) {
+            DRM_DEBUG_KMS("probing DP_D\n");
            intel_dp_init(dev, DP_D);
+        }
    } else if (IS_I8XX(dev))
        intel_dvo_init(dev);

@@ -4527,6 +4443,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
    .fb_changed = intelfb_probe,
};

+static struct drm_gem_object *
+intel_alloc_power_context(struct drm_device *dev)
+{
+    struct drm_gem_object *pwrctx;
+    int ret;
+
+    pwrctx = drm_gem_object_alloc(dev, 4096);
+    if (!pwrctx) {
+        DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+        return NULL;
+    }
+
+    mutex_lock(&dev->struct_mutex);
+    ret = i915_gem_object_pin(pwrctx, 4096);
+    if (ret) {
+        DRM_ERROR("failed to pin power context: %d\n", ret);
+        goto err_unref;
+    }
+
+    ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+    if (ret) {
+        DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+        goto err_unpin;
+    }
+    mutex_unlock(&dev->struct_mutex);
+
+    return pwrctx;
+
+err_unpin:
+    i915_gem_object_unpin(pwrctx);
+err_unref:
+    drm_gem_object_unreference(pwrctx);
+    mutex_unlock(&dev->struct_mutex);
+    return NULL;
+}
+
void intel_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
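The new intel_alloc_power_context() above is written in the kernel's usual unwind-by-goto style: each acquisition has a matching error label, a failure jumps to the label that releases everything acquired so far in reverse order, and the mutex is dropped on both the success and failure paths. A generic, self-contained sketch of the same shape (names and resources hypothetical):

#include <stdlib.h>

/* Generic illustration of the unwind-by-goto error handling used by
 * intel_alloc_power_context() above: later failures jump to labels that
 * release earlier acquisitions in reverse order. */
struct resource_pair {
    void *a;
    void *b;
};

static struct resource_pair *acquire_pair(size_t a_size, size_t b_size)
{
    struct resource_pair *p = malloc(sizeof(*p));

    if (!p)
        return NULL;

    p->a = malloc(a_size);
    if (!p->a)
        goto err_free_pair;

    p->b = malloc(b_size);
    if (!p->b)
        goto err_free_a;

    return p;

err_free_a:
    free(p->a);
err_free_pair:
    free(p);
    return NULL;
}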
@@ -4579,42 +4531,27 @@ void intel_init_clock_gating(struct drm_device *dev)
     * GPU can automatically power down the render unit if given a page
     * to save state.
     */
-    if (I915_HAS_RC6(dev)) {
-        struct drm_gem_object *pwrctx;
-        struct drm_i915_gem_object *obj_priv;
-        int ret;
+    if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
+        struct drm_i915_gem_object *obj_priv = NULL;

        if (dev_priv->pwrctx) {
            obj_priv = dev_priv->pwrctx->driver_private;
        } else {
-            pwrctx = drm_gem_object_alloc(dev, 4096);
-            if (!pwrctx) {
-                DRM_DEBUG("failed to alloc power context, "
-                          "RC6 disabled\n");
-                goto out;
+            struct drm_gem_object *pwrctx;
+
+            pwrctx = intel_alloc_power_context(dev);
+            if (pwrctx) {
+                dev_priv->pwrctx = pwrctx;
+                obj_priv = pwrctx->driver_private;
            }
-
-            ret = i915_gem_object_pin(pwrctx, 4096);
-            if (ret) {
-                DRM_ERROR("failed to pin power context: %d\n",
-                          ret);
-                drm_gem_object_unreference(pwrctx);
-                goto out;
-            }
-
-            i915_gem_object_set_to_gtt_domain(pwrctx, 1);
-
-            dev_priv->pwrctx = pwrctx;
-            obj_priv = pwrctx->driver_private;
        }

-        I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
-        I915_WRITE(MCHBAR_RENDER_STANDBY,
-               I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+        if (obj_priv) {
+            I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+            I915_WRITE(MCHBAR_RENDER_STANDBY,
+                   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+        }
    }

-out:
-    return;
}

/* Set up chip specific display functions */

@@ -4770,7 +4707,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
        del_timer_sync(&intel_crtc->idle_timer);
    }

-    intel_increase_renderclock(dev, false);
    del_timer_sync(&dev_priv->idle_timer);

    if (dev_priv->display.disable_fbc)
@@ -1402,14 +1402,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
        break;
    case DP_B:
    case PCH_DP_B:
+        dev_priv->hotplug_supported_mask |=
+            HDMIB_HOTPLUG_INT_STATUS;
        name = "DPDDC-B";
        break;
    case DP_C:
    case PCH_DP_C:
+        dev_priv->hotplug_supported_mask |=
+            HDMIC_HOTPLUG_INT_STATUS;
        name = "DPDDC-C";
        break;
    case DP_D:
    case PCH_DP_D:
+        dev_priv->hotplug_supported_mask |=
+            HDMID_HOTPLUG_INT_STATUS;
        name = "DPDDC-D";
        break;
    }

@@ -303,21 +303,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
    if (sdvox_reg == SDVOB) {
        intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
        intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+        dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
    } else if (sdvox_reg == SDVOC) {
        intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
        intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+        dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
    } else if (sdvox_reg == HDMIB) {
        intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
        intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
                                                 "HDMIB");
+        dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
    } else if (sdvox_reg == HDMIC) {
        intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
        intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
                                                 "HDMIC");
+        dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
    } else if (sdvox_reg == HDMID) {
        intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
        intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
                                                 "HDMID");
+        dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
    }
    if (!intel_output->ddc_bus)
        goto err_connector;
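The pattern in both files is the same: an output that is actually detected and initialised ORs its status bit into dev_priv->hotplug_supported_mask, so that hotplug interrupts are only acted on for connectors that exist ("only enable hotplug for detected outputs" in this merge). A hedged sketch of the consumer side of that mask; the bit positions are placeholders, not the real register layout:

#include <stdbool.h>
#include <stdint.h>

/* Placeholder bit positions; the real values live in the driver's
 * register definitions. */
#define HDMIB_HOTPLUG_INT_STATUS (1u << 0)
#define HDMIC_HOTPLUG_INT_STATUS (1u << 1)
#define HDMID_HOTPLUG_INT_STATUS (1u << 2)

/* Hedged sketch of the consumer: an interrupt is only treated as a
 * hotplug event if its status bit was registered by a detected output. */
static bool hotplug_event_is_interesting(uint32_t hotplug_status,
                                         uint32_t supported_mask)
{
    return (hotplug_status & supported_mask) != 0;
}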
@@ -608,6 +608,13 @@ static const struct dmi_system_id bad_lid_status[] = {
            DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
        },
    },
+    {
+        .ident = "PC-81005",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
+        },
+    },
    { }
};

@@ -679,7 +686,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
    struct drm_i915_private *dev_priv =
        container_of(nb, struct drm_i915_private, lid_notifier);
    struct drm_device *dev = dev_priv->dev;
+    struct drm_connector *connector = dev_priv->int_lvds_connector;

+    /*
+     * check and update the status of LVDS connector after receiving
+     * the LID nofication event.
+     */
+    if (connector)
+        connector->status = connector->funcs->detect(connector);
    if (!acpi_lid_open()) {
        dev_priv->modeset_on_lid = 1;
        return NOTIFY_OK;

@@ -854,65 +868,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
    { } /* terminating entry */
};

-#ifdef CONFIG_ACPI
-/*
- * check_lid_device -- check whether @handle is an ACPI LID device.
- * @handle: ACPI device handle
- * @level : depth in the ACPI namespace tree
- * @context: the number of LID device when we find the device
- * @rv: a return value to fill if desired (Not use)
- */
-static acpi_status
-check_lid_device(acpi_handle handle, u32 level, void *context,
-                 void **return_value)
-{
-    struct acpi_device *acpi_dev;
-    int *lid_present = context;
-
-    acpi_dev = NULL;
-    /* Get the acpi device for device handle */
-    if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
-        /* If there is no ACPI device for handle, return */
-        return AE_OK;
-    }
-
-    if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
-        *lid_present = 1;
-
-    return AE_OK;
-}
-
-/**
- * check whether there exists the ACPI LID device by enumerating the ACPI
- * device tree.
- */
-static int intel_lid_present(void)
-{
-    int lid_present = 0;
-
-    if (acpi_disabled) {
-        /* If ACPI is disabled, there is no ACPI device tree to
-         * check, so assume the LID device would have been present.
-         */
-        return 1;
-    }
-
-    acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-                        ACPI_UINT32_MAX,
-                        check_lid_device, NULL, &lid_present, NULL);
-
-    return lid_present;
-}
-#else
-static int intel_lid_present(void)
-{
-    /* In the absence of ACPI built in, assume that the LID device would
-     * have been present.
-     */
-    return 1;
-}
-#endif
-
/**
 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
 * @dev: drm device

@@ -1031,12 +986,8 @@ void intel_lvds_init(struct drm_device *dev)
    if (dmi_check_system(intel_no_lvds))
        return;

-    /*
-     * Assume LVDS is present if there's an ACPI lid device or if the
-     * device is present in the VBT.
-     */
-    if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
-        DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
+    if (!lvds_is_present_in_vbt(dev)) {
+        DRM_DEBUG_KMS("LVDS is not present in VBT\n");
        return;
    }

@@ -1180,6 +1131,8 @@ void intel_lvds_init(struct drm_device *dev)
        DRM_DEBUG_KMS("lid notifier registration failed\n");
        dev_priv->lid_notifier.notifier_call = NULL;
    }
+    /* keep the LVDS connector */
+    dev_priv->int_lvds_connector = connector;
    drm_sysfs_connector_add(connector);
    return;
@@ -2662,6 +2662,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)

bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
+    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_connector *connector;
    struct intel_output *intel_output;
    struct intel_sdvo_priv *sdvo_priv;

@@ -2708,10 +2709,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
        intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
        sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
                                                     "SDVOB/VGA DDC BUS");
+        dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
    } else {
        intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
        sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
                                                     "SDVOC/VGA DDC BUS");
+        dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
    }

    if (intel_output->ddc_bus == NULL)

@@ -1840,6 +1840,8 @@ intel_tv_init(struct drm_device *dev)
    drm_connector_attach_property(connector,
                                  dev->mode_config.tv_bottom_margin_property,
                                  tv_priv->margin[TV_MARGIN_BOTTOM]);
+
+    dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
out:
    drm_sysfs_connector_add(connector);
}

@@ -188,6 +188,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_GEM_EXECBUFFER2 0x29

#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)

@@ -207,6 +208,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)

@@ -272,6 +274,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
+#define I915_PARAM_HAS_EXECBUF2 9

typedef struct drm_i915_getparam {
    int param;

@@ -567,6 +570,57 @@ struct drm_i915_gem_execbuffer {
    __u64 cliprects_ptr;
};

+struct drm_i915_gem_exec_object2 {
+    /**
+     * User's handle for a buffer to be bound into the GTT for this
+     * operation.
+     */
+    __u32 handle;
+
+    /** Number of relocations to be performed on this buffer */
+    __u32 relocation_count;
+    /**
+     * Pointer to array of struct drm_i915_gem_relocation_entry containing
+     * the relocations to be performed in this buffer.
+     */
+    __u64 relocs_ptr;
+
+    /** Required alignment in graphics aperture */
+    __u64 alignment;
+
+    /**
+     * Returned value of the updated offset of the object, for future
+     * presumed_offset writes.
+     */
+    __u64 offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+    __u64 flags;
+    __u64 rsvd1;
+    __u64 rsvd2;
+};
+
+struct drm_i915_gem_execbuffer2 {
+    /**
+     * List of gem_exec_object2 structs
+     */
+    __u64 buffers_ptr;
+    __u32 buffer_count;
+
+    /** Offset in the batchbuffer to start execution from. */
+    __u32 batch_start_offset;
+    /** Bytes used in batchbuffer from batch_start_offset */
+    __u32 batch_len;
+    __u32 DR1;
+    __u32 DR4;
+    __u32 num_cliprects;
+    /** This is a struct drm_clip_rect *cliprects */
+    __u64 cliprects_ptr;
+    __u64 flags; /* currently unused */
+    __u64 rsvd1;
+    __u64 rsvd2;
+};
+
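The new DRM_I915_GEM_EXECBUFFER2 ioctl number, the I915_PARAM_HAS_EXECBUF2 getparam flag and the two structs above are the whole userspace-visible surface of the execbuf2 support in this merge. A hedged userspace sketch of how a caller might probe for and issue the new ioctl, assuming an already-open DRM fd and an already-created GEM handle holding the batch (error handling and relocations omitted):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include <drm/i915_drm.h>   /* drm_i915_getparam, drm_i915_gem_execbuffer2 */

/* Returns 0 on success, -1 if execbuf2 is unavailable or the ioctl failed.
 * 'fd' is an open DRM device fd, 'batch_handle' a GEM handle holding the
 * batch, 'batch_len' its used size in bytes; all illustrative. */
static int submit_with_execbuf2(int fd, uint32_t batch_handle,
                                uint32_t batch_len)
{
    struct drm_i915_gem_exec_object2 obj;
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_getparam gp;
    int has_execbuf2 = 0;

    /* Probe for the new ioctl before using it. */
    memset(&gp, 0, sizeof(gp));
    gp.param = I915_PARAM_HAS_EXECBUF2;
    gp.value = &has_execbuf2;
    if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) || !has_execbuf2)
        return -1;

    memset(&obj, 0, sizeof(obj));
    obj.handle = batch_handle;          /* buffer to bind into the GTT */
    obj.relocation_count = 0;           /* no relocations in this sketch */

    memset(&execbuf, 0, sizeof(execbuf));
    execbuf.buffers_ptr = (uintptr_t)&obj;
    execbuf.buffer_count = 1;
    execbuf.batch_start_offset = 0;
    execbuf.batch_len = batch_len;

    return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}

In real use the buffer list would contain every buffer the batch references, each with its relocation array, and the batch buffer last; the sketch only shows the ioctl plumbing.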
struct drm_i915_gem_pin {
    /** Handle of the buffer to be pinned. */
    __u32 handle;