mirror of https://gitee.com/openkylin/linux.git
Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (39 commits)
  drm/ttm: Be consistent on ttm_bo_init() failures
  drm/radeon/kms: Fix retrying ttm_bo_init() after it failed once.
  drm/radeon/kms: fix thermal sensor reporting on rv6xx
  drm/radeon/kms: fix bugs in ddc and cd path router code
  drm/radeon/kms: add support for clock/data path routers
  drm: vmwgfx: fix information leak to userland
  drivers/gpu: Use vzalloc
  drm/vmwgfx: Fix oops on failing bo pin
  drm/ttm: Remove the CAP_SYS_ADMIN requirement for bo pinning
  drm/ttm: Make sure a sync object doesn't disappear while we use it
  drm/radeon/kms: don't disable shared encoders on pre-DCE3 display blocks
  drivers/gpu/drm: Update WARN uses
  drivers/gpu/drm/vmwgfx: Fix k.alloc switched arguments
  DRM: ignore invalid EDID extensions
  drm/radeon/kms: make the connector code less verbose
  drm/ttm: remove failed ttm binding error printout
  drm/ttm: Add a barrier when unreserving
  drm/ttm: Remove mm init error printouts and checks
  drm/ttm: Remove pointless list_empty check
  drm/ttm: Use private locks for the default bo range manager
  ...
This commit is contained in:
commit 99efb9369c
@@ -1210,14 +1210,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
 	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
 	u32 pte_flags;

-	if (type_mask == AGP_USER_UNCACHED_MEMORY)
+	if (type_mask == AGP_USER_MEMORY)
 		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
 	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	}

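The hunk above corrects which user memory types map to which GTT cache levels. As a standalone illustration of the corrected selection logic — not the driver code; the type names and bit values below are invented stand-ins for the AGP_USER_* and GEN6_PTE_* constants — the mapping reduces to:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the GEN6 PTE bits. */
#define PTE_VALID     (1u << 0)
#define PTE_UNCACHED  (1u << 1)
#define PTE_LLC       (2u << 1)
#define PTE_LLC_MLC   (3u << 1)
#define PTE_GFDT      (1u << 3)

enum mem_type { USER_MEMORY, USER_CACHED_LLC_MLC, USER_CACHED };

/* Mirrors the corrected mapping: plain user memory is uncached, the
 * explicit LLC+MLC type gets LLC_MLC, everything else defaults to LLC. */
static uint32_t pte_flags_for(enum mem_type type, int gfdt)
{
	uint32_t pte = PTE_VALID;

	if (type == USER_MEMORY)
		pte |= PTE_UNCACHED;
	else if (type == USER_CACHED_LLC_MLC)
		pte |= PTE_LLC_MLC | (gfdt ? PTE_GFDT : 0);
	else /* 'normal'/'cached' maps to LLC by default */
		pte |= PTE_LLC | (gfdt ? PTE_GFDT : 0);

	return pte;
}

int main(void)
{
	printf("uncached: %#x\n", pte_flags_for(USER_MEMORY, 0));
	printf("llc+gfdt: %#x\n", pte_flags_for(USER_CACHED, 1));
	return 0;
}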
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
 	struct drm_crtc *tmp;
 	int crtc_mask = 1;

-	WARN(!crtc, "checking null crtc?");
+	WARN(!crtc, "checking null crtc?\n");

 	dev = crtc->dev;

@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 			.addr	= DDC_ADDR,
 			.flags	= I2C_M_RD,
 			.len	= len,
-			.buf	= buf + start,
+			.buf	= buf,
 		}
 	};

@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	int i, j = 0;
+	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;

 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)

@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)

 	for (j = 1; j <= block[0x7e]; j++) {
 		for (i = 0; i < 4; i++) {
-			if (drm_do_probe_ddc_edid(adapter, block, j,
-						  EDID_LENGTH))
+			if (drm_do_probe_ddc_edid(adapter,
+						  block + (valid_extensions + 1) * EDID_LENGTH,
+						  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + j * EDID_LENGTH))
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+				valid_extensions++;
 				break;
+			}
 		}
 		if (i == 4)
-			goto carp;
+			dev_warn(connector->dev->dev,
+				 "%s: Ignoring invalid EDID block %d.\n",
+				 drm_get_connector_name(connector), j);
 	}
+
+	if (valid_extensions != block[0x7e]) {
+		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+		block[0x7e] = valid_extensions;
+		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+		if (!new)
+			goto out;
+		block = new;
+	}

 	return block;

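The EDID change above reads each extension block into the slot just past the last known-good block, keeps it only if it validates, then rewrites the extension count, fixes the base-block checksum (which covers the count byte), and shrinks the allocation. A minimal user-space sketch of that compaction step, assuming the blocks are already in memory and using a placeholder validity test:

#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH 128

/* Placeholder for drm_edid_block_valid(): EDID blocks sum to 0 mod 256. */
static int block_valid(const unsigned char *blk)
{
	unsigned char sum = 0;
	for (int i = 0; i < EDID_LENGTH; i++)
		sum += blk[i];
	return sum == 0;
}

/* Compact the valid extension blocks to the front, patch the count and
 * base-block checksum, and shrink the buffer — the same shape as the
 * kernel's krealloc() path. */
static unsigned char *compact_extensions(unsigned char *edid)
{
	int declared = edid[0x7e], valid = 0;

	for (int j = 1; j <= declared; j++) {
		unsigned char *slot = edid + (valid + 1) * EDID_LENGTH;
		memmove(slot, edid + j * EDID_LENGTH, EDID_LENGTH);
		if (block_valid(slot))
			valid++;
	}

	if (valid != declared) {
		/* The base checksum covers byte 0x7e, so it must absorb
		 * the difference when the count changes. */
		edid[EDID_LENGTH - 1] += declared - valid;
		edid[0x7e] = valid;
		unsigned char *n = realloc(edid, (valid + 1) * EDID_LENGTH);
		if (n)
			edid = n;
	}
	return edid;
}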
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);

 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);

 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);

@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,

 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)

 #define PRIMARY_RINGBUFFER_SIZE (128*1024)

@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
-	if (list_empty(&ring->gpu_write_list))
+	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;

 	i915_gem_flush_ring(dev, NULL, ring,

@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
 	int ret;

 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return 0;

@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * write domain
 	 */
 	if (obj->write_domain &&
-	    obj->write_domain != obj->pending_read_domains) {
+	    (obj->write_domain != obj->pending_read_domains ||
+	     obj_priv->ring != ring)) {
 		flush_domains |= obj->write_domain;
 		invalidate_domains |=
 			obj->pending_read_domains & ~obj->write_domain;

@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 	return 0;
 }

+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+				struct drm_file *file,
+				struct intel_ring_buffer *ring,
+				struct drm_gem_object **objects,
+				int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+	dev_priv->mm.flush_rings = 0;
+	for (i = 0; i < count; i++)
+		i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev, file,
+			       dev->invalidate_domains,
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+		/* XXX replace with semaphores */
+		if (obj->ring && ring != obj->ring) {
+			ret = i915_gem_object_wait_rendering(&obj->base, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *

@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}

-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj, ring);
-	}
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			  __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
+	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+					      object_list, args->buffer_count);
+	if (ret)
+		goto err;

 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];

@@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		alignment = i915_gem_get_gtt_alignment(obj);
 	if (obj_priv->gtt_offset & (alignment - 1)) {
 		WARN(obj_priv->pin_count,
-		     "bo is already pinned with incorrect alignment:"
-		     " offset=%x, req.alignment=%x\n",
+		     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
 		     obj_priv->gtt_offset, alignment);
 		ret = i915_gem_object_unbind(obj);
 		if (ret)

@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		     struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *obj_addr;
-	int ret;
-	char __user *user_data;
+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);

-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-	ret = copy_from_user(obj_addr, user_data, args->size);
-	if (ret)
-		return -EFAULT;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}

 	drm_agp_chipset_flush(dev);
 	return 0;

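The pwrite change above first attempts a copy that cannot fault while the device lock is held, and only on failure drops the lock around an ordinary blocking copy. A sketch of the same pattern in user-space terms — the non-faulting copy is simulated here; in the driver it is __copy_from_user_inatomic_nocache():

#include <pthread.h>
#include <string.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Simulated fast path: returns the number of bytes it could NOT copy.
 * Here it always "fails" so the fallback path is exercised. */
static size_t copy_nonfaulting(void *dst, const void *src, size_t n)
{
	(void)dst; (void)src;
	return n;
}

static int locked_pwrite(void *vaddr, const void *user_data, size_t n)
{
	pthread_mutex_lock(&dev_lock);

	if (copy_nonfaulting(vaddr, user_data, n)) {
		/* The destination stays valid for the object's lifetime,
		 * so the lock can be dropped around the blocking copy. */
		pthread_mutex_unlock(&dev_lock);
		memcpy(vaddr, user_data, n);
		pthread_mutex_lock(&dev_lock);
	}

	pthread_mutex_unlock(&dev_lock);
	return 0;
}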
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev)
 	int lists_empty;

 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list) &&
-		      list_empty(&dev_priv->bsd_ring.active_list) &&
-		      list_empty(&dev_priv->blt_ring.active_list);
+		      list_empty(&dev_priv->mm.active_list);

 	return !lists_empty;
 }

@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)

 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;

@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)

 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!lists_empty);

 	return 0;

@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
 	/* Clock gating state */
 	intel_init_clock_gating(dev);

-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
 		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}

 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);

@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
 	udelay(500);
 }

+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {

@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)

 	DRM_DEBUG_KMS("FDI train done\n");

-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	POSTING_READ(reg);
-	udelay(1000);
 }

 static const int const snb_b_fdi_train_param [] = {

@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
 	I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));

+	intel_fdi_normal_train(crtc);
+
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {

@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	udelay(100);

 	/* Ironlake workaround, disable clock pointer after downing FDI */
-	I915_WRITE(FDI_RX_CHICKEN(pipe),
-		   I915_READ(FDI_RX_CHICKEN(pipe) &
-			     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+	if (HAS_PCH_IBX(dev))
+		I915_WRITE(FDI_RX_CHICKEN(pipe),
+			   I915_READ(FDI_RX_CHICKEN(pipe) &
+				     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));

 	/* still set train pattern 1 */
 	reg = FDI_TX_CTL(pipe);

@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev)
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
-	fstart = fmax;

 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;

-	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fmax = fmax; /* IPS callback will increase this */
 	dev_priv->fstart = fstart;

-	dev_priv->max_delay = fmax;
+	dev_priv->max_delay = fstart;
 	dev_priv->min_delay = fmin;
 	dev_priv->cur_delay = fstart;

-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-			 fstart);
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);

 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 		status = connector_status_connected;
 	}

-	return bit;
+	return status;
 }

 /**

@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);

 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_gem_object *obj,

@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;

-	if (intel_lvds->edid) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_lvds->edid);
+	if (intel_lvds->edid)
 		return drm_add_edid_modes(connector, intel_lvds->edid);
-	}

 	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
 	if (mode == 0)

@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
 	 */
 	intel_lvds->edid = drm_get_edid(connector,
 					&dev_priv->gmbus[pin].adapter);
-
+	if (intel_lvds->edid) {
+		if (drm_add_edid_modes(connector,
+				       intel_lvds->edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								intel_lvds->edid);
+		} else {
+			kfree(intel_lvds->edid);
+			intel_lvds->edid = NULL;
+		}
+	}
 	if (!intel_lvds->edid) {
 		/* Didn't get an EDID, so
 		 * Set wide sync ranges so we get all modes

@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
 	return 0;

 err_out:
-	iounmap(opregion->header);
+	iounmap(base);
 	return err;
 }

@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
-	u32 stride_mask, depth, tmp;
+	u32 stride_mask;
+	int depth;
+	u32 tmp;

 	/* check src dimensions */
 	if (IS_845G(dev) || IS_I830(dev)) {

@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,

 	I915_WRITE_CTL(ring,
 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_NO_REPORT | RING_VALID);
+			| RING_REPORT_64K | RING_VALID);

 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */

@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
 	cleanup_status_page(dev, ring);
 }

@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 {
 	unsigned long end;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 head;
+
+	head = intel_read_status_page(ring, 4);
+	if (head) {
+		ring->head = head & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+		if (ring->space >= n)
+			return 0;
+	}

 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;

@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
 	/* do nothing */
 }

+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+	return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		struct drm_i915_gem_object *obj;
+		u32 __iomem *ptr;
+		int ret;
+
+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		if (obj == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(&obj->base, 4096);
+		if (ret) {
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ptr = kmap(obj->pages[0]);
+		iowrite32(MI_BATCH_BUFFER_END, ptr);
+		iowrite32(MI_NOOP, ptr+1);
+		kunmap(obj->pages[0]);
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		if (ret) {
+			i915_gem_object_unpin(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   int num_dwords)
+{
+	if (ring->private) {
+		intel_ring_begin(dev, ring, num_dwords+2);
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+	} else
+		intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
+{
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno = i915_gem_get_seqno(dev);
+
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	i915_gem_object_unpin(ring->private);
+	drm_gem_object_unreference(ring->private);
+	ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
        .name			= "blt ring",
        .id			= RING_BLT,
        .mmio_base		= BLT_RING_BASE,
        .size			= 32 * PAGE_SIZE,
-       .init			= init_ring_common,
+       .init			= blt_ring_init,
        .write_tail		= ring_write_tail,
-       .flush			= gen6_ring_flush,
-       .add_request		= ring_add_request,
+       .flush			= blt_ring_flush,
+       .add_request		= blt_ring_add_request,
        .get_seqno		= ring_status_page_get_seqno,
        .user_irq_get		= blt_ring_get_user_irq,
        .user_irq_put		= blt_ring_put_user_irq,
        .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+       .cleanup			= blt_ring_cleanup,
 };

 int intel_init_render_ring_buffer(struct drm_device *dev)

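The new blt_ring_begin() above shows the general shape of the workaround: on affected hardware, every command sequence written to the ring is prefixed so the parser always sees a batch-buffer start first. A toy ring emitter sketching that pattern — the command encodings, sizes, and field names here are made up, not the hardware's:

#include <stdint.h>
#include <stddef.h>

#define CMD_BATCH_START 0x18800000u	/* hypothetical encoding */

struct ring {
	uint32_t buf[1024];
	size_t tail;
	int need_workaround;	/* e.g. set for an early silicon stepping */
	uint32_t fixup_addr;	/* offset of the pinned dummy batch object */
};

static void ring_emit(struct ring *r, uint32_t dw)
{
	r->buf[r->tail++ % 1024] = dw;
}

/* Open a command sequence of n dwords, inserting the prefix if needed,
 * as blt_ring_begin() does with num_dwords+2. */
static void ring_begin(struct ring *r, int n)
{
	(void)n; /* a real ring would reserve n (+2) dwords here */
	if (r->need_workaround) {
		ring_emit(r, CMD_BATCH_START);
		ring_emit(r, r->fixup_addr);
	}
}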
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
 			struct drm_i915_gem_execbuffer2 *exec,
 			struct drm_clip_rect *cliprects,
 			uint64_t exec_offset);
+	void		(*cleanup)(struct intel_ring_buffer *ring);

 	/**
 	 * List of objects currently involved in rendering from the

@@ -98,6 +99,8 @@ struct intel_ring_buffer {

 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
+
+	void *private;
 };

 static inline u32

@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;

 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */

@@ -2295,6 +2295,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		case 0: /* D1 vblank */
 			if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D1 vblank\n");

@@ -2316,6 +2317,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		case 0: /* D2 vblank */
 			if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D2 vblank\n");

@@ -2337,6 +2339,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		case 0: /* D3 vblank */
 			if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 2);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D3 vblank\n");

@@ -2358,6 +2361,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		case 0: /* D4 vblank */
 			if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 3);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D4 vblank\n");

@@ -2379,6 +2383,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		case 0: /* D5 vblank */
 			if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 4);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D5 vblank\n");

@@ -2400,6 +2405,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		case 0: /* D6 vblank */
 			if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 5);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D6 vblank\n");

@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 	int r;

 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "R100 PCI GART already initialized.\n");
+		WARN(1, "R100 PCI GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */

@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
 	uint32_t tmp = 0;

 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}

@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
 	int r;

 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RV370 PCIE GART already initialized.\n");
+		WARN(1, "RV370 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */

@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
-	u32 actual_temp = 0;

-	if ((temp >> 7) & 1)
-		actual_temp = 0;
-	else
-		actual_temp = (temp >> 1) & 0xff;
-
-	return actual_temp * 1000;
+	return temp * 1000;
 }

 void r600_pm_get_dynpm_state(struct radeon_device *rdev)

@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
 	int r;

 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "R600 PCIE GART already initialized.\n");
+		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */

@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 hdmi1, hdmi2;

 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */

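The rv6xx_get_temp() hunk above drops the old bit-7/bits-7:1 decoding and reports the full masked field directly. The two readings side by side, as a standalone sketch — the register layout here is illustrative, not the documented one:

#include <stdio.h>
#include <stdint.h>

/* Old interpretation: bit 7 as an invalid flag, bits 7:1 as the value. */
static int old_decode_millicelsius(uint32_t field)
{
	uint32_t t = ((field >> 7) & 1) ? 0 : ((field >> 1) & 0xff);
	return (int)t * 1000;
}

/* Fixed interpretation: the masked field is already the temperature. */
static int new_decode_millicelsius(uint32_t field)
{
	return (int)field * 1000;
}

int main(void)
{
	uint32_t field = 0x45; /* example raw ASIC_T value */
	printf("old: %d mC, new: %d mC\n",
	       old_decode_millicelsius(field),
	       new_decode_millicelsius(field));
	return 0;
}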
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 	if (crev < 2)
 		return false;

-	router.valid = false;
-
 	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
 	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
 	    (ctx->bios + data_offset +

@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 			continue;

+		router.ddc_valid = false;
+		router.cd_valid = false;
 		for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
 			uint8_t grph_obj_id, grph_obj_num, grph_obj_type;

@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							  usDeviceTag));

 			} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
-				router.valid = false;
 				for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
-					u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+					u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
 					if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
 						ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
 							(ctx->bios + data_offset +

@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						ATOM_I2C_RECORD *i2c_record;
 						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 						ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+						ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
 						ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
 							(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
 							(ctx->bios + data_offset +

@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
 							ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
 								record;
-							router.valid = true;
-							router.mux_type = ddc_path->ucMuxType;
-							router.mux_control_pin = ddc_path->ucMuxControlPin;
-							router.mux_state = ddc_path->ucMuxState[enum_id];
+							router.ddc_valid = true;
+							router.ddc_mux_type = ddc_path->ucMuxType;
+							router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+							router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+							break;
+						case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+							cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+								record;
+							router.cd_valid = true;
+							router.cd_mux_type = cd_path->ucMuxType;
+							router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+							router.cd_mux_state = cd_path->ucMuxState[enum_id];
 							break;
 						}
 						record = (ATOM_COMMON_RECORD_HEADER *)

@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 	struct radeon_router router;

-	router.valid = false;
+	router.ddc_valid = false;
+	router.cd_valid = false;

 	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
 	if (!bios_connectors)

@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
 			continue;

 		if (priority == true) {
-			DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
-			DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+			DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+			DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
 			conflict->status = connector_status_disconnected;
 			radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
 		} else {
-			DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
-			DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+			DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+			DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
 			current_status = connector_status_disconnected;
 		}
 		break;

@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
 			    mode->vdisplay == native_mode->vdisplay) {
 				*native_mode = *mode;
 				drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
-				DRM_INFO("Determined LVDS native mode details from EDID\n");
+				DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
 				break;
 			}
 		}
 	}
 	if (!native_mode->clock) {
-		DRM_INFO("No LVDS native mode details, disabling RMX\n");
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
 		radeon_encoder->rmx_type = RMX_OFF;
 	}
 }

@@ -1116,7 +1116,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 				radeon_connector->shared_ddc = true;
 				shared_ddc = true;
 			}
-			if (radeon_connector->router_bus && router->valid &&
+			if (radeon_connector->router_bus && router->ddc_valid &&
 			    (radeon_connector->router.router_id == router->router_id)) {
 				radeon_connector->shared_ddc = false;
 				shared_ddc = false;

@@ -1136,7 +1136,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	radeon_connector->connector_object_id = connector_object_id;
 	radeon_connector->hpd = *hpd;
 	radeon_connector->router = *router;
-	if (router->valid) {
+	if (router->ddc_valid || router->cd_valid) {
 		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
 		if (!radeon_connector->router_bus)
 			goto failed;

@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
 				 radeon_connector->ddc_bus->rec.en_data_reg,
 				 radeon_connector->ddc_bus->rec.y_clk_reg,
 				 radeon_connector->ddc_bus->rec.y_data_reg);
-			if (radeon_connector->router_bus)
+			if (radeon_connector->router.ddc_valid)
 				DRM_INFO("  DDC Router 0x%x/0x%x\n",
-					 radeon_connector->router.mux_control_pin,
-					 radeon_connector->router.mux_state);
+					 radeon_connector->router.ddc_mux_control_pin,
+					 radeon_connector->router.ddc_mux_state);
+			if (radeon_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 radeon_connector->router.cd_mux_control_pin,
+					 radeon_connector->router.cd_mux_state);
 		} else {
 			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
 			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||

@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 	int ret = 0;

 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);

 	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {

@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
 	int ret = 0;

 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);

 	if (!radeon_connector->ddc_bus)
 		return -1;

@@ -1520,6 +1520,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

 	if (radeon_encoder->active_device &
 	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {

@@ -1531,6 +1532,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 	radeon_atom_output_lock(encoder, true);
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

+	/* select the clock/data port if it uses a router */
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->router.cd_valid)
+			radeon_router_select_cd_port(radeon_connector);
+	}
+
 	/* this is needed for the pll/ss setup to work correctly in some cases */
 	atombios_set_encoder_crtc_source(encoder);
 }

@@ -1547,6 +1555,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig;
+
+	/* check for pre-DCE3 cards with shared encoders;
+	 * can't really use the links individually, so don't disable
+	 * the encoder if it's in use by another connector
+	 */
+	if (!ASIC_IS_DCE3(rdev)) {
+		struct drm_encoder *other_encoder;
+		struct radeon_encoder *other_radeon_encoder;
+
+		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+			other_radeon_encoder = to_radeon_encoder(other_encoder);
+			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+			    drm_helper_encoder_in_use(other_encoder))
+				goto disable_done;
+		}
+	}
+
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

 	switch (radeon_encoder->encoder_id) {

@@ -1586,6 +1611,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 		break;
 	}

+disable_done:
 	if (radeon_encoder_is_digital(encoder)) {
 		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
 			r600_hdmi_disable(encoder);

@@ -240,7 +240,8 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 		 */
 		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
 			/* good news we believe it's a lockup */
-			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+			     fence->seq, seq);
 			/* FIXME: what should we do ? marking everyone
 			 * as signaled for now
 			 */

@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
 	};

 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);

 	ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
 	if (ret == 2)

@@ -1084,26 +1084,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
 			  addr, val);
 }

-/* router switching */
-void radeon_router_select_port(struct radeon_connector *radeon_connector)
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
 {
 	u8 val;

-	if (!radeon_connector->router.valid)
+	if (!radeon_connector->router.ddc_valid)
 		return;

 	radeon_i2c_get_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x3, &val);
-	val &= radeon_connector->router.mux_control_pin;
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
 	radeon_i2c_put_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x3, val);
 	radeon_i2c_get_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x1, &val);
-	val &= radeon_connector->router.mux_control_pin;
-	val |= radeon_connector->router.mux_state;
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	val |= radeon_connector->router.ddc_mux_state;
 	radeon_i2c_put_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x1, val);
 }
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.cd_valid)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	val |= radeon_connector->router.cd_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}

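The router-switching fix above is a classic read-modify-write bug: the old code masked with the control-pin bits ('val &= pin'), keeping only those bits, where it needed to clear them ('val &= ~pin') before OR-ing in the mux state. A standalone sketch of the corrected sequence against a simulated register bank (register numbers and bit values are made up):

#include <stdint.h>
#include <stdio.h>

/* Simulated 8-bit register bank standing in for the i2c mux device. */
static uint8_t regs[4];

static uint8_t reg_get(int r)            { return regs[r]; }
static void    reg_put(int r, uint8_t v) { regs[r] = v; }

static void select_port(uint8_t mux_pin, uint8_t mux_state)
{
	uint8_t val;

	val = reg_get(3);
	val &= ~mux_pin;		/* clear the control bits */
	reg_put(3, val);

	val = reg_get(1);
	val &= ~mux_pin;
	val |= mux_state;		/* then set the requested route */
	reg_put(1, val);
}

int main(void)
{
	regs[1] = 0xff;
	select_port(0x03, 0x01);
	printf("reg1 = %#x\n", reg_get(1)); /* 0xfd: bits cleared, state set */
	return 0;
}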
@@ -401,13 +401,19 @@ struct radeon_hpd {
 };

 struct radeon_router {
-	bool valid;
 	u32 router_id;
 	struct radeon_i2c_bus_rec i2c_info;
 	u8 i2c_addr;
-	u8 mux_type;
-	u8 mux_control_pin;
-	u8 mux_state;
+	/* i2c mux */
+	bool ddc_valid;
+	u8 ddc_mux_type;
+	u8 ddc_mux_control_pin;
+	u8 ddc_mux_state;
+	/* clock/data mux */
+	bool cd_valid;
+	u8 cd_mux_type;
+	u8 cd_mux_control_pin;
+	u8 cd_mux_state;
 };

 struct radeon_connector {

@@ -488,7 +494,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
 				u8 slave_addr,
 				u8 addr,
 				u8 val);
-extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
 extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);

@@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 		type = ttm_bo_type_device;
 	}
 	*bo_ptr = NULL;
+
+retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;

@@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	bo->gobj = gobj;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
-
-retry:
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	mutex_lock(&rdev->vram_mutex);

@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->offset = bo_mem->start << PAGE_SHIFT;
 	if (!gtt->num_pages) {
-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     gtt->num_pages, bo_mem, backend);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
 			     gtt->num_pages, gtt->pages);

@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
 	int r;

 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "RS400 GART already initialized.\n");
+		WARN(1, "RS400 GART already initialized\n");
 		return 0;
 	}
 	/* Check gart size */

@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
 	int r;

 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RS600 GART already initialized.\n");
+		WARN(1, "RS600 GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */

@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
 		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}

@@ -27,14 +27,6 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* Notes:
- *
- * We store bo pointer in drm_mm_node struct so we know which bo own a
- * specific node. There is no protection on the pointer, thus to make
- * sure things don't go berserk you have to access this pointer while
- * holding the global lru lock and make sure anytime you free a node you
- * reset the pointer to NULL.
- */

 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"

@@ -45,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/file.h>
 #include <linux/module.h>
+#include <asm/atomic.h>

 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)

@@ -452,6 +445,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	ttm_bo_mem_put(bo, &bo->mem);

 	atomic_set(&bo->reserved, 0);
+
+	/*
+	 * Make processes trying to reserve really pick it up.
+	 */
+	smp_mb__after_atomic_dec();
 	wake_up_all(&bo->event_queue);
 }

@@ -460,7 +458,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_bo_driver *driver;
-	void *sync_obj;
+	void *sync_obj = NULL;
 	void *sync_obj_arg;
 	int put_count;
 	int ret;

@@ -495,17 +493,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		spin_lock(&glob->lru_lock);
 	}
 queue:
-	sync_obj = bo->sync_obj;
-	sync_obj_arg = bo->sync_obj_arg;
 	driver = bdev->driver;
+	if (bo->sync_obj)
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	sync_obj_arg = bo->sync_obj_arg;

 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
 	spin_unlock(&bo->lock);

-	if (sync_obj)
+	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
+		driver->sync_obj_unref(&sync_obj);
+	}
 	schedule_delayed_work(&bdev->wq,
 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 }

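The queue-path change above takes a reference on the sync object while the spinlocks are still held, so another thread cannot free it between the unlock and the flush. The essence of the idiom, sketched with a plain counter (the kernel code uses the driver's sync_obj_ref/sync_obj_unref callbacks):

#include <stdlib.h>

struct sync_obj {
	int refcount;
};

/* Take a private reference; the caller must hold the lock that protects
 * the shared pointer, which is exactly what the hunk above ensures. */
static struct sync_obj *sync_obj_ref(struct sync_obj *s)
{
	s->refcount++;
	return s;
}

/* Drop the reference after use; the last holder frees the object. */
static void sync_obj_unref(struct sync_obj **s)
{
	if (--(*s)->refcount == 0)
		free(*s);
	*s = NULL;
}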
@@ -822,7 +823,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	int ret;

@@ -832,12 +832,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 		if (mem->mm_node)
 			break;
-		spin_lock(&glob->lru_lock);
-		if (list_empty(&man->lru)) {
-			spin_unlock(&glob->lru_lock);
-			break;
-		}
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
 						no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))

@@ -1125,35 +1119,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
-	int i;
+	BUG_ON((placement->fpfn || placement->lpfn) &&
+	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

-	if (placement->fpfn || placement->lpfn) {
-		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
-			printk(KERN_ERR TTM_PFX "Page number range to small "
-				"Need %lu pages, range is [%u, %u]\n",
-				bo->mem.num_pages, placement->fpfn,
-				placement->lpfn);
-			return -EINVAL;
-		}
-	}
-	for (i = 0; i < placement->num_placement; i++) {
-		if (!capable(CAP_SYS_ADMIN)) {
-			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
-				printk(KERN_ERR TTM_PFX "Need to be root to "
-					"modify NO_EVICT status.\n");
-				return -EINVAL;
-			}
-		}
-	}
-	for (i = 0; i < placement->num_busy_placement; i++) {
-		if (!capable(CAP_SYS_ADMIN)) {
-			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
-				printk(KERN_ERR TTM_PFX "Need to be root to "
-					"modify NO_EVICT status.\n");
-				return -EINVAL;
-			}
-		}
-	}
 	return 0;
 }

@@ -1176,6 +1144,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (num_pages == 0) {
 		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+		if (destroy)
+			(*destroy)(bo);
+		else
+			kfree(bo);
 		return -EINVAL;
 	}
 	bo->destroy = destroy;

@@ -1369,18 +1341,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;

-	if (type >= TTM_NUM_MEM_TYPES) {
-		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
-		return ret;
-	}
+	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
-	if (man->has_type) {
-		printk(KERN_ERR TTM_PFX
-		       "Memory manager already initialized for type %d\n",
-		       type);
-		return ret;
-	}
+	BUG_ON(man->has_type);

 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)

@@ -1389,13 +1352,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,

 	ret = 0;
 	if (type != TTM_PL_SYSTEM) {
-		if (!p_size) {
-			printk(KERN_ERR TTM_PFX
-			       "Zero size memory manager type %d\n",
-			       type);
-			return ret;
-		}
-
 		ret = (*man->func->init)(man, p_size);
 		if (ret)
 			return ret;

@ -1,6 +1,6 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
@ -31,20 +31,29 @@
|
|||
#include "ttm/ttm_module.h"
|
||||
#include "ttm/ttm_bo_driver.h"
|
||||
#include "ttm/ttm_placement.h"
|
||||
#include <linux/jiffies.h>
|
||||
#include "drm_mm.h"
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
/**
|
||||
* Currently we use a spinlock for the lock, but a mutex *may* be
|
||||
* more appropriate to reduce scheduling latency if the range manager
|
||||
* ends up with very fragmented allocation patterns.
|
||||
*/
|
||||
|
||||
struct ttm_range_manager {
|
||||
struct drm_mm mm;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
|
||||
struct ttm_buffer_object *bo,
|
||||
struct ttm_placement *placement,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_bo_global *glob = man->bdev->glob;
|
||||
struct drm_mm *mm = man->priv;
|
||||
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
|
||||
struct drm_mm *mm = &rman->mm;
|
||||
struct drm_mm_node *node = NULL;
|
||||
unsigned long lpfn;
|
||||
int ret;
|
||||
|
@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
|
|||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
spin_lock(&glob->lru_lock);
|
||||
spin_lock(&rman->lock);
|
||||
node = drm_mm_search_free_in_range(mm,
|
||||
mem->num_pages, mem->page_alignment,
|
||||
placement->fpfn, lpfn, 1);
|
||||
if (unlikely(node == NULL)) {
|
||||
spin_unlock(&glob->lru_lock);
|
||||
spin_unlock(&rman->lock);
|
||||
return 0;
|
||||
}
|
||||
node = drm_mm_get_block_atomic_range(node, mem->num_pages,
|
||||
mem->page_alignment,
|
||||
placement->fpfn,
|
||||
lpfn);
|
||||
spin_unlock(&glob->lru_lock);
|
||||
mem->page_alignment,
|
||||
placement->fpfn,
|
||||
lpfn);
|
||||
spin_unlock(&rman->lock);
|
||||
} while (node == NULL);
|
||||
|
||||
mem->mm_node = node;
|
||||
|
@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
|
|||
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_bo_global *glob = man->bdev->glob;
|
||||
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
|
||||
|
||||
if (mem->mm_node) {
|
||||
spin_lock(&glob->lru_lock);
|
||||
spin_lock(&rman->lock);
|
||||
drm_mm_put_block(mem->mm_node);
|
||||
spin_unlock(&glob->lru_lock);
|
||||
spin_unlock(&rman->lock);
|
||||
mem->mm_node = NULL;
|
||||
}
|
||||
}
|
||||
|
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
struct drm_mm *mm;
struct ttm_range_manager *rman;
int ret;

mm = kzalloc(sizeof(*mm), GFP_KERNEL);
if (!mm)
rman = kzalloc(sizeof(*rman), GFP_KERNEL);
if (!rman)
return -ENOMEM;

ret = drm_mm_init(mm, 0, p_size);
ret = drm_mm_init(&rman->mm, 0, p_size);
if (ret) {
kfree(mm);
kfree(rman);
return ret;
}

man->priv = mm;
spin_lock_init(&rman->lock);
man->priv = rman;
return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
struct ttm_bo_global *glob = man->bdev->glob;
struct drm_mm *mm = man->priv;
int ret = 0;
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;

spin_lock(&glob->lru_lock);
spin_lock(&rman->lock);
if (drm_mm_clean(mm)) {
drm_mm_takedown(mm);
kfree(mm);
spin_unlock(&rman->lock);
kfree(rman);
man->priv = NULL;
} else
ret = -EBUSY;
spin_unlock(&glob->lru_lock);
return ret;
return 0;
}
spin_unlock(&rman->lock);
return -EBUSY;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
struct ttm_bo_global *glob = man->bdev->glob;
struct drm_mm *mm = man->priv;
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

spin_lock(&glob->lru_lock);
drm_mm_debug_table(mm, prefix);
spin_unlock(&glob->lru_lock);
spin_lock(&rman->lock);
drm_mm_debug_table(&rman->mm, prefix);
spin_unlock(&rman->lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
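The struct ttm_range_manager these hunks cast man->priv to is not visible in this excerpt; the field uses above (rman->mm, rman->lock) imply a definition along these lines, pairing the drm_mm range allocator with its own spinlock in place of the shared lru_lock (a sketch, not necessarily the exact layout):

	struct ttm_range_manager {
		struct drm_mm mm;	/* range allocator backing the manager */
		spinlock_t lock;	/* private lock; replaces glob->lru_lock here */
	};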
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return ret;

ret = be->func->bind(be, bo_mem);
if (ret) {
printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
if (unlikely(ret != 0))
return ret;
}

ttm->state = tt_bound;
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
first_pfn + 1;

if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
if (NULL == vsg->pages)
return -ENOMEM;
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm,
(unsigned long)xfer->mem_addr,
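For context, vzalloc() folds the vmalloc()-then-memset() pair removed above into a single call that returns zeroed memory, or NULL on failure. A minimal sketch of the pattern (buffer and count names hypothetical):

	#include <linux/vmalloc.h>

	/* Before: allocate, then zero by hand. */
	buf = vmalloc(count * sizeof(*buf));
	if (buf)
		memset(buf, 0, count * sizeof(*buf));

	/* After: one call, memory already zeroed; still NULL on failure. */
	buf = vzalloc(count * sizeof(*buf));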
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,

fence_rep.error = ret;
fence_rep.fence_seq = (uint64_t) sequence;
fence_rep.pad64 = 0;

user_fence_rep = (struct drm_vmw_fence_rep __user *)
(unsigned long)arg->fence_rep;
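The added fence_rep.pad64 = 0 matters because the structure is subsequently copied out to the userspace pointer set up just below; any field or padding left unwritten would carry kernel stack contents to userland. A hedged sketch of the general defensive pattern (user_ptr and the surrounding code are hypothetical):

	struct drm_vmw_fence_rep rep;

	memset(&rep, 0, sizeof(rep));	/* zero everything, padding included */
	rep.error = ret;
	if (copy_to_user(user_ptr, &rep, sizeof(rep)))
		ret = -EFAULT;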
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
vmw_overlay_resume_all(dev_priv);
if (unlikely(ret != 0))
vfbs->buffer = NULL;

return ret;
}

@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(&vfb->base);

if (unlikely(vfbs->buffer == NULL))
return 0;

bo = &vfbs->buffer->base;
ttm_bo_unref(&bo);
vfbs->buffer = NULL;
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
return -EINVAL;
}

dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);

if (!dev_priv->ldu_priv)
return -ENOMEM;

@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
return -ENOSYS;
}

overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
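Both vmwgfx hunks above fix the same switched-argument bug: kmalloc() takes the allocation size first and the GFP flags second, so kmalloc(GFP_KERNEL, sizeof(*p)) compiles (both arguments are integer types) but requests an allocation of GFP_KERNEL's numeric value, with sizeof(*p) passed as nonsense flags. The correct idiom, for reference (p hypothetical):

	/* void *kmalloc(size_t size, gfp_t flags); size first, flags second */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;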
@@ -3,6 +3,9 @@ config STUB_POULSBO
depends on PCI
# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select VIDEO_OUTPUT_CONTROL if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
help
Choose this option if you have a system that has Intel GMA500
@@ -432,6 +432,10 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
* together with the @destroy function,
* enables driver-specific objects derived from a ttm_buffer_object.
* On successful return, the object kref and list_kref are set to 1.
* If a failure occurs, the function will call the @destroy function, or
* kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
* illegal and will likely cause memory corruption.
*
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
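The sentences added above pin down the failure contract of ttm_bo_init(): on error the function itself disposes of the object through @destroy, or kfree() when @destroy is NULL, so the caller must not touch @bo again. A sketch of a driver-side wrapper honoring that contract (type and function names hypothetical):

	struct my_bo {
		struct ttm_buffer_object base;
		/* driver-private state */
	};

	static void my_bo_destroy(struct ttm_buffer_object *bo)
	{
		struct my_bo *mbo = container_of(bo, struct my_bo, base);

		kfree(mbo);	/* free the enclosing object, not just bo */
	}

	/* After a failed ttm_bo_init(..., &my_bo_destroy), my_bo_destroy()
	 * has already run; dereferencing or freeing the object again is a
	 * use-after-free. */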
@@ -206,14 +206,84 @@ struct ttm_tt {
struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
/**
* struct ttm_mem_type_manager member init
*
* @man: Pointer to a memory type manager.
* @p_size: Implementation dependent, but typically the size of the
* range to be managed in pages.
*
* Called to initialize a private range manager. The function is
* expected to initialize the man::priv member.
* Returns 0 on success, negative error code on failure.
*/
int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

/**
* struct ttm_mem_type_manager member takedown
*
* @man: Pointer to a memory type manager.
*
* Called to undo the setup done in init. All allocated resources
* should be freed.
*/
int (*takedown)(struct ttm_mem_type_manager *man);

/**
* struct ttm_mem_type_manager member get_node
*
* @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for.
* @placement: Placement details.
* @mem: Pointer to a struct ttm_mem_reg to be filled in.
*
* This function should allocate space in the memory type managed
* by @man. Placement details, if applicable, are given by
* @placement. If successful, @mem::mm_node should be set to a
* non-null value, @mem::start should be set to a value identifying
* the beginning of the range allocated, and the function should
* return zero.
* If the memory region cannot accommodate the buffer object,
* @mem::mm_node should be set to NULL, and the function should
* return 0.
* If a system error occurred, preventing the request from being
* fulfilled, the function should return a negative error code.
*
* Note that @mem::mm_node will only be dereferenced by
* struct ttm_mem_type_manager functions and optionally by the driver,
* which has knowledge of the underlying type.
*
* This function may not be called from within atomic context, so
* an implementation can and must use either a mutex or a spinlock to
* protect any data structures managing the space.
*/
int (*get_node)(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem);

/**
* struct ttm_mem_type_manager member put_node
*
* @man: Pointer to a memory type manager.
* @mem: Pointer to the struct ttm_mem_reg whose resources should be freed.
*
* This function frees memory type resources previously allocated
* and that are identified by @mem::mm_node and @mem::start. May not
* be called from within atomic context.
*/
void (*put_node)(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem);

/**
* struct ttm_mem_type_manager member debug
*
* @man: Pointer to a memory type manager.
* @prefix: Prefix to be used in printout to identify the caller.
*
* This function is called to print out the state of the memory
* type manager to aid debugging of out-of-memory conditions.
* It may not be called from within atomic context.
*/
void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
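A driver opts into an implementation of this vtable when it initializes a memory type; with the default range manager shown earlier, the hookup is a single assignment. A minimal sketch (hook and constant names as commonly used by drivers of this era; treat the details as illustrative):

	static int my_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				    struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			/* drm_mm-backed default manager: init() allocates
			 * man->priv, takedown() frees it, and get_node()/
			 * put_node() take the manager's private lock. */
			man->func = &ttm_bo_manager_func;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}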
@@ -231,14 +301,13 @@ struct ttm_mem_type_manager {
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;

/*
* Protected by the bdev->lru_lock.
* TODO: Consider one lru_lock per ttm_mem_type_manager.
* Plays ill with list removal, though.
*/
const struct ttm_mem_type_manager_func *func;
void *priv;

/*
* Protected by the global->lru_lock.
*/

struct list_head lru;
};