mirror of https://gitee.com/openkylin/linux.git
Merge branch 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: Fix further issues in drivers/char/drm/via_irq.c
  drivers/char/drm/drm_memory.c: possible cleanups
  drm: deline a few large inlines in DRM code
  drm: remove master setting from add/remove context
  drm: drm_pci needs dma-mapping.h
  [PATCH] drm: Fix issue reported by Coverity in drivers/char/drm/via_irq.c
commit ac69e973ff
@@ -815,8 +815,6 @@ extern int drm_mem_info(char *buf, char **start, off_t offset,
 extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
 extern void *drm_ioremap(unsigned long offset, unsigned long size,
 			 drm_device_t * dev);
-extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
-				 drm_device_t * dev);
 extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev);
 
 extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type);
@@ -1022,11 +1020,13 @@ static __inline__ void drm_core_ioremap(struct drm_map *map,
 	map->handle = drm_ioremap(map->offset, map->size, dev);
 }
 
+#if 0
 static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
 						struct drm_device *dev)
 {
 	map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
 }
+#endif /* 0 */
 
 static __inline__ void drm_core_ioremapfree(struct drm_map *map,
 					    struct drm_device *dev)
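The two header hunks above track the memory-helper rework: drm_ioremap() and drm_ioremapfree() stay declared as ordinary functions, the unused drm_ioremap_nocache() prototype goes away, and drm_core_ioremap_nocache() is fenced off with #if 0. As a rough orientation, here is how a driver of that era would use the surviving helpers; a minimal sketch assuming a 2.6.16-style DRM driver, with the function and variable names below purely illustrative rather than taken from any in-tree driver:

/* Illustrative only: map a register/framebuffer drm_map, use it, unmap it.
 * Assumes the usual kernel headers pulled in via "drmP.h". */
static int example_map_mmio(drm_device_t * dev, struct drm_map *map)
{
	drm_core_ioremap(map, dev);	/* fills map->handle via drm_ioremap() */
	if (!map->handle)
		return -ENOMEM;

	/* ... MMIO accesses through map->handle ... */

	drm_core_ioremapfree(map, dev);	/* vunmap() or iounmap(), as appropriate */
	map->handle = NULL;
	return 0;
}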
@@ -75,8 +75,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
 
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_ROOT_ONLY},
 	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
 	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
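This hunk corresponds to "drm: remove master setting from add/remove context": DRM_IOCTL_ADD_CTX and DRM_IOCTL_RM_CTX drop the DRM_MASTER requirement, so an authenticated, root-capable client no longer has to be the DRM master to add or remove a context. For orientation, a minimal sketch of the kind of permission gate such a flags table drives; the struct, helper name, and flag values here are illustrative, not the kernel's actual drm_ioctl() code:

#include <errno.h>
#include <stdbool.h>

#define DRM_AUTH	0x1	/* caller must have completed DRM authentication */
#define DRM_MASTER	0x2	/* caller must be the DRM master */
#define DRM_ROOT_ONLY	0x4	/* caller must have admin privileges */

struct caller {
	bool authenticated;
	bool is_master;
	bool capable_admin;
};

/* Return 0 only if the caller satisfies every flag set on the ioctl entry. */
static int check_ioctl_flags(unsigned int flags, const struct caller *c)
{
	if ((flags & DRM_ROOT_ONLY) && !c->capable_admin)
		return -EACCES;
	if ((flags & DRM_AUTH) && !c->authenticated)
		return -EACCES;
	if ((flags & DRM_MASTER) && !c->is_master)
		return -EACCES;
	return 0;
}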
@@ -80,6 +80,71 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
 }
 
 #if __OS_HAS_AGP
+/*
+ * Find the drm_map that covers the range [offset, offset+size).
+ */
+static drm_map_t *drm_lookup_map(unsigned long offset,
+				 unsigned long size, drm_device_t * dev)
+{
+	struct list_head *list;
+	drm_map_list_t *r_list;
+	drm_map_t *map;
+
+	list_for_each(list, &dev->maplist->head) {
+		r_list = (drm_map_list_t *) list;
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (map->offset <= offset
+		    && (offset + size) <= (map->offset + map->size))
+			return map;
+	}
+	return NULL;
+}
+
+static void *agp_remap(unsigned long offset, unsigned long size,
+		       drm_device_t * dev)
+{
+	unsigned long *phys_addr_map, i, num_pages =
+	    PAGE_ALIGN(size) / PAGE_SIZE;
+	struct drm_agp_mem *agpmem;
+	struct page **page_map;
+	void *addr;
+
+	size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+	offset -= dev->hose->mem_space->start;
+#endif
+
+	for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+		if (agpmem->bound <= offset
+		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+		    (offset + size))
+			break;
+	if (!agpmem)
+		return NULL;
+
+	/*
+	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+	 * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+	 * page-table instead (that's probably faster anyhow...).
+	 */
+	/* note: use vmalloc() because num_pages could be large... */
+	page_map = vmalloc(num_pages * sizeof(struct page *));
+	if (!page_map)
+		return NULL;
+
+	phys_addr_map =
+	    agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+	for (i = 0; i < num_pages; ++i)
+		page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+	vfree(page_map);
+
+	return addr;
+}
+
 /** Wrapper around agp_allocate_memory() */
 DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
 {
@@ -103,5 +168,74 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
 	return drm_agp_unbind_memory(handle);
 }
 
+#else /* __OS_HAS_AGP */
+
+static inline drm_map_t *drm_lookup_map(unsigned long offset,
+					unsigned long size, drm_device_t * dev)
+{
+	return NULL;
+}
+
+static inline void *agp_remap(unsigned long offset, unsigned long size,
+			      drm_device_t * dev)
+{
+	return NULL;
+}
+
+#endif /* agp */
+
+void *drm_ioremap(unsigned long offset, unsigned long size,
+		  drm_device_t * dev)
+{
+	if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
+		drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+		if (map && map->type == _DRM_AGP)
+			return agp_remap(offset, size, dev);
+	}
+	return ioremap(offset, size);
+}
+EXPORT_SYMBOL(drm_ioremap);
+
+#if 0
+void *drm_ioremap_nocache(unsigned long offset,
+			  unsigned long size, drm_device_t * dev)
+{
+	if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
+		drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+		if (map && map->type == _DRM_AGP)
+			return agp_remap(offset, size, dev);
+	}
+	return ioremap_nocache(offset, size);
+}
+#endif /* 0 */
+
+void drm_ioremapfree(void *pt, unsigned long size,
+		     drm_device_t * dev)
+{
+	/*
+	 * This is a bit ugly.  It would be much cleaner if the DRM API would use separate
+	 * routines for handling mappings in the AGP space.  Hopefully this can be done in
+	 * a future revision of the interface...
+	 */
+	if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
+	    && ((unsigned long)pt >= VMALLOC_START
+		&& (unsigned long)pt < VMALLOC_END)) {
+		unsigned long offset;
+		drm_map_t *map;
+
+		offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
+		map = drm_lookup_map(offset, size, dev);
+		if (map && map->type == _DRM_AGP) {
+			vunmap(pt);
+			return;
+		}
+	}
+
+	iounmap(pt);
+}
+EXPORT_SYMBOL(drm_ioremapfree);
+
 #endif /* debug_memory */
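These two hunks move drm_lookup_map(), agp_remap(), drm_ioremap(), and drm_ioremapfree() out of header inlines and into this file ("drm: deline a few large inlines in DRM code"), with the unused drm_ioremap_nocache() kept under #if 0. The interesting piece is agp_remap(): on platforms where the CPU cannot go through the AGP aperture (cant_use_aperture), the physical pages backing the AGP allocation are mapped through the kernel page tables with vmap() instead of ioremap(). A condensed sketch of that mapping step, assuming a kernel build environment; map_phys_pages() and its phys_addrs[] parameter are hypothetical stand-ins for what the real function reads out of agpmem->memory->memory:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map num_pages physical pages (given by their physical addresses) into a
 * contiguous kernel virtual range, the way agp_remap() above does. */
static void *map_phys_pages(const unsigned long *phys_addrs,
			    unsigned long num_pages, pgprot_t prot)
{
	struct page **pages;
	unsigned long i;
	void *addr;

	/* vmalloc(), not kmalloc(): the page array itself may be large */
	pages = vmalloc(num_pages * sizeof(struct page *));
	if (!pages)
		return NULL;

	for (i = 0; i < num_pages; i++)
		pages[i] = pfn_to_page(phys_addrs[i] >> PAGE_SHIFT);

	addr = vmap(pages, num_pages, VM_IOREMAP, prot);
	vfree(pages);	/* the temporary array is not needed once mapped */
	return addr;
}

A mapping built this way is torn down with vunmap(), which is exactly why drm_ioremapfree() above checks whether the pointer lies in the vmalloc range before deciding between vunmap() and iounmap().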
@@ -57,71 +57,6 @@
 # endif
 #endif
 
-/*
- * Find the drm_map that covers the range [offset, offset+size).
- */
-static inline drm_map_t *drm_lookup_map(unsigned long offset,
-					unsigned long size, drm_device_t * dev)
-{
-	struct list_head *list;
-	drm_map_list_t *r_list;
-	drm_map_t *map;
-
-	list_for_each(list, &dev->maplist->head) {
-		r_list = (drm_map_list_t *) list;
-		map = r_list->map;
-		if (!map)
-			continue;
-		if (map->offset <= offset
-		    && (offset + size) <= (map->offset + map->size))
-			return map;
-	}
-	return NULL;
-}
-
-static inline void *agp_remap(unsigned long offset, unsigned long size,
-			      drm_device_t * dev)
-{
-	unsigned long *phys_addr_map, i, num_pages =
-	    PAGE_ALIGN(size) / PAGE_SIZE;
-	struct drm_agp_mem *agpmem;
-	struct page **page_map;
-	void *addr;
-
-	size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
-	offset -= dev->hose->mem_space->start;
-#endif
-
-	for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
-		if (agpmem->bound <= offset
-		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
-		    (offset + size))
-			break;
-	if (!agpmem)
-		return NULL;
-
-	/*
-	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
-	 * the CPU do not get remapped by the GART.  We fix this by using the kernel's
-	 * page-table instead (that's probably faster anyhow...).
-	 */
-	/* note: use vmalloc() because num_pages could be large... */
-	page_map = vmalloc(num_pages * sizeof(struct page *));
-	if (!page_map)
-		return NULL;
-
-	phys_addr_map =
-	    agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
-	for (i = 0; i < num_pages; ++i)
-		page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
-	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
-	vfree(page_map);
-
-	return addr;
-}
-
 static inline unsigned long drm_follow_page(void *vaddr)
 {
 	pgd_t *pgd = pgd_offset_k((unsigned long)vaddr);
@@ -133,18 +68,6 @@ static inline unsigned long drm_follow_page(void *vaddr)
 
 #else /* __OS_HAS_AGP */
 
-static inline drm_map_t *drm_lookup_map(unsigned long offset,
-					unsigned long size, drm_device_t * dev)
-{
-	return NULL;
-}
-
-static inline void *agp_remap(unsigned long offset, unsigned long size,
-			      drm_device_t * dev)
-{
-	return NULL;
-}
-
 static inline unsigned long drm_follow_page(void *vaddr)
 {
 	return 0;
@@ -152,51 +75,8 @@ static inline unsigned long drm_follow_page(void *vaddr)
 
 #endif
 
-static inline void *drm_ioremap(unsigned long offset, unsigned long size,
-				drm_device_t * dev)
-{
-	if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
-		drm_map_t *map = drm_lookup_map(offset, size, dev);
-
-		if (map && map->type == _DRM_AGP)
-			return agp_remap(offset, size, dev);
-	}
-	return ioremap(offset, size);
-}
+void *drm_ioremap(unsigned long offset, unsigned long size,
+		  drm_device_t * dev);
 
-static inline void *drm_ioremap_nocache(unsigned long offset,
-					unsigned long size, drm_device_t * dev)
-{
-	if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
-		drm_map_t *map = drm_lookup_map(offset, size, dev);
-
-		if (map && map->type == _DRM_AGP)
-			return agp_remap(offset, size, dev);
-	}
-	return ioremap_nocache(offset, size);
-}
-
-static inline void drm_ioremapfree(void *pt, unsigned long size,
-				   drm_device_t * dev)
-{
-	/*
-	 * This is a bit ugly.  It would be much cleaner if the DRM API would use separate
-	 * routines for handling mappings in the AGP space.  Hopefully this can be done in
-	 * a future revision of the interface...
-	 */
-	if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
-	    && ((unsigned long)pt >= VMALLOC_START
-		&& (unsigned long)pt < VMALLOC_END)) {
-		unsigned long offset;
-		drm_map_t *map;
-
-		offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
-		map = drm_lookup_map(offset, size, dev);
-		if (map && map->type == _DRM_AGP) {
-			vunmap(pt);
-			return;
-		}
-	}
-
-	iounmap(pt);
-}
+void drm_ioremapfree(void *pt, unsigned long size,
+		     drm_device_t * dev);
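These three hunks are the header-side half of the same cleanup: the inline copies of drm_lookup_map(), agp_remap(), drm_ioremap(), drm_ioremap_nocache(), and drm_ioremapfree() disappear from the header, leaving only prototypes for the two functions now defined and exported in drm_memory.c. The drm_follow_page() helper that drm_ioremapfree() relies on stays behind; its body is cut off at the hunk boundary above, but for a kernel of this vintage the walk presumably continues down the remaining page-table levels roughly as follows. This is a sketch, not the verbatim file contents, and it assumes the page-table accessors already pulled in via "drmP.h":

static inline unsigned long drm_follow_page(void *vaddr)
{
	pgd_t *pgd = pgd_offset_k((unsigned long)vaddr);
	pud_t *pud = pud_offset(pgd, (unsigned long)vaddr);
	pmd_t *pmd = pmd_offset(pud, (unsigned long)vaddr);
	pte_t *ptep = pte_offset_kernel(pmd, (unsigned long)vaddr);

	/* physical address of the page backing this vmalloc/vmap address */
	return pte_pfn(*ptep) << PAGE_SHIFT;
}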
@@ -229,6 +229,7 @@ void *drm_ioremap (unsigned long offset, unsigned long size,
 	return pt;
 }
 
+#if 0
 void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
 			   drm_device_t * dev) {
 	void *pt;
@@ -251,6 +252,7 @@ void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
 	spin_unlock(&drm_mem_lock);
 	return pt;
 }
+#endif /* 0 */
 
 void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
 	int alloc_count;
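These two hunks wrap the unused drm_ioremap_nocache() in #if 0 in the DEBUG_MEMORY variant of the allocator as well. For context, that variant differs from the normal one mainly by keeping usage statistics under the drm_mem_lock spinlock; a rough sketch of that bookkeeping pattern, with a hypothetical lock and counters rather than the file's real statistics fields:

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(mem_stats_lock);	/* stand-in for drm_mem_lock */
static unsigned long ioremap_calls;	/* hypothetical counters */
static unsigned long ioremap_bytes;

static void __iomem *tracked_ioremap(unsigned long offset, unsigned long size)
{
	void __iomem *pt = ioremap(offset, size);

	if (pt) {
		spin_lock(&mem_stats_lock);
		ioremap_calls++;
		ioremap_bytes += size;
		spin_unlock(&mem_stats_lock);
	}
	return pt;
}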
@@ -37,6 +37,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include "drmP.h"
 
 /**********************************************************************/
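"drm: drm_pci needs dma-mapping.h" is just the missing include: the file allocates device-coherent buffers through the generic DMA API, whose declarations live in <linux/dma-mapping.h> rather than <linux/pci.h>. A minimal sketch of the call the include exists to serve; the wrapper below is illustrative, not the file's own allocation routine:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/* Allocate a buffer that both the CPU and the PCI device can address. */
static void *alloc_coherent_buffer(struct pci_dev *pdev, size_t size,
				   dma_addr_t *bus_addr)
{
	return dma_alloc_coherent(&pdev->dev, size, bus_addr, GFP_KERNEL);
}

The matching release is dma_free_coherent() with the same device, size, CPU address, and bus address.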
@@ -196,9 +196,9 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	unsigned int cur_irq_sequence;
-	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+	drm_via_irq_t *cur_irq;
 	int ret = 0;
-	maskarray_t *masks = dev_priv->irq_masks;
+	maskarray_t *masks;
 	int real_irq;
 
 	DRM_DEBUG("%s\n", __FUNCTION__);
@@ -221,8 +221,9 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
 			  __FUNCTION__, irq);
 		return DRM_ERR(EINVAL);
 	}
 
-	cur_irq += real_irq;
+	masks = dev_priv->irq_masks;
+	cur_irq = dev_priv->via_irqs + real_irq;
 
 	if (masks[real_irq][2] && !force_sequence) {
 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
@@ -247,11 +248,12 @@ void via_driver_irq_preinstall(drm_device_t * dev)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
-	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+	drm_via_irq_t *cur_irq;
 	int i;
 
 	DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
 	if (dev_priv) {
+		cur_irq = dev_priv->via_irqs;
 
 		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
 		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
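The via_irq.c hunks are the Coverity fix and its follow-up: pointer setup that used to happen in the declarations, dereferencing dev_priv before it (and the translated real_irq index) had been validated, is now deferred until after the checks. The underlying pattern is simply "validate, then index"; a minimal standalone illustration with made-up names:

#include <stddef.h>

struct irq_slot {
	int pending;
};

/* Return the requested slot, or NULL if the table or the index is bad.
 * The pointer arithmetic happens only after both checks have passed. */
static struct irq_slot *lookup_irq(struct irq_slot *slots, int nslots, int idx)
{
	if (slots == NULL)
		return NULL;
	if (idx < 0 || idx >= nslots)
		return NULL;
	return slots + idx;
}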