/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However gen2 requires an unavailable symbol.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		base &= ~((1 << 20) - 1);
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
		};
		u64 gtt_start, gtt_end;

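		/*
		 * PGTBL_CTL gives the physical base of the GTT; on gen4 the
		 * high bits of that address live in a separate bitfield and
		 * are shifted back into place below. With four bytes per GTT
		 * entry, this yields the physical range occupied by the GTT
		 * so it can be carved out of the stolen region.
		 */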
		gtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			gtt_start &= PGTBL_ADDRESS_LO_MASK;
		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
			stolen[0].end = gtt_start;
		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
			stolen[1].start = gtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)gtt_start,
				      (unsigned long long)gtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32)dev_priv->gtt.stolen_size - 1);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    dev_priv->gtt.stolen_size - 1,
					    "Graphics Stolen Memory");
		if (r == NULL) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
			base = 0;
		}
	}

	return base;
}

static int find_compression_threshold(struct drm_device *dev,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int compression_threshold = 1;
	int ret;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
	if (ret == 0)
		return compression_threshold;
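
	/*
	 * Each pass below halves the size being tried (starting from the
	 * doubled over-allocation above) and, if the allocation still fails,
	 * doubles compression_threshold and retries until the hardware's 1:4
	 * limit would be exceeded; gen <= 4 gives up after the first failed
	 * retry.
	 */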
again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
				 size >>= 1, 4096,
				 DRM_MM_SEARCH_DEFAULT);
	if (ret && INTEL_INFO(dev)->gen <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int ret;

	ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
	}

	dev_priv->fbc.threshold = ret;

	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
					 4096, 4096, DRM_MM_SEARCH_DEFAULT);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	dev_priv->fbc.size = size / dev_priv->fbc.threshold;

	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
		      size);

	return 0;

err_fb:
	kfree(compressed_llb);
	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

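	/* If the currently reserved compressed buffer already covers the
	 * request, keep it rather than reallocating.
	 */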
	if (size < dev_priv->fbc.size)
		return 0;

	/* Release any current block */
	i915_gem_stolen_cleanup_compression(dev);

	return i915_setup_compression(dev, size, fb_cpp);
}

void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->fbc.size == 0)
		return;

	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);

	if (dev_priv->fbc.compressed_llb) {
		drm_mm_remove_node(dev_priv->fbc.compressed_llb);
		kfree(dev_priv->fbc.compressed_llb);
	}

	dev_priv->fbc.size = 0;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	i915_gem_stolen_cleanup_compression(dev);
	drm_mm_takedown(&dev_priv->mm.stolen);
}

int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;
	int bios_reserved = 0;

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

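	/* A portion of stolen memory is reserved by the BIOS; its size is
	 * taken from the BIOS_RESERVED register fields below and excluded
	 * from the range handed to the drm_mm allocator.
	 */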
	if (INTEL_INFO(dev)->gen >= 8) {
		tmp = I915_READ(GEN7_BIOS_RESERVED);
		tmp >>= GEN8_BIOS_RESERVED_SHIFT;
		tmp &= GEN8_BIOS_RESERVED_MASK;
		bios_reserved = (1024 * 1024) << tmp;
	} else if (IS_GEN7(dev)) {
		tmp = I915_READ(GEN7_BIOS_RESERVED);
		bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
			256 * 1024 : 1024 * 1024;
	}

	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
		return 0;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
		    bios_reserved);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > dev_priv->gtt.stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	if (obj->stolen) {
		drm_mm_remove_node(obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->has_dma_mapping = true;
	i915_gem_object_pin_pages(obj);

	obj->stolen = stolen;

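	/* Stolen objects start out in both the CPU and GTT read domains, and
	 * use the platform default cache mode (LLC-cached where an LLC
	 * exists, uncached otherwise).
	 */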
	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
				 4096, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	drm_mm_remove_node(stolen);
	kfree(stolen);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	BUG_ON(stolen_offset & 4095);
	BUG_ON(size & 4095);

	if (WARN_ON(size == 0))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		drm_mm_remove_node(stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_out;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err_vma;
		}
	}

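	/* Mark the vma as bound in the global GTT and track the object on
	 * the usual bound and inactive lists.
	 */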
	vma->bound |= GLOBAL_BIND;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err_vma:
	i915_gem_vma_destroy(vma);
err_out:
	drm_mm_remove_node(stolen);
	kfree(stolen);
	drm_gem_object_unreference(&obj->base);
	return NULL;
}