drm/ttm: add bo as parameter to the ttm_tt_create callback
Instead of calculating the size in bytes just to recalculate the number of pages from it pass the BO directly to the function. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Roger He <Hongbo.He@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
5d95109815
commit
dde5da2379
|
@ -982,20 +982,20 @@ static struct ttm_backend_func amdgpu_backend_func = {
|
|||
.destroy = &amdgpu_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct amdgpu_device *adev;
|
||||
struct amdgpu_ttm_tt *gtt;
|
||||
|
||||
adev = amdgpu_ttm_adev(bdev);
|
||||
adev = amdgpu_ttm_adev(bo->bdev);
|
||||
|
||||
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
|
||||
if (gtt == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
gtt->ttm.ttm.func = &amdgpu_backend_func;
|
||||
if (ttm_sg_tt_init(&gtt->ttm, bdev, size, page_flags)) {
|
||||
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
|
||||
kfree(gtt);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -199,8 +199,8 @@ static struct ttm_backend_func ast_tt_backend_func = {
|
|||
};
|
||||
|
||||
|
||||
static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
static struct ttm_tt *ast_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct ttm_tt *tt;
|
||||
|
||||
|
@ -208,7 +208,7 @@ static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
|
|||
if (tt == NULL)
|
||||
return NULL;
|
||||
tt->func = &ast_tt_backend_func;
|
||||
if (ttm_tt_init(tt, bdev, size, page_flags)) {
|
||||
if (ttm_tt_init(tt, bo, page_flags)) {
|
||||
kfree(tt);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -176,8 +176,7 @@ static struct ttm_backend_func bochs_tt_backend_func = {
|
|||
.destroy = &bochs_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size,
|
||||
static struct ttm_tt *bochs_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct ttm_tt *tt;
|
||||
|
@ -186,7 +185,7 @@ static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
|
|||
if (tt == NULL)
|
||||
return NULL;
|
||||
tt->func = &bochs_tt_backend_func;
|
||||
if (ttm_tt_init(tt, bdev, size, page_flags)) {
|
||||
if (ttm_tt_init(tt, bo, page_flags)) {
|
||||
kfree(tt);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -199,8 +199,8 @@ static struct ttm_backend_func cirrus_tt_backend_func = {
|
|||
};
|
||||
|
||||
|
||||
static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct ttm_tt *tt;
|
||||
|
||||
|
@ -208,7 +208,7 @@ static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
|
|||
if (tt == NULL)
|
||||
return NULL;
|
||||
tt->func = &cirrus_tt_backend_func;
|
||||
if (ttm_tt_init(tt, bdev, size, page_flags)) {
|
||||
if (ttm_tt_init(tt, bo, page_flags)) {
|
||||
kfree(tt);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -200,8 +200,7 @@ static struct ttm_backend_func hibmc_tt_backend_func = {
|
|||
.destroy = &hibmc_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size,
|
||||
static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
u32 page_flags)
|
||||
{
|
||||
struct ttm_tt *tt;
|
||||
|
@ -213,7 +212,7 @@ static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
|
|||
return NULL;
|
||||
}
|
||||
tt->func = &hibmc_tt_backend_func;
|
||||
ret = ttm_tt_init(tt, bdev, size, page_flags);
|
||||
ret = ttm_tt_init(tt, bo, page_flags);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
|
||||
kfree(tt);
|
||||
|
|
|
@ -199,8 +199,8 @@ static struct ttm_backend_func mgag200_tt_backend_func = {
|
|||
};
|
||||
|
||||
|
||||
static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct ttm_tt *tt;
|
||||
|
||||
|
@ -208,7 +208,7 @@ static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
|
|||
if (tt == NULL)
|
||||
return NULL;
|
||||
tt->func = &mgag200_tt_backend_func;
|
||||
if (ttm_tt_init(tt, bdev, size, page_flags)) {
|
||||
if (ttm_tt_init(tt, bo, page_flags)) {
|
||||
kfree(tt);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -604,19 +604,17 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
|
|||
}
|
||||
|
||||
static struct ttm_tt *
|
||||
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
|
||||
uint32_t page_flags)
|
||||
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_AGP)
|
||||
struct nouveau_drm *drm = nouveau_bdev(bdev);
|
||||
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
|
||||
|
||||
if (drm->agp.bridge) {
|
||||
return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
|
||||
page_flags);
|
||||
return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
|
||||
}
|
||||
#endif
|
||||
|
||||
return nouveau_sgdma_create_ttm(bdev, size, page_flags);
|
||||
return nouveau_sgdma_create_ttm(bo, page_flags);
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
|
@ -82,10 +82,9 @@ static struct ttm_backend_func nv50_sgdma_backend = {
|
|||
};
|
||||
|
||||
struct ttm_tt *
|
||||
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
|
||||
{
|
||||
struct nouveau_drm *drm = nouveau_bdev(bdev);
|
||||
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
|
||||
struct nouveau_sgdma_be *nvbe;
|
||||
|
||||
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
|
||||
|
@ -97,7 +96,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
|
|||
else
|
||||
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
|
||||
|
||||
if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags))
|
||||
if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
|
||||
/*
|
||||
* A failing ttm_dma_tt_init() will call ttm_tt_destroy()
|
||||
* and thus our nouveau_sgdma_destroy() hook, so we don't need
|
||||
|
|
|
@ -12,8 +12,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
|
|||
extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
|
||||
extern const struct ttm_mem_type_manager_func nv04_gart_manager;
|
||||
|
||||
struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
|
||||
unsigned long size, u32 page_flags);
|
||||
struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo,
|
||||
u32 page_flags);
|
||||
|
||||
int nouveau_ttm_init(struct nouveau_drm *drm);
|
||||
void nouveau_ttm_fini(struct nouveau_drm *drm);
|
||||
|
|
|
@ -291,19 +291,19 @@ static struct ttm_backend_func qxl_backend_func = {
|
|||
.destroy = &qxl_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct qxl_device *qdev;
|
||||
struct qxl_ttm_tt *gtt;
|
||||
|
||||
qdev = qxl_get_qdev(bdev);
|
||||
qdev = qxl_get_qdev(bo->bdev);
|
||||
gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
|
||||
if (gtt == NULL)
|
||||
return NULL;
|
||||
gtt->ttm.ttm.func = &qxl_backend_func;
|
||||
gtt->qdev = qdev;
|
||||
if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
|
||||
if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
|
||||
kfree(gtt);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -686,17 +686,17 @@ static struct ttm_backend_func radeon_backend_func = {
|
|||
.destroy = &radeon_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct radeon_device *rdev;
|
||||
struct radeon_ttm_tt *gtt;
|
||||
|
||||
rdev = radeon_get_rdev(bdev);
|
||||
rdev = radeon_get_rdev(bo->bdev);
|
||||
#if IS_ENABLED(CONFIG_AGP)
|
||||
if (rdev->flags & RADEON_IS_AGP) {
|
||||
return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
|
||||
size, page_flags);
|
||||
return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
|
||||
page_flags);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -706,7 +706,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
|
|||
}
|
||||
gtt->ttm.ttm.func = &radeon_backend_func;
|
||||
gtt->rdev = rdev;
|
||||
if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
|
||||
if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
|
||||
kfree(gtt);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -110,9 +110,9 @@ static struct ttm_backend_func ttm_agp_func = {
|
|||
.destroy = ttm_agp_destroy,
|
||||
};
|
||||
|
||||
struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
|
||||
struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
|
||||
struct agp_bridge_data *bridge,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be;
|
||||
|
||||
|
@ -124,7 +124,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
|
|||
agp_be->bridge = bridge;
|
||||
agp_be->ttm.func = &ttm_agp_func;
|
||||
|
||||
if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags)) {
|
||||
if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
|
||||
kfree(agp_be);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -73,8 +73,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
|
||||
page_flags);
|
||||
bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
|
||||
if (unlikely(bo->ttm == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -237,21 +236,21 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
|
|||
ttm->func->destroy(ttm);
|
||||
}
|
||||
|
||||
void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
ttm->bdev = bdev;
|
||||
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
ttm->bdev = bo->bdev;
|
||||
ttm->num_pages = bo->num_pages;
|
||||
ttm->caching_state = tt_cached;
|
||||
ttm->page_flags = page_flags;
|
||||
ttm->state = tt_unpopulated;
|
||||
ttm->swap_storage = NULL;
|
||||
}
|
||||
|
||||
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
ttm_tt_init_fields(ttm, bdev, size, page_flags);
|
||||
ttm_tt_init_fields(ttm, bo, page_flags);
|
||||
|
||||
if (ttm_tt_alloc_page_directory(ttm)) {
|
||||
ttm_tt_destroy(ttm);
|
||||
|
@ -269,12 +268,12 @@ void ttm_tt_fini(struct ttm_tt *ttm)
|
|||
}
|
||||
EXPORT_SYMBOL(ttm_tt_fini);
|
||||
|
||||
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct ttm_tt *ttm = &ttm_dma->ttm;
|
||||
|
||||
ttm_tt_init_fields(ttm, bdev, size, page_flags);
|
||||
ttm_tt_init_fields(ttm, bo, page_flags);
|
||||
|
||||
INIT_LIST_HEAD(&ttm_dma->pages_list);
|
||||
if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
|
||||
|
@ -286,13 +285,13 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
|
|||
}
|
||||
EXPORT_SYMBOL(ttm_dma_tt_init);
|
||||
|
||||
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct ttm_tt *ttm = &ttm_dma->ttm;
|
||||
int ret;
|
||||
|
||||
ttm_tt_init_fields(ttm, bdev, size, page_flags);
|
||||
ttm_tt_init_fields(ttm, bo, page_flags);
|
||||
|
||||
INIT_LIST_HEAD(&ttm_dma->pages_list);
|
||||
if (page_flags & TTM_PAGE_FLAG_SG)
|
||||
|
|
|
@ -322,20 +322,19 @@ static struct ttm_backend_func virtio_gpu_backend_func = {
|
|||
.destroy = &virtio_gpu_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size,
|
||||
static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct virtio_gpu_device *vgdev;
|
||||
struct virtio_gpu_ttm_tt *gtt;
|
||||
|
||||
vgdev = virtio_gpu_get_vgdev(bdev);
|
||||
vgdev = virtio_gpu_get_vgdev(bo->bdev);
|
||||
gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
|
||||
if (gtt == NULL)
|
||||
return NULL;
|
||||
gtt->ttm.ttm.func = &virtio_gpu_backend_func;
|
||||
gtt->vgdev = vgdev;
|
||||
if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
|
||||
if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
|
||||
kfree(gtt);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -693,8 +693,8 @@ static struct ttm_backend_func vmw_ttm_func = {
|
|||
.destroy = vmw_ttm_destroy,
|
||||
};
|
||||
|
||||
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags)
|
||||
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags)
|
||||
{
|
||||
struct vmw_ttm_tt *vmw_be;
|
||||
int ret;
|
||||
|
@ -704,13 +704,13 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
|
|||
return NULL;
|
||||
|
||||
vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
|
||||
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
|
||||
vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
|
||||
vmw_be->mob = NULL;
|
||||
|
||||
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
|
||||
ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags);
|
||||
ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
|
||||
else
|
||||
ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags);
|
||||
ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_init;
|
||||
|
||||
|
|
|
@ -193,8 +193,7 @@ static struct ttm_backend_func vbox_tt_backend_func = {
|
|||
.destroy = &vbox_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size,
|
||||
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
u32 page_flags)
|
||||
{
|
||||
struct ttm_tt *tt;
|
||||
|
@ -204,7 +203,7 @@ static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
|
|||
return NULL;
|
||||
|
||||
tt->func = &vbox_tt_backend_func;
|
||||
if (ttm_tt_init(tt, bdev, size, page_flags)) {
|
||||
if (ttm_tt_init(tt, bo, page_flags)) {
|
||||
kfree(tt);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -225,8 +225,7 @@ struct ttm_bo_driver {
|
|||
/**
|
||||
* ttm_tt_create
|
||||
*
|
||||
* @bdev: pointer to a struct ttm_bo_device:
|
||||
* @size: Size of the data needed backing.
|
||||
* @bo: The buffer object to create the ttm for.
|
||||
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
|
||||
*
|
||||
* Create a struct ttm_tt to back data with system memory pages.
|
||||
|
@ -234,8 +233,7 @@ struct ttm_bo_driver {
|
|||
* Returns:
|
||||
* NULL: Out of memory.
|
||||
*/
|
||||
struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
|
||||
unsigned long size,
|
||||
struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags);
|
||||
|
||||
/**
|
||||
|
|
|
@ -150,8 +150,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
|
|||
* ttm_tt_init
|
||||
*
|
||||
* @ttm: The struct ttm_tt.
|
||||
* @bdev: pointer to a struct ttm_bo_device:
|
||||
* @size: Size of the data needed backing.
|
||||
* @bo: The buffer object we create the ttm for.
|
||||
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
|
||||
*
|
||||
* Create a struct ttm_tt to back data with system memory pages.
|
||||
|
@ -159,12 +158,12 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
|
|||
* Returns:
|
||||
* NULL: Out of memory.
|
||||
*/
|
||||
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags);
|
||||
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags);
|
||||
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags);
|
||||
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags);
|
||||
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags);
|
||||
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
|
||||
uint32_t page_flags);
|
||||
|
||||
/**
|
||||
* ttm_tt_fini
|
||||
|
@ -254,9 +253,8 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm);
|
|||
/**
|
||||
* ttm_agp_tt_create
|
||||
*
|
||||
* @bdev: Pointer to a struct ttm_bo_device.
|
||||
* @bo: Buffer object we allocate the ttm for.
|
||||
* @bridge: The agp bridge this device is sitting on.
|
||||
* @size: Size of the data needed backing.
|
||||
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
|
||||
*
|
||||
*
|
||||
|
@ -264,9 +262,9 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm);
|
|||
* for TT memory. This function uses the linux agpgart interface to
|
||||
* bind and unbind memory backing a ttm_tt.
|
||||
*/
|
||||
struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
|
||||
struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
|
||||
struct agp_bridge_data *bridge,
|
||||
unsigned long size, uint32_t page_flags);
|
||||
uint32_t page_flags);
|
||||
int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
|
||||
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
|
||||
#endif
|
||||
|
|
Loading…
Reference in New Issue