drm/nouveau: Fix fallout from reservation object rework
Commit 019cbd4a4f ("drm/nouveau: Initialize GEM object before TTM object")
introduced a subtle change in how the buffer allocation size is handled.
Prior to that change, the size would get aligned to at least a page,
whereas after that change a non-page-aligned size would get passed
through unmodified. This ultimately causes a BUG_ON() to trigger in
drm_gem_private_object_init() and crashes the system. Fix this by
restoring the code that aligns the allocation size.

Fixes: 019cbd4a4f ("drm/nouveau: Initialize GEM object before TTM object")
Reported-by: Ilia Mirkin <imirkin@alum.mit.edu>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 9ca7f7968b
parent 698c1aa9f8
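For illustration, the behaviour being restored is a round-up of the requested allocation size to whole pages before the GEM object is initialized, since drm_gem_private_object_init() rejects a size that is not page-aligned. The following stand-alone sketch only shows that round-up; the helper name page_align_size() and the fixed 4 KiB PAGE_SIZE are stand-ins for the kernel's nouveau_bo_fixup_align()/PAGE_ALIGN(), not the actual driver code:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL	/* stand-in for the kernel's PAGE_SIZE */

/* Round a requested buffer size up to a whole number of pages. */
static uint64_t page_align_size(uint64_t size)
{
	return (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	uint64_t requested = 4097;			/* not page-aligned */
	uint64_t aligned = page_align_size(requested);	/* rounds up to 8192 */

	printf("requested %llu -> aligned %llu\n",
	       (unsigned long long)requested, (unsigned long long)aligned);
	return 0;
}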
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -186,8 +186,8 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 }
 
 struct nouveau_bo *
-nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
-		 u32 tile_flags)
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
+		 u32 tile_mode, u32 tile_flags)
 {
 	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
@@ -195,8 +195,8 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
 	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
 	int i, pi = -1;
 
-	if (!size) {
-		NV_WARN(drm, "skipped size %016llx\n", size);
+	if (!*size) {
+		NV_WARN(drm, "skipped size %016llx\n", *size);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -266,7 +266,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
 			pi = i;
 
 		/* Stop once the buffer is larger than the current page size. */
-		if (size >= 1ULL << vmm->page[i].shift)
+		if (*size >= 1ULL << vmm->page[i].shift)
 			break;
 	}
 
@@ -281,6 +281,8 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
 	}
 	nvbo->page = vmm->page[pi].shift;
 
+	nouveau_bo_fixup_align(nvbo, flags, align, size);
+
 	return nvbo;
 }
 
@@ -294,7 +296,6 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
 
 	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
 
-	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
@@ -318,7 +319,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	struct nouveau_bo *nvbo;
 	int ret;
 
-	nvbo = nouveau_bo_alloc(cli, size, flags, tile_mode, tile_flags);
+	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+				tile_flags);
 	if (IS_ERR(nvbo))
 		return PTR_ERR(nvbo);
 
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -71,8 +71,8 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 extern struct ttm_bo_driver nouveau_bo_driver;
 
 void nouveau_bo_move_init(struct nouveau_drm *);
-struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 size, u32 flags,
-				    u32 tile_mode, u32 tile_flags);
+struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
+				    u32 flags, u32 tile_mode, u32 tile_flags);
 int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
 		    struct sg_table *sg, struct dma_resv *robj);
 int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -188,7 +188,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
 		flags |= TTM_PL_FLAG_UNCACHED;
 
-	nvbo = nouveau_bo_alloc(cli, size, flags, tile_mode, tile_flags);
+	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+				tile_flags);
 	if (IS_ERR(nvbo))
 		return PTR_ERR(nvbo);
 
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -62,14 +62,15 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *nvbo;
 	struct dma_resv *robj = attach->dmabuf->resv;
-	size_t size = attach->dmabuf->size;
+	u64 size = attach->dmabuf->size;
 	u32 flags = 0;
+	int align = 0;
 	int ret;
 
 	flags = TTM_PL_FLAG_TT;
 
 	dma_resv_lock(robj, NULL);
-	nvbo = nouveau_bo_alloc(&drm->client, size, flags, 0, 0);
+	nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
 	dma_resv_unlock(robj);
 	if (IS_ERR(nvbo))
 		return ERR_CAST(nvbo);
@@ -84,7 +85,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	ret = nouveau_bo_init(nvbo, size, 0, flags, sg, robj);
+	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
 		return ERR_PTR(ret);
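Taken together, the hunks above change the calling convention: size and align are now passed to nouveau_bo_alloc() by pointer so that nouveau_bo_fixup_align() can adjust them before the GEM object is set up, and the adjusted values are then handed to nouveau_bo_init(). The following stand-alone sketch only mirrors that by-pointer fix-up pattern; all names and types are simplified stand-ins, not the real nouveau/TTM definitions:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL	/* stand-in for the kernel's PAGE_SIZE */

/* Stand-in for nouveau_bo_fixup_align(): page-align size, pick an align. */
static void fixup_align(uint64_t *size, int *align)
{
	if (*align == 0)
		*align = (int)PAGE_SIZE;
	*size = (*size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/* Stand-in for the allocation step: adjusts *size and *align for the caller. */
static int bo_alloc(uint64_t *size, int *align)
{
	if (*size == 0)
		return -1;	/* mirrors the -EINVAL check on a zero size */
	fixup_align(size, align);
	return 0;
}

/* Stand-in for the init step: now only ever sees an already-aligned size. */
static int bo_init(uint64_t size)
{
	return (size & (PAGE_SIZE - 1)) ? -1 : 0;
}

int main(void)
{
	uint64_t size = 4097;	/* deliberately not page-aligned */
	int align = 0;

	if (bo_alloc(&size, &align))
		return 1;
	printf("size=%llu align=%d init=%d\n",
	       (unsigned long long)size, align, bo_init(size));
	return 0;
}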