drm: core: fix common struct sg_table related issues

The Documentation/DMA-API-HOWTO.txt states that the dma_map_sg() function
returns the number of entries created in the DMA address space. However,
the subsequent calls to dma_sync_sg_for_{device,cpu}() and dma_unmap_sg()
must be made with the original number of entries passed to dma_map_sg().
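
For reference, a minimal sketch of that contract when the calls are
open-coded (assuming a driver-owned struct device *dev and an already
populated scatterlist sgl with orig_nents entries):

    #include <linux/dma-mapping.h>

    int nents;

    /* dma_map_sg() returns the number of DMA entries actually
     * created, which may be smaller than orig_nents if an IOMMU
     * coalesced entries; 0 means failure */
    nents = dma_map_sg(dev, sgl, orig_nents, DMA_TO_DEVICE);
    if (nents == 0)
            return -ENOMEM;

    /* the hardware may use the first 'nents' DMA entries only... */

    /* ...but sync and unmap take the ORIGINAL entry count */
    dma_sync_sg_for_cpu(dev, sgl, orig_nents, DMA_TO_DEVICE);
    dma_unmap_sg(dev, sgl, orig_nents, DMA_TO_DEVICE);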

struct sg_table is a common structure used to describe a non-contiguous
memory buffer, commonly used in the DRM and graphics subsystems. It
consists of a scatterlist with memory pages and DMA addresses (the sgl
entry), as well as two entry counts: the number of CPU pages (orig_nents)
and the number of DMA mapped entries (nents).
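
For reference, the structure as defined in include/linux/scatterlist.h:

    struct sg_table {
            struct scatterlist *sgl;        /* the list */
            unsigned int nents;             /* number of mapped entries */
            unsigned int orig_nents;        /* original size of list */
    };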

It turned out that misusing the nents and orig_nents entries was a common
mistake: DMA-mapping functions were called with the wrong number of
entries, or the number of mapped entries returned by dma_map_sg() was
ignored.
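
A sketch of the typical buggy pattern on a freshly allocated table
(where nents == orig_nents until someone stores the mapped count); the
dev pointer here is a stand-in for the driver's DMA device:

    /* WRONG: the number of mapped entries returned by dma_map_sg()
     * is discarded instead of being stored in sgt->nents */
    dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

    /* FRAGILE: sgt->nents only happens to be correct because it was
     * never updated; the API requires the original count, i.e.
     * sgt->orig_nents */
    dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);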

To avoid such issues, let's use the common dma-mapping wrappers that
operate directly on struct sg_table objects, and use scatterlist page
iterators where possible. This almost always hides references to the
nents and orig_nents entries, making the code robust, easier to follow
and copy/paste safe.
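
The map/unmap wrappers used below take the whole table, so the
nents/orig_nents bookkeeping happens internally; the dma_sync_sgtable_*()
helpers from the same series are listed alongside for completeness:

    int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                        enum dma_data_direction dir, unsigned long attrs);
    void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
                           enum dma_data_direction dir, unsigned long attrs);
    void dma_sync_sgtable_for_cpu(struct device *dev, struct sg_table *sgt,
                                  enum dma_data_direction dir);
    void dma_sync_sgtable_for_device(struct device *dev, struct sg_table *sgt,
                                     enum dma_data_direction dir);

dma_map_sgtable() stores the mapped entry count in sgt->nents and returns
0 or a negative error code, so callers can simply propagate the error
instead of guessing -ENOMEM, as drm_gem_map_dma_buf() does below. The
for_each_sgtable_page() iterator likewise walks all orig_nents CPU pages
of the table without the caller naming either count.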

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Author: Marek Szyprowski <m.szyprowski@samsung.com>
Date:   2020-05-11 12:27:54 +02:00
Parent: 0552daac2d
Commit: 6c6fa39ca9

3 changed files with 16 additions and 11 deletions

drivers/gpu/drm/drm_cache.c

@@ -127,7 +127,7 @@ drm_clflush_sg(struct sg_table *st)
                 struct sg_page_iter sg_iter;

                 mb(); /*CLFLUSH is ordered only by using memory barriers*/
-                for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+                for_each_sgtable_page(st, &sg_iter, 0)
                         drm_clflush_page(sg_page_iter_page(&sg_iter));
                 mb(); /*Make sure that all cache line entry is flushed*/

drivers/gpu/drm/drm_gem_shmem_helper.c

@@ -126,8 +126,8 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
                 drm_prime_gem_destroy(obj, shmem->sgt);
         } else {
                 if (shmem->sgt) {
-                        dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
-                                     shmem->sgt->nents, DMA_BIDIRECTIONAL);
+                        dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
+                                          DMA_BIDIRECTIONAL, 0);
                         sg_free_table(shmem->sgt);
                         kfree(shmem->sgt);
                 }
@@ -424,8 +424,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
         WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

-        dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
-                     shmem->sgt->nents, DMA_BIDIRECTIONAL);
+        dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
         sg_free_table(shmem->sgt);
         kfree(shmem->sgt);
         shmem->sgt = NULL;
@@ -697,12 +696,17 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
                 goto err_put_pages;
         }

         /* Map the pages for use by the h/w. */
-        dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+        ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+        if (ret)
+                goto err_free_sgt;

         shmem->sgt = sgt;

         return sgt;

+err_free_sgt:
+        sg_free_table(sgt);
+        kfree(sgt);
 err_put_pages:
         drm_gem_shmem_put_pages(shmem);
         return ERR_PTR(ret);

drivers/gpu/drm/drm_prime.c

@@ -617,6 +617,7 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 {
         struct drm_gem_object *obj = attach->dmabuf->priv;
         struct sg_table *sgt;
+        int ret;

         if (WARN_ON(dir == DMA_NONE))
                 return ERR_PTR(-EINVAL);
@@ -626,11 +627,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
         else
                 sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

-        if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                              DMA_ATTR_SKIP_CPU_SYNC)) {
+        ret = dma_map_sgtable(attach->dev, sgt, dir,
+                              DMA_ATTR_SKIP_CPU_SYNC);
+        if (ret) {
                 sg_free_table(sgt);
                 kfree(sgt);
-                sgt = ERR_PTR(-ENOMEM);
+                sgt = ERR_PTR(ret);
         }

         return sgt;
@@ -652,8 +654,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
         if (!sgt)
                 return;

-        dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                           DMA_ATTR_SKIP_CPU_SYNC);
+        dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
         sg_free_table(sgt);
         kfree(sgt);
 }