drm: omapdrm: Rename occurrences of paddr to dma_addr

The fields, variables and functions deal with DMA addresses; name them
accordingly. The omap_gem_get_paddr() and omap_gem_put_paddr() functions
will be addressed separately.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Laurent Pinchart 2017-04-21 00:33:51 +03:00 committed by Tomi Valkeinen
parent aa0408bcb1
commit 16869083b9
5 changed files with 57 additions and 56 deletions
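
To illustrate the effect of the rename on a caller, here is a hypothetical user of the interfaces touched below. The function and its fallback logic are invented for illustration; only the omap_gem_* identifiers and their signatures come from the diff, and omap_gem_get_paddr()/omap_gem_put_paddr() deliberately keep their names in this patch (they are addressed separately, per the commit message).

/* Hypothetical caller, not part of the patch: pin a buffer and look up
 * the DMA address of a rotated TILER view.
 */
static int example_pin_rotated_view(struct drm_gem_object *bo,
				    uint32_t orient, int x, int y,
				    dma_addr_t *view_addr)
{
	dma_addr_t dma_addr;	/* a local like this was usually named "paddr" before */
	int ret;

	/* Pin the buffer; this accessor keeps its old name for now. */
	ret = omap_gem_get_paddr(bo, &dma_addr);
	if (ret)
		return ret;

	/* omap_gem_rotated_paddr() is now omap_gem_rotated_dma_addr(). */
	ret = omap_gem_rotated_dma_addr(bo, orient, x, y, view_addr);
	if (ret)
		*view_addr = dma_addr;	/* not tiled: fall back to the linear address */

	/* The matching omap_gem_put_paddr() is left to the unpin path (not shown). */
	return 0;
}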


@@ -188,14 +188,14 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync(struct drm_gem_object *obj,
enum dma_data_direction dir);
int omap_gem_get_paddr(struct drm_gem_object *obj, dma_addr_t *paddr);
int omap_gem_get_paddr(struct drm_gem_object *obj, dma_addr_t *dma_addr);
void omap_gem_put_paddr(struct drm_gem_object *obj);
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap);
int omap_gem_put_pages(struct drm_gem_object *obj);
uint32_t omap_gem_flags(struct drm_gem_object *obj);
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
int x, int y, dma_addr_t *paddr);
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
int x, int y, dma_addr_t *dma_addr);
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);


@@ -74,7 +74,7 @@ struct plane {
struct drm_gem_object *bo;
uint32_t pitch;
uint32_t offset;
dma_addr_t paddr;
dma_addr_t dma_addr;
};
#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
@@ -85,7 +85,7 @@ struct omap_framebuffer {
const struct drm_format_info *format;
enum omap_color_mode dss_format;
struct plane planes[2];
/* lock for pinning (pin_count and planes.paddr) */
/* lock for pinning (pin_count and planes.dma_addr) */
struct mutex lock;
};
@@ -130,7 +130,7 @@ static uint32_t get_linear_addr(struct plane *plane,
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
+ (y * plane->pitch / (n == 0 ? 1 : format->vsub));
return plane->paddr + offset;
return plane->dma_addr + offset;
}
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
@@ -201,7 +201,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
if (orient & MASK_X_INVERT)
x += w - 1;
omap_gem_rotated_paddr(plane->bo, orient, x, y, &info->paddr);
omap_gem_rotated_dma_addr(plane->bo, orient, x, y,
&info->paddr);
info->rotation_type = OMAP_DSS_ROT_TILER;
info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
} else {
@@ -232,8 +233,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
omap_gem_rotated_paddr(plane->bo, orient,
x/2, y/2, &info->p_uv_addr);
omap_gem_rotated_dma_addr(plane->bo, orient, x/2, y/2,
&info->p_uv_addr);
} else {
info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
}
@@ -258,7 +259,7 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
ret = omap_gem_get_paddr(plane->bo, &plane->paddr);
ret = omap_gem_get_paddr(plane->bo, &plane->dma_addr);
if (ret)
goto fail;
omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
@@ -274,7 +275,7 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
for (i--; i >= 0; i--) {
struct plane *plane = &omap_fb->planes[i];
omap_gem_put_paddr(plane->bo);
plane->paddr = 0;
plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
@@ -300,7 +301,7 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
omap_gem_put_paddr(plane->bo);
plane->paddr = 0;
plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
@@ -458,7 +459,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->bo = bos[i];
plane->offset = mode_cmd->offsets[i];
plane->pitch = pitch;
plane->paddr = 0;
plane->dma_addr = 0;
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
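
The omap_framebuffer_pin() hunks above pin each plane's BO in turn and, on failure, unwind the planes that were already pinned. A reduced sketch of that rollback pattern, with pin_one()/unpin_one() as hypothetical stand-ins for omap_gem_get_paddr()/omap_gem_put_paddr() on a plane's BO:

static int pin_one(struct plane *plane);	/* hypothetical, e.g. omap_gem_get_paddr(plane->bo, &plane->dma_addr) */
static void unpin_one(struct plane *plane);	/* hypothetical, e.g. omap_gem_put_paddr(plane->bo) */

static int pin_all(struct plane *planes, int n)
{
	int ret, i;

	for (i = 0; i < n; i++) {
		ret = pin_one(&planes[i]);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	/* Unwind only the planes that were pinned successfully. */
	for (i--; i >= 0; i--)
		unpin_one(&planes[i]);

	return ret;
}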


@@ -106,7 +106,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
union omap_gem_size gsize;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
dma_addr_t paddr;
dma_addr_t dma_addr;
int ret;
sizes->surface_bpp = 32;
@@ -162,7 +162,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
* to it). Then we just need to be sure that we are able to re-
* pin it in case of an opps.
*/
ret = omap_gem_get_paddr(fbdev->bo, &paddr);
ret = omap_gem_get_paddr(fbdev->bo, &dma_addr);
if (ret) {
dev_err(dev->dev,
"could not map (paddr)! Skipping framebuffer alloc\n");
@@ -193,11 +193,11 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
dev->mode_config.fb_base = paddr;
dev->mode_config.fb_base = dma_addr;
fbi->screen_base = omap_gem_vaddr(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_start = dma_addr;
fbi->fix.smem_len = fbdev->bo->size;
/* if we have DMM, then we can use it for scrolling by just


@@ -50,7 +50,7 @@ struct omap_gem_object {
uint32_t roll;
/**
* paddr contains the buffer DMA address. It is valid for
* dma_addr contains the buffer DMA address. It is valid for
*
* - buffers allocated through the DMA mapping API (with the
* OMAP_BO_MEM_DMA_API flag set)
@@ -58,24 +58,24 @@ struct omap_gem_object {
* - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
* if they are physically contiguous (when sgt->orig_nents == 1)
*
* - buffers mapped through the TILER when paddr_cnt is not zero, in
* - buffers mapped through the TILER when dma_addr_cnt is not zero, in
* which case the DMA address points to the TILER aperture
*
* Physically contiguous buffers have their DMA address equal to the
* physical address as we don't remap those buffers through the TILER.
*
* Buffers mapped to the TILER have their DMA address pointing to the
* TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
* the DMA address must be accessed through omap_get_get_paddr() to
* ensure that the mapping won't disappear unexpectedly. References must
* be released with omap_gem_put_paddr().
* TILER aperture. As TILER mappings are refcounted (through
* dma_addr_cnt) the DMA address must be accessed through
* omap_get_get_paddr() to ensure that the mapping won't disappear
* unexpectedly. References must be released with omap_gem_put_paddr().
*/
dma_addr_t paddr;
dma_addr_t dma_addr;
/**
* # of users of paddr
* # of users of dma_addr
*/
uint32_t paddr_cnt;
uint32_t dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -119,7 +119,7 @@ struct omap_gem_object {
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
struct tiler_block *block; /* the reserved tiler block */
dma_addr_t paddr;
dma_addr_t dma_addr;
struct drm_gem_object *obj; /* the current pinned obj */
pgoff_t obj_pgoff; /* page offset of obj currently
mapped in */
@@ -392,7 +392,7 @@ static int fault_1d(struct drm_gem_object *obj,
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
BUG_ON(!is_contiguous(omap_obj));
pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
}
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
@@ -485,7 +485,7 @@ static int fault_2d(struct drm_gem_object *obj,
return ret;
}
pfn = entry->paddr >> PAGE_SHIFT;
pfn = entry->dma_addr >> PAGE_SHIFT;
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
@@ -787,7 +787,7 @@ void omap_gem_dma_sync(struct drm_gem_object *obj,
/* Get physical address for DMA.. if the buffer is not already contiguous, remap
* it to pin in physically contiguous memory.. (ie. map in TILER)
*/
int omap_gem_get_paddr(struct drm_gem_object *obj, dma_addr_t *paddr)
int omap_gem_get_paddr(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
struct omap_drm_private *priv = obj->dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -796,7 +796,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj, dma_addr_t *paddr)
mutex_lock(&obj->dev->struct_mutex);
if (!is_contiguous(omap_obj) && priv->has_dmm) {
if (omap_obj->paddr_cnt == 0) {
if (omap_obj->dma_addr_cnt == 0) {
struct page **pages;
uint32_t npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
@@ -833,17 +833,17 @@ int omap_gem_get_paddr(struct drm_gem_object *obj, dma_addr_t *paddr)
goto fail;
}
omap_obj->paddr = tiler_ssptr(block);
omap_obj->dma_addr = tiler_ssptr(block);
omap_obj->block = block;
DBG("got paddr: %pad", &omap_obj->paddr);
DBG("got dma address: %pad", &omap_obj->dma_addr);
}
omap_obj->paddr_cnt++;
omap_obj->dma_addr_cnt++;
*paddr = omap_obj->paddr;
*dma_addr = omap_obj->dma_addr;
} else if (is_contiguous(omap_obj)) {
*paddr = omap_obj->paddr;
*dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
goto fail;
@@ -864,9 +864,9 @@ void omap_gem_put_paddr(struct drm_gem_object *obj)
int ret;
mutex_lock(&obj->dev->struct_mutex);
if (omap_obj->paddr_cnt > 0) {
omap_obj->paddr_cnt--;
if (omap_obj->paddr_cnt == 0) {
if (omap_obj->dma_addr_cnt > 0) {
omap_obj->dma_addr_cnt--;
if (omap_obj->dma_addr_cnt == 0) {
ret = tiler_unpin(omap_obj->block);
if (ret) {
dev_err(obj->dev->dev,
@@ -877,7 +877,7 @@ void omap_gem_put_paddr(struct drm_gem_object *obj)
dev_err(obj->dev->dev,
"could not release unmap: %d\n", ret);
}
omap_obj->paddr = 0;
omap_obj->dma_addr = 0;
omap_obj->block = NULL;
}
}
@@ -889,16 +889,16 @@ void omap_gem_put_paddr(struct drm_gem_object *obj)
* specified orientation and x,y offset from top-left corner of buffer
* (only valid for tiled 2d buffers)
*/
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
int x, int y, dma_addr_t *paddr)
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
int x, int y, dma_addr_t *dma_addr)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
mutex_lock(&obj->dev->struct_mutex);
if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
(omap_obj->flags & OMAP_BO_TILED)) {
*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
mutex_unlock(&obj->dev->struct_mutex);
@@ -1019,7 +1019,7 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
off, &omap_obj->paddr, omap_obj->paddr_cnt,
off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED) {
@@ -1076,7 +1076,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
/* this means the object is still pinned.. which really should
* not happen. I think..
*/
WARN_ON(omap_obj->paddr_cnt > 0);
WARN_ON(omap_obj->dma_addr_cnt > 0);
if (omap_obj->pages) {
if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
@@ -1087,7 +1087,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
omap_obj->paddr);
omap_obj->dma_addr);
} else if (omap_obj->vaddr) {
vunmap(omap_obj->vaddr);
} else if (obj->import_attach) {
@@ -1184,7 +1184,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
/* Allocate memory if needed. */
if (flags & OMAP_BO_MEM_DMA_API) {
omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
&omap_obj->paddr,
&omap_obj->dma_addr,
GFP_KERNEL);
if (!omap_obj->vaddr)
goto err_release;
@@ -1228,7 +1228,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
omap_obj->sgt = sgt;
if (sgt->orig_nents == 1) {
omap_obj->paddr = sg_dma_address(sgt->sgl);
omap_obj->dma_addr = sg_dma_address(sgt->sgl);
} else {
/* Create pages list from sgt */
struct sg_page_iter iter;
@@ -1335,11 +1335,11 @@ void omap_gem_init(struct drm_device *dev)
i, j, PTR_ERR(block));
return;
}
entry->paddr = tiler_ssptr(block);
entry->dma_addr = tiler_ssptr(block);
entry->block = block;
DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
&entry->paddr,
DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
&entry->dma_addr,
usergart[i].stride_pfn << PAGE_SHIFT);
}
}
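
The omap_gem_get_paddr()/omap_gem_put_paddr() hunks above refcount TILER pins through dma_addr_cnt: the first user establishes the mapping and the last one tears it down. A reduced sketch of that refcounting, using an illustrative struct and helpers of my own naming, with locking, TILER pinning and error handling omitted:

struct example_bo {
	dma_addr_t dma_addr;	/* valid only while dma_addr_cnt > 0 */
	uint32_t dma_addr_cnt;	/* number of outstanding pins */
};

/* First caller establishes the mapping; later callers just reuse it. */
static dma_addr_t example_bo_pin(struct example_bo *bo, dma_addr_t mapping)
{
	if (bo->dma_addr_cnt++ == 0)
		bo->dma_addr = mapping;

	return bo->dma_addr;
}

/* Last caller tears the mapping down. */
static void example_bo_unpin(struct example_bo *bo)
{
	if (bo->dma_addr_cnt > 0 && --bo->dma_addr_cnt == 0)
		bo->dma_addr = 0;
}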


@@ -31,7 +31,7 @@ static struct sg_table *omap_gem_map_dma_buf(
{
struct drm_gem_object *obj = attachment->dmabuf->priv;
struct sg_table *sg;
dma_addr_t paddr;
dma_addr_t dma_addr;
int ret;
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
@@ -41,7 +41,7 @@ static struct sg_table *omap_gem_map_dma_buf(
/* camera, etc, need physically contiguous.. but we need a
* better way to know this..
*/
ret = omap_gem_get_paddr(obj, &paddr);
ret = omap_gem_get_paddr(obj, &dma_addr);
if (ret)
goto out;
@@ -51,8 +51,8 @@ static struct sg_table *omap_gem_map_dma_buf(
sg_init_table(sg->sgl, 1);
sg_dma_len(sg->sgl) = obj->size;
sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
sg_dma_address(sg->sgl) = paddr;
sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
sg_dma_address(sg->sgl) = dma_addr;
/* this should be after _get_paddr() to ensure we have pages attached */
omap_gem_dma_sync(obj, dir);
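
omap_gem_map_dma_buf() above exports a pinned, physically contiguous buffer as a single-entry scatterlist. A standalone sketch of that pattern follows; like the driver, it assumes the DMA address of a contiguous, non-TILER buffer equals its physical address, and the function name is invented:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Wrap one physically contiguous buffer in a one-entry sg_table. */
static struct sg_table *contig_buffer_to_sgt(dma_addr_t dma_addr, size_t size)
{
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret) {
		kfree(sg);
		return ERR_PTR(ret);
	}

	/* DMA address == physical address for contiguous, non-TILER buffers. */
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), size, 0);
	sg_dma_address(sg->sgl) = dma_addr;
	sg_dma_len(sg->sgl) = size;

	return sg;
}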