staging: android: ion: Stop butchering the DMA address
Now that we have proper caching, stop setting the DMA address manually. It should be set after properly calling dma_map.

Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 62b3a094cb
parent 204f672255
@@ -81,8 +81,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 {
 	struct ion_buffer *buffer;
 	struct sg_table *table;
-	struct scatterlist *sg;
-	int i, ret;
+	int ret;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer)
@@ -119,20 +118,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	INIT_LIST_HEAD(&buffer->vmas);
 	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
-	/*
-	 * this will set up dma addresses for the sglist -- it is not
-	 * technically correct as per the dma api -- a specific
-	 * device isn't really taking ownership here. However, in practice on
-	 * our systems the only dma_address space is physical addresses.
-	 * Additionally, we can't afford the overhead of invalidating every
-	 * allocation via dma_map_sg. The implicit contract here is that
-	 * memory coming from the heaps is ready for dma, ie if it has a
-	 * cached mapping that mapping has been invalidated
-	 */
-	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-	}
 	mutex_lock(&dev->buffer_lock);
 	ion_buffer_add(dev, buffer);
 	mutex_unlock(&dev->buffer_lock);
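
For context, here is the direction the commit message points at: DMA addresses belong in the scatterlist only once a real device takes ownership through the DMA API, rather than being stamped with physical addresses at allocation time. The sketch below illustrates that pattern with a dma-buf map/unmap callback pair; it is not code from this patch, and the example_* names and the use of attachment->priv to carry the table are illustrative assumptions.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Illustrative dma-buf map callback: dma_map_sg() fills in
 * sg_dma_address()/sg_dma_len() for the attaching device, replacing
 * the removed sg_dma_address(sg) = sg_phys(sg) shortcut.
 */
static struct sg_table *example_map_dma_buf(struct dma_buf_attachment *attachment,
					    enum dma_data_direction direction)
{
	struct sg_table *table = attachment->priv;	/* assumed set at attach time */

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, direction))
		return ERR_PTR(-ENOMEM);

	return table;
}

/* Pairs with the dma_map_sg() above and releases device ownership. */
static void example_unmap_dma_buf(struct dma_buf_attachment *attachment,
				  struct sg_table *table,
				  enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

Deferring the mapping this way also lets dma_map_sg() perform whatever cache maintenance the mapping device actually needs, instead of treating every allocation as ready for DMA up front, which is the implicit contract the removed comment describes.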