drm/vmwgfx: Use the dma scatter-gather iterator to get dma addresses

Use struct sg_dma_page_iter in favour of struct sg_page_iter, which fairly
recently was declared useless for obtaining dma addresses.

With a struct sg_dma_page_iter we can't call sg_page_iter_page(), so when
the page is needed, use the same page lookup mechanism as for the
non-sg dma modes instead of going through the sg iterator.
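
For context, the generic pattern this moves to looks roughly like the
sketch below. It is illustrative only and not part of this patch: the
helper example_walk(), its page-array parameter and the use of
sgt->nents are made-up assumptions. DMA addresses come from the
DMA-side iterator, while struct page pointers are taken from a page
array the driver already maintains, mirroring the non-sg lookup path.

#include <linux/scatterlist.h>

/*
 * Illustrative sketch only: walk a dma-mapped sg table one PAGE_SIZE
 * step at a time. The DMA address comes from the sg_dma_page_iter;
 * the struct page comes from a page array the caller already owns,
 * assuming one CPU page per DMA page step.
 */
static void example_walk(struct sg_table *sgt, struct page **pages)
{
        struct sg_dma_page_iter dma_iter;
        unsigned long i = 0;

        /* sgt->nents is the entry count returned by dma_map_sg() */
        for_each_sg_dma_page(sgt->sgl, &dma_iter, sgt->nents, 0) {
                dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
                struct page *page = pages[i++]; /* not sg_page_iter_page() */

                /* ... use addr and page ... */
        }
}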

Note that the fixes tag doesn't really point to a commit introducing a
failure / regression, but rather to a commit that implemented a simple
workaround for this problem.

Cc: Jason Gunthorpe <jgg@mellanox.com>
Fixes: d901b2760d ("lib/scatterlist: Provide a DMA page iterator")
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
commit 8dc39cfca3
parent e41c20cf50
Thomas Hellstrom, 2019-05-15 17:45:23 +02:00
2 changed files with 8 additions and 21 deletions

@@ -296,7 +296,7 @@ struct vmw_sg_table {
 struct vmw_piter {
        struct page **pages;
        const dma_addr_t *addrs;
-       struct sg_page_iter iter;
+       struct sg_dma_page_iter iter;
        unsigned long i;
        unsigned long num_pages;
        bool (*next)(struct vmw_piter *);

@@ -266,7 +266,9 @@ static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
 static bool __vmw_piter_sg_next(struct vmw_piter *viter)
 {
-       return __sg_page_iter_next(&viter->iter);
+       bool ret = __vmw_piter_non_sg_next(viter);
+
+       return __sg_page_iter_dma_next(&viter->iter) && ret;
 }
@@ -284,12 +286,6 @@ static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
        return viter->pages[viter->i];
 }
 
-static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
-{
-       return sg_page_iter_page(&viter->iter);
-}
-
 /**
  * Helper functions to return the DMA address of the current page.
  *
@@ -311,13 +307,7 @@ static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
 static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
 {
-       /*
-        * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and
-        * needs revision. See
-        * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/
-        */
-       return sg_page_iter_dma_address(
-               container_of(&viter->iter, struct sg_dma_page_iter, base));
+       return sg_page_iter_dma_address(&viter->iter);
 }
@@ -336,26 +326,23 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
 {
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
+       viter->page = &__vmw_piter_non_sg_page;
+       viter->pages = vsgt->pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
-               viter->page = &__vmw_piter_non_sg_page;
-               viter->pages = vsgt->pages;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
-               viter->page = &__vmw_piter_non_sg_page;
                viter->addrs = vsgt->addrs;
-               viter->pages = vsgt->pages;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
-               viter->page = &__vmw_piter_sg_page;
-               __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+               __sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default: