iommu/amd: Make use of the per-domain flush queue

Fill the flush-queue on unmap and only flush the IOMMU and
device TLBs when a per-cpu queue gets full.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Joerg Roedel <jroedel@suse.de>
Date:   2017-06-02 15:37:26 +02:00
Commit: fd62190a67 (parent d4241a2761)
1 changed file with 56 additions and 4 deletions
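As a rough illustration of the mechanism the commit message describes, below is a minimal userspace C sketch of such a deferred-flush ring. FLUSH_QUEUE_SIZE, the struct layouts and the flush_domain() stand-in (covering domain_flush_tlb()/domain_flush_complete() plus the IOVA release) are assumptions made for the example; the real helpers are the ones added in the diff that follows.

/*
 * Minimal sketch of a deferred-flush ring buffer, assuming illustrative
 * names and sizes. Unmaps are queued; the expensive flush happens only
 * when the ring fills up (or when it is drained explicitly).
 */
#include <stdbool.h>
#include <stdio.h>

#define FLUSH_QUEUE_SIZE 8	/* assumed size for the example */

struct flush_entry {
	unsigned long iova_pfn;
	unsigned long pages;
};

struct flush_queue {
	unsigned head, tail;
	struct flush_entry entries[FLUSH_QUEUE_SIZE];
};

/* One slot stays empty so that head == tail means "empty". */
static bool queue_ring_full(struct flush_queue *q)
{
	return ((q->tail + 1) % FLUSH_QUEUE_SIZE) == q->head;
}

/* Stand-in for flushing the domain TLBs and releasing the queued IOVAs. */
static void flush_domain(struct flush_queue *q)
{
	unsigned i;

	for (i = q->head; i != q->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
		printf("release iova_pfn=%lu pages=%lu\n",
		       q->entries[i].iova_pfn, q->entries[i].pages);

	q->head = q->tail = 0;
}

/* Defer a flush: record the unmapped range, flush only when the ring is full. */
static void queue_add(struct flush_queue *q,
		      unsigned long iova_pfn, unsigned long pages)
{
	unsigned idx;

	if (queue_ring_full(q))
		flush_domain(q);

	idx = q->tail;
	q->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
	q->entries[idx].iova_pfn = iova_pfn;
	q->entries[idx].pages = pages;
}

int main(void)
{
	struct flush_queue q = { 0 };
	unsigned long pfn;

	/* Ten unmaps against an 8-slot ring force one deferred flush. */
	for (pfn = 0; pfn < 10; pfn++)
		queue_add(&q, pfn, 1);

	flush_domain(&q);	/* drain whatever is still queued */
	return 0;
}

The keep-one-slot-empty convention is why queue_ring_full() compares (tail + 1) % FLUSH_QUEUE_SIZE against head; the kernel helpers in the patch use the same convention.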


@@ -1806,6 +1806,61 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
 	return 0;
 }
 
+static inline bool queue_ring_full(struct flush_queue *queue)
+{
+	return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
+}
+
+#define queue_ring_for_each(i, q) \
+	for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
+
+static void queue_release(struct dma_ops_domain *dom,
+			  struct flush_queue *queue)
+{
+	unsigned i;
+
+	queue_ring_for_each(i, queue)
+		free_iova_fast(&dom->iovad,
+			       queue->entries[i].iova_pfn,
+			       queue->entries[i].pages);
+
+	queue->head = queue->tail = 0;
+}
+
+static inline unsigned queue_ring_add(struct flush_queue *queue)
+{
+	unsigned idx = queue->tail;
+
+	queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
+
+	return idx;
+}
+
+static void queue_add(struct dma_ops_domain *dom,
+		      unsigned long address, unsigned long pages)
+{
+	struct flush_queue *queue;
+	int idx;
+
+	pages     = __roundup_pow_of_two(pages);
+	address >>= PAGE_SHIFT;
+
+	queue = get_cpu_ptr(dom->flush_queue);
+
+	if (queue_ring_full(queue)) {
+		domain_flush_tlb(&dom->domain);
+		domain_flush_complete(&dom->domain);
+		queue_release(dom, queue);
+	}
+
+	idx = queue_ring_add(queue);
+
+	queue->entries[idx].iova_pfn = address;
+	queue->entries[idx].pages    = pages;
+
+	put_cpu_ptr(dom->flush_queue);
+}
+
 /*
  * Free a domain, only used if something went wrong in the
  * allocation path and we need to free an already allocated page table
@@ -2454,10 +2509,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		domain_flush_tlb(&dma_dom->domain);
 		domain_flush_complete(&dma_dom->domain);
 	} else {
-		/* Keep the if() around, we need it later again */
-		dma_ops_free_iova(dma_dom, dma_addr, pages);
-		domain_flush_tlb(&dma_dom->domain);
-		domain_flush_complete(&dma_dom->domain);
+		queue_add(dma_dom, dma_addr, pages);
 	}
 }