sparc64: Enable sun4v dma ops to use IOMMU v2 APIs
Add Hypervisor IOMMU v2 APIs pci_iotsb_map(), pci_iotsb_demap() and enable
sun4v dma ops to use the IOMMU v2 API for all PCIe devices with a 64-bit
DMA mask.

Signed-off-by: Tushar Dave <tushar.n.dave@oracle.com>
Reviewed-by: chris hyser <chris.hyser@oracle.com>
Acked-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 5116ab4eab
commit f08978b0fd
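The core of the change, repeated across the DMA-op hunks below, is that the mapping paths read the device's DMA mask (dma_mask or coherent_dma_mask) and the unmap paths test the DMA address itself, picking either the legacy IOMMU map table or the new ATU (IOMMU v2) table; the batch-flush path then issues pci_sun4v_iommu_map() or pci_sun4v_iotsb_map() accordingly. A minimal standalone C sketch of that selection follows; DMA_BIT_MASK mirrors the kernel definition, while pick_map_table() and its strings are made-up illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's DMA_BIT_MASK(); the n == 64 special case avoids an
 * undefined 64-bit shift.
 */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Hypothetical helper for illustration only: same test as
 * "if (mask <= DMA_BIT_MASK(32))" in the hunks below.
 */
static const char *pick_map_table(uint64_t dma_mask)
{
	return (dma_mask <= DMA_BIT_MASK(32)) ? "legacy iommu->tbl"
					      : "atu->tbl (IOMMU v2)";
}

int main(void)
{
	printf("32-bit DMA mask -> %s\n", pick_map_table(DMA_BIT_MASK(32)));
	printf("64-bit DMA mask -> %s\n", pick_map_table(DMA_BIT_MASK(64)));
	return 0;
}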
@@ -2377,6 +2377,12 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
  * iotsb_index	Zero-based IOTTE number within an IOTSB.
  */
 
+/* The index_count argument consists of two fields:
+ * bits 63:48 #iottes and bits 47:0 iotsb_index
+ */
+#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
+	(((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
+
 /* pci_iotsb_conf()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_PCI_IOTSB_CONF
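The HV_PCI_IOTSB_INDEX_COUNT macro added above packs the IOTTE count into bits 63:48 and the starting IOTSB index into bits 47:0; that packed value is the index_count argument later handed to pci_sun4v_iotsb_map(). A quick worked example (standalone C with u64 spelled as uint64_t; the sample values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same layout as the macro in the hunk above. */
#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
	(((uint64_t)(__iottes) << 48) | ((uint64_t)(__iotsb_index)))

int main(void)
{
	/* 8 IOTTEs starting at IOTSB index 0x100 packs to 0x0008000000000100. */
	printf("index_count = 0x%016llx\n",
	       (unsigned long long)HV_PCI_IOTSB_INDEX_COUNT(8, 0x100));
	return 0;
}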
@@ -72,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
 }
 
 /* Interrupts must be disabled. */
-static long iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
 	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
+	u64 *pglist = p->pglist;
+	u64 index_count;
 	unsigned long devhandle = pbm->devhandle;
 	unsigned long prot = p->prot;
 	unsigned long entry = p->entry;
-	u64 *pglist = p->pglist;
 	unsigned long npages = p->npages;
+	unsigned long iotsb_num;
+	unsigned long ret;
+	long num;
 
 	/* VPCI maj=1, min=[0,1] only supports read and write */
 	if (vpci_major < 2)
 		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
 	while (npages != 0) {
-		long num;
-
-		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
-					  npages, prot, __pa(pglist));
-		if (unlikely(num < 0)) {
-			if (printk_ratelimit())
-				printk("iommu_batch_flush: IOMMU map of "
-				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
-				       "status %ld\n",
-				       devhandle, HV_PCI_TSBID(0, entry),
-				       npages, prot, __pa(pglist), num);
-			return -1;
+		if (mask <= DMA_BIT_MASK(32)) {
+			num = pci_sun4v_iommu_map(devhandle,
+						  HV_PCI_TSBID(0, entry),
+						  npages,
+						  prot,
+						  __pa(pglist));
+			if (unlikely(num < 0)) {
+				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
+						   __func__,
+						   devhandle,
+						   HV_PCI_TSBID(0, entry),
+						   npages, prot, __pa(pglist),
+						   num);
+				return -1;
+			}
+		} else {
+			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
+			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
+			ret = pci_sun4v_iotsb_map(devhandle,
+						  iotsb_num,
+						  index_count,
+						  prot,
+						  __pa(pglist),
+						  &num);
+			if (unlikely(ret != HV_EOK)) {
+				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
+						   __func__,
+						   devhandle, iotsb_num,
+						   index_count, prot,
+						   __pa(pglist), ret);
+				return -1;
+			}
 		}
-
 		entry += num;
 		npages -= num;
 		pglist += num;
@@ -111,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p)
 	return 0;
 }
 
-static inline void iommu_batch_new_entry(unsigned long entry)
+static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
 {
 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
 	if (p->entry + p->npages == entry)
 		return;
 	if (p->entry != ~0UL)
-		iommu_batch_flush(p);
+		iommu_batch_flush(p, mask);
 	p->entry = entry;
 }
 
 /* Interrupts must be disabled. */
-static inline long iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page, u64 mask)
 {
 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
@@ -131,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page)
 
 	p->pglist[p->npages++] = phys_page;
 	if (p->npages == PGLIST_NENTS)
-		return iommu_batch_flush(p);
+		return iommu_batch_flush(p, mask);
 
 	return 0;
 }
 
 /* Interrupts must be disabled. */
-static inline long iommu_batch_end(void)
+static inline long iommu_batch_end(u64 mask)
 {
 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
 	BUG_ON(p->npages >= PGLIST_NENTS);
 
-	return iommu_batch_flush(p);
+	return iommu_batch_flush(p, mask);
 }
 
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 				   dma_addr_t *dma_addrp, gfp_t gfp,
 				   unsigned long attrs)
 {
+	u64 mask;
 	unsigned long flags, order, first_page, npages, n;
 	unsigned long prot = 0;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
 	struct page *page;
 	void *ret;
 	long entry;
@@ -177,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
 	iommu = dev->archdata.iommu;
+	atu = iommu->atu;
+
+	mask = dev->coherent_dma_mask;
+	if (mask <= DMA_BIT_MASK(32))
+		tbl = &iommu->tbl;
+	else
+		tbl = &atu->tbl;
 
-	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto range_alloc_fail;
 
-	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	first_page = __pa(first_page);
 
@@ -196,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 			  entry);
 
 	for (n = 0; n < npages; n++) {
-		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
 
-	if (unlikely(iommu_batch_end() < 0L))
+	if (unlikely(iommu_batch_end(mask) < 0L))
 		goto iommu_map_fail;
 
 	local_irq_restore(flags);
@@ -209,7 +242,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
+	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
 
 range_alloc_fail:
 	free_pages(first_page, order);
@@ -253,18 +286,27 @@ unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
 	return 0;
 }
 
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
-			       unsigned long npages)
+static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
+			       dma_addr_t dvma, unsigned long iotsb_num,
+			       unsigned long entry, unsigned long npages)
 {
-	u32 devhandle = *(u32 *)demap_arg;
 	unsigned long num, flags;
+	unsigned long ret;
 
 	local_irq_save(flags);
 	do {
-		num = pci_sun4v_iommu_demap(devhandle,
-					    HV_PCI_TSBID(0, entry),
-					    npages);
-
+		if (dvma <= DMA_BIT_MASK(32)) {
+			num = pci_sun4v_iommu_demap(devhandle,
+						    HV_PCI_TSBID(0, entry),
+						    npages);
+		} else {
+			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
+						    entry, npages, &num);
+			if (unlikely(ret != HV_EOK)) {
+				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
+						   ret);
+			}
+		}
 		entry += num;
 		npages -= num;
 	} while (npages != 0);
@@ -276,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
 	unsigned long order, npages, entry;
+	unsigned long iotsb_num;
 	u32 devhandle;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
+	atu = iommu->atu;
 	devhandle = pbm->devhandle;
-	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
-	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
+
+	if (dvma <= DMA_BIT_MASK(32)) {
+		tbl = &iommu->tbl;
+		iotsb_num = 0; /* we don't care for legacy iommu */
+	} else {
+		tbl = &atu->tbl;
+		iotsb_num = atu->iotsb->iotsb_num;
+	}
+	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
+	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
+	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
 	order = get_order(size);
 	if (order < 10)
 		free_pages((unsigned long)cpu, order);
@@ -297,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 				  unsigned long attrs)
 {
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
+	u64 mask;
 	unsigned long flags, npages, oaddr;
 	unsigned long i, base_paddr;
-	u32 bus_addr, ret;
 	unsigned long prot;
+	dma_addr_t bus_addr, ret;
 	long entry;
 
 	iommu = dev->archdata.iommu;
+	atu = iommu->atu;
 
 	if (unlikely(direction == DMA_NONE))
 		goto bad;
@@ -312,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
-	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+	mask = *dev->dma_mask;
+	if (mask <= DMA_BIT_MASK(32))
+		tbl = &iommu->tbl;
+	else
+		tbl = &atu->tbl;
+
+	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto bad;
 
-	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -333,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	iommu_batch_start(dev, prot, entry);
 
 	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
-		long err = iommu_batch_add(base_paddr);
+		long err = iommu_batch_add(base_paddr, mask);
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
-	if (unlikely(iommu_batch_end() < 0L))
+	if (unlikely(iommu_batch_end(mask) < 0L))
 		goto iommu_map_fail;
 
 	local_irq_restore(flags);
@@ -350,7 +414,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	return DMA_ERROR_CODE;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 	return DMA_ERROR_CODE;
 }
 
@@ -360,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
 	unsigned long npages;
+	unsigned long iotsb_num;
 	long entry;
 	u32 devhandle;
 
@@ -372,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
+	atu = iommu->atu;
 	devhandle = pbm->devhandle;
 
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	bus_addr &= IO_PAGE_MASK;
-	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
-	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+
+	if (bus_addr <= DMA_BIT_MASK(32)) {
+		iotsb_num = 0; /* we don't care for legacy iommu */
+		tbl = &iommu->tbl;
+	} else {
+		iotsb_num = atu->iotsb->iotsb_num;
+		tbl = &atu->tbl;
+	}
+	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
+	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
+	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -393,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
+	u64 mask;
 	unsigned long base_shift;
 	long err;
 
 	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
+	atu = iommu->atu;
+
 	if (nelems == 0 || !iommu)
 		return 0;
 
@@ -424,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	max_seg_size = dma_get_max_seg_size(dev);
 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
+
+	mask = *dev->dma_mask;
+	if (mask <= DMA_BIT_MASK(32))
+		tbl = &iommu->tbl;
+	else
+		tbl = &atu->tbl;
+
+	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
+
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
@@ -437,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+		entry = iommu_tbl_range_alloc(dev, tbl, npages,
 					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
 		if (unlikely(entry == IOMMU_ERROR_CODE)) {
-			if (printk_ratelimit())
-				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
-				       " npages %lx\n", iommu, paddr, npages);
+			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
+					   tbl, paddr, npages);
 			goto iommu_map_failed;
 		}
 
-		iommu_batch_new_entry(entry);
+		iommu_batch_new_entry(entry, mask);
 
 		/* Convert entry to a dma_addr_t */
-		dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
+		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
 		/* Insert into HW table */
 		paddr &= IO_PAGE_MASK;
 		while (npages--) {
-			err = iommu_batch_add(paddr);
+			err = iommu_batch_add(paddr, mask);
 			if (unlikely(err < 0L))
 				goto iommu_map_failed;
 			paddr += IO_PAGE_SIZE;
@@ -492,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		dma_next = dma_addr + slen;
 	}
 
-	err = iommu_batch_end();
+	err = iommu_batch_end(mask);
 
 	if (unlikely(err < 0L))
 		goto iommu_map_failed;
@@ -515,7 +603,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		vaddr = s->dma_address & IO_PAGE_MASK;
 		npages = iommu_num_pages(s->dma_address, s->dma_length,
 					 IO_PAGE_SIZE);
-		iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+		iommu_tbl_range_free(tbl, vaddr, npages,
 				     IOMMU_ERROR_CODE);
 		/* XXX demap? XXX */
 		s->dma_address = DMA_ERROR_CODE;
@@ -536,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct pci_pbm_info *pbm;
 	struct scatterlist *sg;
 	struct iommu *iommu;
+	struct atu *atu;
 	unsigned long flags, entry;
+	unsigned long iotsb_num;
 	u32 devhandle;
 
 	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
+	atu = iommu->atu;
 	devhandle = pbm->devhandle;
 
 	local_irq_save(flags);
@@ -552,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		dma_addr_t dma_handle = sg->dma_address;
 		unsigned int len = sg->dma_length;
 		unsigned long npages;
-		struct iommu_map_table *tbl = &iommu->tbl;
+		struct iommu_map_table *tbl;
 		unsigned long shift = IO_PAGE_SHIFT;
 
 		if (!len)
 			break;
 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
+
+		if (dma_handle <= DMA_BIT_MASK(32)) {
+			iotsb_num = 0; /* we don't care for legacy iommu */
+			tbl = &iommu->tbl;
+		} else {
+			iotsb_num = atu->iotsb->iotsb_num;
+			tbl = &atu->tbl;
+		}
 		entry = ((dma_handle - tbl->table_map_base) >> shift);
-		dma_4v_iommu_demap(&devhandle, entry, npages);
-		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
+				   entry, npages);
+		iommu_tbl_range_free(tbl, dma_handle, npages,
 				     IOMMU_ERROR_CODE);
 		sg = sg_next(sg);
 	}
@@ -99,4 +99,15 @@ unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
 unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
 				   unsigned long iotsb_num,
 				   unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+				  unsigned long iotsb_num,
+				  unsigned long iotsb_index_iottes,
+				  unsigned long io_attributes,
+				  unsigned long io_page_list_pa,
+				  long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+				    unsigned long iotsb_num,
+				    unsigned long iotsb_index,
+				    unsigned long iottes,
+				    unsigned long *demapped);
 #endif /* !(_PCI_SUN4V_H) */
@@ -392,3 +392,39 @@ ENTRY(pci_sun4v_iotsb_bind)
 	retl
 	 nop
 ENDPROC(pci_sun4v_iotsb_bind)
+
+/*
+ * %o0:	devhandle
+ * %o1:	iotsb_num/iotsb_handle
+ * %o2:	index_count
+ * %o3:	iotte_attributes
+ * %o4:	io_page_list_p
+ * %o5:	&mapped
+ *
+ * returns %o0:	status
+ *         %o1:	#mapped
+ */
+ENTRY(pci_sun4v_iotsb_map)
+	mov	%o5, %g1
+	mov	HV_FAST_PCI_IOTSB_MAP, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_map)
+
+/*
+ * %o0:	devhandle
+ * %o1:	iotsb_num/iotsb_handle
+ * %o2:	iotsb_index
+ * %o3:	#iottes
+ * %o4:	&demapped
+ *
+ * returns %o0:	status
+ *         %o1:	#demapped
+ */
+ENTRY(pci_sun4v_iotsb_demap)
+	mov	HV_FAST_PCI_IOTSB_DEMAP, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%o4]
+ENDPROC(pci_sun4v_iotsb_demap)
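For reference, the call pattern the patch establishes for the new demap hypercall (see dma_4v_iommu_demap() above): the hypervisor reports how many IOTTEs it actually demapped through the output argument, so the caller loops on that count and treats any status other than HV_EOK as an error. A standalone C sketch follows with a stubbed-in pci_sun4v_iotsb_demap(); the stub is hypothetical and only stands in for the real trap wrapper above:

#include <stdio.h>

#define HV_EOK 0UL

/* Hypothetical stand-in for the trap wrapper: pretend the hypervisor demaps
 * at most 4 IOTTEs per call so the retry loop has work to do.
 */
static unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
					   unsigned long iotsb_num,
					   unsigned long iotsb_index,
					   unsigned long iottes,
					   unsigned long *demapped)
{
	(void)devhandle; (void)iotsb_num; (void)iotsb_index;
	*demapped = iottes > 4 ? 4 : iottes;
	return HV_EOK;
}

int main(void)
{
	unsigned long entry = 0x200, npages = 10, num, ret;

	do {
		ret = pci_sun4v_iotsb_demap(0x100, 1, entry, npages, &num);
		if (ret != HV_EOK) {
			fprintf(stderr, "pci_iotsb_demap() failed: %lu\n", ret);
			break;
		}
		printf("demapped %lu IOTTEs starting at index %#lx\n",
		       num, entry);
		entry += num;
		npages -= num;
	} while (npages != 0);

	return 0;
}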