powerpc/pseries: iommu enablement for CMO
To support Cooperative Memory Overcommitment (CMO), we need to check
for failure from some of the tce hcalls.

These changes for the pseries platform affect the powerpc architecture;
patches for the other affected platforms are included in this patch.

pSeries platform IOMMU code changes:
 * platform TCE functions must handle H_NOT_ENOUGH_RESOURCES errors and
   return an error.

Architecture IOMMU code changes:
 * Calls to ppc_md.tce_build need to check return values and return
   DMA_MAPPING_ERROR for transient errors.

Architecture changes:
 * struct machdep_calls for tce_build*_pSeriesLP functions need to change
   to indicate failure.
 * all other platforms will need updates to iommu functions to match the
   new calling semantics; they will return 0 on success. The other
   platforms' default configs have been built, but no further testing was
   performed.

Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
commit 6490c4903d
parent ffa5abbd0c
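For readers unfamiliar with the convention adopted above, the following is a minimal, self-contained C sketch of the error path the commit message describes: a tce_build-style hook that reports H_NOT_ENOUGH_RESOURCES as a non-zero return, and a caller that unwinds its reservation and hands back an error cookie instead of a DMA address. The demo_* names, the simulated failure flag and the numeric hcall return value are assumptions for illustration only; the real code paths are ppc_md.tce_build() and iommu_alloc() in the diff below.

/*
 * Minimal userspace sketch of the new calling convention (illustrative
 * only; not the kernel implementation).
 */
#include <stdio.h>

#define H_SUCCESS               0
#define H_NOT_ENOUGH_RESOURCES  (-44)   /* assumed value, for illustration */
#define DMA_ERROR_CODE          (~0UL)  /* error cookie instead of a DMA address */

/* Platform hook stand-in: builds 'npages' TCEs and returns non-zero only
 * for the transient H_NOT_ENOUGH_RESOURCES case (CMO), 0 otherwise. */
static int demo_tce_build(long npages, int simulate_cmo_failure)
{
        long rc = simulate_cmo_failure ? H_NOT_ENOUGH_RESOURCES : H_SUCCESS;

        while (npages--) {
                if (rc == H_NOT_ENOUGH_RESOURCES)
                        return (int)rc;   /* caller must undo its reservation */
        }
        return 0;
}

/* Generic mapping path stand-in: on build failure, release the reservation
 * and return DMA_ERROR_CODE instead of a bus address. */
static unsigned long demo_map(long entry, long npages, int simulate_cmo_failure)
{
        unsigned long ret = (unsigned long)entry << 12;   /* page-shifted "DMA address" */

        if (demo_tce_build(npages, simulate_cmo_failure)) {
                /* a real implementation would __iommu_free() the range here */
                return DMA_ERROR_CODE;
        }
        return ret;
}

int main(void)
{
        printf("success: 0x%lx\n", demo_map(5, 1, 0));
        printf("failure: 0x%lx\n", demo_map(5, 1, 1));
        return 0;
}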
@@ -49,6 +49,8 @@ static int novmerge = 1;
 
 static int protect4gb = 1;
 
+static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
+
 static inline unsigned long iommu_num_pages(unsigned long vaddr,
                                             unsigned long slen)
 {
@@ -191,6 +193,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 {
         unsigned long entry, flags;
         dma_addr_t ret = DMA_ERROR_CODE;
+        int build_fail;
 
         spin_lock_irqsave(&(tbl->it_lock), flags);
 
@@ -205,9 +208,21 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
         ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */
 
         /* Put the TCEs in the HW table */
-        ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
-                         direction, attrs);
+        build_fail = ppc_md.tce_build(tbl, entry, npages,
+                                      (unsigned long)page & IOMMU_PAGE_MASK,
+                                      direction, attrs);
 
+        /* ppc_md.tce_build() only returns non-zero for transient errors.
+         * Clean up the table bitmap in this case and return
+         * DMA_ERROR_CODE. For all other errors the functionality is
+         * not altered.
+         */
+        if (unlikely(build_fail)) {
+                __iommu_free(tbl, ret, npages);
+
+                spin_unlock_irqrestore(&(tbl->it_lock), flags);
+                return DMA_ERROR_CODE;
+        }
 
         /* Flush/invalidate TLB caches if necessary */
         if (ppc_md.tce_flush)
@@ -276,7 +291,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
         dma_addr_t dma_next = 0, dma_addr;
         unsigned long flags;
         struct scatterlist *s, *outs, *segstart;
-        int outcount, incount, i;
+        int outcount, incount, i, build_fail = 0;
         unsigned int align;
         unsigned long handle;
         unsigned int max_seg_size;
@@ -337,8 +352,11 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                             npages, entry, dma_addr);
 
                 /* Insert into HW table */
-                ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
-                                 direction, attrs);
+                build_fail = ppc_md.tce_build(tbl, entry, npages,
+                                              vaddr & IOMMU_PAGE_MASK,
+                                              direction, attrs);
+                if(unlikely(build_fail))
+                        goto failure;
 
                 /* If we are in an open segment, try merging */
                 if (segstart != s) {

@@ -172,7 +172,7 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
         }
 }
 
-static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
+static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
                 unsigned long uaddr, enum dma_data_direction direction,
                 struct dma_attrs *attrs)
 {
@@ -213,6 +213,7 @@ static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
 
         pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
                  index, npages, direction, base_pte);
+        return 0;
 }
 
 static void tce_free_cell(struct iommu_table *tbl, long index, long npages)

@@ -41,7 +41,7 @@
 #include <asm/iseries/hv_call_event.h>
 #include <asm/iseries/iommu.h>
 
-static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
+static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
                 unsigned long uaddr, enum dma_data_direction direction,
                 struct dma_attrs *attrs)
 {
@@ -71,6 +71,7 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
                 index++;
                 uaddr += TCE_PAGE_SIZE;
         }
+        return 0;
 }
 
 static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)

@@ -83,7 +83,7 @@ static u32 *iob_l2_base;
 static struct iommu_table iommu_table_iobmap;
 static int iommu_table_iobmap_inited;
 
-static void iobmap_build(struct iommu_table *tbl, long index,
+static int iobmap_build(struct iommu_table *tbl, long index,
                          long npages, unsigned long uaddr,
                          enum dma_data_direction direction,
                          struct dma_attrs *attrs)
@@ -108,6 +108,7 @@ static void iobmap_build(struct iommu_table *tbl, long index,
                 uaddr += IOBMAP_PAGE_SIZE;
                 bus_addr += IOBMAP_PAGE_SIZE;
         }
+        return 0;
 }

@@ -48,7 +48,7 @@
 #include "plpar_wrappers.h"
 
 
-static void tce_build_pSeries(struct iommu_table *tbl, long index,
+static int tce_build_pSeries(struct iommu_table *tbl, long index,
                               long npages, unsigned long uaddr,
                               enum dma_data_direction direction,
                               struct dma_attrs *attrs)
@@ -72,6 +72,7 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
                 uaddr += TCE_PAGE_SIZE;
                 tcep++;
         }
+        return 0;
 }
 
 
@@ -94,14 +95,19 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
         return *tcep;
 }
 
-static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
+static void tce_free_pSeriesLP(struct iommu_table*, long, long);
+static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
+
+static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                 long npages, unsigned long uaddr,
                                 enum dma_data_direction direction,
                                 struct dma_attrs *attrs)
 {
-        u64 rc;
+        u64 rc = 0;
         u64 proto_tce, tce;
         u64 rpn;
+        int ret = 0;
+        long tcenum_start = tcenum, npages_start = npages;
 
         rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
         proto_tce = TCE_PCI_READ;
@@ -112,6 +118,13 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
                 tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
                 rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
 
+                if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
+                        ret = (int)rc;
+                        tce_free_pSeriesLP(tbl, tcenum_start,
+                                           (npages_start - (npages + 1)));
+                        break;
+                }
+
                 if (rc && printk_ratelimit()) {
                         printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
                         printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
@@ -123,25 +136,27 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
                 tcenum++;
                 rpn++;
         }
+        return ret;
 }
 
 static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
 
-static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                      long npages, unsigned long uaddr,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *attrs)
 {
-        u64 rc;
+        u64 rc = 0;
         u64 proto_tce;
         u64 *tcep;
         u64 rpn;
         long l, limit;
+        long tcenum_start = tcenum, npages_start = npages;
+        int ret = 0;
 
         if (npages == 1) {
-                tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
-                                    direction, attrs);
-                return;
+                return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
+                                           direction, attrs);
         }
 
         tcep = __get_cpu_var(tce_page);
@@ -153,9 +168,8 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                 tcep = (u64 *)__get_free_page(GFP_ATOMIC);
                 /* If allocation fails, fall back to the loop implementation */
                 if (!tcep) {
-                        tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
-                                            direction, attrs);
-                        return;
+                        return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
+                                                   direction, attrs);
                 }
                 __get_cpu_var(tce_page) = tcep;
         }
@@ -187,6 +201,13 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                 tcenum += limit;
         } while (npages > 0 && !rc);
 
+        if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
+                ret = (int)rc;
+                tce_freemulti_pSeriesLP(tbl, tcenum_start,
+                                        (npages_start - (npages + limit)));
+                return ret;
+        }
+
         if (rc && printk_ratelimit()) {
                 printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
                 printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
@@ -194,6 +215,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                 printk("\ttce[0] val = 0x%lx\n", tcep[0]);
                 show_stack(current, (unsigned long *)__get_SP());
         }
+        return ret;
 }
 
 static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)

@@ -147,7 +147,7 @@ static void dart_flush(struct iommu_table *tbl)
         }
 }
 
-static void dart_build(struct iommu_table *tbl, long index,
+static int dart_build(struct iommu_table *tbl, long index,
                        long npages, unsigned long uaddr,
                        enum dma_data_direction direction,
                        struct dma_attrs *attrs)
@@ -184,6 +184,7 @@ static void dart_build(struct iommu_table *tbl, long index,
         } else {
                 dart_dirty = 1;
         }
+        return 0;
 }

@@ -76,7 +76,7 @@ struct machdep_calls {
          * destroyed as well */
         void            (*hpte_clear_all)(void);
 
-        void            (*tce_build)(struct iommu_table * tbl,
+        int             (*tce_build)(struct iommu_table *tbl,
                                      long index,
                                      long npages,
                                      unsigned long uaddr,