KVM: PPC: Add @page_shift to kvmppc_spapr_tce_table

At the moment the kvmppc_spapr_tce_table struct can only describe
4GB windows and handle fixed-size (4K) pages. Dynamic DMA windows
support more, so these limits need to be extended.

This replaces window_size (in bytes, 4GB max) with page_shift (32-bit)
and size (64-bit, in pages).
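
For illustration only (not part of the patch): a minimal standalone sketch
of how the new fields encode what window_size used to, assuming the 4KiB
IOMMU page shift that the user interface currently always uses. The struct
below is a stand-in, not the kernel's kvmppc_spapr_tce_table.

/* Illustrative sketch: size (in IOMMU pages) and page_shift together
 * replace window_size (in bytes): window_size == size << page_shift.
 */
#include <stdio.h>

struct tce_table_geom {			/* stand-in, not the kernel struct */
	unsigned int page_shift;	/* IOMMU page shift, 12 for 4KiB pages */
	unsigned long long size;	/* window size in IOMMU pages */
};

int main(void)
{
	/* a 4GB window with 4KiB pages: 2^20 pages, 2^32 bytes */
	struct tce_table_geom stt = { .page_shift = 12, .size = 1ULL << 20 };

	printf("window = %llu bytes\n", stt.size << stt.page_shift);
	return 0;
}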

This should cause no behavioural change as it only changes internal
structures; the user interface still only allows one to create a
32-bit table with 4KiB pages at this stage.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
commit fe26e52712 (parent 01d01d6919)
Author: Alexey Kardashevskiy, 2016-03-01 17:54:38 +11:00
Committed by: Paul Mackerras
3 files changed, 23 insertions(+), 23 deletions(-)


@@ -182,8 +182,9 @@ struct kvmppc_spapr_tce_table {
 	struct list_head list;
 	struct kvm *kvm;
 	u64 liobn;
-	u32 window_size;
 	struct rcu_head rcu;
+	u32 page_shift;
+	u64 size;		/* window size in pages */
 	struct page *pages[0];
 };


@@ -40,10 +40,9 @@
 #include <asm/iommu.h>
 #include <asm/tce.h>
 
-static unsigned long kvmppc_tce_pages(unsigned long window_size)
+static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
 {
-	return ALIGN((window_size >> IOMMU_PAGE_SHIFT_4K)
-		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
 }
 
 static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
@@ -95,8 +94,7 @@ static void release_spapr_tce_table(struct rcu_head *head)
 {
 	struct kvmppc_spapr_tce_table *stt = container_of(head,
 			struct kvmppc_spapr_tce_table, rcu);
-	int i;
-	unsigned long npages = kvmppc_tce_pages(stt->window_size);
+	unsigned long i, npages = kvmppc_tce_pages(stt->size);
 
 	for (i = 0; i < npages; i++)
 		__free_page(stt->pages[i]);
@@ -109,7 +107,7 @@ static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
 	struct page *page;
 
-	if (vmf->pgoff >= kvmppc_tce_pages(stt->window_size))
+	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
 		return VM_FAULT_SIGBUS;
 
 	page = stt->pages[vmf->pgoff];
@@ -137,7 +135,7 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
 	kvm_put_kvm(stt->kvm);
 
 	kvmppc_account_memlimit(
-		kvmppc_stt_pages(kvmppc_tce_pages(stt->window_size)), false);
+		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
 	call_rcu(&stt->rcu, release_spapr_tce_table);
 
 	return 0;
@@ -152,7 +150,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				   struct kvm_create_spapr_tce *args)
 {
 	struct kvmppc_spapr_tce_table *stt = NULL;
-	unsigned long npages;
+	unsigned long npages, size;
 	int ret = -ENOMEM;
 	int i;
@@ -162,7 +160,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 			return -EBUSY;
 	}
 
-	npages = kvmppc_tce_pages(args->window_size);
+	size = args->window_size >> IOMMU_PAGE_SHIFT_4K;
+	npages = kvmppc_tce_pages(size);
 	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
 	if (ret) {
 		stt = NULL;
@@ -175,7 +174,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 		goto fail;
 
 	stt->liobn = args->liobn;
-	stt->window_size = args->window_size;
+	stt->page_shift = IOMMU_PAGE_SHIFT_4K;
+	stt->size = size;
 	stt->kvm = kvm;
 
 	for (i = 0; i < npages; i++) {
@@ -218,7 +218,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	if (!stt)
 		return H_TOO_HARD;
 
-	entry = ioba >> IOMMU_PAGE_SHIFT_4K;
+	entry = ioba >> stt->page_shift;
 	/*
 	 * SPAPR spec says that the maximum size of the list is 512 TCEs
 	 * so the whole table fits in 4K page


@@ -72,11 +72,10 @@ EXPORT_SYMBOL_GPL(kvmppc_find_table);
 long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
 		unsigned long ioba, unsigned long npages)
 {
-	unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1;
-	unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K;
-	unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K;
+	unsigned long mask = (1ULL << stt->page_shift) - 1;
+	unsigned long idx = ioba >> stt->page_shift;
 
-	if ((ioba & mask) || (idx + npages > size) || (idx + npages < idx))
+	if ((ioba & mask) || (idx + npages > stt->size) || (idx + npages < idx))
 		return H_PARAMETER;
 
 	return H_SUCCESS;
@@ -96,8 +95,8 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
  */
 long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
 {
-	unsigned long mask =
-			~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ);
+	unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
+	unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);
 
 	if (tce & mask)
 		return H_PARAMETER;
@@ -198,7 +197,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	if (ret != H_SUCCESS)
 		return ret;
 
-	kvmppc_tce_put(stt, ioba >> IOMMU_PAGE_SHIFT_4K, tce);
+	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
 
 	return H_SUCCESS;
 }
@@ -244,7 +243,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	if (!stt)
 		return H_TOO_HARD;
 
-	entry = ioba >> IOMMU_PAGE_SHIFT_4K;
+	entry = ioba >> stt->page_shift;
 	/*
 	 * The spec says that the maximum size of the list is 512 TCEs
 	 * so the whole table addressed resides in 4K page
@@ -313,8 +312,8 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
 	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
 		return H_PARAMETER;
 
-	for (i = 0; i < npages; ++i, ioba += IOMMU_PAGE_SIZE_4K)
-		kvmppc_tce_put(stt, ioba >> IOMMU_PAGE_SHIFT_4K, tce_value);
+	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
 
 	return H_SUCCESS;
 }
@@ -336,7 +335,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	if (ret != H_SUCCESS)
 		return ret;
 
-	idx = ioba >> IOMMU_PAGE_SHIFT_4K;
+	idx = ioba >> stt->page_shift;
 	page = stt->pages[idx / TCES_PER_PAGE];
 	tbl = (u64 *)page_address(page);