// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)      ({                      \
        static bool __section(.data.unlikely) __warned;         \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        if (unlikely(__ret_warn_once && !__warned)) {           \
                __warned = true;                                \
                pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",      \
                                __stringify(condition),         \
                                __func__, __LINE__);            \
                dump_stack();                                   \
        }                                                       \
        unlikely(__ret_warn_once);                              \
})

#else

#define WARN_ON_ONCE_RM(condition) ({                           \
        int __ret_warn_on = !!(condition);                      \
        unlikely(__ret_warn_on);                                \
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                unsigned long liobn)
{
        struct kvmppc_spapr_tce_table *stt;

        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;

        return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
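
/*
 * Converts a guest physical TCE value (GPA plus the TCE_PCI_READ/WRITE bits)
 * into the corresponding userspace address via the raw memslot array.
 * Safe to call in real mode as it only does arithmetic on memslot data.
 */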
static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
                unsigned long tce, unsigned long *ua)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        return 0;
}

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_PARAMETER;

        if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
                return H_TOO_HARD;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem)
                        return H_TOO_HARD;

                if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
                        return H_TOO_HARD;
        }

        return H_SUCCESS;
}

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), an arithmetic
 * operation that does not access the page struct.
 *
 * Theoretically page_address() could be defined differently,
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64;
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
        return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_rm_tce_validate() must be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;

        idx -= stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        /*
         * page must not be NULL in real mode,
         * kvmppc_rm_ioba_validate() must have taken care of this.
         */
        WARN_ON_ONCE_RM(!page);
        tbl = kvmppc_page_address(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * TCE pages are allocated on demand by the virtual-mode handlers;
 * kvmppc_rm_tce_put() cannot allocate them in real mode.
 * Check that kvmppc_rm_tce_put() can succeed in real mode, i.e. the TCE pages
 * it needs are already allocated or are not required at all (when clearing a
 * TCE entry).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long ioba, unsigned long npages, bool clearing)
{
        unsigned long i, idx, sttpage, sttpages;
        unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

        if (ret)
                return ret;
        /*
         * clearing==true says kvmppc_rm_tce_put() won't be allocating pages
         * for empty tces.
         */
        if (clearing)
                return H_SUCCESS;

        idx = (ioba >> stt->page_shift) - stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
                        TCES_PER_PAGE;
        for (i = sttpage; i < sttpage + sttpages; ++i)
                if (!stt->pages[i])
                        return H_TOO_HARD;

        return H_SUCCESS;
}
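
/*
 * Exchanges the TCE at @entry with the caller-provided HPA and direction
 * without flushing the TCE cache ("no kill"); callers issue the flush
 * themselves via iommu_tce_kill_rm() once all entries have been updated.
 * If the old entry could have been written to by the device, the backing
 * page is marked dirty through the cached userspace address.
 */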
static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long *hpa,
                enum dma_data_direction *direction)
{
        long ret;

        ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                        (*direction == DMA_BIDIRECTIONAL))) {
                __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
                /*
                 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
                 * calling this, so we still get a valid UA here.
                 */
                if (pua && *pua)
                        mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
        }

        return ret;
}

extern void iommu_tce_kill_rm(struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        if (tbl->it_ops->tce_kill)
                tbl->it_ops->tce_kill(tbl, entry, pages, true);
}
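
/*
 * Resets a hardware TCE back to the cleared (DMA_NONE) state. Used as a
 * best-effort cleanup path when a map or unmap partially fails; the return
 * value of the exchange is deliberately ignored.
 */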
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
}
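
/*
 * Drops the reference which kvmppc_rm_tce_iommu_do_map() took on the
 * pre-registered memory region backing @entry and clears the cached
 * userspace address for that entry.
 */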
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}
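
/*
 * Clears one hardware TCE and, if it was mapped, drops the corresponding
 * pre-registered memory reference. Returns H_TOO_HARD whenever the work
 * cannot be completed in real mode so the virtual-mode handler can retry.
 */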
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret)
                iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}
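
/*
 * Maps one guest TCE into one hardware TCE and caches the userspace address
 * so the entry can be torn down later. kvmppc_rm_tce_iommu_map() fans a
 * single guest-visible entry out over the hardware table: for example, with
 * a 64K guest IOMMU page (stt->page_shift == 16) backed by 4K hardware TCEs
 * (it_page_shift == 12), subpages == 16 and one guest entry updates 16
 * consecutive hardware entries.
 */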
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa = 0;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
                        &hpa)))
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
        if (ret) {
                mm_iommu_mapped_dec(mem);
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}
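
/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates the request,
 * updates every hardware table attached to the guest's table, then stores
 * the guest-visible copy of the TCE. Anything that cannot be handled with
 * the MMU off returns H_TOO_HARD so the virtual-mode handler runs instead.
 */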
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*             liobn, ioba, tce); */

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_rm_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);
        if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
                return H_PARAMETER;

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry, ua, dir);

                iommu_tce_kill_rm(stit->tbl, entry, 1);

                if (ret != H_SUCCESS) {
                        kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
                        return ret;
                }
        }

        kvmppc_rm_tce_put(stt, entry, tce);

        return H_SUCCESS;
}
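
/*
 * Translates a userspace address into a host physical address by walking the
 * host page table in real mode. Returns -EAGAIN for anything (huge pages,
 * not-young PTEs) that is better left to the virtual-mode path.
 */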
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
                unsigned long ua, unsigned long *phpa)
{
        pte_t *ptep, pte;
        unsigned shift = 0;

        /*
         * Called in real mode with MSR_EE = 0. We are safe here.
         * It is ok to do the lookup with arch.pgdir here, because
         * we are doing this on secondary cpus and the current task there
         * is not the hypervisor. This is also safe against THP in the
         * host, because an IPI to the primary thread will wait for the
         * secondary to exit, which again results in the below page table
         * walk finishing first.
         */
        /*
         * An rmap lock won't make it safe, because that only ensures hash
         * page table entries are removed with the rmap lock held. After the
         * mmu notifier returns, we go ahead and remove ptes from the QEMU
         * page table.
         */
        ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift);
        if (!ptep)
                return -ENXIO;

        pte = READ_ONCE(*ptep);
        if (!pte_present(pte))
                return -ENXIO;

        if (!shift)
                shift = PAGE_SHIFT;

        /* Avoid handling anything potentially complicated in realmode */
        if (shift > PAGE_SHIFT)
                return -EAGAIN;

        if (!pte_young(pte))
                return -EAGAIN;

        *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
                (ua & ~PAGE_MASK);

        return 0;
}
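
/*
 * Real-mode handler for H_PUT_TCE_INDIRECT: reads a list of TCEs from guest
 * memory and applies each of them as H_PUT_TCE would. PAPR caps the list at
 * 512 entries, and 512 * 8 bytes == 4K, which is why the list must be 4K
 * aligned and is guaranteed to fit in a single page.
 */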
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS;
        unsigned long tces, entry, ua = 0;
        unsigned long mmu_seq;
        bool prereg = false;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        /*
         * Used to check for invalidations in progress.
         */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The spec says that the maximum size of the list is 512 TCEs,
         * so the whole list fits in a single 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
        if (ret != H_SUCCESS)
                return ret;

        if (mm_iommu_preregistered(vcpu->kvm->mm)) {
                /*
                 * We get here if guest memory was pre-registered, which
                 * is normally the VFIO case, and the gpa->hpa translation
                 * does not depend on the HPT.
                 */
                struct mm_iommu_table_group_mem_t *mem;

                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
                        return H_TOO_HARD;

                mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
                if (mem)
                        prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
                                        IOMMU_PAGE_SHIFT_4K, &tces) == 0;
        }

        if (!prereg) {
                /*
                 * This is usually the case of a guest with emulated devices
                 * only, when the TCE list is not in pre-registered memory.
                 * We do not require memory to be pre-registered in this case,
                 * so take the MMU lock and walk the host page table.
                 */
                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
                        return H_TOO_HARD;

                arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
                if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ret = kvmppc_rm_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ua = 0;
                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
                        ret = H_PARAMETER;
                        goto invalidate_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
                                                entry);
                                goto invalidate_exit;
                        }
                }

                kvmppc_rm_tce_put(stt, entry + i, tce);
        }

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
        if (!prereg)
                arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);

        return ret;
}
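
/*
 * Real-mode handler for H_STUFF_TCE: clears @npages consecutive entries to
 * @tce_value, unmapping any hardware TCEs that were backing them. Only a
 * value with no permission bits set is accepted.
 */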
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only, to allow userspace to poison TCEs for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto invalidate_exit;

                        WARN_ON_ONCE_RM(1);
                        kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);

        return ret;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        unsigned long idx;
        struct page *page;
        u64 *tbl;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = (ioba >> stt->page_shift) - stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        if (!page) {
                vcpu->arch.regs.gpr[4] = 0;
                return H_SUCCESS;
        }
        tbl = (u64 *)page_address(page);

        vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */