Merge tag 'kvm-ppc-next-4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

Second PPC KVM update for 4.20.

Two commits; one is an optimization for PCI pass-through, and the other
disables nested HV-KVM on early POWER9 chips that need a particular
hardware bug workaround.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQEcBAABCAAGBQJbywJ7AAoJEJ2a6ncsY3GfZ4QH/j7rKij/OV73LATQyS0zPe66
OPl8F93n3IoPqHouTW8c9isag5OyF14ne7IlWj54zP3r67OU2K13/Fi6ITHmokQV
vJ2xIOqClQtV22tpjBoJK+b0r6lwHm8JPtbmnnsHfwCtX28ZIzhZn7Dt2/KD/+c1
GemX8D1dcewHCjwWZqcFLhHAjB4pbGHOKGAlQPK9H04LFsgypQNR+vy/n++yB3tP
HsraRrmqYS+lO+7DVzbNHg13/pml6+bgDkQ6Vs7j2DF8HzkpgGUpCOUxmquG8ODU
Pw2O4OxYMy3Uq+pwHZnoJInfSstu63SGHgnLBqp001PKPiyMvAMugdLtxs+GjtY=
=vQjp
-----END PGP SIGNATURE-----
commit 574c0cfbc7
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -126,7 +126,7 @@ struct iommu_table {
         int it_nid;
 };
 
-#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \
+#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
                 ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
 #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
                 ((tbl)->it_ops->useraddrptr((tbl), (entry), true))
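The only difference between the renamed macro and the pre-existing IOMMU_TABLE_USERSPACE_ENTRY is the bool handed to the table's useraddrptr() hook: true may allocate a missing indirect level of the userspace-address cache, false only reads and returns NULL when that level was never allocated. Below is a minimal stand-alone sketch of that contract, not kernel code; struct ua_cache, LEVEL_SIZE, NUM_BLOCKS and the two-level layout are illustrative assumptions.

/*
 * Sketch of useraddrptr(tbl, entry, alloc) semantics: with alloc=true
 * the lookup may populate a missing second level; with alloc=false
 * (the new _RO macro) it only reads and reports "not there" as NULL.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define LEVEL_SIZE 512                  /* entries per second-level block (assumed) */
#define NUM_BLOCKS 16                   /* first-level fan-out (assumed) */

struct ua_cache {
        uint64_t *blocks[NUM_BLOCKS];   /* lazily allocated levels */
};

static uint64_t *useraddrptr(struct ua_cache *c, long entry, int alloc)
{
        long blk = entry / LEVEL_SIZE;

        if (!c->blocks[blk]) {
                if (!alloc)
                        return NULL;    /* read-only lookup: no allocation */
                c->blocks[blk] = calloc(LEVEL_SIZE, sizeof(uint64_t));
                if (!c->blocks[blk])
                        return NULL;
        }
        return &c->blocks[blk][entry % LEVEL_SIZE];
}

int main(void)
{
        struct ua_cache c = { { 0 } };

        /* Read-only lookup before anything is mapped: NULL. */
        printf("RO before map: %p\n", (void *)useraddrptr(&c, 1000, 0));

        /* Allocating lookup creates the level and returns a slot. */
        *useraddrptr(&c, 1000, 1) = 0xdeadbeef;

        /* Now the read-only lookup succeeds. */
        printf("RO after map: 0x%llx\n",
               (unsigned long long)*useraddrptr(&c, 1000, 0));
        return 0;
}

The old _RM (real-mode) macro already passed false, since real-mode code cannot allocate memory; the rename to _RO reflects that the same read-only behaviour is now wanted at the virtual-mode call sites below as well.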
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -410,11 +410,10 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
 {
         struct mm_iommu_table_group_mem_t *mem = NULL;
         const unsigned long pgsize = 1ULL << tbl->it_page_shift;
-        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
         if (!pua)
-                /* it_userspace allocation might be delayed */
-                return H_TOO_HARD;
+                return H_SUCCESS;
 
         mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
         if (!mem)
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -214,7 +214,7 @@ static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
 
         if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                         (*direction == DMA_BIDIRECTIONAL))) {
-                __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+                __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
                 /*
                  * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
                  * calling this so we still get here a valid UA.
@@ -240,7 +240,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
 {
         struct mm_iommu_table_group_mem_t *mem = NULL;
         const unsigned long pgsize = 1ULL << tbl->it_page_shift;
-        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
         if (!pua)
                 /* it_userspace allocation might be delayed */
@@ -304,7 +304,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 {
         long ret;
         unsigned long hpa = 0;
-        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
         struct mm_iommu_table_group_mem_t *mem;
 
         if (!pua)
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4174,7 +4174,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
         vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
         do {
-                if (kvm->arch.threads_indep && kvm_is_radix(kvm))
+                /*
+                 * The early POWER9 chips that can't mix radix and HPT threads
+                 * on the same core also need the workaround for the problem
+                 * where the TLB would prefetch entries in the guest exit path
+                 * for radix guests using the guest PIDR value and LPID 0.
+                 * The workaround is in the old path (kvmppc_run_vcpu())
+                 * but not the new path (kvmhv_run_single_vcpu()).
+                 */
+                if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
+                    !no_mixing_hpt_and_radix)
                         r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
                                                   vcpu->arch.vcore->lpcr);
                 else
@@ -5196,7 +5205,7 @@ static int kvmhv_enable_nested(struct kvm *kvm)
 {
         if (!nested)
                 return -EPERM;
-        if (!cpu_has_feature(CPU_FTR_ARCH_300))
+        if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
                 return -ENODEV;
 
         /* kvm == NULL means the caller is testing if the capability exists */
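kvmhv_enable_nested() is what backs the KVM_CAP_PPC_NESTED_HV capability, and the "kvm == NULL" context line above corresponds to the KVM_CHECK_EXTENSION probe path, which also lands here. A minimal userspace sketch of that probe/enable sequence follows, assuming a 4.20-era linux/kvm.h that defines KVM_CAP_PPC_NESTED_HV; error handling for open() and KVM_CREATE_VM is omitted for brevity. On the affected POWER9 chips, both the probe and the enable now fail.

/*
 * Probe and enable nested HV via the standard KVM ioctl interface.
 * After this change, KVM_ENABLE_CAP fails with ENODEV on chips that
 * need the TLB-prefetch workaround.
 */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_NESTED_HV };

        /* Probe: this path calls kvmhv_enable_nested(NULL) in the kernel. */
        if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_NESTED_HV) <= 0) {
                puts("nested HV not supported");
                return 1;
        }

        /* Enable for this VM. */
        if (ioctl(vm, KVM_ENABLE_CAP, &cap) < 0)
                perror("KVM_ENABLE_CAP");       /* ENODEV on affected chips */
        else
                puts("nested HV enabled");
        return 0;
}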
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -444,7 +444,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
         struct mm_iommu_table_group_mem_t *mem = NULL;
         int ret;
         unsigned long hpa = 0;
-        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
         if (!pua)
                 return;
@@ -467,8 +467,27 @@ static int tce_iommu_clear(struct tce_container *container,
         unsigned long oldhpa;
         long ret;
         enum dma_data_direction direction;
+        unsigned long lastentry = entry + pages;
+
+        for ( ; entry < lastentry; ++entry) {
+                if (tbl->it_indirect_levels && tbl->it_userspace) {
+                        /*
+                         * For multilevel tables, we can take a shortcut here
+                         * and skip some TCEs as we know that the userspace
+                         * addresses cache is a mirror of the real TCE table
+                         * and if it is missing some indirect levels, then
+                         * the hardware table does not have them allocated
+                         * either and therefore does not require updating.
+                         */
+                        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
+                                        entry);
+                        if (!pua) {
+                                /* align to level_size which is power of two */
+                                entry |= tbl->it_level_size - 1;
+                                continue;
+                        }
+                }
 
-        for ( ; pages; --pages, ++entry) {
                 cond_resched();
 
                 direction = DMA_NONE;
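The "align to level_size" skip is the heart of the pass-through optimization: OR-ing in it_level_size - 1 moves entry to the last index covered by the current, never-allocated level, and the loop's ++entry then resumes at the first index of the next level, so an entirely unallocated level costs one cache lookup instead of one per TCE. A stand-alone illustration of the arithmetic, with 512 as an assumed (power-of-two) level size:

/*
 * Demonstration of the "entry |= it_level_size - 1" skip used in
 * tce_iommu_clear(): jump to the end of the current level, then let
 * the loop's increment land on the start of the next one.
 */
#include <stdio.h>

int main(void)
{
        unsigned long level_size = 512; /* must be a power of two */
        unsigned long entry = 1000;

        entry |= level_size - 1;        /* 1000 -> 1023, last slot of its level */
        ++entry;                        /* 1024, first slot of the next level */
        printf("resumed at entry %lu\n", entry);
        return 0;
}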