Revert "KVM: SVM: use NPT page attributes"
This reverts commit 3c2e7f7de3.
Initializing the mapping from MTRR to PAT values was reported to
fail nondeterministically, and it also caused extremely slow boot
(due to caching getting disabled---bug 103321) with assigned devices.
Reported-by: Markus Trippelsdorf <markus@trippelsdorf.de>
Reported-by: Sebastian Schuette <dracon@ewetel.net>
Cc: stable@vger.kernel.org # 4.2+
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
bcf166a994
commit
fc07e76ac7
|
@ -866,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
|
|||
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
|
||||
}
|
||||
|
||||
#define MTRR_TYPE_UC_MINUS 7
|
||||
#define MTRR2PROTVAL_INVALID 0xff
|
||||
|
||||
static u8 mtrr2protval[8];
|
||||
|
||||
static u8 fallback_mtrr_type(int mtrr)
|
||||
{
|
||||
/*
|
||||
* WT and WP aren't always available in the host PAT. Treat
|
||||
* them as UC and UC- respectively. Everything else should be
|
||||
* there.
|
||||
*/
|
||||
switch (mtrr)
|
||||
{
|
||||
case MTRR_TYPE_WRTHROUGH:
|
||||
return MTRR_TYPE_UNCACHABLE;
|
||||
case MTRR_TYPE_WRPROT:
|
||||
return MTRR_TYPE_UC_MINUS;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static void build_mtrr2protval(void)
|
||||
{
|
||||
int i;
|
||||
u64 pat;
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
mtrr2protval[i] = MTRR2PROTVAL_INVALID;
|
||||
|
||||
/* Ignore the invalid MTRR types. */
|
||||
mtrr2protval[2] = 0;
|
||||
mtrr2protval[3] = 0;
|
||||
|
||||
/*
|
||||
* Use host PAT value to figure out the mapping from guest MTRR
|
||||
* values to nested page table PAT/PCD/PWT values. We do not
|
||||
* want to change the host PAT value every time we enter the
|
||||
* guest.
|
||||
*/
|
||||
rdmsrl(MSR_IA32_CR_PAT, pat);
|
||||
for (i = 0; i < 8; i++) {
|
||||
u8 mtrr = pat >> (8 * i);
|
||||
|
||||
if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
|
||||
mtrr2protval[mtrr] = __cm_idx2pte(i);
|
||||
}
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
|
||||
u8 fallback = fallback_mtrr_type(i);
|
||||
mtrr2protval[i] = mtrr2protval[fallback];
|
||||
BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static __init int svm_hardware_setup(void)
|
||||
{
|
||||
int cpu;
|
||||
|
@ -990,7 +932,6 @@ static __init int svm_hardware_setup(void)
|
|||
} else
|
||||
kvm_disable_tdp();
|
||||
|
||||
build_mtrr2protval();
|
||||
return 0;
|
||||
|
||||
err:
|
||||
|
@ -1145,42 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
|
|||
return target_tsc - tsc;
|
||||
}
|
||||
|
||||
/*
 * Choose the guest PAT value to install in the VMCB.  Unlike Intel,
 * AMD takes the guest's CR0.CD into account and has no IPAT bit, so WB
 * cannot be forced for RAM pages only.  Without assigned devices every
 * entry is simply set to WB; with assigned devices the guest's own PAT
 * must be used directly.
 */
static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	*g_pat = kvm_arch_has_assigned_device(vcpu->kvm) ?
		 vcpu->arch.pat : 0x0606060606060606;
}
|
||||
|
||||
/*
 * Compute the memory-type bits for a nested page table entry:
 * MMIO is always uncacheable, RAM is forced to WB when no device is
 * assigned, and with passthrough the guest's MTRR-derived type is
 * honored because the result cannot be guaranteed otherwise.
 */
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	u8 guest_type;

	if (is_mmio)
		return _PAGE_NOCACHE;

	if (!kvm_arch_has_assigned_device(vcpu->kvm))
		return 0;

	guest_type = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
	return mtrr2protval[guest_type];
}
|
||||
|
||||
static void init_vmcb(struct vcpu_svm *svm, bool init_event)
|
||||
{
|
||||
struct vmcb_control_area *control = &svm->vmcb->control;
|
||||
|
@ -1277,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
|
|||
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
|
||||
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
|
||||
save->g_pat = svm->vcpu.arch.pat;
|
||||
svm_set_guest_pat(svm, &save->g_pat);
|
||||
save->cr3 = 0;
|
||||
save->cr4 = 0;
|
||||
}
|
||||
|
@ -4187,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void)
|
|||
return true;
|
||||
}
|
||||
|
||||
/*
 * SVM derives no per-page memory-type bits from the guest memory type;
 * always return an empty mask.
 */
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}
|
||||
|
||||
/* No SVM-specific state needs refreshing when the guest CPUID changes. */
static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}
|
||||
|
|
Loading…
Reference in New Issue