KVM: x86: Fold decache_cr3() into cache_reg()

Handle caching CR3 (from VMX's VMCS) into struct kvm_vcpu via the common
cache_reg() callback and drop the dedicated decache_cr3().  The name
decache_cr3() is somewhat confusing as the caching behavior of CR3
follows that of GPRs, RFLAGS and PDPTRs (handled via cache_reg()), and
has nothing in common with the caching behavior of CR0/CR4 (whose
decache_cr{0,4}_guest_bits() likely provided the 'decache' verbiage).
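
For context, the lazy caching referenced above works as follows: a read
accessor first checks a per-vCPU "register available" bitmap, invokes the
vendor cache_reg() callback on a miss to pull the value out of hardware
state (the VMCS on VMX), and then returns the cached copy from struct
kvm_vcpu.  The snippet below is a minimal, self-contained userspace sketch
of that pattern, not kernel code; names such as vcpu_read_cr3(),
fill_cr3_from_hw() and reg_is_available() are illustrative stand-ins.

/*
 * Simplified sketch (not kernel code) of the lazy register-caching pattern
 * that cache_reg() implements: check the "available" bitmap, ask the vendor
 * callback to fill the cache on a miss, then return the cached value.
 */
#include <stdbool.h>
#include <stdio.h>

enum reg { REG_CR3, NR_REGS };

struct vcpu {
	unsigned long cr3;		/* cached value */
	unsigned long regs_avail;	/* bitmap of valid cache entries */
	void (*cache_reg)(struct vcpu *vcpu, enum reg reg); /* vendor hook */
};

static bool reg_is_available(struct vcpu *vcpu, enum reg reg)
{
	return vcpu->regs_avail & (1ul << reg);
}

static void mark_reg_available(struct vcpu *vcpu, enum reg reg)
{
	vcpu->regs_avail |= 1ul << reg;
}

/* Stand-in for reading GUEST_CR3 out of the VMCS. */
static void fill_cr3_from_hw(struct vcpu *vcpu, enum reg reg)
{
	if (reg == REG_CR3)
		vcpu->cr3 = 0x1000;	/* pretend hardware value */
	mark_reg_available(vcpu, reg);
}

/* Mirrors the shape of kvm_read_cr3(): fill the cache only on a miss. */
static unsigned long vcpu_read_cr3(struct vcpu *vcpu)
{
	if (!reg_is_available(vcpu, REG_CR3))
		vcpu->cache_reg(vcpu, REG_CR3);
	return vcpu->cr3;
}

int main(void)
{
	struct vcpu vcpu = { .cache_reg = fill_cr3_from_hw };

	printf("first read:  %#lx\n", vcpu_read_cr3(&vcpu)); /* cache miss */
	printf("second read: %#lx\n", vcpu_read_cr3(&vcpu)); /* cache hit */
	return 0;
}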

This would effectively add a BUG() if KVM attempts to cache CR3 on SVM.
Change it to a WARN_ON_ONCE() -- if the cache never requires filling,
the value is already in the right place -- and opportunistically add one
in VMX to provide an equivalent check.
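
The distinction matters because BUG() triggers an oops that kills the
offending task (or panics the kernel), whereas WARN_ON_ONCE() logs a single
warning and lets execution continue, which is the right behavior when the
unhandled register is benign, i.e. its value already lives in struct
kvm_vcpu.  Below is a rough userspace approximation of that warn-once
behavior, assuming a GNU C compiler; WARN_ON_ONCE_SKETCH() and
svm_like_cache_reg() are made-up stand-ins, not the kernel implementations.

/*
 * Userspace approximation (not the kernel's WARN_ON_ONCE()) of why the
 * unhandled-register case can be a one-shot warning instead of BUG(): the
 * miss is harmless, so it is only worth flagging once per call site.
 * Uses a GNU C statement expression, as kernel-style macros commonly do.
 */
#include <stdio.h>

#define WARN_ON_ONCE_SKETCH(cond) ({					\
	static int once_warned;						\
	int cond_val = !!(cond);					\
	if (cond_val && !once_warned) {					\
		once_warned = 1;					\
		fprintf(stderr, "WARNING (once): %s:%d: %s\n",		\
			__FILE__, __LINE__, #cond);			\
	}								\
	cond_val;							\
})

enum reg { REG_PDPTR, REG_CR3, NR_REGS };

static void svm_like_cache_reg(enum reg reg)
{
	switch (reg) {
	case REG_PDPTR:
		/* ... reload PDPTRs ... */
		break;
	default:
		/* Nothing to refill; warn once rather than crashing. */
		WARN_ON_ONCE_SKETCH(1);
	}
}

int main(void)
{
	svm_like_cache_reg(REG_CR3);	/* prints one warning */
	svm_like_cache_reg(REG_CR3);	/* silent: already warned */
	return 0;
}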

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:    Sean Christopherson, 2019-09-27 14:45:23 -07:00
Committer: Paolo Bonzini
Parent:    cb3c1e2f3e
Commit:    34059c2570
4 changed files with 8 additions and 17 deletions

arch/x86/include/asm/kvm_host.h

@@ -1033,7 +1033,6 @@ struct kvm_x86_ops {
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
-	void (*decache_cr3)(struct kvm_vcpu *vcpu);
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

arch/x86/kvm/kvm_cache_regs.h

@@ -137,7 +137,7 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-		kvm_x86_ops->decache_cr3(vcpu);
+		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_CR3);
 	return vcpu->arch.cr3;
 }
 

arch/x86/kvm/svm.c

@@ -2376,7 +2376,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		break;
 	default:
-		BUG();
+		WARN_ON_ONCE(1);
 	}
 }
 
@@ -2529,10 +2529,6 @@ static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
-static void svm_decache_cr3(struct kvm_vcpu *vcpu)
-{
-}
-
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
@@ -7269,7 +7265,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
-	.decache_cr3 = svm_decache_cr3,
 	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr3 = svm_set_cr3,

arch/x86/kvm/vmx/vmx.c

@@ -2192,7 +2192,12 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 		if (enable_ept)
 			ept_save_pdptrs(vcpu);
 		break;
+	case VCPU_EXREG_CR3:
+		if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
+			vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+		break;
 	default:
+		WARN_ON_ONCE(1);
 		break;
 	}
 }
@@ -2863,13 +2868,6 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
 }
 
-static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
-{
-	if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
-		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
-}
-
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
@@ -2914,7 +2912,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-		vmx_decache_cr3(vcpu);
+		vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
@@ -7784,7 +7782,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
-	.decache_cr3 = vmx_decache_cr3,
 	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,