kvm: mmu: don't set the present bit unconditionally
To support execute only mappings on behalf of L1 hypervisors, we need to
teach set_spte() to honor all three of L1's XWR bits. As a start, add a
new variable "shadow_present_mask" that will be set for non-EPT shadow
paging and clear for EPT.

Signed-off-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit ffb128c89b
parent 812f30b234
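The distinction the new mask captures: legacy shadow paging has a single architectural present bit (bit 0), while EPT has no such bit, so presence must be expressed through whatever permission mask the paging mode installs. Below is a minimal, self-contained sketch of that indirection; the constants mirror the masks used in the diff, but set_mask_ptes() and build_spte() are simplified illustrative stand-ins, not the kernel's actual helpers:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative bit values mirroring the x86 and EPT formats. */
	#define PT_PRESENT_MASK          (1ull << 0) /* legacy paging: present */
	#define VMX_EPT_READABLE_MASK    (1ull << 0) /* EPT: readable */
	#define VMX_EPT_WRITABLE_MASK    (1ull << 1)
	#define VMX_EPT_EXECUTABLE_MASK  (1ull << 2)

	static uint64_t shadow_present_mask;

	/* Mirrors the role of kvm_mmu_set_mask_ptes(): each paging flavor
	 * installs its own notion of "present" instead of the common MMU
	 * code hard-coding PT_PRESENT_MASK. */
	static void set_mask_ptes(uint64_t p_mask)
	{
		shadow_present_mask = p_mask;
	}

	/* Simplified stand-in for the set_spte() change: start from 0 and
	 * OR in the configured present mask rather than assigning
	 * PT_PRESENT_MASK unconditionally. */
	static uint64_t build_spte(uint64_t pa)
	{
		uint64_t spte = 0;

		spte |= shadow_present_mask;
		return spte | pa;
	}

	int main(void)
	{
		set_mask_ptes(PT_PRESENT_MASK);       /* non-EPT shadow paging */
		printf("shadow: %#llx\n", (unsigned long long)build_spte(0x1000));

		set_mask_ptes(VMX_EPT_READABLE_MASK); /* EPT, as in hardware_setup() */
		printf("ept:    %#llx\n", (unsigned long long)build_spte(0x1000));
		return 0;
	}

With this indirection in place, a later change can hand the EPT side a different encoding (the commit message's stated goal of execute-only mappings) without touching the common MMU code.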
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1031,7 +1031,7 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask);
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask);
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -175,6 +175,7 @@ static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
 static u64 __read_mostly shadow_mmio_mask;
+static u64 __read_mostly shadow_present_mask;
 
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static void mmu_free_roots(struct kvm_vcpu *vcpu);
@@ -282,13 +283,14 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 }
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask)
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
+	shadow_present_mask = p_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -2245,10 +2247,9 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
 {
 	u64 spte;
 
-	BUILD_BUG_ON(VMX_EPT_READABLE_MASK != PT_PRESENT_MASK ||
-		     VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
+	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
 
-	spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
+	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
 	       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
 
 	mmu_spte_set(sptep, spte);
@@ -2515,13 +2516,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
-	u64 spte;
+	u64 spte = 0;
 	int ret = 0;
 
 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
 		return 0;
 
-	spte = PT_PRESENT_MASK;
+	spte |= shadow_present_mask;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 
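Note the pairing in the set_spte() hunk above: the assignment spte = PT_PRESENT_MASK becomes an OR with shadow_present_mask, and the declaration gains an explicit "= 0" initializer because |= now reads spte before the first store. The OR form is what later allows an EPT mapping to omit bit 0 entirely when the configured present mask says so.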
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6473,6 +6473,7 @@ static __init int hardware_setup(void)
 			(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
 			(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
-			0ull, VMX_EPT_EXECUTABLE_MASK);
+			0ull, VMX_EPT_EXECUTABLE_MASK, VMX_EPT_READABLE_MASK);
 		ept_set_mmio_spte_mask();
 		kvm_enable_tdp();
 	} else
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5878,8 +5878,8 @@ int kvm_arch_init(void *opaque)
 	kvm_x86_ops = ops;
 
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-			PT_DIRTY_MASK, PT64_NX_MASK, 0);
+			PT_DIRTY_MASK, PT64_NX_MASK, 0,
+			PT_PRESENT_MASK);
 	kvm_timer_init();
 
 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
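Taken together, the two call sites define what "present" means per mode: kvm_arch_init() passes PT_PRESENT_MASK for classic shadow paging, while hardware_setup() passes VMX_EPT_READABLE_MASK when EPT is enabled, so common MMU code such as link_shadow_page() and set_spte() no longer hard-codes the architectural present bit.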