KVM: x86/mmu: Add sptep_to_sp() helper to wrap shadow page lookup
Introduce sptep_to_sp() to reduce the boilerplate code needed to get the
shadow page associated with a spte pointer, and to improve readability
as it's not immediately obvious that "page_header" is a KVM-specific
accessor for retrieving a shadow page.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200622202034.15093-6-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 985ab27801
commit 573546820b
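In short, the patch adds one trivial inline helper next to page_header() and mechanically converts every open-coded lookup. The excerpt below restates the pattern from the hunks that follow (the helper definition and a typical call-site conversion, both taken verbatim from this diff):

	static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
	{
		return page_header(__pa(sptep));
	}

so each call site shrinks from "sp = page_header(__pa(spte));" to "sp = sptep_to_sp(spte);".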
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -677,7 +677,7 @@ union split_spte {
 
 static void count_spte_clear(u64 *sptep, u64 spte)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 
 	if (is_shadow_present_pte(spte))
 		return;
@@ -761,7 +761,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
  */
 static u64 __get_spte_lockless(u64 *sptep)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 	union split_spte spte, *orig = (union split_spte *)sptep;
 	int count;
 
@@ -1427,7 +1427,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_head *rmap_head;
 
-	sp = page_header(__pa(spte));
+	sp = sptep_to_sp(spte);
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 	return pte_list_add(vcpu, spte, rmap_head);
@@ -1439,7 +1439,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	gfn_t gfn;
 	struct kvm_rmap_head *rmap_head;
 
-	sp = page_header(__pa(spte));
+	sp = sptep_to_sp(spte);
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
 	rmap_head = gfn_to_rmap(kvm, gfn, sp);
 	__pte_list_remove(spte, rmap_head);
@@ -1531,7 +1531,7 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
 {
 	if (is_large_pte(*sptep)) {
-		WARN_ON(page_header(__pa(sptep))->role.level == PG_LEVEL_4K);
+		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
 		drop_spte(kvm, sptep);
 		--kvm->stat.lpages;
 		return true;
@@ -1543,7 +1543,7 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
 {
 	if (__drop_large_spte(vcpu->kvm, sptep)) {
-		struct kvm_mmu_page *sp = page_header(__pa(sptep));
+		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 
 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
 			KVM_PAGES_PER_HPAGE(sp->role.level));
@@ -2002,7 +2002,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	struct kvm_rmap_head *rmap_head;
 	struct kvm_mmu_page *sp;
 
-	sp = page_header(__pa(spte));
+	sp = sptep_to_sp(spte);
 
 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
@@ -2124,7 +2124,7 @@ static void mark_unsync(u64 *spte)
 	struct kvm_mmu_page *sp;
 	unsigned int index;
 
-	sp = page_header(__pa(spte));
+	sp = sptep_to_sp(spte);
 	index = spte - sp->spt;
 	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
 		return;
@@ -2449,9 +2449,7 @@ static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
 
 static void clear_sp_write_flooding_count(u64 *spte)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(spte));
-
-	__clear_sp_write_flooding_count(sp);
+	__clear_sp_write_flooding_count(sptep_to_sp(spte));
 }
 
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
@@ -3026,7 +3024,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
 		return 0;
 
-	sp = page_header(__pa(sptep));
+	sp = sptep_to_sp(sptep);
 	if (sp_ad_disabled(sp))
 		spte |= SPTE_AD_DISABLED_MASK;
 	else if (kvm_vcpu_ad_need_write_protect(vcpu))
@@ -3239,7 +3237,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 {
 	struct kvm_mmu_page *sp;
 
-	sp = page_header(__pa(sptep));
+	sp = sptep_to_sp(sptep);
 
 	/*
 	 * Without accessed bits, there's no way to distinguish between
@@ -3547,7 +3545,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		if (!is_shadow_present_pte(spte))
 			break;
 
-		sp = page_header(__pa(iterator.sptep));
+		sp = sptep_to_sp(iterator.sptep);
 		if (!is_last_spte(spte, sp->role.level))
 			break;
 
@@ -5926,7 +5924,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 
 restart:
 	for_each_rmap_spte(rmap_head, &iter, sptep) {
-		sp = page_header(__pa(sptep));
+		sp = sptep_to_sp(sptep);
 		pfn = spte_to_pfn(*sptep);
 
 		/*
diff --git a/arch/x86/kvm/mmu/mmu_audit.c b/arch/x86/kvm/mmu/mmu_audit.c
--- a/arch/x86/kvm/mmu/mmu_audit.c
+++ b/arch/x86/kvm/mmu/mmu_audit.c
@@ -97,7 +97,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 	kvm_pfn_t pfn;
 	hpa_t hpa;
 
-	sp = page_header(__pa(sptep));
+	sp = sptep_to_sp(sptep);
 
 	if (sp->unsync) {
 		if (level != PG_LEVEL_4K) {
@@ -132,7 +132,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 	struct kvm_memory_slot *slot;
 	gfn_t gfn;
 
-	rev_sp = page_header(__pa(sptep));
+	rev_sp = sptep_to_sp(sptep);
 	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
 
 	slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
@@ -165,7 +165,7 @@ static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 
 static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 
 	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
 		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -50,6 +50,11 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 	return (struct kvm_mmu_page *)page_private(page);
 }
 
+static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
+{
+	return page_header(__pa(sptep));
+}
+
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -596,7 +596,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 	u64 *spte;
 	int i;
 
-	sp = page_header(__pa(sptep));
+	sp = sptep_to_sp(sptep);
 
 	if (sp->role.level > PG_LEVEL_4K)
 		return;
@@ -916,7 +916,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 		level = iterator.level;
 		sptep = iterator.sptep;
 
-		sp = page_header(__pa(sptep));
+		sp = sptep_to_sp(sptep);
 		if (is_last_spte(*sptep, level)) {
 			pt_element_t gpte;
 			gpa_t pte_gpa;