KVM: Drop largepages_enabled and its accessor/mutator

Drop largepages_enabled, kvm_largepages_enabled() and
kvm_disable_largepages() now that all users are gone.

Note, largepages_enabled was an x86-only flag that got left in common
KVM code when KVM gained support for multiple architectures.

No functional change intended.

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Sean Christopherson 2020-03-02 15:57:05 -08:00 committed by Paolo Bonzini
parent e884b854ee
commit 600087b614
3 changed files with 2 additions and 19 deletions

View File

@@ -9918,11 +9918,9 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
-		 * other, or if explicitly asked to, disable large page
-		 * support for this slot
+		 * other, disable large page support for this slot.
 		 */
-		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
-		    !kvm_largepages_enabled()) {
+		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
 			unsigned long j;

 			for (j = 0; j < lpages; ++j)

View File

@@ -693,8 +693,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *old,
 				const struct kvm_memory_slot *new,
 				enum kvm_mr_change change);
-bool kvm_largepages_enabled(void);
-void kvm_disable_largepages(void);
 /* flush all memory translations */
 void kvm_arch_flush_shadow_all(struct kvm *kvm);
 /* flush memory translations pointing to 'slot' */

View File

@@ -149,8 +149,6 @@ static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
 __visible bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);

-static bool largepages_enabled = true;
-
 #define KVM_EVENT_CREATE_VM 0
 #define KVM_EVENT_DESTROY_VM 1
 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
@@ -1591,17 +1589,6 @@ static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
 }
 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */

-bool kvm_largepages_enabled(void)
-{
-	return largepages_enabled;
-}
-
-void kvm_disable_largepages(void)
-{
-	largepages_enabled = false;
-}
-EXPORT_SYMBOL_GPL(kvm_disable_largepages);
-
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
 	return __gfn_to_memslot(kvm_memslots(kvm), gfn);