KVM: x86: hyperv: optimize 'all cpus' case in kvm_hv_flush_tlb()
We can use 'NULL' to represent the 'all cpus' case in kvm_make_vcpus_request_mask() and avoid building a vCPU mask with all vCPUs set.

Suggested-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit a812297c4f (parent 9170200ec0)
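Before the diff itself, here is a minimal, self-contained sketch (not kernel code) of the convention the commit message describes: a NULL vcpu_bitmap passed to the "make a request on these vCPUs" helper now means "every vCPU", so the all-CPUs caller no longer has to build an all-ones bitmap. The helper names make_vcpus_request_mask() and make_request(), and the 8-vCPU limit, are placeholders invented for this example.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NVCPUS 8	/* illustrative only; the kernel uses KVM_MAX_VCPUS */

/* Stand-in for kvm_make_request() on a single vCPU. */
static void make_request(int vcpu, unsigned int req)
{
	printf("request %u -> vcpu %d\n", req, vcpu);
}

/*
 * Stand-in for kvm_make_vcpus_request_mask(): a NULL bitmap means
 * "all vCPUs", otherwise only the vCPUs set in the bitmap are targeted.
 */
static bool make_vcpus_request_mask(unsigned int req,
				    const unsigned long *vcpu_bitmap)
{
	bool called = false;
	int i;

	for (i = 0; i < NVCPUS; i++) {
		if (vcpu_bitmap && !(vcpu_bitmap[0] & (1UL << i)))
			continue;	/* vCPU not in the requested set */
		make_request(i, req);
		called = true;
	}
	return called;
}

int main(void)
{
	unsigned long sparse = (1UL << 1) | (1UL << 3);

	make_vcpus_request_mask(1, &sparse);	/* only vCPUs 1 and 3 */
	make_vcpus_request_mask(1, NULL);	/* "all cpus" fast path */
	return 0;
}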
arch/x86/kvm/hyperv.c

@@ -1325,35 +1325,39 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 
 	cpumask_clear(&hv_current->tlb_lush);
 
+	if (all_cpus) {
+		kvm_make_vcpus_request_mask(kvm,
+				    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
+				    NULL, &hv_current->tlb_lush);
+		goto ret_success;
+	}
+
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
 		int bank = hv->vp_index / 64, sbank = 0;
 
-		if (!all_cpus) {
-			/* Banks >64 can't be represented */
-			if (bank >= 64)
-				continue;
+		/* Banks >64 can't be represented */
+		if (bank >= 64)
+			continue;
 
-			/* Non-ex hypercalls can only address first 64 vCPUs */
-			if (!ex && bank)
-				continue;
+		/* Non-ex hypercalls can only address first 64 vCPUs */
+		if (!ex && bank)
+			continue;
 
-			if (ex) {
-				/*
-				 * Check is the bank of this vCPU is in sparse
-				 * set and get the sparse bank number.
-				 */
-				sbank = get_sparse_bank_no(valid_bank_mask,
-							   bank);
+		if (ex) {
+			/*
+			 * Check is the bank of this vCPU is in sparse
+			 * set and get the sparse bank number.
+			 */
+			sbank = get_sparse_bank_no(valid_bank_mask, bank);
 
-				if (sbank < 0)
-					continue;
-			}
+			if (sbank < 0)
+				continue;
+		}
 
-			if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
-				continue;
-		}
+		if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
+			continue;
 
 		/*
 		 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we
 		 * can't analyze it here, flush TLB regardless of the specified
virt/kvm/kvm_main.c

@@ -219,7 +219,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 	me = get_cpu();
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (!test_bit(i, vcpu_bitmap))
+		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
 			continue;
 
 		kvm_make_request(req, vcpu);
@@ -243,12 +243,10 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 {
 	cpumask_var_t cpus;
 	bool called;
-	static unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]
-		= {[0 ... BITS_TO_LONGS(KVM_MAX_VCPUS)-1] = ULONG_MAX};
 
 	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-	called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap, cpus);
+	called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);
 
 	free_cpumask_var(cpus);
 	return called;
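As a side note, the static bitmap removed above relied on the GNU C range-designator initializer to pre-fill every word with ULONG_MAX. The fragment below is a stand-alone illustration of that idiom only; the BITS_PER_LONG and BITS_TO_LONGS definitions and the KVM_MAX_VCPUS value are re-derived here for the example rather than taken from kernel headers.

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	 (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define KVM_MAX_VCPUS	 288	/* illustrative value only */

/*
 * GNU C range designator: every element from index 0 through the last is
 * initialized to ULONG_MAX, i.e. an all-ones "every vCPU" bitmap.  Passing
 * NULL to the request helper now expresses the same intent without keeping
 * this static array around.
 */
static unsigned long all_vcpus[BITS_TO_LONGS(KVM_MAX_VCPUS)] =
	{ [0 ... BITS_TO_LONGS(KVM_MAX_VCPUS) - 1] = ULONG_MAX };

int main(void)
{
	printf("words: %zu, first word: %#lx\n",
	       BITS_TO_LONGS(KVM_MAX_VCPUS), all_vcpus[0]);
	return 0;
}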