KVM: Improve TSC offset matching
There are a few improvements that can be made to the TSC offset matching code. First, we don't need to call the 128-bit multiply (especially on a constant number); the code is much cleaner when the computation is done in nanosecond units.

Second, the way everything is set up with software TSC rate scaling, we currently have per-CPU rates. Obviously this isn't desirable to use in practice, but if for some reason we do change the rate of all VCPUs at runtime and then reset the TSCs, we will only want to match offsets for VCPUs running at the same rate.

Finally, for the case where we have an unstable host TSC but rate scaling is being done in hardware, we should call the platform code to compute the TSC offset. The math is therefore reorganized to recompute the base instead, and then transform the base into an offset using the existing API.

[avi: fix 64-bit division on i386]

Signed-off-by: Zachary Amsden <zamsden@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

KVM: Fix 64-bit division in kvm_write_tsc()

Breaks i386 build.

Signed-off-by: Avi Kivity <avi@redhat.com>
parent cc578287e3
commit 5d3cb0f6a8
arch/x86/include/asm/kvm_host.h
@@ -513,6 +513,7 @@ struct kvm_arch {
         u64 last_tsc_nsec;
         u64 last_tsc_offset;
         u64 last_tsc_write;
+        u32 last_tsc_khz;
 
         struct kvm_xen_hvm_config xen_hvm_config;
 
arch/x86/kvm/x86.c
@@ -1025,33 +1025,46 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
         struct kvm *kvm = vcpu->kvm;
         u64 offset, ns, elapsed;
         unsigned long flags;
-        s64 sdiff;
+        s64 nsdiff;
 
         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
         offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
         ns = get_kernel_ns();
         elapsed = ns - kvm->arch.last_tsc_nsec;
-        sdiff = data - kvm->arch.last_tsc_write;
-        if (sdiff < 0)
-                sdiff = -sdiff;
+
+        /* n.b - signed multiplication and division required */
+        nsdiff = data - kvm->arch.last_tsc_write;
+#ifdef CONFIG_X86_64
+        nsdiff = (nsdiff * 1000) / vcpu->arch.virtual_tsc_khz;
+#else
+        /* do_div() only does unsigned */
+        asm("idivl %2; xor %%edx, %%edx"
+            : "=A"(nsdiff)
+            : "A"(nsdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+#endif
+        nsdiff -= elapsed;
+        if (nsdiff < 0)
+                nsdiff = -nsdiff;
 
         /*
-         * Special case: close write to TSC within 5 seconds of
-         * another CPU is interpreted as an attempt to synchronize
-         * The 5 seconds is to accommodate host load / swapping as
-         * well as any reset of TSC during the boot process.
-         *
-         * In that case, for a reliable TSC, we can match TSC offsets,
-         * or make a best guest using elapsed value.
+         * Special case: TSC write with a small delta (1 second) of virtual
+         * cycle time against real time is interpreted as an attempt to
+         * synchronize the CPU.
+         *
+         * For a reliable TSC, we can match TSC offsets, and for an unstable
+         * TSC, we add elapsed time in this computation. We could let the
+         * compensation code attempt to catch up if we fall behind, but
+         * it's better to try to match offsets from the beginning.
          */
-        if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
-            elapsed < 5ULL * NSEC_PER_SEC) {
+        if (nsdiff < NSEC_PER_SEC &&
+            vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
                 if (!check_tsc_unstable()) {
                         offset = kvm->arch.last_tsc_offset;
                         pr_debug("kvm: matched tsc offset for %llu\n", data);
                 } else {
                         u64 delta = nsec_to_cycles(vcpu, elapsed);
-                        offset += delta;
+                        data += delta;
+                        offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
                         pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                 }
                 ns = kvm->arch.last_tsc_nsec;
@@ -1059,6 +1072,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
         kvm->arch.last_tsc_nsec = ns;
         kvm->arch.last_tsc_write = data;
         kvm->arch.last_tsc_offset = offset;
+        kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
         kvm_x86_ops->write_tsc_offset(vcpu, offset);
         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
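
To make the heuristic described in the commit message concrete, here is a small standalone C sketch of the idea: convert the delta between the newly written TSC value and the previous write from cycles into nanoseconds at the VCPU's virtual rate, compare it against the real time that has elapsed, and reuse the previous offset only when the difference is small and the virtual TSC rate is unchanged. Everything here (tsc_write_matches, the parameter names, the sample values in main) is invented for illustration and is not kernel code, and the scaling constant is simplified; the patch above, not this sketch, is the authoritative arithmetic.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

/*
 * Illustrative sketch only: convert the delta between the newly written
 * TSC value and the previously written one from cycles to nanoseconds at
 * the VCPU's virtual rate, subtract the real time elapsed since the
 * previous write, and treat the write as a synchronization attempt only
 * if the result is within one second and the virtual TSC rate has not
 * changed. Locking, the 32-bit division path, and overflow handling for
 * very large deltas are omitted here.
 */
static int tsc_write_matches(uint64_t data, uint64_t last_tsc_write,
                             int64_t elapsed_ns, uint32_t tsc_khz,
                             uint32_t last_tsc_khz)
{
        /* signed arithmetic: the new value may be behind the previous one */
        int64_t nsdiff = (int64_t)(data - last_tsc_write);

        /* cycles -> nanoseconds at tsc_khz (kHz): ns = cycles * 10^6 / kHz */
        nsdiff = (nsdiff * 1000000) / tsc_khz;

        /* how far the guest-visible delta diverges from elapsed real time */
        nsdiff -= elapsed_ns;
        if (nsdiff < 0)
                nsdiff = -nsdiff;

        /* only VCPUs running at the same virtual rate may share an offset */
        return nsdiff < NSEC_PER_SEC && tsc_khz == last_tsc_khz;
}

int main(void)
{
        /*
         * 2.5 GHz virtual TSC: a value 2,500,000 cycles ahead of the last
         * write, arriving about 1 ms later, looks like an attempt to keep
         * the TSCs synchronized, so the function reports a match (1).
         */
        printf("%d\n", tsc_write_matches(5002500000ULL, 5000000000ULL,
                                         1000000, 2500000, 2500000));
        return 0;
}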