ARM and x86 fixes.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJXdTiMAAoJEL/70l94x66D070H/ifn+e3nWddEeBTCjyELQiu+
a0h81Vk+QYMbtzZ76lr7gn31LTWbiI/yUBC/kAJCaiN3X44PTqJUhSLPiP/93/2S
A+/LZPtyjT5Qq8t0M4zByveOcT3AXI2akln/7b1rdHN4gyPqFJfGk2y0O5qttfi/
fhHqceKsUWfi2jMw3n6JtbshykhBbUMacq2suQLsX3vDPaC3dn64GAyDVYKfHaCl
7YzUmRJeXceQ4IQBXVoSnj/Bf5JDdUNglJl7zOeZlIcZnxhmCx5Uh3qRm3kdh4K0
yi76iwTAIbDI6DuFB09Sbr8lCxZCTTc2HsII1n8FrqlBZB6rDQlvfuRUR5DgbK0=
=b7zY
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "ARM and x86 fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: nVMX: VMX instructions: fix segment checks when L1 is in long mode.
  KVM: LAPIC: cap __delay at lapic_timer_advance_ns
  KVM: x86: move nsec_to_cycles from x86.c to x86.h
  pvclock: Get rid of __pvclock_read_cycles in function pvclock_read_flags
  pvclock: Cleanup to remove function pvclock_get_nsec_offset
  pvclock: Add CPU barriers to get correct version value
  KVM: arm/arm64: Stop leaking vcpu pid references
  arm64: KVM: fix build with CONFIG_ARM_PMU disabled
commit 1a0a02d1ef
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
@@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
+	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
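For context on the hunk above: the generic kvm_vcpu_uninit() is where the reference on the vcpu's struct pid gets dropped, so an arch free path that skips it leaks one pid per destroyed vcpu. Below is a compilable toy of the same ownership pattern; the names (toy struct pid, vcpu_uninit) are invented stand-ins, not the kernel's code.

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted "pid" standing in for the kernel's struct pid. */
struct pid { int refcount; };

static struct pid *get_pid(struct pid *p) { if (p) p->refcount++; return p; }
static void put_pid(struct pid *p)
{
	if (p && --p->refcount == 0)
		free(p);
}

struct vcpu { struct pid *pid; };

/* Common teardown: this is where the pid reference is released.  The
 * arm/arm64 bug was a free path that skipped this step entirely. */
static void vcpu_uninit(struct vcpu *v) { put_pid(v->pid); }

int main(void)
{
	struct pid *p = calloc(1, sizeof(*p));
	struct vcpu v = { .pid = get_pid(p) };

	vcpu_uninit(&v);	/* without this call, p would leak */
	printf("released cleanly\n");
	return 0;
}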
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
@@ -68,30 +68,23 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	return product;
 }
 
-static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
-	u64 delta = rdtsc_ordered() - src->tsc_timestamp;
-	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
-				   src->tsc_shift);
-}
-
 static __always_inline
 unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 			       cycle_t *cycles, u8 *flags)
 {
 	unsigned version;
-	cycle_t ret, offset;
-	u8 ret_flags;
+	cycle_t offset;
+	u64 delta;
 
 	version = src->version;
+	/* Make the latest version visible */
+	smp_rmb();
 
-	offset = pvclock_get_nsec_offset(src);
-	ret = src->system_time + offset;
-	ret_flags = src->flags;
-
-	*cycles = ret;
-	*flags = ret_flags;
+	delta = rdtsc_ordered() - src->tsc_timestamp;
+	offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+				     src->tsc_shift);
+	*cycles = src->system_time + offset;
+	*flags = src->flags;
 	return version;
 }
 
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
@@ -61,11 +61,16 @@ void pvclock_resume(void)
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 {
 	unsigned version;
-	cycle_t ret;
 	u8 flags;
 
 	do {
-		version = __pvclock_read_cycles(src, &ret, &flags);
+		version = src->version;
+		/* Make the latest version visible */
+		smp_rmb();
+
+		flags = src->flags;
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	return flags & valid_flags;
@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 
 	do {
 		version = __pvclock_read_cycles(src, &ret, &flags);
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
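The pvclock hunks above implement a seqcount-style reader: sample the version, read the data, then re-check the version, with a read barrier before the double-check so the data loads cannot be reordered past it; retry while the version is odd (an update is in flight) or has changed. A minimal user-space sketch of that protocol follows; the toy_* names are invented, and an acquire fence stands in for the kernel's smp_rmb().

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the guest-visible pvclock structure. */
struct toy_time_info {
	volatile uint32_t version;	/* odd while the "host" is updating */
	volatile uint64_t system_time;	/* data published under the version */
};

#define toy_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)

static uint64_t toy_read(const struct toy_time_info *src)
{
	uint32_t version;
	uint64_t t;

	do {
		version = src->version;
		toy_rmb();		/* make the latest version visible */
		t = src->system_time;
		toy_rmb();		/* version double-check must be last */
	} while ((src->version & 1) || version != src->version);

	return t;
}

int main(void)
{
	struct toy_time_info info = { .version = 2, .system_time = 123456789 };
	printf("time = %llu ns\n", (unsigned long long)toy_read(&info));
	return 0;
}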
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
@@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
 	if (guest_tsc < tsc_deadline)
-		__delay(tsc_deadline - guest_tsc);
+		__delay(min(tsc_deadline - guest_tsc,
+			nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
 }
 
 static void start_apic_timer(struct kvm_lapic *apic)
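The cap above turns lapic_timer_advance_ns into guest TSC cycles through nsec_to_cycles(), i.e. the pvclock_scale_delta() fixed-point scheme: (delta << shift) * mul_frac / 2^32. A self-contained sketch of that arithmetic; the 2.5 GHz mult/shift pair is an invented example (not values KVM computes), and the 128-bit multiply assumes GCC/Clang's __int128.

#include <stdint.h>
#include <stdio.h>

/* Portable rendition of pvclock_scale_delta():
 * (delta << shift) * mul_frac / 2^32. */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Invented parameters for a 2.5 GHz virtual TSC:
	 * (ns << 2) * (2.5 * 2^30) / 2^32 == ns * 2.5. */
	uint32_t mult = 2684354560u;
	int shift = 2;

	uint64_t advance_ns = 1000;	/* stands in for lapic_timer_advance_ns */
	uint64_t deadline_gap = 1000000; /* pretend tsc_deadline - guest_tsc   */

	/* The fix: never busy-wait longer than the advance window. */
	uint64_t wait = MIN(deadline_gap, scale_delta(advance_ns, mult, shift));
	printf("busy-wait capped at %llu cycles\n", (unsigned long long)wait);
	return 0;
}

With these numbers the cap wins: 1000 ns scales to 2500 cycles, far below the million-cycle gap a misprogrammed deadline could otherwise make the host spin for.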
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
@@ -6671,7 +6671,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
 	/* Checks for #GP/#SS exceptions. */
 	exn = false;
-	if (is_protmode(vcpu)) {
+	if (is_long_mode(vcpu)) {
+		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		 * non-canonical form. This is the only check on the memory
+		 * destination for long mode!
+		 */
+		exn = is_noncanonical_address(*ret);
+	} else if (is_protmode(vcpu)) {
 		/* Protected mode: apply checks for segment validity in the
 		 * following order:
 		 *  - segment type check (#GP(0) may be thrown)
@@ -6688,17 +6694,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 		 *    execute-only code segment
 		 */
 			exn = ((s.type & 0xa) == 8);
-	}
-	if (exn) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-		return 1;
-	}
-	if (is_long_mode(vcpu)) {
-		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
-		 * non-canonical form. This is an only check for long mode.
-		 */
-		exn = is_noncanonical_address(*ret);
-	} else if (is_protmode(vcpu)) {
+		if (exn) {
+			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+			return 1;
+		}
 		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
 		 */
 		exn = (s.unusable != 0);
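The long-mode branch above leans entirely on is_noncanonical_address(): with 48-bit virtual addressing, bits 63:48 must be a sign-extension of bit 47, and the segment checks the old code ran first simply do not apply in long mode. A stand-alone sketch of that test (assuming arithmetic right shift on signed types, as the kernel does):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Canonical means sign-extending bits 47:0 reproduces the address. */
static bool is_noncanonical(uint64_t va)
{
	return (uint64_t)((int64_t)(va << 16) >> 16) != va;
}

int main(void)
{
	printf("%d\n", is_noncanonical(0x00007fffffffffffULL)); /* 0: top of lower half  */
	printf("%d\n", is_noncanonical(0x0000800000000000ULL)); /* 1: inside the hole    */
	printf("%d\n", is_noncanonical(0xffff800000000000ULL)); /* 0: base of upper half */
	return 0;
}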
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -1244,12 +1244,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 static unsigned long max_tsc_khz;
 
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
-	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
-				   vcpu->arch.virtual_tsc_shift);
-}
-
 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 {
 	u64 v = (u64)khz * (1000000 + ppm);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_X86_H
 
 #include <linux/kvm_host.h>
+#include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
 
 #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
@@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns;
 
 extern struct static_key kvm_no_apic_vcpu;
 
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+				   vcpu->arch.virtual_tsc_shift);
+}
+
 /* Same "calling convention" as do_div:
  * - divide (n << 32) by base
  * - put result in n
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
@@ -18,13 +18,13 @@
 #ifndef __ASM_ARM_KVM_PMU_H
 #define __ASM_ARM_KVM_PMU_H
 
-#ifdef CONFIG_KVM_ARM_PMU
-
 #include <linux/perf_event.h>
 #include <asm/perf_event.h>
 
 #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
 
+#ifdef CONFIG_KVM_ARM_PMU
+
 struct kvm_pmc {
 	u8 idx;	/* index into the pmu->pmc array */
 	struct perf_event *perf_event;
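The header reshuffle above works because ARMV8_PMU_CYCLE_IDX (and the includes it depends on) is referenced even in configurations where the KVM PMU support is compiled out, so it must sit outside the feature gate. A toy layout showing the principle, with invented TOY_*/CONFIG_TOY_PMU names; it builds the same with or without -DCONFIG_TOY_PMU:

#include <stdio.h>

/* Constants consumed unconditionally must sit outside the feature gate. */
#define TOY_MAX_COUNTERS 32
#define TOY_CYCLE_IDX (TOY_MAX_COUNTERS - 1)

#ifdef CONFIG_TOY_PMU
/* Feature-gated helpers would live here. */
#endif

int main(void)
{
	printf("cycle counter index: %d\n", TOY_CYCLE_IDX);
	return 0;
}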