locking/pvqspinlock, x86: Enable PV qspinlock for KVM
This patch adds the necessary KVM specific code to allow KVM to support the CPU halting and kicking operations needed by the queue spinlock PV code.

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-11-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit bf0c7c34ad
parent f233f7f158
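For context on how those halting and kicking operations are used: below is a minimal sketch, not part of the patch, of the interaction between a queued-spinlock waiter and the lock releaser under the paravirt hooks installed by kvm_spinlock_init(). Only pv_wait()/pv_kick() and the KVM_HC_KICK_CPU hypercall they end up in are real interfaces; QSPIN_THRESHOLD_SKETCH and the two helper functions are hypothetical names used purely for illustration, and the real slow path is __pv_queued_spin_lock_slowpath().

/*
 * Minimal sketch, assuming kernel context (linux/compiler.h, asm/paravirt.h).
 * QSPIN_THRESHOLD_SKETCH and both helpers below are hypothetical; only
 * pv_wait()/pv_kick() reflect the interfaces this patch wires up for KVM.
 */
#define QSPIN_THRESHOLD_SKETCH	(1 << 15)	/* illustrative spin bound */

static void qspin_waiter_sketch(u8 *lock_byte, u8 locked_val)
{
	for (;;) {
		int loop;

		/* Spin for a while first: a short-held lock may clear soon. */
		for (loop = QSPIN_THRESHOLD_SKETCH; loop; loop--) {
			if (READ_ONCE(*lock_byte) != locked_val)
				return;		/* lock was handed over */
			cpu_relax();
		}
		/*
		 * Give the vCPU back to the host: pv_wait() is routed to
		 * kvm_wait(), which halts until the holder kicks us (or
		 * returns immediately if the byte has already changed).
		 */
		pv_wait(lock_byte, locked_val);
	}
}

static void qspin_releaser_sketch(u8 *lock_byte, int waiter_cpu)
{
	smp_store_release(lock_byte, 0);	/* drop the lock */
	/*
	 * pv_kick() is routed to kvm_kick_cpu(), which issues the
	 * KVM_HC_KICK_CPU hypercall to wake the halted waiter vCPU.
	 */
	pv_kick(waiter_cpu);
}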
@@ -584,6 +584,39 @@ static void kvm_kick_cpu(int cpu)
 	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
 
+
+#ifdef CONFIG_QUEUED_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+static void kvm_wait(u8 *ptr, u8 val)
+{
+	unsigned long flags;
+
+	if (in_nmi())
+		return;
+
+	local_irq_save(flags);
+
+	if (READ_ONCE(*ptr) != val)
+		goto out;
+
+	/*
+	 * halt until it's our turn and kicked. Note that we do safe halt
+	 * for irq enabled case to avoid hang when lock info is overwritten
+	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
+	 */
+	if (arch_irqs_disabled_flags(flags))
+		halt();
+	else
+		safe_halt();
+
+out:
+	local_irq_restore(flags);
+}
+
+#else /* !CONFIG_QUEUED_SPINLOCK */
+
 enum kvm_contention_stat {
 	TAKEN_SLOW,
 	TAKEN_SLOW_PICKUP,
@@ -817,6 +850,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
 	}
 }
 
+#endif /* !CONFIG_QUEUED_SPINLOCK */
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
@@ -828,8 +863,16 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
+#ifdef CONFIG_QUEUED_SPINLOCK
+	__pv_init_lock_hash();
+	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_lock_ops.wait = kvm_wait;
+	pv_lock_ops.kick = kvm_kick_cpu;
+#else /* !CONFIG_QUEUED_SPINLOCK */
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
 	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+#endif
 }
 
 static __init int kvm_spinlock_init_jump(void)
@@ -240,7 +240,7 @@ config ARCH_USE_QUEUED_SPINLOCK
 
 config QUEUED_SPINLOCK
 	def_bool y if ARCH_USE_QUEUED_SPINLOCK
-	depends on SMP && !PARAVIRT_SPINLOCKS
+	depends on SMP && (!PARAVIRT_SPINLOCKS || !XEN)
 
 config ARCH_USE_QUEUE_RWLOCK
 	bool