[PATCH] Directed yield: direct yield of spinlocks for s390.
Use the new diagnose 0x9c in the spinlock implementation for s390. It yields
the remaining timeslice of the virtual cpu that tries to acquire a lock to the
virtual cpu that is the current holder of the lock.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 3c1fcfe229
parent cdc39363d3
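For orientation before the hunks below: the patch replaces the undirected diag 0x44 yield with a directed diag 0x9c yield aimed at the CPU that holds the lock, and it reworks raw_spinlock_t so the lock word records the owning CPU. The following condensed C model only restates logic that appears in the spinlock hunks further down; the names yield_to_cpu, yield_to_hypervisor, machine_has_diag9c and lock_wait_step are illustrative stand-ins, and the real code issues the diagnose instructions inline and maps the owner through __cpu_logical_map.

    /*
     * Condensed model of the patched lock-wait logic (see the spinlock
     * hunk further below).  The diagnose instructions and the logical
     * cpu map are replaced by stubs so the sketch compiles on its own.
     */
    #include <stdio.h>

    typedef struct {
            volatile unsigned int owner_cpu;   /* ~cpu of the holder, 0 == free */
            volatile unsigned int owner_pc;    /* caller address, debugging aid */
    } raw_spinlock_t;

    static int machine_has_diag9c = 1;         /* stand-in for MACHINE_HAS_DIAG9C */

    static void yield_to_hypervisor(void)      /* models diag 0x44 */
    {
            printf("diag 0x44: yield remaining timeslice to any virtual cpu\n");
    }

    static void yield_to_cpu(int cpu)          /* models diag 0x9c */
    {
            if (machine_has_diag9c)
                    printf("diag 0x9c: yield remaining timeslice to virtual cpu %d\n", cpu);
            else
                    yield_to_hypervisor();
    }

    /* One pass of the retry loop in _raw_spin_lock_wait(). */
    static void lock_wait_step(raw_spinlock_t *lp, int *count, int spin_retry)
    {
            if ((*count)-- <= 0) {
                    unsigned int owner = lp->owner_cpu;
                    if (owner != 0)                    /* lock still held */
                            yield_to_cpu((int)~owner); /* undo the ~cpu encoding */
                    *count = spin_retry;
            }
    }

    int main(void)
    {
            raw_spinlock_t lp = { ~2u, 0 };    /* pretend cpu 2 holds the lock */
            int count = 0;

            lock_wait_step(&lp, &count, 1000); /* spins exhausted: directed yield */
            return 0;
    }

The point of the directed yield is visible here: once the spin budget is used up, the waiter hands its timeslice specifically to the lock holder instead of to whichever virtual cpu the hypervisor picks.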
@@ -254,6 +254,16 @@ startup_continue:
 	oi	3(%r12),0x80		# set IDTE flag
 .Lchkidte:
 
+#
+# find out if the diag 0x9c is available
+#
+	mvc	__LC_PGM_NEW_PSW(8),.Lpcdiag9c-.LPG1(%r13)
+	stap	__LC_CPUID+4		# store cpu address
+	lh	%r1,__LC_CPUID+4
+	diag	%r1,0,0x9c		# test diag 0x9c
+	oi	2(%r12),1		# set diag9c flag
+.Lchkdiag9c:
+
 	lpsw	.Lentry-.LPG1(13)	# jump to _stext in primary-space,
 					# virtual and never return ...
 	.align	8
@@ -281,6 +291,7 @@ startup_continue:
 .Lpccsp:.long	0x00080000,0x80000000 + .Lchkcsp
 .Lpcmvpg:.long	0x00080000,0x80000000 + .Lchkmvpg
 .Lpcidte:.long	0x00080000,0x80000000 + .Lchkidte
+.Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c
 .Lmemsize:.long	memory_size
 .Lmchunk:.long	memory_chunk
 .Lmflags:.long	machine_flags

@@ -250,6 +250,17 @@ startup_continue:
 	oi	7(%r12),0x80		# set IDTE flag
 0:
 
+#
+# find out if the diag 0x9c is available
+#
+	la	%r1,0f-.LPG1(%r13)	# set program check address
+	stg	%r1,__LC_PGM_NEW_PSW+8
+	stap	__LC_CPUID+4		# store cpu address
+	lh	%r1,__LC_CPUID+4
+	diag	%r1,0,0x9c		# test diag 0x9c
+	oi	6(%r12),1		# set diag9c flag
+0:
+
 #
 # find out if we have the MVCOS instruction
 #
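A note on the two startup hunks above, with one assumption spelled out: judging from the neighboring IDTE updates (oi 3(%r12),0x80 and oi 7(%r12),0x80, which match MACHINE_HAS_IDTE's value 128), %r12 addresses the machine_flags word here, so oi 2(%r12),1 in the 31-bit hunk and oi 6(%r12),1 in the 64-bit hunk both set the bit that the new MACHINE_HAS_DIAG9C test (machine_flags & 256) reads. The sketch below only checks that byte/bit arithmetic for a big-endian flags word of 4 and 8 bytes; it is not part of the patch.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild a big-endian word from its bytes, as s390 stores it. */
    static uint32_t be32(const uint8_t b[4])
    {
            return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
                   ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
    }

    static uint64_t be64(const uint8_t b[8])
    {
            uint64_t v = 0;
            for (int i = 0; i < 8; i++)
                    v = (v << 8) | b[i];
            return v;
    }

    int main(void)
    {
            uint8_t flags31[4] = { 0 };     /* 31-bit machine_flags (4 bytes) */
            uint8_t flags64[8] = { 0 };     /* 64-bit machine_flags (8 bytes) */

            flags31[2] |= 1;                /* oi 2(%r12),1  -- 31-bit hunk */
            flags64[6] |= 1;                /* oi 6(%r12),1  -- 64-bit hunk */

            assert(be32(flags31) == 256);   /* MACHINE_HAS_DIAG9C: machine_flags & 256 */
            assert(be64(flags64) == 256);
            printf("diag9c flag lands on bit value 256 in both layouts\n");
            return 0;
    }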

@@ -24,57 +24,76 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void
-_diag44(void)
+static inline void _raw_yield(void)
 {
-#ifdef CONFIG_64BIT
 	if (MACHINE_HAS_DIAG44)
-#endif
 		asm volatile("diag 0,0,0x44");
 }
 
-void
-_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
+static inline void _raw_yield_cpu(int cpu)
+{
+	if (MACHINE_HAS_DIAG9C)
+		asm volatile("diag %0,0,0x9c"
+			     : : "d" (__cpu_logical_map[cpu]));
+	else
+		_raw_yield();
+}
+
+void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
 {
 	int count = spin_retry;
+	unsigned int cpu = ~smp_processor_id();
 
 	while (1) {
 		if (count-- <= 0) {
-			_diag44();
+			unsigned int owner = lp->owner_cpu;
+			if (owner != 0)
+				_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		if (__raw_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+			lp->owner_pc = pc;
 			return;
+		}
 	}
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
-int
-_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
+int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
 {
-	int count = spin_retry;
+	unsigned int cpu = ~smp_processor_id();
+	int count;
 
-	while (count-- > 0) {
+	for (count = spin_retry; count > 0; count--) {
 		if (__raw_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+			lp->owner_pc = pc;
 			return 1;
+		}
 	}
 	return 0;
 }
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
-void
-_raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_spin_relax(raw_spinlock_t *lock)
+{
+	unsigned int cpu = lock->owner_cpu;
+	if (cpu != 0)
+		_raw_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(_raw_spin_relax);
+
+void _raw_read_lock_wait(raw_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
 
 	while (1) {
 		if (count-- <= 0) {
-			_diag44();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!__raw_read_can_lock(rw))
@@ -86,8 +105,7 @@ _raw_read_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
-int
-_raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(raw_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -103,14 +121,13 @@ _raw_read_trylock_retry(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
-void
-_raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(raw_rwlock_t *rw)
 {
 	int count = spin_retry;
 
 	while (1) {
 		if (count-- <= 0) {
-			_diag44();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!__raw_write_can_lock(rw))
@@ -121,8 +138,7 @@ _raw_write_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
-int
-_raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(raw_rwlock_t *rw)
 {
 	int count = spin_retry;
 
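One detail of the hunk above that is easy to miss: the lock word stores the bitwise complement of the CPU number (~smp_processor_id()), not the number itself, so that CPU 0 never writes a zero and the value 0 can keep meaning "unlocked"; the yield path undoes the complement again with _raw_yield_cpu(~owner) before looking up the logical CPU. A minimal check of that encoding, not part of the patch:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            /* owner_cpu = ~cpu: never 0, and ~owner_cpu recovers cpu,
             * so the lock word can still use 0 as "unlocked". */
            for (unsigned int cpu = 0; cpu < 64; cpu++) {
                    unsigned int owner_cpu = ~cpu;  /* value the CAS stores */
                    assert(owner_cpu != 0);         /* even cpu 0 looks locked */
                    assert(~owner_cpu == cpu);      /* decoded before the diag 0x9c */
            }
            printf("~cpu encoding is non-zero and reversible for cpus 0..63\n");
            return 0;
    }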

@@ -39,6 +39,7 @@ extern unsigned long machine_flags;
 #define MACHINE_IS_P390		(machine_flags & 4)
 #define MACHINE_HAS_MVPG	(machine_flags & 16)
 #define MACHINE_HAS_IDTE	(machine_flags & 128)
+#define MACHINE_HAS_DIAG9C	(machine_flags & 256)
 
 #ifndef __s390x__
 #define MACHINE_HAS_IEEE	(machine_flags & 2)

@@ -13,6 +13,8 @@
 
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
+#include <linux/smp.h>
+
 static inline int
 _raw_compare_and_swap(volatile unsigned int *lock,
 		      unsigned int old, unsigned int new)
@@ -50,34 +52,46 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+	do { while (__raw_spin_is_locked(lock)) \
+		 _raw_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc);
+extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc);
+extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc);
+extern void _raw_spin_relax(raw_spinlock_t *lock);
 
 static inline void __raw_spin_lock(raw_spinlock_t *lp)
 {
 	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
+	int old;
 
-	if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
-		_raw_spin_lock_wait(lp, pc);
+	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+	if (likely(old == 0)) {
+		lp->owner_pc = pc;
+		return;
+	}
+	_raw_spin_lock_wait(lp, pc);
 }
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 {
 	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
+	int old;
 
-	if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
+	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+	if (likely(old == 0)) {
+		lp->owner_pc = pc;
 		return 1;
+	}
 	return _raw_spin_trylock_retry(lp, pc);
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->lock, lp->lock, 0);
+	lp->owner_pc = 0;
+	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
 
 /*
@@ -154,7 +168,6 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return _raw_write_trylock_retry(rw);
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 

@@ -6,16 +6,16 @@
 #endif
 
 typedef struct {
-	volatile unsigned int lock;
+	volatile unsigned int owner_cpu;
+	volatile unsigned int owner_pc;
 } __attribute__ ((aligned (4))) raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-	volatile unsigned int owner_pc;
 } raw_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0, 0 }
+#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
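A closing remark on the types hunk above: __RAW_SPIN_LOCK_UNLOCKED stays { 0 } even though raw_spinlock_t now has two members, because a partial aggregate initializer in C zero-fills the remaining members, so both owner_cpu and owner_pc start out as 0. A small illustrative check, with the struct and macro copied from the hunk (GCC-style attribute as in the original), not part of the patch:

    #include <assert.h>
    #include <stdio.h>

    typedef struct {
            volatile unsigned int owner_cpu;
            volatile unsigned int owner_pc;
    } __attribute__ ((aligned (4))) raw_spinlock_t;

    #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }

    int main(void)
    {
            raw_spinlock_t lock = __RAW_SPIN_LOCK_UNLOCKED;

            /* The partial initializer zero-fills the second member too. */
            assert(lock.owner_cpu == 0);
            assert(lock.owner_pc == 0);
            printf("unlocked: owner_cpu=%u owner_pc=%u\n",
                   lock.owner_cpu, lock.owner_pc);
            return 0;
    }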