sparc64: Make itc_sync_lock raw
One more place where we must not be preemptible or interruptible on RT: the tick synchronization cycle must always run with interrupts actually disabled.

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 49b6c01f4c
parent aa3449ee9c
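For context, the sketch below shows the locking pattern the patch switches to. It is an illustrative fragment, not code from the patch: the names example_sync_lock and example_sync_section are made up. On PREEMPT_RT a regular spinlock_t becomes a sleeping lock, so spin_lock_irqsave() no longer disables interrupts or preemption; a raw_spinlock_t keeps the classic behaviour, which is what the busy-wait tick synchronization between master and slave CPUs requires.

#include <linux/spinlock.h>

/* Illustrative only: a raw spinlock keeps interrupts hard-disabled on RT. */
static DEFINE_RAW_SPINLOCK(example_sync_lock);

static void example_sync_section(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables local interrupts even on
	 * PREEMPT_RT, where spin_lock_irqsave() on a spinlock_t would
	 * take a sleeping lock and leave interrupts enabled.
	 */
	raw_spin_lock_irqsave(&example_sync_lock, flags);

	/* ... timing-critical busy-wait loop, as in the hunks below ... */

	raw_spin_unlock_irqrestore(&example_sync_lock, flags);
}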
@@ -149,7 +149,7 @@ void cpu_panic(void)
 #define NUM_ROUNDS	64	/* magic value */
 #define NUM_ITERS	5	/* likewise */
 
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 static unsigned long go[SLAVE + 1];
 
 #define DEBUG_TICK_SYNC	0
@@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
 	go[MASTER] = 0;
 	membar_safe("#StoreLoad");
 
-	spin_lock_irqsave(&itc_sync_lock, flags);
+	raw_spin_lock_irqsave(&itc_sync_lock, flags);
 	{
 		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
 			while (!go[MASTER])
@@ -268,7 +268,7 @@ static void smp_synchronize_one_tick(int cpu)
 			membar_safe("#StoreLoad");
 		}
 	}
-	spin_unlock_irqrestore(&itc_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)