/*
 *  linux/arch/m32r/kernel/smp.c
 *
 *  M32R SMP support routines.
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 *  Taken from i386 version.
 *    (c) 1995  Alan Cox, Building #3 <alan@redhat.com>
 *    (c) 1998-99, 2000  Ingo Molnar <mingo@redhat.com>
 *
 *  This code is released under the GNU General Public License version 2 or
 *  later.
 */

#undef DEBUG_SMP

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
#include <asm/tlbflush.h>

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Data structures and variables                                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*
 * For flush_cache_all()
 */
static DEFINE_SPINLOCK(flushcache_lock);
static volatile unsigned long flushcache_cpumask = 0;

/*
 * For flush_tlb_others()
 */
static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff

DECLARE_PER_CPU(int, prof_multiplier);
DECLARE_PER_CPU(int, prof_old_multiplier);
DECLARE_PER_CPU(int, prof_counter);

extern spinlock_t ipi_lock[];
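
/*
 * Note: ipi_lock[] is only declared here; send_IPI_mask_phys() below takes
 * the lock matching the IPI number to serialize writes to the corresponding
 * IPICRi register.
 */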

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Function Prototypes                                                        */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

void smp_reschedule_interrupt(void);
void smp_flush_cache_all_interrupt(void);

static void flush_tlb_all_ipi(void *);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
	struct vm_area_struct *, unsigned long);

void smp_invalidate_interrupt(void);

static void stop_this_cpu(void *);

void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);

static void send_IPI_allbutself(int, int);
static void send_IPI_mask(const struct cpumask *, int, int);

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines                                              */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_reschedule
 *
 * Description:  This routine requests another CPU to reschedule.
 *               1. Send 'RESCHEDULE_IPI' to the target CPU, which makes it
 *                  execute 'smp_reschedule_interrupt()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_id - Target CPU ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
	WARN_ON(cpu_is_offline(cpu_id));
	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}

/*==========================================================================*
 * Name:         smp_reschedule_interrupt
 *
 * Description:  This routine runs on the CPU that received a
 *               'RESCHEDULE_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_reschedule_interrupt(void)
{
	scheduler_ipi();
}

/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
	cpumask_t cpumask;
	unsigned long *mask;

	preempt_disable();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	spin_lock(&flushcache_lock);
	mask = cpumask_bits(&cpumask);
	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
	_flush_cache_copyback_all();
	while (flushcache_cpumask)
		mb();
	spin_unlock(&flushcache_lock);
	preempt_enable();
}
EXPORT_SYMBOL(smp_flush_cache_all);
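
/*
 * Note on smp_flush_cache_all(): flushcache_cpumask doubles as the
 * completion flag -- the sender ORs the target CPUs' bits into it before
 * raising the IPI and then spins until the word reaches zero, while each
 * receiver clears its own bit in smp_flush_cache_all_interrupt() once its
 * local flush is done.
 */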

void smp_flush_cache_all_interrupt(void)
{
	_flush_cache_copyback_all();
	clear_bit(smp_processor_id(), &flushcache_cpumask);
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* TLB flush request Routines                                                 */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_flush_tlb_all
 *
 * Description:  This routine flushes all processes' TLBs.
 *               1. Request the other CPUs to execute 'flush_tlb_all_ipi()'.
 *               2. Execute 'do_flush_tlb_all_local()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_all(void)
{
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
	smp_call_function(flush_tlb_all_ipi, NULL, 1);
	preempt_enable();
}

/*==========================================================================*
 * Name:         flush_tlb_all_ipi
 *
 * Description:  This routine flushes all local TLBs.
 *               1. Execute 'do_flush_tlb_all_local()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *info - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_all_ipi(void *info)
{
	__flush_tlb_all();
}

/*==========================================================================*
 * Name:         smp_flush_tlb_mm
 *
 * Description:  This routine flushes the specified mm context's TLB entries.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *mm - a pointer to the mm struct whose TLB entries are flushed
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu_id;
	cpumask_t cpu_mask;
	unsigned long *mmc;
	unsigned long flags;

	preempt_disable();
	cpu_id = smp_processor_id();
	mmc = &mm->context[cpu_id];
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(cpu_id, &cpu_mask);

	if (*mmc != NO_CONTEXT) {
		local_irq_save(flags);
		*mmc = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		else
			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
		local_irq_restore(flags);
	}
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);

	preempt_enable();
}
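
/*
 * Note on smp_flush_tlb_mm(): the local CPU is handled by resetting
 * mm->context[cpu_id] to NO_CONTEXT and, when mm is the current mm,
 * re-running activate_context() so the mm gets a fresh context/ASID;
 * the remote CPUs are asked to do the same through flush_tlb_others().
 */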

/*==========================================================================*
 * Name:         smp_flush_tlb_range
 *
 * Description:  This routine flushes a range of pages.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma that covers the range
 *               start - not used
 *               end - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	smp_flush_tlb_mm(vma->vm_mm);
}

/*==========================================================================*
 * Name:         smp_flush_tlb_page
 *
 * Description:  This routine flushes one page.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma that contains va
 *               va - virtual address of the page to flush
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu_id;
	cpumask_t cpu_mask;
	unsigned long *mmc;
	unsigned long flags;

	preempt_disable();
	cpu_id = smp_processor_id();
	mmc = &mm->context[cpu_id];
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(cpu_id, &cpu_mask);

#ifdef DEBUG_SMP
	if (!mm)
		BUG();
#endif

	if (*mmc != NO_CONTEXT) {
		local_irq_save(flags);
		va &= PAGE_MASK;
		va |= (*mmc & MMU_CONTEXT_ASID_MASK);
		__flush_tlb_page(va);
		local_irq_restore(flags);
	}
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, vma, va);

	preempt_enable();
}
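
/*
 * Note on smp_flush_tlb_page(): the value handed to __flush_tlb_page()
 * combines the page frame (va & PAGE_MASK) with the mm's ASID taken from
 * mm->context[cpu_id], so only the entry belonging to this address space
 * is dropped on the local CPU.
 */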

/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests other CPUs to flush their TLBs.
 *               1. Set up the parameters.
 *               2. Send 'INVALIDATE_TLB_IPI' to the other CPUs, which makes
 *                  them execute 'smp_invalidate_interrupt()'.
 *               3. Wait until the other CPUs have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm - a pointer to the mm struct whose TLB entries are flushed
 *               *vma - a pointer to the vma that contains va
 *               va - virtual address to flush, or FLUSH_ALL
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpumask_empty(&cpumask));

	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
	if (cpumask_empty(&cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask = cpumask_bits(&cpumask);
	atomic_or(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	while (!cpumask_empty(&flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
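
/*
 * Note on flush_tlb_others(): flush_mm, flush_vma and flush_va are only
 * valid while the sender holds tlbstate_lock; flush_cpumask doubles as the
 * completion barrier -- each receiver clears its bit in
 * smp_invalidate_interrupt() and the sender spins until the mask is empty
 * before clearing the globals and dropping the lock.
 */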

/*==========================================================================*
 * Name:         smp_invalidate_interrupt
 *
 * Description:  This routine runs on the CPU that received an
 *               'INVALIDATE_TLB_IPI'.
 *               1. Flush the local TLB.
 *               2. Report that the TLB flush has finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
	int cpu_id = smp_processor_id();
	unsigned long *mmc = &flush_mm->context[cpu_id];

	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		return;

	if (flush_va == FLUSH_ALL) {
		*mmc = NO_CONTEXT;
		if (flush_mm == current->active_mm)
			activate_context(flush_mm);
		else
			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
	} else {
		unsigned long va = flush_va;

		if (*mmc != NO_CONTEXT) {
			va &= PAGE_MASK;
			va |= (*mmc & MMU_CONTEXT_ASID_MASK);
			__flush_tlb_page(va);
		}
	}
	cpumask_clear_cpu(cpu_id, &flush_cpumask);
}
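
/*
 * Note on smp_invalidate_interrupt(): the receiver mirrors the sender's two
 * cases -- flush_va == FLUSH_ALL drops the whole context (as in
 * smp_flush_tlb_mm()), anything else flushes the single ASID-tagged page
 * (as in smp_flush_tlb_page()).
 */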

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Stop CPU request Routines                                                  */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_stop
 *
 * Description:  This routine requests that all CPUs stop.
 *               1. Request the other CPUs to execute 'stop_this_cpu()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/*==========================================================================*
 * Name:         stop_this_cpu
 *
 * Description:  This routine halts the calling CPU.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void stop_this_cpu(void *dummy)
{
	int cpu_id = smp_processor_id();

	/*
	 * Remove this CPU:
	 */
	set_cpu_online(cpu_id, false);

	/*
	 * PSW IE = 1;
	 * IMASK = 0;
	 * goto SLEEP
	 */
	local_irq_disable();
	outl(0, M32R_ICU_IMASK_PORTL);
	inl(M32R_ICU_IMASK_PORTL);	/* dummy read */
	local_irq_enable();

	for ( ; ; );
}
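
/*
 * Note on stop_this_cpu(): clearing the ICU IMASK register is meant to mask
 * every interrupt source (see the "PSW IE = 1; IMASK = 0" comment above), so
 * even though PSW.IE is re-enabled, the closing busy loop parks the CPU for
 * good.
 */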

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}

/*==========================================================================*
 * Name:         smp_call_function_interrupt
 *
 * Description:  This routine runs on the CPU that received a
 *               'CALL_FUNCTION_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Timer Routines                                                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_timer
 *
 * Description:  This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
 *               in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_timer(void)
{
	send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}

/*==========================================================================*
 * Name:         smp_ipi_timer_interrupt
 *
 * Description:  This routine runs on the CPU that received a
 *               'LOCAL_TIMER_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *regs - a pointer to the saved register info
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	old_regs = set_irq_regs(regs);
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}

/*==========================================================================*
 * Name:         smp_local_timer_interrupt
 *
 * Description:  Local timer interrupt handler. It does both profiling and
 *               process statistics/rescheduling.
 *               We do profiling in every local tick, statistics/rescheduling
 *               happen only every 'profiling multiplier' ticks. The default
 *               multiplier is 1 and it can be changed by writing the new
 *               multiplier value into /proc/profile.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *regs - a pointer to the saved register info
 *
 * Returns:      void (cannot fail)
 *
 * Original:     arch/i386/kernel/apic.c
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  use per_cpu structure.
 *==========================================================================*/
void smp_local_timer_interrupt(void)
{
	int user = user_mode(get_irq_regs());
	int cpu_id = smp_processor_id();

	/*
	 * The profiling function is SMP safe. (nothing can mess
	 * around with "current", and the profiling counters are
	 * updated with atomic operations). This is especially
	 * useful with a profiling multiplier != 1
	 */

	profile_tick(CPU_PROFILING);

	if (--per_cpu(prof_counter, cpu_id) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile. In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu_id)
			= per_cpu(prof_multiplier, cpu_id);
		if (per_cpu(prof_counter, cpu_id)
			!= per_cpu(prof_old_multiplier, cpu_id))
		{
			per_cpu(prof_old_multiplier, cpu_id)
				= per_cpu(prof_counter, cpu_id);
		}

		update_process_times(user);
	}
}
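
/*
 * Note on smp_local_timer_interrupt(): unlike the i386 APIC code this was
 * derived from, no hardware timer is reprogrammed when the multiplier
 * changes -- the inner branch only records the new value in
 * prof_old_multiplier and reloads prof_counter.
 */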

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Send IPI Routines                                                          */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         send_IPI_allbutself
 *
 * Description:  This routine sends an IPI to all other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    ipi_num - Number of IPI
 *               try - 0 : Send the IPI unconditionally.
 *                    !0 : Skip this IPI if the target CPU has not yet
 *                         handled the previous one.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_allbutself(int ipi_num, int try)
{
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	send_IPI_mask(&cpumask, ipi_num, try);
}

/*==========================================================================*
 * Name:         send_IPI_mask
 *
 * Description:  This routine sends an IPI to the CPUs in the given mask.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_mask - Bitmap of target CPUs' logical IDs
 *               ipi_num - Number of IPI
 *               try - 0 : Send the IPI unconditionally.
 *                    !0 : Skip this IPI if the target CPU has not yet
 *                         handled the previous one.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
	cpumask_t physid_mask, tmp;
	int cpu_id, phys_id;
	int num_cpus = num_online_cpus();

	if (num_cpus <= 1)	/* NO MP */
		return;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(cpumask, &tmp));

	cpumask_clear(&physid_mask);
	for_each_cpu(cpu_id, cpumask) {
		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
			cpumask_set_cpu(phys_id, &physid_mask);
	}

	send_IPI_mask_phys(&physid_mask, ipi_num, try);
}
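
/*
 * Note on send_IPI_mask(): the loop above translates logical CPU ids into
 * physical ids with cpu_to_physid() before handing the mask to
 * send_IPI_mask_phys(), which works purely in terms of physical ids and
 * IPICR bit positions.
 */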

/*==========================================================================*
 * Name:         send_IPI_mask_phys
 *
 * Description:  This routine sends an IPI to other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_mask - Bitmap of target CPUs' physical IDs
 *               ipi_num - Number of IPI
 *               try - 0 : Send the IPI unconditionally.
 *                    !0 : Skip this IPI if the target CPU has not yet
 *                         handled the previous one.
 *
 * Returns:      IPICRi register value.
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
	int try)
{
	spinlock_t *ipilock;
	volatile unsigned long *ipicr_addr;
	unsigned long ipicr_val;
	unsigned long my_physid_mask;
	unsigned long mask = cpumask_bits(physid_mask)[0];

	if (mask & ~physids_coerce(phys_cpu_present_map))
		BUG();
	if (ipi_num >= NR_IPIS || ipi_num < 0)
		BUG();

	mask <<= IPI_SHIFT;
	ipilock = &ipi_lock[ipi_num];
	ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
		+ (ipi_num << 2));
	my_physid_mask = ~(1 << smp_processor_id());

	/*
	 * lock ipi_lock[i]
	 * check IPICRi == 0
	 * write IPICRi (send IPIi)
	 * unlock ipi_lock[i]
	 */
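	/*
	 * In the assembly below: %0 = ipicr_val, %1 = ipicr_addr, %2 = mask,
	 * %3 = try, %4 = my_physid_mask.  The previous IPI is treated as
	 * still pending while any bit other than our own is set in IPICRi:
	 * with try == 0 we spin until it clears before writing, with
	 * try != 0 we skip the write and just return the masked value read.
	 */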
	spin_lock(ipilock);
	__asm__ __volatile__ (
		";; CHECK IPICRi == 0		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"ld	%0, @%1			\n\t"
		"and	%0, %4			\n\t"
		"beqz	%0, 2f			\n\t"
		"bnez	%3, 3f			\n\t"
		"bra	1b			\n\t"
		";; WRITE IPICRi (send IPIi)	\n\t"
		".fillinsn			\n"
		"2:				\n\t"
		"st	%2, @%1			\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		: "=&r"(ipicr_val)
		: "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
		: "memory"
	);
	spin_unlock(ipilock);

	return ipicr_val;
}