Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm

Pull core ARM updates from Russell King:
 "This is the bulk of the core ARM updates for this merge window.
  Included in here is a different way to handle the VIVT cache flushing
  on context switch, which should allow scheduler folk to remove a
  special case in their core code.

  We have architected timer support here, which is a set of timers
  specified by the ARM architecture for future SoCs.  So we should see
  less variability in timer design going forward.

  The last big thing here is my cleanup to the way we handle PCI across
  ARM, fixing some oddities in some platforms which hadn't realised
  there was a way to deal with their private data already built in to
  our PCI backend.

  I've also removed support for the ARMv3 architecture; it hasn't worked
  properly for years so it seems pointless to keep it around."

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (47 commits)
  ARM: PCI: remove per-pci_hw list of buses
  ARM: PCI: dove/kirkwood/mv78xx0: use sys->private_data
  ARM: PCI: provide a default bus scan implementation
  ARM: PCI: get rid of pci_std_swizzle()
  ARM: PCI: versatile: fix PCI interrupt setup
  ARM: PCI: integrator: use common PCI swizzle
  ARM: 7416/1: LPAE: Remove unused L_PTE_(BUFFERABLE|CACHEABLE) macros
  ARM: 7415/1: vfp: convert printk's to pr_*'s
  ARM: decompressor: avoid speculative prefetch from non-RAM areas
  ARM: Remove ARMv3 support from decompressor
  ARM: 7413/1: move read_{boot,persistent}_clock to the architecture level
  ARM: Remove support for ARMv3 ARM610 and ARM710 CPUs
  ARM: 7363/1: DEBUG_LL: limit early mapping to the minimum
  ARM: 7391/1: versatile: add some auxdata for device trees
  ARM: 7389/2: plat-versatile: modernize FPGA IRQ controller
  AMBA: get rid of last two uses of NO_IRQ
  ARM: 7408/1: cacheflush: return error to userspace when flushing syscall fails
  ARM: 7409/1: Do not call flush_cache_user_range with mmap_sem held
  ARM: 7404/1: cmpxchg64: use atomic64 and local64 routines for cmpxchg64
  ARM: 7347/1: SCU: use cpu_logical_map for per-CPU low power mode
  ...
Linus Torvalds 2012-05-21 16:01:50 -07:00
commit ff8ce5f67d
159 changed files with 1460 additions and 2454 deletions

View File

@ -0,0 +1,27 @@
* ARM architected timer
ARM Cortex-A7 and Cortex-A15 have a per-core architected timer, which
provides per-cpu timers.
The timer is attached to a GIC to deliver its per-processor interrupts.
** Timer node properties:
- compatible : Should at least contain "arm,armv7-timer".
- interrupts : Interrupt list for secure, non-secure, virtual and
hypervisor timers, in that order.
- clock-frequency : The frequency of the main counter, in Hz. Optional.
Example:
timer {
compatible = "arm,cortex-a15-timer",
"arm,armv7-timer";
interrupts = <1 13 0xf08>,
<1 14 0xf08>,
<1 11 0xf08>,
<1 10 0xf08>;
clock-frequency = <100000000>;
};

View File

@ -11,6 +11,7 @@ config ARM
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
@ -30,6 +31,8 @@ config ARM
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select CPU_PM if (SUSPEND || CPU_IDLE)
select GENERIC_PCI_IOMAP
@ -126,14 +129,6 @@ config TRACE_IRQFLAGS_SUPPORT
bool
default y
config HARDIRQS_SW_RESEND
bool
default y
config GENERIC_IRQ_PROBE
bool
default y
config GENERIC_LOCKBREAK
bool
default y
@ -280,6 +275,7 @@ config ARCH_INTEGRATOR
select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
select MULTI_IRQ_HANDLER
help
Support for ARM's Integrator platform.
@ -632,7 +628,6 @@ config ARCH_MMP
select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select GPIO_PXA
select TICK_ONESHOT
select PLAT_PXA
select SPARSE_IRQ
select GENERIC_ALLOCATOR
@ -716,7 +711,6 @@ config ARCH_PXA
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
select GPIO_PXA
select TICK_ONESHOT
select PLAT_PXA
select SPARSE_IRQ
select AUTO_ZRELADDR
@ -783,7 +777,6 @@ config ARCH_SA1100
select CPU_FREQ
select GENERIC_CLOCKEVENTS
select CLKDEV_LOOKUP
select TICK_ONESHOT
select ARCH_REQUIRE_GPIOLIB
select HAVE_IDE
select NEED_MACH_MEMORY_H
@ -1552,10 +1545,15 @@ config HAVE_ARM_SCU
help
This option enables support for the ARM system coherency unit
config ARM_ARCH_TIMER
bool "Architected timer support"
depends on CPU_V7
help
This option enables support for the ARM architected timer
config HAVE_ARM_TWD
bool
depends on SMP
select TICK_ONESHOT
help
This option enables support for the ARM timer and watchdog unit

View File

@ -70,8 +70,6 @@ arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4
arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
# This selects how we optimise for the processor.
tune-$(CONFIG_CPU_ARM610) :=-mtune=arm610
tune-$(CONFIG_CPU_ARM710) :=-mtune=arm710
tune-$(CONFIG_CPU_ARM7TDMI) :=-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM720T) :=-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM740T) :=-mtune=arm7tdmi

View File

@ -567,6 +567,12 @@ __armv3_mpu_cache_on:
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif
__setup_mmu: sub r3, r4, #16384 @ Page directory size
bic r3, r3, #0xff @ Align the pointer
bic r3, r3, #0x3f00
@ -578,17 +584,14 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
mov r9, r0, lsr #18
mov r9, r9, lsl #18 @ start of RAM
add r10, r9, #0x10000000 @ a reasonable RAM size
mov r1, #0x12
orr r1, r1, #3 << 10
mov r1, #0x12 @ XN|U + section mapping
orr r1, r1, #3 << 10 @ AP=11
add r2, r3, #16384
1: cmp r1, r9 @ if virt > start of RAM
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
orrhs r1, r1, #0x08 @ set cacheable
#else
orrhs r1, r1, #0x0c @ set cacheable, bufferable
#endif
cmp r1, r10 @ if virt > end of RAM
bichs r1, r1, #0x0c @ clear cacheable, bufferable
cmphs r10, r1 @ && end of RAM > virt
bic r1, r1, #0x1c @ clear XN|U + C + B
orrlo r1, r1, #0x10 @ Set XN|U for non-RAM
orrhs r1, r1, r6 @ set RAM section settings
str r1, [r0], #4 @ 1:1 mapping
add r1, r1, #1048576
teq r0, r2
@ -599,7 +602,7 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
* so there is no map overlap problem for up to 1 MB compressed kernel.
* If the execution is in RAM then we would only be duplicating the above.
*/
mov r1, #0x1e
orr r1, r6, #0x04 @ ensure B is set for this
orr r1, r1, #3 << 10
mov r2, pc
mov r2, r2, lsr #20
@ -620,6 +623,7 @@ __arm926ejs_mmu_cache_on:
__armv4_mmu_cache_on:
mov r12, lr
#ifdef CONFIG_MMU
mov r6, #CB_BITS | 0x12 @ U
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@ -641,6 +645,7 @@ __armv7_mmu_cache_on:
#ifdef CONFIG_MMU
mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
tst r11, #0xf @ VMSA
movne r6, #CB_BITS | 0x02 @ !XN
blne __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@ -655,7 +660,7 @@ __armv7_mmu_cache_on:
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
orrne r0, r0, #1 @ MMU enabled
movne r1, #-1
movne r1, #0xfffffffd @ domain 0 = client
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
#endif
@ -668,6 +673,7 @@ __armv7_mmu_cache_on:
__fa526_cache_on:
mov r12, lr
mov r6, #CB_BITS | 0x12 @ U
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
@ -680,18 +686,6 @@ __fa526_cache_on:
mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
mov pc, r12
__arm6_mmu_cache_on:
mov r12, lr
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
mov r0, #0x30
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
mov pc, r12
__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
@ -756,16 +750,6 @@ call_cache_fn: adr r12, proc_types
.align 2
.type proc_types,#object
proc_types:
.word 0x41560600 @ ARM6/610
.word 0xffffffe0
W(b) __arm6_mmu_cache_off @ works, but slow
W(b) __arm6_mmu_cache_off
mov pc, lr
THUMB( nop )
@ b __arm6_mmu_cache_on @ untested
@ b __arm6_mmu_cache_off
@ b __armv3_mmu_cache_flush
.word 0x00000000 @ old ARM ID
.word 0x0000f000
mov pc, lr
@ -777,8 +761,10 @@ proc_types:
.word 0x41007000 @ ARM7/710
.word 0xfff8fe00
W(b) __arm7_mmu_cache_off
W(b) __arm7_mmu_cache_off
mov pc, lr
THUMB( nop )
mov pc, lr
THUMB( nop )
mov pc, lr
THUMB( nop )
@ -977,21 +963,6 @@ __armv7_mmu_cache_off:
mcr p15, 0, r0, c7, c5, 4 @ ISB
mov pc, r12
__arm6_mmu_cache_off:
mov r0, #0x00000030 @ ARM6 control reg.
b __armv3_mmu_cache_off
__arm7_mmu_cache_off:
mov r0, #0x00000070 @ ARM7 control reg.
b __armv3_mmu_cache_off
__armv3_mmu_cache_off:
mcr p15, 0, r0, c1, c0, 0 @ turn MMU and cache off
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
mov pc, lr
/*
* Clean and flush the cache to maintain consistency.
*

View File

@ -222,7 +222,7 @@ static int it8152_pci_write_config(struct pci_bus *bus,
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops it8152_ops = {
struct pci_ops it8152_ops = {
.read = it8152_pci_read_config,
.write = it8152_pci_write_config,
};
@ -346,9 +346,4 @@ void pcibios_set_master(struct pci_dev *dev)
}
struct pci_bus * __init it8152_pci_scan_bus(int nr, struct pci_sys_data *sys)
{
return pci_scan_root_bus(NULL, nr, &it8152_ops, sys, &sys->resources);
}
EXPORT_SYMBOL(dma_set_coherent_mask);

View File

@ -51,7 +51,7 @@ via82c505_write_config(struct pci_bus *bus, unsigned int devfn, int where,
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops via82c505_ops = {
struct pci_ops via82c505_ops = {
.read = via82c505_read_config,
.write = via82c505_write_config,
};
@ -81,12 +81,3 @@ int __init via82c505_setup(int nr, struct pci_sys_data *sys)
{
return (nr == 0);
}
struct pci_bus * __init via82c505_scan_bus(int nr, struct pci_sys_data *sysdata)
{
if (nr == 0)
return pci_scan_root_bus(NULL, 0, &via82c505_ops, sysdata,
&sysdata->resources);
return NULL;
}

View File

@ -39,6 +39,7 @@
* struct vic_device - VIC PM device
* @irq: The IRQ number for the base of the VIC.
* @base: The register base for the VIC.
* @valid_sources: A bitmask of valid interrupts
* @resume_sources: A bitmask of interrupts for resume.
* @resume_irqs: The IRQs enabled for resume.
* @int_select: Save for VIC_INT_SELECT.
@ -50,6 +51,7 @@
struct vic_device {
void __iomem *base;
int irq;
u32 valid_sources;
u32 resume_sources;
u32 resume_irqs;
u32 int_select;
@ -164,10 +166,32 @@ static int __init vic_pm_init(void)
late_initcall(vic_pm_init);
#endif /* CONFIG_PM */
static struct irq_chip vic_chip;
static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct vic_device *v = d->host_data;
/* Skip invalid IRQs, only register handlers for the real ones */
if (!(v->valid_sources & (1 << hwirq)))
return -ENOTSUPP;
irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
irq_set_chip_data(irq, v->base);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
return 0;
}
static struct irq_domain_ops vic_irqdomain_ops = {
.map = vic_irqdomain_map,
.xlate = irq_domain_xlate_onetwocell,
};
/**
* vic_register() - Register a VIC.
* @base: The base address of the VIC.
* @irq: The base IRQ for the VIC.
* @valid_sources: bitmask of valid interrupts
* @resume_sources: bitmask of interrupts allowed for resume sources.
* @node: The device tree node associated with the VIC.
*
@ -178,7 +202,8 @@ late_initcall(vic_pm_init);
* This also configures the IRQ domain for the VIC.
*/
static void __init vic_register(void __iomem *base, unsigned int irq,
u32 resume_sources, struct device_node *node)
u32 valid_sources, u32 resume_sources,
struct device_node *node)
{
struct vic_device *v;
@ -189,11 +214,12 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
v = &vic_devices[vic_id];
v->base = base;
v->valid_sources = valid_sources;
v->resume_sources = resume_sources;
v->irq = irq;
vic_id++;
v->domain = irq_domain_add_legacy(node, 32, irq, 0,
&irq_domain_simple_ops, v);
v->domain = irq_domain_add_legacy(node, fls(valid_sources), irq, 0,
&vic_irqdomain_ops, v);
}
static void vic_ack_irq(struct irq_data *d)
@ -287,23 +313,6 @@ static void __init vic_clear_interrupts(void __iomem *base)
}
}
static void __init vic_set_irq_sources(void __iomem *base,
unsigned int irq_start, u32 vic_sources)
{
unsigned int i;
for (i = 0; i < 32; i++) {
if (vic_sources & (1 << i)) {
unsigned int irq = irq_start + i;
irq_set_chip_and_handler(irq, &vic_chip,
handle_level_irq);
irq_set_chip_data(irq, base);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
}
/*
* The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
* The original cell has 32 interrupts, while the modified one has 64,
@ -338,8 +347,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
vic_set_irq_sources(base, irq_start, vic_sources);
vic_register(base, irq_start, 0, node);
vic_register(base, irq_start, vic_sources, 0, node);
}
void __init __vic_init(void __iomem *base, unsigned int irq_start,
@ -379,9 +387,7 @@ void __init __vic_init(void __iomem *base, unsigned int irq_start,
vic_init2(base);
vic_set_irq_sources(base, irq_start, vic_sources);
vic_register(base, irq_start, resume_sources, node);
vic_register(base, irq_start, vic_sources, resume_sources, node);
}
/**

View File

@ -8,8 +8,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_RPC=y
CONFIG_CPU_ARM610=y
CONFIG_CPU_ARM710=y
CONFIG_CPU_SA110=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0

View File

@ -0,0 +1,19 @@
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H
#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_of_register(void);
int arch_timer_sched_clock_init(void);
#else
static inline int arch_timer_of_register(void)
{
return -ENXIO;
}
static inline int arch_timer_sched_clock_init(void)
{
return -ENXIO;
}
#endif
#endif
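These are the two entry points a platform's timer setup is expected to call. A minimal sketch, assuming a hypothetical board (the myboard_* names are illustrative, not part of this series):

static void __init myboard_timer_init(void)
{
	/* Probe the architected timer from the device tree, if described. */
	if (arch_timer_of_register() == 0) {
		/* Optionally use it as the sched_clock source as well. */
		arch_timer_sched_clock_init();
		return;
	}
	/* Otherwise fall back to a SoC-specific timer (not shown). */
}

With CONFIG_ARM_ARCH_TIMER disabled both stubs return -ENXIO, so the fallback path is taken automatically.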

View File

@ -101,7 +101,7 @@ struct cpu_cache_fns {
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*coherent_user_range)(unsigned long, unsigned long);
int (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_area)(void *, size_t);
void (*dma_map_area)(const void *, size_t, int);
@ -142,7 +142,7 @@ extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
/*
@ -249,7 +249,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
#define flush_cache_user_range(vma,start,end) \
#define flush_cache_user_range(start,end) \
__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
/*

View File

@ -229,66 +229,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
(unsigned long)(n), \
sizeof(*(ptr))))
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
#define cmpxchg64(ptr, o, n) \
((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
atomic64_t, \
counter), \
(unsigned long)(o), \
(unsigned long)(n)))
/*
* Note : ARMv7-M (currently unsupported by Linux) does not support
* ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
* not be allowed to use __cmpxchg64.
*/
static inline unsigned long long __cmpxchg64(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
register unsigned long long oldval asm("r0");
register unsigned long long __old asm("r2") = old;
register unsigned long long __new asm("r4") = new;
unsigned long res;
do {
asm volatile(
" @ __cmpxchg8\n"
" ldrexd %1, %H1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" teqeq %H1, %H3\n"
" strexdeq %0, %4, %H4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (__old), "r" (__new)
: "memory", "cc");
} while (res);
return oldval;
}
static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long ret;
smp_mb();
ret = __cmpxchg64(ptr, old, new);
smp_mb();
return ret;
}
#define cmpxchg64(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))
#define cmpxchg64_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))
#else /* min ARCH = ARMv6 */
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
#define cmpxchg64_local(ptr, o, n) \
((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
local64_t, \
a), \
(unsigned long)(o), \
(unsigned long)(n)))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
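The calling convention of cmpxchg64() is unchanged by the switch to the atomic64/local64 helpers; only the implementation is shared. A minimal usage sketch, assuming a hypothetical 64-bit counter (not from this patch):

static u64 seq;

static void seq_add(u64 delta)
{
	u64 old, new;

	/* Classic compare-and-swap retry loop on a 64-bit value. */
	do {
		old = seq;
		new = old + delta;
	} while (cmpxchg64(&seq, old, new) != old);
}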

View File

@ -31,14 +31,6 @@
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
#if defined(CONFIG_CPU_ARM610)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm6_data_abort
# endif
#endif
#if defined(CONFIG_CPU_ARM710)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1

View File

@ -23,15 +23,6 @@
* CPU_NAME - the prefix for CPU related functions
*/
#ifdef CONFIG_CPU_ARM610
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm6
# endif
#endif
#ifdef CONFIG_CPU_ARM7TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
@ -41,15 +32,6 @@
# endif
#endif
#ifdef CONFIG_CPU_ARM710
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7
# endif
#endif
#ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU

View File

@ -110,6 +110,6 @@ extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
extern void it8152_init_irq(void);
extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
extern struct pci_bus *it8152_pci_scan_bus(int nr, struct pci_sys_data *sys);
extern struct pci_ops it8152_ops;
#endif /* __ASM_HARDWARE_IT8152_H */

View File

@ -12,13 +12,14 @@
#define __ASM_MACH_PCI_H
struct pci_sys_data;
struct pci_ops;
struct pci_bus;
struct hw_pci {
#ifdef CONFIG_PCI_DOMAINS
int domain;
#endif
struct list_head buses;
struct pci_ops *ops;
int nr_controllers;
int (*setup)(int nr, struct pci_sys_data *);
struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
@ -45,15 +46,9 @@ struct pci_sys_data {
u8 (*swizzle)(struct pci_dev *, u8 *);
/* IRQ mapping */
int (*map_irq)(const struct pci_dev *, u8, u8);
struct hw_pci *hw;
void *private_data; /* platform controller private data */
};
/*
* This is the standard PCI-PCI bridge swizzling algorithm.
*/
#define pci_std_swizzle pci_common_swizzle
/*
* Call this with your hw_pci struct to initialise the PCI system.
*/
@ -62,22 +57,22 @@ void pci_common_init(struct hw_pci *);
/*
* PCI controllers
*/
extern struct pci_ops iop3xx_ops;
extern int iop3xx_pci_setup(int nr, struct pci_sys_data *);
extern struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *);
extern void iop3xx_pci_preinit(void);
extern void iop3xx_pci_preinit_cond(void);
extern struct pci_ops dc21285_ops;
extern int dc21285_setup(int nr, struct pci_sys_data *);
extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *);
extern void dc21285_preinit(void);
extern void dc21285_postinit(void);
extern struct pci_ops via82c505_ops;
extern int via82c505_setup(int nr, struct pci_sys_data *);
extern struct pci_bus *via82c505_scan_bus(int nr, struct pci_sys_data *);
extern void via82c505_init(void *sysdata);
extern struct pci_ops pci_v3_ops;
extern int pci_v3_setup(int nr, struct pci_sys_data *);
extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
extern void pci_v3_preinit(void);
extern void pci_v3_postinit(void);

View File

@ -42,4 +42,9 @@ struct sys_timer {
extern void timer_tick(void);
struct timespec;
typedef void (*clock_access_fn)(struct timespec *);
extern int register_persistent_clock(clock_access_fn read_boot,
clock_access_fn read_persistent);
#endif
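register_persistent_clock() replaces per-platform definitions of read_{boot,persistent}_clock(). A sketch of how a hypothetical RTC-backed platform might use it (illustrative names only):

static void myplat_read_persistent_clock(struct timespec *ts)
{
	myplat_rtc_read_time(ts);	/* hypothetical RTC read helper */
}

static void __init myplat_timer_init(void)
{
	/* No dedicated boot clock on this platform, so pass NULL for read_boot. */
	register_persistent_clock(NULL, myplat_read_persistent_clock);
}

As the arch/arm/kernel/time.c hunk further down shows, unset accessors keep a dummy implementation, and a second registration is rejected with -EINVAL.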

View File

@ -34,11 +34,4 @@ typedef struct {
#endif
/*
* switch_mm() may do a full cache flush over the context switch,
* so enable interrupts over the context switch to avoid high
* latency.
*/
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
#endif

View File

@ -43,45 +43,104 @@ void __check_kvm_seq(struct mm_struct *mm);
#define ASID_FIRST_VERSION (1 << ASID_BITS)
extern unsigned int cpu_last_asid;
#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct mm_struct *, current_mm);
#endif
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);
void cpu_set_reserved_ttbr0(void);
static inline void check_context(struct mm_struct *mm)
static inline void switch_new_context(struct mm_struct *mm)
{
/*
* This code is executed with interrupts enabled. Therefore,
* mm->context.id cannot be updated to the latest ASID version
* on a different CPU (and condition below not triggered)
* without first getting an IPI to reset the context. The
* alternative is to take a read_lock on mm->context.id_lock
* (after changing its type to rwlock_t).
*/
if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
__new_context(mm);
unsigned long flags;
__new_context(mm);
local_irq_save(flags);
cpu_switch_mm(mm->pgd, mm);
local_irq_restore(flags);
}
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
/*
* Required during context switch to avoid speculative page table
* walking with the wrong TTBR.
*/
cpu_set_reserved_ttbr0();
if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
/*
* The ASID is from the current generation, just switch to the
* new pgd. This condition is only true for calls from
* context_switch() and interrupts are already disabled.
*/
cpu_switch_mm(mm->pgd, mm);
else if (irqs_disabled())
/*
* Defer the new ASID allocation until after the context
* switch critical region since __new_context() cannot be
* called with interrupts disabled (it sends IPIs).
*/
set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
else
/*
* That is a direct call to switch_mm() or activate_mm() with
* interrupts enabled and a new context.
*/
switch_new_context(mm);
}
#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0)
#else
static inline void check_context(struct mm_struct *mm)
#define finish_arch_post_lock_switch \
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
if (test_and_clear_thread_flag(TIF_SWITCH_MM))
switch_new_context(current->mm);
}
#else /* !CONFIG_CPU_HAS_ASID */
#ifdef CONFIG_MMU
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
#endif
if (irqs_disabled())
/*
* cpu_switch_mm() needs to flush the VIVT caches. To avoid
* high interrupt latencies, defer the call and continue
* running with the old mm. Since we only support UP systems
* on non-ASID CPUs, the old mm will remain valid until the
* finish_arch_post_lock_switch() call.
*/
set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
else
cpu_switch_mm(mm->pgd, mm);
}
#define finish_arch_post_lock_switch \
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
struct mm_struct *mm = current->mm;
cpu_switch_mm(mm->pgd, mm);
}
}
#endif /* CONFIG_MMU */
#define init_new_context(tsk,mm) 0
#endif
#endif /* CONFIG_CPU_HAS_ASID */
#define destroy_context(mm) do { } while(0)
@ -119,12 +178,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
__flush_icache_all();
#endif
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
#ifdef CONFIG_SMP
struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
*crt_mm = next;
#endif
check_context(next);
cpu_switch_mm(next->pgd, next);
check_and_switch_context(next, tsk);
if (cache_is_vivt())
cpumask_clear_cpu(cpu, mm_cpumask(prev));
}

View File

@ -34,7 +34,6 @@
* processor(s) we're building for.
*
* We have the following to choose from:
* v3 - ARMv3
* v4wt - ARMv4 with writethrough cache, without minicache
* v4wb - ARMv4 with writeback cache, without minicache
* v4_mc - ARMv4 with minicache
@ -44,14 +43,6 @@
#undef _USER
#undef MULTI_USER
#ifdef CONFIG_CPU_COPY_V3
# ifdef _USER
# define MULTI_USER 1
# else
# define _USER v3
# endif
#endif
#ifdef CONFIG_CPU_COPY_V4WT
# ifdef _USER
# define MULTI_USER 1

View File

@ -69,8 +69,6 @@
*/
#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Valid */
#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
#define L_PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
#define L_PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */

View File

@ -249,6 +249,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
return regs->ARM_sp;
}
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
return regs->ARM_sp;
}
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */

View File

@ -0,0 +1,93 @@
/*
* Access to user system call parameters and results
*
* See asm-generic/syscall.h for descriptions of what we must do here.
*/
#ifndef _ASM_ARM_SYSCALL_H
#define _ASM_ARM_SYSCALL_H
#include <linux/err.h>
extern const unsigned long sys_call_table[];
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
return task_thread_info(task)->syscall;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
regs->ARM_r0 = regs->ARM_ORIG_r0;
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
unsigned long error = regs->ARM_r0;
return IS_ERR_VALUE(error) ? error : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{
return regs->ARM_r0;
}
static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
regs->ARM_r0 = (long) error ? error : val;
}
#define SYSCALL_MAX_ARGS 7
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args)
{
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
memset(args_bad, 0, n_bad * sizeof(args[0]));
n = SYSCALL_MAX_ARGS - i;
}
if (i == 0) {
args[0] = regs->ARM_ORIG_r0;
args++;
i++;
n--;
}
memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args)
{
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
n = SYSCALL_MAX_ARGS - i;
}
if (i == 0) {
regs->ARM_ORIG_r0 = args[0];
args++;
i++;
n--;
}
memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
}
#endif /* _ASM_ARM_SYSCALL_H */

View File

@ -153,6 +153,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
#define TIF_SECCOMP 21
#define TIF_SWITCH_MM 22 /* deferred switch_mm */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)

View File

@ -65,21 +65,6 @@
#define MULTI_TLB 1
#endif
#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags v3_tlb_flags
# define v3_always_flags v3_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v3
# endif
#else
# define v3_possible_flags 0
# define v3_always_flags (-1UL)
#endif
#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE)
#ifdef CONFIG_CPU_TLB_V4WT
@ -298,8 +283,7 @@ extern struct cpu_tlb_fns cpu_tlb;
* implemented the "%?" method, but this has been discontinued due to too
* many people getting it wrong.
*/
#define possible_tlb_flags (v3_possible_flags | \
v4_possible_flags | \
#define possible_tlb_flags (v4_possible_flags | \
v4wbi_possible_flags | \
fr_possible_flags | \
v4wb_possible_flags | \
@ -307,8 +291,7 @@ extern struct cpu_tlb_fns cpu_tlb;
v6wbi_possible_flags | \
v7wbi_possible_flags)
#define always_tlb_flags (v3_always_flags & \
v4_always_flags & \
#define always_tlb_flags (v4_always_flags & \
v4wbi_always_flags & \
fr_always_flags & \
v4wb_always_flags & \

View File

@ -34,6 +34,7 @@ obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o

View File

@ -0,0 +1,350 @@
/*
* linux/arch/arm/kernel/arch_timer.c
*
* Copyright (C) 2011 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <asm/cputype.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/system_info.h>
#include <asm/sched_clock.h>
static unsigned long arch_timer_rate;
static int arch_timer_ppi;
static int arch_timer_ppi2;
static struct clock_event_device __percpu **arch_timer_evt;
/*
* Architected system timer support.
*/
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
#define ARCH_TIMER_REG_CTRL 0
#define ARCH_TIMER_REG_FREQ 1
#define ARCH_TIMER_REG_TVAL 2
static void arch_timer_reg_write(int reg, u32 val)
{
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
break;
case ARCH_TIMER_REG_TVAL:
asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
break;
}
isb();
}
static u32 arch_timer_reg_read(int reg)
{
u32 val;
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
break;
case ARCH_TIMER_REG_FREQ:
asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
break;
case ARCH_TIMER_REG_TVAL:
asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
break;
default:
BUG();
}
return val;
}
static irqreturn_t arch_timer_handler(int irq, void *dev_id)
{
struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
unsigned long ctrl;
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
evt->event_handler(evt);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static void arch_timer_disable(void)
{
unsigned long ctrl;
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
}
static void arch_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
{
switch (mode) {
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
arch_timer_disable();
break;
default:
break;
}
}
static int arch_timer_set_next_event(unsigned long evt,
struct clock_event_device *unused)
{
unsigned long ctrl;
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
return 0;
}
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
/* Be safe... */
arch_timer_disable();
clk->features = CLOCK_EVT_FEAT_ONESHOT;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->set_mode = arch_timer_set_mode;
clk->set_next_event = arch_timer_set_next_event;
clk->irq = arch_timer_ppi;
clockevents_config_and_register(clk, arch_timer_rate,
0xf, 0x7fffffff);
*__this_cpu_ptr(arch_timer_evt) = clk;
enable_percpu_irq(clk->irq, 0);
if (arch_timer_ppi2)
enable_percpu_irq(arch_timer_ppi2, 0);
return 0;
}
/* Is the optional system timer available? */
static int local_timer_is_architected(void)
{
return (cpu_architecture() >= CPU_ARCH_ARMv7) &&
((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
}
static int arch_timer_available(void)
{
unsigned long freq;
if (!local_timer_is_architected())
return -ENXIO;
if (arch_timer_rate == 0) {
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
/* Check the timer frequency. */
if (freq == 0) {
pr_warn("Architected timer frequency not available\n");
return -EINVAL;
}
arch_timer_rate = freq;
}
pr_info_once("Architected local timer running at %lu.%02luMHz.\n",
arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
return 0;
}
static inline cycle_t arch_counter_get_cntpct(void)
{
u32 cvall, cvalh;
asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
return ((cycle_t) cvalh << 32) | cvall;
}
static inline cycle_t arch_counter_get_cntvct(void)
{
u32 cvall, cvalh;
asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
return ((cycle_t) cvalh << 32) | cvall;
}
static u32 notrace arch_counter_get_cntvct32(void)
{
cycle_t cntvct = arch_counter_get_cntvct();
/*
* The sched_clock infrastructure only knows about counters
* with at most 32bits. Forget about the upper 24 bits for the
* time being...
*/
return (u32)(cntvct & (u32)~0);
}
static cycle_t arch_counter_read(struct clocksource *cs)
{
return arch_counter_get_cntpct();
}
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
clk->irq, smp_processor_id());
disable_percpu_irq(clk->irq);
if (arch_timer_ppi2)
disable_percpu_irq(arch_timer_ppi2);
arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}
static struct local_timer_ops arch_timer_ops __cpuinitdata = {
.setup = arch_timer_setup,
.stop = arch_timer_stop,
};
static struct clock_event_device arch_timer_global_evt;
static int __init arch_timer_register(void)
{
int err;
err = arch_timer_available();
if (err)
return err;
arch_timer_evt = alloc_percpu(struct clock_event_device *);
if (!arch_timer_evt)
return -ENOMEM;
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
"arch_timer", arch_timer_evt);
if (err) {
pr_err("arch_timer: can't register interrupt %d (%d)\n",
arch_timer_ppi, err);
goto out_free;
}
if (arch_timer_ppi2) {
err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
"arch_timer", arch_timer_evt);
if (err) {
pr_err("arch_timer: can't register interrupt %d (%d)\n",
arch_timer_ppi2, err);
arch_timer_ppi2 = 0;
goto out_free_irq;
}
}
err = local_timer_register(&arch_timer_ops);
if (err) {
/*
* We couldn't register as a local timer (could be
* because we're on a UP platform, or because some
* other local timer is already present...). Try as a
* global timer instead.
*/
arch_timer_global_evt.cpumask = cpumask_of(0);
err = arch_timer_setup(&arch_timer_global_evt);
}
if (err)
goto out_free_irq;
return 0;
out_free_irq:
free_percpu_irq(arch_timer_ppi, arch_timer_evt);
if (arch_timer_ppi2)
free_percpu_irq(arch_timer_ppi2, arch_timer_evt);
out_free:
free_percpu(arch_timer_evt);
return err;
}
static const struct of_device_id arch_timer_of_match[] __initconst = {
{ .compatible = "arm,armv7-timer", },
{},
};
int __init arch_timer_of_register(void)
{
struct device_node *np;
u32 freq;
np = of_find_matching_node(NULL, arch_timer_of_match);
if (!np) {
pr_err("arch_timer: can't find DT node\n");
return -ENODEV;
}
/* Try to determine the frequency from the device tree or CNTFRQ */
if (!of_property_read_u32(np, "clock-frequency", &freq))
arch_timer_rate = freq;
arch_timer_ppi = irq_of_parse_and_map(np, 0);
arch_timer_ppi2 = irq_of_parse_and_map(np, 1);
pr_info("arch_timer: found %s irqs %d %d\n",
np->name, arch_timer_ppi, arch_timer_ppi2);
return arch_timer_register();
}
int __init arch_timer_sched_clock_init(void)
{
int err;
err = arch_timer_available();
if (err)
return err;
setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate);
return 0;
}

View File

@ -374,16 +374,29 @@ EXPORT_SYMBOL(pcibios_fixup_bus);
#endif
/*
* Swizzle the device pin each time we cross a bridge.
* This might update pin and returns the slot number.
* Swizzle the device pin each time we cross a bridge. If a platform does
* not provide a swizzle function, we perform the standard PCI swizzling.
*
* The default swizzling walks up the bus tree one level at a time, applying
* the standard swizzle function at each step, stopping when it finds the PCI
* root bus. This will return the slot number of the bridge device on the
* root bus and the interrupt pin on that device which should correspond
* with the downstream device interrupt.
*
* Platforms may override this, in which case the slot and pin returned
* depend entirely on the platform code. However, please note that the
* PCI standard swizzle is implemented on plug-in cards and Cardbus based
* PCI extenders, so it can not be ignored.
*/
static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin)
{
struct pci_sys_data *sys = dev->sysdata;
int slot = 0, oldpin = *pin;
int slot, oldpin = *pin;
if (sys->swizzle)
slot = sys->swizzle(dev, pin);
else
slot = pci_common_swizzle(dev, pin);
if (debug_pci)
printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
@ -410,7 +423,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq;
}
static void __init pcibios_init_hw(struct hw_pci *hw)
static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
{
struct pci_sys_data *sys = NULL;
int ret;
@ -424,7 +437,6 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
#ifdef CONFIG_PCI_DOMAINS
sys->domain = hw->domain;
#endif
sys->hw = hw;
sys->busnr = busnr;
sys->swizzle = hw->swizzle;
sys->map_irq = hw->map_irq;
@ -440,14 +452,18 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
&iomem_resource, sys->mem_offset);
}
sys->bus = hw->scan(nr, sys);
if (hw->scan)
sys->bus = hw->scan(nr, sys);
else
sys->bus = pci_scan_root_bus(NULL, sys->busnr,
hw->ops, sys, &sys->resources);
if (!sys->bus)
panic("PCI: unable to scan bus!");
busnr = sys->bus->subordinate + 1;
list_add(&sys->node, &hw->buses);
list_add(&sys->node, head);
} else {
kfree(sys);
if (ret < 0)
@ -459,19 +475,18 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
void __init pci_common_init(struct hw_pci *hw)
{
struct pci_sys_data *sys;
INIT_LIST_HEAD(&hw->buses);
LIST_HEAD(head);
pci_add_flags(PCI_REASSIGN_ALL_RSRC);
if (hw->preinit)
hw->preinit();
pcibios_init_hw(hw);
pcibios_init_hw(hw, &head);
if (hw->postinit)
hw->postinit();
pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
list_for_each_entry(sys, &hw->buses, node) {
list_for_each_entry(sys, &head, node) {
struct pci_bus *bus = sys->bus;
if (!pci_has_flag(PCI_PROBE_ONLY)) {
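The "standard PCI swizzling" referred to above is the usual bridge rule; roughly, pci_common_swizzle() behaves like the following illustrative sketch (not a copy of the drivers/pci implementation):

static u8 example_std_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	/* Walk up towards the root bus, rotating the pin at each bridge. */
	while (!pci_is_root_bus(dev->bus)) {
		pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
		dev = dev->bus->self;
	}
	*pinp = pin;
	/* Slot of the device/bridge that sits on the root bus. */
	return PCI_SLOT(dev->devfn);
}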

View File

@ -556,10 +556,6 @@ call_fpe:
#endif
tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
and r8, r0, #0x0f000000 @ mask out op-code bits
teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)?
#endif
moveq pc, lr
get_thread_info r10 @ get current thread
and r8, r0, #0x00000f00 @ mask out CP number

View File

@ -335,20 +335,6 @@ ENDPROC(ftrace_stub)
*-----------------------------------------------------------------------------
*/
/* If we're optimising for StrongARM the resulting code won't
run on an ARM7 and we can save a couple of instructions.
--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE
subs pc, lr, #4
#else
#define A710(code...)
#endif
.align 5
ENTRY(vector_swi)
sub sp, sp, #S_FRAME_SIZE
@ -379,9 +365,6 @@ ENTRY(vector_swi)
ldreq r10, [lr, #-4] @ get SWI instruction
#else
ldr r10, [lr, #-4] @ get SWI instruction
A710( and ip, r10, #0x0f000000 @ check for SWI )
A710( teq ip, #0x0f000000 )
A710( bne .Larm710bug )
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
rev r10, r10 @ little endian instruction
@ -392,26 +375,15 @@ ENTRY(vector_swi)
/*
* Pure EABI user space always put syscall number into scno (r7).
*/
A710( ldr ip, [lr, #-4] @ get SWI instruction )
A710( and ip, ip, #0x0f000000 @ check for SWI )
A710( teq ip, #0x0f000000 )
A710( bne .Larm710bug )
#elif defined(CONFIG_ARM_THUMB)
/* Legacy ABI only, possibly thumb mode. */
tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
ldreq scno, [lr, #-4]
#else
/* Legacy ABI only. */
ldr scno, [lr, #-4] @ get SWI instruction
A710( and ip, scno, #0x0f000000 @ check for SWI )
A710( teq ip, #0x0f000000 )
A710( bne .Larm710bug )
#endif
#ifdef CONFIG_ALIGNMENT_TRAP

View File

@ -277,10 +277,6 @@ __create_page_tables:
mov r3, r3, lsl #PMD_ORDER
add r0, r4, r3
rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
cmp r3, #0x0800 @ limit to 512MB
movhi r3, #0x0800
add r6, r0, r3
mov r3, r7, lsr #SECTION_SHIFT
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
orr r3, r7, r3, lsl #SECTION_SHIFT
@ -289,13 +285,10 @@ __create_page_tables:
#else
orr r3, r3, #PMD_SECT_XN
#endif
1: str r3, [r0], #4
str r3, [r0], #4
#ifdef CONFIG_ARM_LPAE
str r7, [r0], #4
#endif
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
blo 1b
#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
/* we don't need any serial debugging mappings */

View File

@ -24,6 +24,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
@ -918,8 +919,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
if (!(current->ptrace & PT_PTRACED))
return scno;
current_thread_info()->syscall = scno;
@ -930,19 +929,11 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
ip = regs->ARM_ip;
regs->ARM_ip = why;
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
if (why)
tracehook_report_syscall_exit(regs, 0);
else if (tracehook_report_syscall_entry(regs))
current_thread_info()->syscall = -1;
regs->ARM_ip = ip;
return current_thread_info()->syscall;

View File

@ -589,6 +589,8 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
*/
block_sigmask(ka, sig);
tracehook_signal_handler(sig, info, ka, regs, 0);
return 0;
}

View File

@ -454,6 +454,9 @@ static struct local_timer_ops *lt_ops;
#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
if (!is_smp() || !setup_max_cpus)
return -ENXIO;
if (lt_ops)
return -EBUSY;

View File

@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
@ -74,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
unsigned int val;
int cpu = smp_processor_id();
int cpu = cpu_logical_map(smp_processor_id());
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;

View File

@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
@ -67,8 +68,7 @@ static int __init thumbee_init(void)
if (cpu_arch < CPU_ARCH_ARMv7)
return 0;
/* processor feature register 0 */
asm("mrc p15, 0, %0, c0, c1, 0\n" : "=r" (pfr0));
pfr0 = read_cpuid_ext(CPUID_EXT_PFR0);
if ((pfr0 & 0x0000f000) != 0x00001000)
return 0;

View File

@ -110,6 +110,42 @@ void timer_tick(void)
}
#endif
static void dummy_clock_access(struct timespec *ts)
{
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
static clock_access_fn __read_persistent_clock = dummy_clock_access;
static clock_access_fn __read_boot_clock = dummy_clock_access;
void read_persistent_clock(struct timespec *ts)
{
__read_persistent_clock(ts);
}
void read_boot_clock(struct timespec *ts)
{
__read_boot_clock(ts);
}
int __init register_persistent_clock(clock_access_fn read_boot,
clock_access_fn read_persistent)
{
/* Only allow the clockaccess functions to be registered once */
if (__read_persistent_clock == dummy_clock_access &&
__read_boot_clock == dummy_clock_access) {
if (read_boot)
__read_boot_clock = read_boot;
if (read_persistent)
__read_persistent_clock = read_persistent;
return 0;
}
return -EINVAL;
}
#if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS)
static int timer_suspend(void)
{

View File

@ -479,14 +479,14 @@ static int bad_syscall(int n, struct pt_regs *regs)
return regs->ARM_r0;
}
static inline void
static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
struct mm_struct *mm = current->active_mm;
struct vm_area_struct *vma;
if (end < start || flags)
return;
return -EINVAL;
down_read(&mm->mmap_sem);
vma = find_vma(mm, start);
@ -496,9 +496,11 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
if (end > vma->vm_end)
end = vma->vm_end;
flush_cache_user_range(vma, start, end);
up_read(&mm->mmap_sem);
return flush_cache_user_range(start, end);
}
up_read(&mm->mmap_sem);
return -EINVAL;
}
/*
@ -544,8 +546,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
* the specified region).
*/
case NR(cacheflush):
do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
return 0;
return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
case NR(usr26):
if (!(elf_hwcap & HWCAP_26BIT))
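Since do_cache_op() now propagates errors, the ARM-private cacheflush syscall can fail instead of silently succeeding. A hypothetical userspace caller, say a JIT flushing freshly generated code, should check the result (sketch only; __ARM_NR_cacheflush comes from the exported kernel headers):

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>		/* __ARM_NR_cacheflush */

static int flush_code_range(void *start, void *end)
{
	/* The third argument (flags) must currently be zero. */
	if (syscall(__ARM_NR_cacheflush, start, end, 0) != 0)
		return -1;	/* e.g. the range is not mapped in this process */
	return 0;
}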

View File

@ -17,30 +17,13 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
call_with_stack.o
mmu-y := clear_user.o copy_page.o getuser.o putuser.o
# the code in uaccess.S is not preemption safe and
# probably faster on ARMv3 only
ifeq ($(CONFIG_PREEMPT),y)
mmu-y += copy_from_user.o copy_to_user.o
else
ifneq ($(CONFIG_CPU_32v3),y)
mmu-y += copy_from_user.o copy_to_user.o
else
mmu-y += uaccess.o
endif
endif
mmu-y += copy_from_user.o copy_to_user.o
# using lib_ here won't override already available weak symbols
obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
lib-$(CONFIG_MMU) += $(mmu-y)
ifeq ($(CONFIG_CPU_32v3),y)
lib-y += io-readsw-armv3.o io-writesw-armv3.o
else
lib-y += io-readsw-armv4.o io-writesw-armv4.o
endif
lib-$(CONFIG_MMU) += $(mmu-y)
lib-y += io-readsw-armv4.o io-writesw-armv4.o
lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
lib-$(CONFIG_ARCH_SHARK) += io-shark.o

View File

@ -1,106 +0,0 @@
/*
* linux/arch/arm/lib/io-readsw-armv3.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.Linsw_bad_alignment:
adr r0, .Linsw_bad_align_msg
mov r2, lr
b panic
.Linsw_bad_align_msg:
.asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
.align
.Linsw_align: tst r1, #1
bne .Linsw_bad_alignment
ldr r3, [r0]
strb r3, [r1], #1
mov r3, r3, lsr #8
strb r3, [r1], #1
subs r2, r2, #1
moveq pc, lr
ENTRY(__raw_readsw)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
tst r1, #3
bne .Linsw_align
.Linsw_aligned: mov ip, #0xff
orr ip, ip, ip, lsl #8
stmfd sp!, {r4, r5, r6, lr}
subs r2, r2, #8
bmi .Lno_insw_8
.Linsw_8_lp: ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16
ldr r4, [r0]
and r4, r4, ip
ldr r5, [r0]
orr r4, r4, r5, lsl #16
ldr r5, [r0]
and r5, r5, ip
ldr r6, [r0]
orr r5, r5, r6, lsl #16
ldr r6, [r0]
and r6, r6, ip
ldr lr, [r0]
orr r6, r6, lr, lsl #16
stmia r1!, {r3 - r6}
subs r2, r2, #8
bpl .Linsw_8_lp
tst r2, #7
ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_insw_8: tst r2, #4
beq .Lno_insw_4
ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16
ldr r4, [r0]
and r4, r4, ip
ldr r5, [r0]
orr r4, r4, r5, lsl #16
stmia r1!, {r3, r4}
.Lno_insw_4: tst r2, #2
beq .Lno_insw_2
ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16
str r3, [r1], #4
.Lno_insw_2: tst r2, #1
ldrne r3, [r0]
strneb r3, [r1], #1
movne r3, r3, lsr #8
strneb r3, [r1]
ldmfd sp!, {r4, r5, r6, pc}

View File

@ -1,126 +0,0 @@
/*
* linux/arch/arm/lib/io-writesw-armv3.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.Loutsw_bad_alignment:
adr r0, .Loutsw_bad_align_msg
mov r2, lr
b panic
.Loutsw_bad_align_msg:
.asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
.align
.Loutsw_align: tst r1, #1
bne .Loutsw_bad_alignment
add r1, r1, #2
ldr r3, [r1, #-4]
mov r3, r3, lsr #16
orr r3, r3, r3, lsl #16
str r3, [r0]
subs r2, r2, #1
moveq pc, lr
ENTRY(__raw_writesw)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
tst r1, #3
bne .Loutsw_align
stmfd sp!, {r4, r5, r6, lr}
subs r2, r2, #8
bmi .Lno_outsw_8
.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6}
mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r4, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r4, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r5, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r5, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r6, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r6, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
subs r2, r2, #8
bpl .Loutsw_8_lp
tst r2, #7
ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_outsw_8: tst r2, #4
beq .Lno_outsw_4
ldmia r1!, {r3, r4}
mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r4, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r4, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
.Lno_outsw_4: tst r2, #2
beq .Lno_outsw_2
ldr r3, [r1], #4
mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
.Lno_outsw_2: tst r2, #1
ldrne r3, [r1]
movne ip, r3, lsl #16
orrne ip, ip, ip, lsr #16
strne ip, [r0]
ldmfd sp!, {r4, r5, r6, pc}

View File

@ -1,564 +0,0 @@
/*
* linux/arch/arm/lib/uaccess.S
*
* Copyright (C) 1995, 1996,1997,1998 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Routines to block copy data to/from user memory
* These are highly optimised both for the 4k page size
* and for various alignments.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
.text
#define PAGE_SHIFT 12
/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
* Purpose : copy a block to user memory from kernel memory
* Params : to - user memory
* : from - kernel memory
* : n - number of bytes to copy
* Returns : Number of bytes NOT copied.
*/
.Lc2u_dest_not_aligned:
rsb ip, ip, #4
cmp ip, #2
ldrb r3, [r1], #1
USER( TUSER( strb) r3, [r0], #1) @ May fault
ldrgeb r3, [r1], #1
USER( TUSER( strgeb) r3, [r0], #1) @ May fault
ldrgtb r3, [r1], #1
USER( TUSER( strgtb) r3, [r0], #1) @ May fault
sub r2, r2, ip
b .Lc2u_dest_aligned
ENTRY(__copy_to_user)
stmfd sp!, {r2, r4 - r7, lr}
cmp r2, #4
blt .Lc2u_not_enough
ands ip, r0, #3
bne .Lc2u_dest_not_aligned
.Lc2u_dest_aligned:
ands ip, r1, #3
bne .Lc2u_src_not_aligned
/*
* Seeing as there has to be at least 8 bytes to copy, we can
* copy one word, and force a user-mode page fault...
*/
.Lc2u_0fupi: subs r2, r2, #4
addmi ip, r2, #4
bmi .Lc2u_0nowords
ldr r3, [r1], #4
USER( TUSER( str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
beq .Lc2u_0fupi
/*
* ip = max no. of bytes to copy before needing another "strt" insn
*/
cmp r2, ip
movlt ip, r2
sub r2, r2, ip
subs ip, ip, #32
blt .Lc2u_0rem8lp
.Lc2u_0cpy8lp: ldmia r1!, {r3 - r6}
stmia r0!, {r3 - r6} @ Shouldnt fault
ldmia r1!, {r3 - r6}
subs ip, ip, #32
stmia r0!, {r3 - r6} @ Shouldnt fault
bpl .Lc2u_0cpy8lp
.Lc2u_0rem8lp: cmn ip, #16
ldmgeia r1!, {r3 - r6}
stmgeia r0!, {r3 - r6} @ Shouldnt fault
tst ip, #8
ldmneia r1!, {r3 - r4}
stmneia r0!, {r3 - r4} @ Shouldnt fault
tst ip, #4
ldrne r3, [r1], #4
TUSER( strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3
beq .Lc2u_0fupi
.Lc2u_0nowords: teq ip, #0
beq .Lc2u_finished
.Lc2u_nowords: cmp ip, #2
ldrb r3, [r1], #1
USER( TUSER( strb) r3, [r0], #1) @ May fault
ldrgeb r3, [r1], #1
USER( TUSER( strgeb) r3, [r0], #1) @ May fault
ldrgtb r3, [r1], #1
USER( TUSER( strgtb) r3, [r0], #1) @ May fault
b .Lc2u_finished
.Lc2u_not_enough:
movs ip, r2
bne .Lc2u_nowords
.Lc2u_finished: mov r0, #0
ldmfd sp!, {r2, r4 - r7, pc}
.Lc2u_src_not_aligned:
bic r1, r1, #3
ldr r7, [r1], #4
cmp ip, #2
bgt .Lc2u_3fupi
beq .Lc2u_2fupi
.Lc2u_1fupi: subs r2, r2, #4
addmi ip, r2, #4
bmi .Lc2u_1nowords
mov r3, r7, pull #8
ldr r7, [r1], #4
orr r3, r3, r7, push #24
USER( TUSER( str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
beq .Lc2u_1fupi
cmp r2, ip
movlt ip, r2
sub r2, r2, ip
subs ip, ip, #16
blt .Lc2u_1rem8lp
.Lc2u_1cpy8lp: mov r3, r7, pull #8
ldmia r1!, {r4 - r7}
subs ip, ip, #16
orr r3, r3, r4, push #24
mov r4, r4, pull #8
orr r4, r4, r5, push #24
mov r5, r5, pull #8
orr r5, r5, r6, push #24
mov r6, r6, pull #8
orr r6, r6, r7, push #24
stmia r0!, {r3 - r6} @ Shouldnt fault
bpl .Lc2u_1cpy8lp
.Lc2u_1rem8lp: tst ip, #8
movne r3, r7, pull #8
ldmneia r1!, {r4, r7}
orrne r3, r3, r4, push #24
movne r4, r4, pull #8
orrne r4, r4, r7, push #24
stmneia r0!, {r3 - r4} @ Shouldnt fault
tst ip, #4
movne r3, r7, pull #8
ldrne r7, [r1], #4
orrne r3, r3, r7, push #24
TUSER( strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3
beq .Lc2u_1fupi
.Lc2u_1nowords: mov r3, r7, get_byte_1
teq ip, #0
beq .Lc2u_finished
cmp ip, #2
USER( TUSER( strb) r3, [r0], #1) @ May fault
movge r3, r7, get_byte_2
USER( TUSER( strgeb) r3, [r0], #1) @ May fault
movgt r3, r7, get_byte_3
USER( TUSER( strgtb) r3, [r0], #1) @ May fault
b .Lc2u_finished
.Lc2u_2fupi: subs r2, r2, #4
addmi ip, r2, #4
bmi .Lc2u_2nowords
mov r3, r7, pull #16
ldr r7, [r1], #4
orr r3, r3, r7, push #16
USER( TUSER( str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
beq .Lc2u_2fupi
cmp r2, ip
movlt ip, r2
sub r2, r2, ip
subs ip, ip, #16
blt .Lc2u_2rem8lp
.Lc2u_2cpy8lp: mov r3, r7, pull #16
ldmia r1!, {r4 - r7}
subs ip, ip, #16
orr r3, r3, r4, push #16
mov r4, r4, pull #16
orr r4, r4, r5, push #16
mov r5, r5, pull #16
orr r5, r5, r6, push #16
mov r6, r6, pull #16
orr r6, r6, r7, push #16
stmia r0!, {r3 - r6} @ Shouldnt fault
bpl .Lc2u_2cpy8lp
.Lc2u_2rem8lp: tst ip, #8
movne r3, r7, pull #16
ldmneia r1!, {r4, r7}
orrne r3, r3, r4, push #16
movne r4, r4, pull #16
orrne r4, r4, r7, push #16
stmneia r0!, {r3 - r4} @ Shouldnt fault
tst ip, #4
movne r3, r7, pull #16
ldrne r7, [r1], #4
orrne r3, r3, r7, push #16
TUSER( strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3
beq .Lc2u_2fupi
.Lc2u_2nowords: mov r3, r7, get_byte_2
teq ip, #0
beq .Lc2u_finished
cmp ip, #2
USER( TUSER( strb) r3, [r0], #1) @ May fault
movge r3, r7, get_byte_3
USER( TUSER( strgeb) r3, [r0], #1) @ May fault
ldrgtb r3, [r1], #0
USER( TUSER( strgtb) r3, [r0], #1) @ May fault
b .Lc2u_finished
.Lc2u_3fupi: subs r2, r2, #4
addmi ip, r2, #4
bmi .Lc2u_3nowords
mov r3, r7, pull #24
ldr r7, [r1], #4
orr r3, r3, r7, push #8
USER( TUSER( str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
beq .Lc2u_3fupi
cmp r2, ip
movlt ip, r2
sub r2, r2, ip
subs ip, ip, #16
blt .Lc2u_3rem8lp
.Lc2u_3cpy8lp: mov r3, r7, pull #24
ldmia r1!, {r4 - r7}
subs ip, ip, #16
orr r3, r3, r4, push #8
mov r4, r4, pull #24
orr r4, r4, r5, push #8
mov r5, r5, pull #24
orr r5, r5, r6, push #8
mov r6, r6, pull #24
orr r6, r6, r7, push #8
stmia r0!, {r3 - r6} @ Shouldnt fault
bpl .Lc2u_3cpy8lp
.Lc2u_3rem8lp: tst ip, #8
movne r3, r7, pull #24
ldmneia r1!, {r4, r7}
orrne r3, r3, r4, push #8
movne r4, r4, pull #24
orrne r4, r4, r7, push #8
stmneia r0!, {r3 - r4} @ Shouldnt fault
tst ip, #4
movne r3, r7, pull #24
ldrne r7, [r1], #4
orrne r3, r3, r7, push #8
TUSER( strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3
beq .Lc2u_3fupi
.Lc2u_3nowords: mov r3, r7, get_byte_3
teq ip, #0
beq .Lc2u_finished
cmp ip, #2
USER( TUSER( strb) r3, [r0], #1) @ May fault
ldrgeb r3, [r1], #1
USER( TUSER( strgeb) r3, [r0], #1) @ May fault
ldrgtb r3, [r1], #0
USER( TUSER( strgtb) r3, [r0], #1) @ May fault
b .Lc2u_finished
ENDPROC(__copy_to_user)
.pushsection .fixup,"ax"
.align 0
9001: ldmfd sp!, {r0, r4 - r7, pc}
.popsection
/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
* Purpose : copy a block from user memory to kernel memory
* Params : to - kernel memory
* : from - user memory
* : n - number of bytes to copy
* Returns : Number of bytes NOT copied.
*/
.Lcfu_dest_not_aligned:
rsb ip, ip, #4
cmp ip, #2
USER( TUSER( ldrb) r3, [r1], #1) @ May fault
strb r3, [r0], #1
USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
strgeb r3, [r0], #1
USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
strgtb r3, [r0], #1
sub r2, r2, ip
b .Lcfu_dest_aligned
ENTRY(__copy_from_user)
stmfd sp!, {r0, r2, r4 - r7, lr}
cmp r2, #4
blt .Lcfu_not_enough
ands ip, r0, #3
bne .Lcfu_dest_not_aligned
.Lcfu_dest_aligned:
ands ip, r1, #3
bne .Lcfu_src_not_aligned
/*
* Seeing as there has to be at least 8 bytes to copy, we can
* copy one word, and force a user-mode page fault...
*/
.Lcfu_0fupi: subs r2, r2, #4
addmi ip, r2, #4
bmi .Lcfu_0nowords
USER( TUSER( ldr) r3, [r1], #4)
str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
beq .Lcfu_0fupi
/*
* ip = max no. of bytes to copy before needing another "strt" insn
*/
cmp r2, ip
movlt ip, r2
sub r2, r2, ip
subs ip, ip, #32
blt .Lcfu_0rem8lp
.Lcfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault
stmia r0!, {r3 - r6}
ldmia r1!, {r3 - r6} @ Shouldnt fault
subs ip, ip, #32
stmia r0!, {r3 - r6}
bpl .Lcfu_0cpy8lp
.Lcfu_0rem8lp: cmn ip, #16
ldmgeia r1!, {r3 - r6} @ Shouldnt fault
stmgeia r0!, {r3 - r6}
tst ip, #8
ldmneia r1!, {r3 - r4} @ Shouldnt fault
stmneia r0!, {r3 - r4}
tst ip, #4
TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault
strne r3, [r0], #4
ands ip, ip, #3
beq .Lcfu_0fupi
.Lcfu_0nowords: teq ip, #0
beq .Lcfu_finished
.Lcfu_nowords: cmp ip, #2
USER( TUSER( ldrb) r3, [r1], #1) @ May fault
strb r3, [r0], #1
USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
strgeb r3, [r0], #1
USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
strgtb r3, [r0], #1
b .Lcfu_finished
.Lcfu_not_enough:
movs ip, r2
bne .Lcfu_nowords
.Lcfu_finished: mov r0, #0
add sp, sp, #8
ldmfd sp!, {r4 - r7, pc}
.Lcfu_src_not_aligned:
bic r1, r1, #3
USER( TUSER( ldr) r7, [r1], #4) @ May fault
cmp ip, #2
bgt .Lcfu_3fupi
beq .Lcfu_2fupi
.Lcfu_1fupi: subs r2, r2, #4
addmi ip, r2, #4
bmi .Lcfu_1nowords
mov r3, r7, pull #8
USER( TUSER( ldr) r7, [r1], #4) @ May fault
orr r3, r3, r7, push #24
str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
beq .Lcfu_1fupi
cmp r2, ip
movlt ip, r2
sub r2, r2, ip
subs ip, ip, #16
blt .Lcfu_1rem8lp
.Lcfu_1cpy8lp: mov r3, r7, pull #8
ldmia r1!, {r4 - r7} @ Shouldnt fault
subs ip, ip, #16
orr r3, r3, r4, push #24
mov r4, r4, pull #8
orr r4, r4, r5, push #24
mov r5, r5, pull #8
orr r5, r5, r6, push #24
mov r6, r6, pull #8
orr r6, r6, r7, push #24
stmia r0!, {r3 - r6}
bpl .Lcfu_1cpy8lp
.Lcfu_1rem8lp: tst ip, #8
movne r3, r7, pull #8
ldmneia r1!, {r4, r7} @ Shouldnt fault
orrne r3, r3, r4, push #24
movne r4, r4, pull #8
orrne r4, r4, r7, push #24
stmneia r0!, {r3 - r4}
tst ip, #4
movne r3, r7, pull #8
USER( TUSER( ldrne) r7, [r1], #4) @ May fault
orrne r3, r3, r7, push #24
strne r3, [r0], #4
ands ip, ip, #3
beq .Lcfu_1fupi
.Lcfu_1nowords: mov r3, r7, get_byte_1
teq ip, #0
beq .Lcfu_finished
cmp ip, #2
strb r3, [r0], #1
movge r3, r7, get_byte_2
strgeb r3, [r0], #1
movgt r3, r7, get_byte_3
strgtb r3, [r0], #1
b .Lcfu_finished
.Lcfu_2fupi: subs r2, r2, #4
addmi ip, r2, #4
bmi .Lcfu_2nowords
mov r3, r7, pull #16
USER( TUSER( ldr) r7, [r1], #4) @ May fault
orr r3, r3, r7, push #16
str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
beq .Lcfu_2fupi
cmp r2, ip
movlt ip, r2
sub r2, r2, ip
subs ip, ip, #16
blt .Lcfu_2rem8lp
.Lcfu_2cpy8lp: mov r3, r7, pull #16
ldmia r1!, {r4 - r7} @ Shouldnt fault
subs ip, ip, #16
orr r3, r3, r4, push #16
mov r4, r4, pull #16
orr r4, r4, r5, push #16
mov r5, r5, pull #16
orr r5, r5, r6, push #16
mov r6, r6, pull #16
orr r6, r6, r7, push #16
stmia r0!, {r3 - r6}
bpl .Lcfu_2cpy8lp
.Lcfu_2rem8lp: tst ip, #8
movne r3, r7, pull #16
ldmneia r1!, {r4, r7} @ Shouldnt fault
orrne r3, r3, r4, push #16
movne r4, r4, pull #16
orrne r4, r4, r7, push #16
stmneia r0!, {r3 - r4}
tst ip, #4
movne r3, r7, pull #16
USER( TUSER( ldrne) r7, [r1], #4) @ May fault
orrne r3, r3, r7, push #16
strne r3, [r0], #4
ands ip, ip, #3
beq .Lcfu_2fupi
.Lcfu_2nowords: mov r3, r7, get_byte_2
teq ip, #0
beq .Lcfu_finished
cmp ip, #2
strb r3, [r0], #1
movge r3, r7, get_byte_3
strgeb r3, [r0], #1
USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault
strgtb r3, [r0], #1
b .Lcfu_finished
.Lcfu_3fupi: subs r2, r2, #4
addmi ip, r2, #4
bmi .Lcfu_3nowords
mov r3, r7, pull #24
USER( TUSER( ldr) r7, [r1], #4) @ May fault
orr r3, r3, r7, push #8
str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
beq .Lcfu_3fupi
cmp r2, ip
movlt ip, r2
sub r2, r2, ip
subs ip, ip, #16
blt .Lcfu_3rem8lp
.Lcfu_3cpy8lp: mov r3, r7, pull #24
ldmia r1!, {r4 - r7} @ Shouldnt fault
orr r3, r3, r4, push #8
mov r4, r4, pull #24
orr r4, r4, r5, push #8
mov r5, r5, pull #24
orr r5, r5, r6, push #8
mov r6, r6, pull #24
orr r6, r6, r7, push #8
stmia r0!, {r3 - r6}
subs ip, ip, #16
bpl .Lcfu_3cpy8lp
.Lcfu_3rem8lp: tst ip, #8
movne r3, r7, pull #24
ldmneia r1!, {r4, r7} @ Shouldnt fault
orrne r3, r3, r4, push #8
movne r4, r4, pull #24
orrne r4, r4, r7, push #8
stmneia r0!, {r3 - r4}
tst ip, #4
movne r3, r7, pull #24
USER( TUSER( ldrne) r7, [r1], #4) @ May fault
orrne r3, r3, r7, push #8
strne r3, [r0], #4
ands ip, ip, #3
beq .Lcfu_3fupi
.Lcfu_3nowords: mov r3, r7, get_byte_3
teq ip, #0
beq .Lcfu_finished
cmp ip, #2
strb r3, [r0], #1
USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
strgeb r3, [r0], #1
USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
strgtb r3, [r0], #1
b .Lcfu_finished
ENDPROC(__copy_from_user)
.pushsection .fixup,"ax"
.align 0
/*
* We took an exception. r0 contains a pointer to
* the byte not copied.
*/
9001: ldr r2, [sp], #4 @ void *to
sub r2, r0, r2 @ bytes copied
ldr r1, [sp], #4 @ unsigned long count
subs r4, r1, r2 @ bytes left to copy
movne r1, r4
blne __memzero
mov r0, r4
ldmfd sp!, {r4 - r7, pc}
.popsection
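For orientation only (this note and the sketch below are not part of the commit): the __copy_to_user/__copy_from_user routines above are the ARM back ends for the kernel's copy_to_user()/copy_from_user() helpers and, as the prototype comments state, they return the number of bytes NOT copied. A minimal caller therefore treats any non-zero return as a fault; the example_* function names here are invented for illustration.

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Illustrative caller: any non-zero return means a partial copy. */
static long example_fill_from_user(void *kbuf, const void __user *ubuf, size_t len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;	/* the fixup above zeroed the uncopied tail of kbuf */
	return 0;
}

static long example_send_to_user(void __user *ubuf, const void *kbuf, size_t len)
{
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return 0;
}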


@ -166,12 +166,6 @@ static struct pci_ops cns3xxx_pcie_ops = {
.write = cns3xxx_pci_write_config,
};
static struct pci_bus *cns3xxx_pci_scan_bus(int nr, struct pci_sys_data *sys)
{
return pci_scan_root_bus(NULL, sys->busnr, &cns3xxx_pcie_ops, sys,
&sys->resources);
}
static int cns3xxx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct cns3xxx_pcie *cnspci = pdev_to_cnspci(dev);
@ -221,10 +215,9 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
.irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
.hw_pci = {
.domain = 0,
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &cns3xxx_pcie_ops,
.setup = cns3xxx_pci_setup,
.scan = cns3xxx_pci_scan_bus,
.map_irq = cns3xxx_pcie_map_irq,
},
},
@ -264,10 +257,9 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
.irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
.hw_pci = {
.domain = 1,
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &cns3xxx_pcie_ops,
.setup = cns3xxx_pci_setup,
.scan = cns3xxx_pci_scan_bus,
.map_irq = cns3xxx_pcie_map_irq,
},
},


@ -43,6 +43,7 @@ static int __init dove_pcie_setup(int nr, struct pci_sys_data *sys)
return 0;
pp = &pcie_port[nr];
sys->private_data = pp;
pp->root_bus_nr = sys->busnr;
/*
@ -93,19 +94,6 @@ static int __init dove_pcie_setup(int nr, struct pci_sys_data *sys)
return 1;
}
static struct pcie_port *bus_to_port(int bus)
{
int i;
for (i = num_pcie_ports - 1; i >= 0; i--) {
int rbus = pcie_port[i].root_bus_nr;
if (rbus != -1 && rbus <= bus)
break;
}
return i >= 0 ? pcie_port + i : NULL;
}
static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
{
/*
@ -121,7 +109,8 @@ static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct pcie_port *pp = bus_to_port(bus->number);
struct pci_sys_data *sys = bus->sysdata;
struct pcie_port *pp = sys->private_data;
unsigned long flags;
int ret;
@ -140,7 +129,8 @@ static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct pcie_port *pp = bus_to_port(bus->number);
struct pci_sys_data *sys = bus->sysdata;
struct pcie_port *pp = sys->private_data;
unsigned long flags;
int ret;
@ -194,14 +184,14 @@ dove_pcie_scan_bus(int nr, struct pci_sys_data *sys)
static int __init dove_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct pcie_port *pp = bus_to_port(dev->bus->number);
struct pci_sys_data *sys = dev->sysdata;
struct pcie_port *pp = sys->private_data;
return pp->index ? IRQ_DOVE_PCIE1 : IRQ_DOVE_PCIE0;
}
static struct hw_pci dove_pci __initdata = {
.nr_controllers = 2,
.swizzle = pci_std_swizzle,
.setup = dove_pcie_setup,
.scan = dove_pcie_scan_bus,
.map_irq = dove_pcie_map_irq,
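This dove conversion (and the kirkwood and mv78xx0 hunks later in the diff) all apply the same pattern: the driver stops mapping a bus number back to its pcie_port with a private bus_to_port() search and instead stores the port in sys->private_data from its .setup() hook, then reads it back via bus->sysdata in the config-space accessors. A compressed sketch of that pattern follows; the example_* names are invented and the hardware access is deliberately simplified.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <asm/mach/pci.h>

struct example_pcie_port {
	void __iomem *base;
};

static struct example_pcie_port example_ports[2];

/* .setup(): attach per-controller data; the ARM PCI core hands the same
 * pci_sys_data back as bus->sysdata for every bus it creates. */
static int __init example_pcie_setup(int nr, struct pci_sys_data *sys)
{
	sys->private_data = &example_ports[nr];
	/* a real driver also registers its I/O and memory resources here */
	return 1;
}

/* Config accessor: recover the port from bus->sysdata, no bus-number search. */
static int example_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pci_sys_data *sys = bus->sysdata;
	struct example_pcie_port *pp = sys->private_data;

	*val = readl(pp->base + (where & ~3));	/* grossly simplified */
	return PCIBIOS_SUCCESSFUL;
}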


@ -16,6 +16,11 @@
/* cats host-specific stuff */
static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
{
return 0;
}
static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->irq >= 255)
@ -39,11 +44,11 @@ static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
* cards being used (ie, pci-pci bridge based cards)?
*/
static struct hw_pci cats_pci __initdata = {
.swizzle = NULL,
.swizzle = cats_no_swizzle,
.map_irq = cats_map_irq,
.nr_controllers = 1,
.ops = &dc21285_ops,
.setup = dc21285_setup,
.scan = dc21285_scan_bus,
.preinit = dc21285_preinit,
.postinit = dc21285_postinit,
};


@ -129,7 +129,7 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops dc21285_ops = {
struct pci_ops dc21285_ops = {
.read = dc21285_read_config,
.write = dc21285_write_config,
};
@ -284,11 +284,6 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)
return 1;
}
struct pci_bus * __init dc21285_scan_bus(int nr, struct pci_sys_data *sys)
{
return pci_scan_root_bus(NULL, 0, &dc21285_ops, sys, &sys->resources);
}
#define dc21285_request_irq(_a, _b, _c, _d, _e) \
WARN_ON(request_irq(_a, _b, _c, _d, _e) < 0)


@ -29,11 +29,10 @@ static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci ebsa285_pci __initdata = {
.swizzle = pci_std_swizzle,
.map_irq = ebsa285_map_irq,
.nr_controllers = 1,
.ops = &dc21285_ops,
.setup = dc21285_setup,
.scan = dc21285_scan_bus,
.preinit = dc21285_preinit,
.postinit = dc21285_postinit,
};


@ -43,11 +43,10 @@ static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci netwinder_pci __initdata = {
.swizzle = pci_std_swizzle,
.map_irq = netwinder_map_irq,
.nr_controllers = 1,
.ops = &dc21285_ops,
.setup = dc21285_setup,
.scan = dc21285_scan_bus,
.preinit = dc21285_preinit,
.postinit = dc21285_postinit,
};


@ -41,8 +41,8 @@ static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
static struct hw_pci personal_server_pci __initdata = {
.map_irq = personal_server_map_irq,
.nr_controllers = 1,
.ops = &dc21285_ops,
.setup = dc21285_setup,
.scan = dc21285_scan_bus,
.preinit = dc21285_preinit,
.postinit = dc21285_postinit,
};


@ -398,24 +398,16 @@ static int impd1_probe(struct lm_device *dev)
struct impd1_device *idev = impd1_devs + i;
struct amba_device *d;
unsigned long pc_base;
char devname[32];
pc_base = dev->resource.start + idev->offset;
d = amba_device_alloc(NULL, pc_base, SZ_4K);
if (!d)
snprintf(devname, 32, "lm%x:%5.5lx", dev->id, idev->offset >> 12);
d = amba_ahb_device_add(&dev->dev, devname, pc_base, SZ_4K,
dev->irq, dev->irq,
idev->platform_data, idev->id);
if (IS_ERR(d)) {
dev_err(&dev->dev, "unable to register device: %ld\n", PTR_ERR(d));
continue;
dev_set_name(&d->dev, "lm%x:%5.5lx", dev->id, idev->offset >> 12);
d->dev.parent = &dev->dev;
d->irq[0] = dev->irq;
d->irq[1] = dev->irq;
d->periphid = idev->id;
d->dev.platform_data = idev->platform_data;
ret = amba_device_add(d, &dev->resource);
if (ret) {
dev_err(&d->dev, "unable to register device: %d\n", ret);
amba_device_put(d);
}
}


@ -1,39 +0,0 @@
/*
* arch/arm/mach-integrator/include/mach/entry-macro.S
*
* Low-level IRQ helper macros for Integrator platforms
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
#include <mach/platform.h>
#include <mach/irqs.h>
.macro get_irqnr_preamble, base, tmp
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
/* FIXME: should not be using soo many LDRs here */
ldr \base, =IO_ADDRESS(INTEGRATOR_IC_BASE)
mov \irqnr, #IRQ_PIC_START
ldr \irqstat, [\base, #IRQ_STATUS] @ get masked status
ldr \base, =IO_ADDRESS(INTEGRATOR_HDR_BASE)
teq \irqstat, #0
ldreq \irqstat, [\base, #(INTEGRATOR_HDR_IC_OFFSET+IRQ_STATUS)]
moveq \irqnr, #IRQ_CIC_START
1001: tst \irqstat, #15
bne 1002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 1001b
1002: tst \irqstat, #1
bne 1003f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 1002b
1003: /* EQ will be set if no irqs pending */
.endm


@ -22,37 +22,37 @@
/*
* Interrupt numbers
*/
#define IRQ_PIC_START 0
#define IRQ_SOFTINT 0
#define IRQ_UARTINT0 1
#define IRQ_UARTINT1 2
#define IRQ_KMIINT0 3
#define IRQ_KMIINT1 4
#define IRQ_TIMERINT0 5
#define IRQ_TIMERINT1 6
#define IRQ_TIMERINT2 7
#define IRQ_RTCINT 8
#define IRQ_AP_EXPINT0 9
#define IRQ_AP_EXPINT1 10
#define IRQ_AP_EXPINT2 11
#define IRQ_AP_EXPINT3 12
#define IRQ_AP_PCIINT0 13
#define IRQ_AP_PCIINT1 14
#define IRQ_AP_PCIINT2 15
#define IRQ_AP_PCIINT3 16
#define IRQ_AP_V3INT 17
#define IRQ_AP_CPINT0 18
#define IRQ_AP_CPINT1 19
#define IRQ_AP_LBUSTIMEOUT 20
#define IRQ_AP_APCINT 21
#define IRQ_CP_CLCDCINT 22
#define IRQ_CP_MMCIINT0 23
#define IRQ_CP_MMCIINT1 24
#define IRQ_CP_AACIINT 25
#define IRQ_CP_CPPLDINT 26
#define IRQ_CP_ETHINT 27
#define IRQ_CP_TSPENINT 28
#define IRQ_PIC_END 31
#define IRQ_PIC_START 1
#define IRQ_SOFTINT 1
#define IRQ_UARTINT0 2
#define IRQ_UARTINT1 3
#define IRQ_KMIINT0 4
#define IRQ_KMIINT1 5
#define IRQ_TIMERINT0 6
#define IRQ_TIMERINT1 7
#define IRQ_TIMERINT2 8
#define IRQ_RTCINT 9
#define IRQ_AP_EXPINT0 10
#define IRQ_AP_EXPINT1 11
#define IRQ_AP_EXPINT2 12
#define IRQ_AP_EXPINT3 13
#define IRQ_AP_PCIINT0 14
#define IRQ_AP_PCIINT1 15
#define IRQ_AP_PCIINT2 16
#define IRQ_AP_PCIINT3 17
#define IRQ_AP_V3INT 18
#define IRQ_AP_CPINT0 19
#define IRQ_AP_CPINT1 20
#define IRQ_AP_LBUSTIMEOUT 21
#define IRQ_AP_APCINT 22
#define IRQ_CP_CLCDCINT 23
#define IRQ_CP_MMCIINT0 24
#define IRQ_CP_MMCIINT1 25
#define IRQ_CP_AACIINT 26
#define IRQ_CP_CPPLDINT 27
#define IRQ_CP_ETHINT 28
#define IRQ_CP_TSPENINT 29
#define IRQ_PIC_END 29
#define IRQ_CIC_START 32
#define IRQ_CM_SOFTINT 32
@ -80,4 +80,3 @@
#define NR_IRQS_INTEGRATOR_AP 34
#define NR_IRQS_INTEGRATOR_CP 47


@ -162,12 +162,6 @@ static void __init ap_map_io(void)
#define INTEGRATOR_SC_VALID_INT 0x003fffff
static struct fpga_irq_data sc_irq_data = {
.base = VA_IC_BASE,
.irq_start = 0,
.chip.name = "SC",
};
static void __init ap_init_irq(void)
{
/* Disable all interrupts initially. */
@ -178,7 +172,8 @@ static void __init ap_init_irq(void)
writel(-1, VA_IC_BASE + IRQ_ENABLE_CLEAR);
writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR);
fpga_irq_init(-1, INTEGRATOR_SC_VALID_INT, &sc_irq_data);
fpga_irq_init(VA_IC_BASE, "SC", IRQ_PIC_START,
-1, INTEGRATOR_SC_VALID_INT, NULL);
}
#ifdef CONFIG_PM
@ -478,6 +473,7 @@ MACHINE_START(INTEGRATOR, "ARM-Integrator")
.nr_irqs = NR_IRQS_INTEGRATOR_AP,
.init_early = integrator_init_early,
.init_irq = ap_init_irq,
.handle_irq = fpga_handle_irq,
.timer = &ap_timer,
.init_machine = ap_init,
.restart = integrator_restart,


@ -143,30 +143,14 @@ static void __init intcp_map_io(void)
iotable_init(intcp_io_desc, ARRAY_SIZE(intcp_io_desc));
}
static struct fpga_irq_data cic_irq_data = {
.base = INTCP_VA_CIC_BASE,
.irq_start = IRQ_CIC_START,
.chip.name = "CIC",
};
static struct fpga_irq_data pic_irq_data = {
.base = INTCP_VA_PIC_BASE,
.irq_start = IRQ_PIC_START,
.chip.name = "PIC",
};
static struct fpga_irq_data sic_irq_data = {
.base = INTCP_VA_SIC_BASE,
.irq_start = IRQ_SIC_START,
.chip.name = "SIC",
};
static void __init intcp_init_irq(void)
{
u32 pic_mask, sic_mask;
u32 pic_mask, cic_mask, sic_mask;
/* These masks are for the HW IRQ registers */
pic_mask = ~((~0u) << (11 - IRQ_PIC_START));
pic_mask |= (~((~0u) << (29 - 22))) << 22;
cic_mask = ~((~0u) << (1 + IRQ_CIC_END - IRQ_CIC_START));
sic_mask = ~((~0u) << (1 + IRQ_SIC_END - IRQ_SIC_START));
/*
@ -179,12 +163,14 @@ static void __init intcp_init_irq(void)
writel(sic_mask, INTCP_VA_SIC_BASE + IRQ_ENABLE_CLEAR);
writel(sic_mask, INTCP_VA_SIC_BASE + FIQ_ENABLE_CLEAR);
fpga_irq_init(-1, pic_mask, &pic_irq_data);
fpga_irq_init(INTCP_VA_PIC_BASE, "PIC", IRQ_PIC_START,
-1, pic_mask, NULL);
fpga_irq_init(-1, ~((~0u) << (1 + IRQ_CIC_END - IRQ_CIC_START)),
&cic_irq_data);
fpga_irq_init(INTCP_VA_CIC_BASE, "CIC", IRQ_CIC_START,
-1, cic_mask, NULL);
fpga_irq_init(IRQ_CP_CPPLDINT, sic_mask, &sic_irq_data);
fpga_irq_init(INTCP_VA_SIC_BASE, "SIC", IRQ_SIC_START,
IRQ_CP_CPPLDINT, sic_mask, NULL);
}
/*
@ -467,6 +453,7 @@ MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP")
.nr_irqs = NR_IRQS_INTEGRATOR_CP,
.init_early = intcp_init_early,
.init_irq = intcp_init_irq,
.handle_irq = fpga_handle_irq,
.timer = &cp_timer,
.init_machine = intcp_init,
.restart = integrator_restart,


@ -70,21 +70,10 @@
*/
static u8 __init integrator_swizzle(struct pci_dev *dev, u8 *pinp)
{
int pin = *pinp;
if (*pinp == 0)
*pinp = 1;
if (pin == 0)
pin = 1;
while (dev->bus->self) {
pin = pci_swizzle_interrupt_pin(dev, pin);
/*
* move up the chain of bridges, swizzling as we go.
*/
dev = dev->bus->self;
}
*pinp = pin;
return PCI_SLOT(dev->devfn);
return pci_common_swizzle(dev, pinp);
}
static int irq_tab[4] __initdata = {
@ -109,7 +98,7 @@ static struct hw_pci integrator_pci __initdata = {
.map_irq = integrator_map_irq,
.setup = pci_v3_setup,
.nr_controllers = 1,
.scan = pci_v3_scan_bus,
.ops = &pci_v3_ops,
.preinit = pci_v3_preinit,
.postinit = pci_v3_postinit,
};


@ -340,7 +340,7 @@ static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_v3_ops = {
struct pci_ops pci_v3_ops = {
.read = v3_read_config,
.write = v3_write_config,
};
@ -488,12 +488,6 @@ int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
return ret;
}
struct pci_bus * __init pci_v3_scan_bus(int nr, struct pci_sys_data *sys)
{
return pci_scan_root_bus(NULL, sys->busnr, &pci_v3_ops, sys,
&sys->resources);
}
/*
* V3_LB_BASE? - local bus address
* V3_LB_MAP? - pci bus address


@ -54,7 +54,6 @@ iq81340mc_pcix_map_irq(const struct pci_dev *dev, u8 idsel, u8 pin)
}
static struct hw_pci iq81340mc_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 0,
.setup = iop13xx_pci_setup,
.map_irq = iq81340mc_pcix_map_irq,


@ -56,7 +56,6 @@ iq81340sc_atux_map_irq(struct pci_dev *dev, u8 idsel, u8 pin)
}
static struct hw_pci iq81340sc_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 0,
.setup = iop13xx_pci_setup,
.scan = iop13xx_scan_bus,


@ -103,11 +103,10 @@ em7210_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci em7210_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &iop3xx_ops,
.setup = iop3xx_pci_setup,
.preinit = iop3xx_pci_preinit,
.scan = iop3xx_pci_scan_bus,
.map_irq = em7210_pci_map_irq,
};


@ -96,11 +96,10 @@ glantank_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci glantank_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &iop3xx_ops,
.setup = iop3xx_pci_setup,
.preinit = iop3xx_pci_preinit,
.scan = iop3xx_pci_scan_bus,
.map_irq = glantank_pci_map_irq,
};


@ -130,11 +130,10 @@ ep80219_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci ep80219_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &iop3xx_ops,
.setup = iop3xx_pci_setup,
.preinit = iop3xx_pci_preinit,
.scan = iop3xx_pci_scan_bus,
.map_irq = ep80219_pci_map_irq,
};
@ -166,11 +165,10 @@ iq31244_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci iq31244_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &iop3xx_ops,
.setup = iop3xx_pci_setup,
.preinit = iop3xx_pci_preinit,
.scan = iop3xx_pci_scan_bus,
.map_irq = iq31244_pci_map_irq,
};


@ -101,11 +101,10 @@ iq80321_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci iq80321_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &iop3xx_ops,
.setup = iop3xx_pci_setup,
.preinit = iop3xx_pci_preinit_cond,
.scan = iop3xx_pci_scan_bus,
.map_irq = iq80321_pci_map_irq,
};


@ -114,11 +114,10 @@ n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci n2100_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &iop3xx_ops,
.setup = iop3xx_pci_setup,
.preinit = iop3xx_pci_preinit,
.scan = iop3xx_pci_scan_bus,
.map_irq = n2100_pci_map_irq,
};


@ -84,11 +84,10 @@ iq80331_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci iq80331_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &iop3xx_ops,
.setup = iop3xx_pci_setup,
.preinit = iop3xx_pci_preinit_cond,
.scan = iop3xx_pci_scan_bus,
.map_irq = iq80331_pci_map_irq,
};


@ -84,11 +84,10 @@ iq80332_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
static struct hw_pci iq80332_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 1,
.ops = &iop3xx_ops,
.setup = iop3xx_pci_setup,
.preinit = iop3xx_pci_preinit_cond,
.scan = iop3xx_pci_scan_bus,
.map_irq = iq80332_pci_map_irq,
};


@ -141,13 +141,6 @@ static struct pci_ops enp2611_pci_ops = {
.write = enp2611_pci_write_config
};
static struct pci_bus * __init enp2611_pci_scan_bus(int nr,
struct pci_sys_data *sys)
{
return pci_scan_root_bus(NULL, sys->busnr, &enp2611_pci_ops, sys,
&sys->resources);
}
static int __init enp2611_pci_map_irq(const struct pci_dev *dev, u8 slot,
u8 pin)
{
@ -180,9 +173,9 @@ static int __init enp2611_pci_map_irq(const struct pci_dev *dev, u8 slot,
struct hw_pci enp2611_pci __initdata = {
.nr_controllers = 1,
.ops = &enp2611_pci_ops,
.setup = enp2611_pci_setup,
.preinit = enp2611_pci_preinit,
.scan = enp2611_pci_scan_bus,
.map_irq = enp2611_pci_map_irq,
};


@ -127,10 +127,10 @@ unsigned long ixp2000_gettimeoffset(void);
struct pci_sys_data;
extern struct pci_ops ixp2000_pci_ops;
u32 *ixp2000_pci_config_addr(unsigned int bus, unsigned int devfn, int where);
void ixp2000_pci_preinit(void);
int ixp2000_pci_setup(int, struct pci_sys_data*);
struct pci_bus* ixp2000_pci_scan_bus(int, struct pci_sys_data*);
int ixp2000_pci_read_config(struct pci_bus*, unsigned int, int, int, u32 *);
int ixp2000_pci_write_config(struct pci_bus*, unsigned int, int, int, u32);


@ -146,10 +146,10 @@ static void ixdp2400_pci_postinit(void)
static struct hw_pci ixdp2400_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp2000_pci_ops,
.setup = ixdp2400_pci_setup,
.preinit = ixdp2400_pci_preinit,
.postinit = ixdp2400_pci_postinit,
.scan = ixp2000_pci_scan_bus,
.map_irq = ixdp2400_pci_map_irq,
};


@ -246,10 +246,10 @@ static void __init ixdp2800_pci_postinit(void)
struct __initdata hw_pci ixdp2800_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp2000_pci_ops,
.setup = ixdp2800_pci_setup,
.preinit = ixdp2800_pci_preinit,
.postinit = ixdp2800_pci_postinit,
.scan = ixp2000_pci_scan_bus,
.map_irq = ixdp2800_pci_map_irq,
};


@ -327,9 +327,9 @@ static int ixdp2x01_pci_setup(int nr, struct pci_sys_data *sys)
struct hw_pci ixdp2x01_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp2000_pci_ops,
.setup = ixdp2x01_pci_setup,
.preinit = ixdp2x01_pci_preinit,
.scan = ixp2000_pci_scan_bus,
.map_irq = ixdp2x01_pci_map_irq,
};


@ -124,17 +124,11 @@ int ixp2000_pci_write_config(struct pci_bus *bus, unsigned int devfn, int where,
}
static struct pci_ops ixp2000_pci_ops = {
struct pci_ops ixp2000_pci_ops = {
.read = ixp2000_pci_read_config,
.write = ixp2000_pci_write_config
};
struct pci_bus *ixp2000_pci_scan_bus(int nr, struct pci_sys_data *sysdata)
{
return pci_scan_root_bus(NULL, sysdata->busnr, &ixp2000_pci_ops,
sysdata, &sysdata->resources);
}
int ixp2000_pci_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{


@ -37,7 +37,7 @@ void ixp23xx_sys_init(void);
void ixp23xx_restart(char, const char *);
int ixp23xx_pci_setup(int, struct pci_sys_data *);
void ixp23xx_pci_preinit(void);
struct pci_bus *ixp23xx_pci_scan_bus(int, struct pci_sys_data*);
extern struct pci_ops ixp23xx_pci_ops;
void ixp23xx_pci_slave_init(void);
extern struct sys_timer ixp23xx_timer;


@ -251,9 +251,9 @@ static int __init ixdp2351_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci ixdp2351_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp23xx_pci_ops,
.preinit = ixp23xx_pci_preinit,
.setup = ixp23xx_pci_setup,
.scan = ixp23xx_pci_scan_bus,
.map_irq = ixdp2351_map_irq,
};


@ -140,12 +140,6 @@ struct pci_ops ixp23xx_pci_ops = {
.write = ixp23xx_pci_write_config,
};
struct pci_bus *ixp23xx_pci_scan_bus(int nr, struct pci_sys_data *sysdata)
{
return pci_scan_root_bus(NULL, sysdata->busnr, &ixp23xx_pci_ops,
sysdata, &sysdata->resources);
}
int ixp23xx_pci_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
volatile unsigned long temp;


@ -118,9 +118,9 @@ static void __init roadrunner_pci_preinit(void)
static struct hw_pci roadrunner_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp23xx_pci_ops,
.preinit = roadrunner_pci_preinit,
.setup = ixp23xx_pci_setup,
.scan = ixp23xx_pci_scan_bus,
.map_irq = roadrunner_map_irq,
};


@ -65,10 +65,9 @@ static int __init avila_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci avila_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = avila_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = avila_map_irq,
};


@ -480,12 +480,6 @@ int ixp4xx_setup(int nr, struct pci_sys_data *sys)
return 1;
}
struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
{
return pci_scan_root_bus(NULL, sys->busnr, &ixp4xx_ops, sys,
&sys->resources);
}
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (mask >= SZ_64M - 1)


@ -48,10 +48,9 @@ static int __init coyote_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci coyote_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = coyote_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = coyote_map_irq,
};


@ -62,10 +62,9 @@ static int __init dsmg600_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci __initdata dsmg600_pci = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = dsmg600_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = dsmg600_map_irq,
};


@ -59,10 +59,9 @@ static int __init fsg_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci fsg_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = fsg_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = fsg_map_irq,
};


@ -47,10 +47,9 @@ static int __init gateway7001_map_irq(const struct pci_dev *dev, u8 slot,
struct hw_pci gateway7001_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = gateway7001_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = gateway7001_map_irq,
};


@ -473,11 +473,10 @@ static int __init gmlr_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static struct hw_pci gmlr_hw_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = gmlr_pci_preinit,
.postinit = gmlr_pci_postinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = gmlr_map_irq,
};


@ -67,10 +67,9 @@ static int __init gtwx5715_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci gtwx5715_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = gtwx5715_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = gtwx5715_map_irq,
};


@ -130,7 +130,7 @@ extern void ixp4xx_restart(char, const char *);
extern void ixp4xx_pci_preinit(void);
struct pci_sys_data;
extern int ixp4xx_setup(int nr, struct pci_sys_data *sys);
extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys);
extern struct pci_ops ixp4xx_ops;
/*
* GPIO-functions


@ -60,10 +60,9 @@ static int __init ixdp425_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci ixdp425_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = ixdp425_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = ixdp425_map_irq,
};


@ -42,10 +42,9 @@ static int __init ixdpg425_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci ixdpg425_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = ixdpg425_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = ixdpg425_map_irq,
};


@ -61,10 +61,9 @@ static int __init miccpt_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci miccpt_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = miccpt_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = miccpt_map_irq,
};


@ -58,10 +58,9 @@ static int __init nas100d_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci __initdata nas100d_pci = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = nas100d_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = nas100d_map_irq,
};


@ -54,10 +54,9 @@ static int __init nslu2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci __initdata nslu2_pci = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = nslu2_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = nslu2_map_irq,
};


@ -56,10 +56,9 @@ static int __init vulcan_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci vulcan_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = vulcan_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = vulcan_map_irq,
};


@ -46,10 +46,9 @@ static int __init wg302v2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
struct hw_pci wg302v2_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = wg302v2_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = wg302v2_map_irq,
};


@ -44,12 +44,6 @@ struct pcie_port {
static int pcie_port_map[2];
static int num_pcie_ports;
static inline struct pcie_port *bus_to_port(struct pci_bus *bus)
{
struct pci_sys_data *sys = bus->sysdata;
return sys->private_data;
}
static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
{
/*
@ -79,7 +73,8 @@ static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct pcie_port *pp = bus_to_port(bus);
struct pci_sys_data *sys = bus->sysdata;
struct pcie_port *pp = sys->private_data;
unsigned long flags;
int ret;
@ -98,7 +93,8 @@ static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct pcie_port *pp = bus_to_port(bus);
struct pci_sys_data *sys = bus->sysdata;
struct pcie_port *pp = sys->private_data;
unsigned long flags;
int ret;
@ -248,13 +244,13 @@ kirkwood_pcie_scan_bus(int nr, struct pci_sys_data *sys)
static int __init kirkwood_pcie_map_irq(const struct pci_dev *dev, u8 slot,
u8 pin)
{
struct pcie_port *pp = bus_to_port(dev->bus);
struct pci_sys_data *sys = dev->sysdata;
struct pcie_port *pp = sys->private_data;
return pp->irq;
}
static struct hw_pci kirkwood_pci __initdata = {
.swizzle = pci_std_swizzle,
.setup = kirkwood_pcie_setup,
.scan = kirkwood_pcie_scan_bus,
.map_irq = kirkwood_pcie_map_irq,


@ -141,12 +141,6 @@ static struct pci_ops ks8695_pci_ops = {
.write = ks8695_pci_writeconfig,
};
static struct pci_bus* __init ks8695_pci_scan_bus(int nr, struct pci_sys_data *sys)
{
return pci_scan_root_bus(NULL, sys->busnr, &ks8695_pci_ops, sys,
&sys->resources);
}
static struct resource pci_mem = {
.name = "PCI Memory space",
.start = KS8695_PCIMEM_PA,
@ -302,11 +296,10 @@ static void ks8695_show_pciregs(void)
static struct hw_pci ks8695_pci __initdata = {
.nr_controllers = 1,
.ops = &ks8695_pci_ops,
.preinit = ks8695_pci_preinit,
.setup = ks8695_pci_setup,
.scan = ks8695_pci_scan_bus,
.postinit = NULL,
.swizzle = pci_std_swizzle,
.map_irq = NULL,
};


@ -147,6 +147,7 @@ static int __init mv78xx0_pcie_setup(int nr, struct pci_sys_data *sys)
return 0;
pp = &pcie_port[nr];
sys->private_data = pp;
pp->root_bus_nr = sys->busnr;
/*
@ -161,19 +162,6 @@ static int __init mv78xx0_pcie_setup(int nr, struct pci_sys_data *sys)
return 1;
}
static struct pcie_port *bus_to_port(int bus)
{
int i;
for (i = num_pcie_ports - 1; i >= 0; i--) {
int rbus = pcie_port[i].root_bus_nr;
if (rbus != -1 && rbus <= bus)
break;
}
return i >= 0 ? pcie_port + i : NULL;
}
static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
{
/*
@ -189,7 +177,8 @@ static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct pcie_port *pp = bus_to_port(bus->number);
struct pci_sys_data *sys = bus->sysdata;
struct pcie_port *pp = sys->private_data;
unsigned long flags;
int ret;
@ -208,7 +197,8 @@ static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct pcie_port *pp = bus_to_port(bus->number);
struct pci_sys_data *sys = bus->sysdata;
struct pcie_port *pp = sys->private_data;
unsigned long flags;
int ret;
@ -263,7 +253,8 @@ mv78xx0_pcie_scan_bus(int nr, struct pci_sys_data *sys)
static int __init mv78xx0_pcie_map_irq(const struct pci_dev *dev, u8 slot,
u8 pin)
{
struct pcie_port *pp = bus_to_port(dev->bus->number);
struct pci_sys_data *sys = dev->bus->sysdata;
struct pcie_port *pp = sys->private_data;
return IRQ_MV78XX0_PCIE_00 + (pp->maj << 2) + pp->min;
}
@ -271,7 +262,6 @@ static int __init mv78xx0_pcie_map_irq(const struct pci_dev *dev, u8 slot,
static struct hw_pci mv78xx0_pci __initdata = {
.nr_controllers = 8,
.preinit = mv78xx0_pcie_preinit,
.swizzle = pci_std_swizzle,
.setup = mv78xx0_pcie_setup,
.scan = mv78xx0_pcie_scan_bus,
.map_irq = mv78xx0_pcie_map_irq,


@ -11,10 +11,16 @@
#include <mach/mx23.h>
#include <mach/devices-common.h>
#include <mach/mxsfb.h>
#include <linux/amba/bus.h>
extern const struct amba_device mx23_duart_device __initconst;
#define mx23_add_duart() \
mxs_add_duart(&mx23_duart_device)
static inline int mx23_add_duart(void)
{
struct amba_device *d;
d = amba_ahb_device_add(NULL, "duart", MX23_DUART_BASE_ADDR, SZ_8K,
MX23_INT_DUART, 0, 0, 0);
return IS_ERR(d) ? PTR_ERR(d) : 0;
}
extern const struct mxs_auart_data mx23_auart_data[] __initconst;
#define mx23_add_auart(id) mxs_add_auart(&mx23_auart_data[id])


@ -11,10 +11,16 @@
#include <mach/mx28.h>
#include <mach/devices-common.h>
#include <mach/mxsfb.h>
#include <linux/amba/bus.h>
extern const struct amba_device mx28_duart_device __initconst;
#define mx28_add_duart() \
mxs_add_duart(&mx28_duart_device)
static inline int mx28_add_duart(void)
{
struct amba_device *d;
d = amba_ahb_device_add(NULL, "duart", MX28_DUART_BASE_ADDR, SZ_8K,
MX28_INT_DUART, 0, 0, 0);
return IS_ERR(d) ? PTR_ERR(d) : 0;
}
extern const struct mxs_auart_data mx28_auart_data[] __initconst;
#define mx28_add_auart(id) mxs_add_auart(&mx28_auart_data[id])


@ -75,22 +75,6 @@ struct platform_device *__init mxs_add_platform_device_dmamask(
return pdev;
}
int __init mxs_add_amba_device(const struct amba_device *dev)
{
struct amba_device *adev = amba_device_alloc(dev->dev.init_name,
dev->res.start, resource_size(&dev->res));
if (!adev) {
pr_err("%s: failed to allocate memory", __func__);
return -ENOMEM;
}
adev->irq[0] = dev->irq[0];
adev->irq[1] = dev->irq[1];
return amba_device_add(adev, &iomem_resource);
}
struct device mxs_apbh_bus = {
.init_name = "mxs_apbh",
.parent = &platform_bus,


@ -1,4 +1,3 @@
obj-$(CONFIG_MXS_HAVE_AMBA_DUART) += amba-duart.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_AUART) += platform-auart.o
obj-y += platform-dma.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_FEC) += platform-fec.o


@ -1,40 +0,0 @@
/*
* Copyright (C) 2009-2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
* Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
#include <asm/irq.h>
#include <mach/mx23.h>
#include <mach/mx28.h>
#include <mach/devices-common.h>
#define MXS_AMBA_DUART_DEVICE(name, soc) \
const struct amba_device name##_device __initconst = { \
.dev = { \
.init_name = "duart", \
}, \
.res = { \
.start = soc ## _DUART_BASE_ADDR, \
.end = (soc ## _DUART_BASE_ADDR) + SZ_8K - 1, \
.flags = IORESOURCE_MEM, \
}, \
.irq = {soc ## _INT_DUART}, \
}
#ifdef CONFIG_SOC_IMX23
MXS_AMBA_DUART_DEVICE(mx23_duart, MX23);
#endif
#ifdef CONFIG_SOC_IMX28
MXS_AMBA_DUART_DEVICE(mx28_duart, MX28);
#endif
int __init mxs_add_duart(const struct amba_device *dev)
{
return mxs_add_amba_device(dev);
}


@ -27,11 +27,6 @@ static inline struct platform_device *mxs_add_platform_device(
name, id, res, num_resources, data, size_data, 0);
}
int __init mxs_add_amba_device(const struct amba_device *dev);
/* duart */
int __init mxs_add_duart(const struct amba_device *dev);
/* auart */
struct mxs_auart_data {
int id;


@ -265,7 +265,6 @@ static int __init db88f5281_pci_map_irq(const struct pci_dev *dev, u8 slot,
static struct hw_pci db88f5281_pci __initdata = {
.nr_controllers = 2,
.preinit = db88f5281_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = orion5x_pci_sys_setup,
.scan = orion5x_pci_sys_scan_bus,
.map_irq = db88f5281_pci_map_irq,

Some files were not shown because too many files have changed in this diff.