commit 29ac878a71
Merge master.kernel.org:/home/rmk/linux-2.6-arm

@@ -1,7 +1,7 @@
 		Kernel Memory Layout on ARM Linux

 		Russell King <rmk@arm.linux.org.uk>
-		     May 21, 2004 (2.6.6)
+		November 17, 2005 (2.6.15)

 This document describes the virtual memory layout which the Linux
 kernel uses for ARM processors.  It indicates which regions are

@@ -37,6 +37,8 @@ ff000000	ffbfffff	Reserved for future expansion of DMA
 				mapping region.

 VMALLOC_END	feffffff	Free for platform use, recommended.
+				VMALLOC_END must be aligned to a 2MB
+				boundary.

 VMALLOC_START	VMALLOC_END-1	vmalloc() / ioremap() space.
 				Memory returned by vmalloc/ioremap will

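The new requirement that VMALLOC_END be 2MB aligned follows from the way Linux manages the ARM page tables in 2MB (PGDIR_SIZE) steps, as the pmd_clear() loop in the devicemaps_init() hunk later in this merge shows. A minimal, stand-alone sketch of the alignment check a platform author might apply to a candidate value; the 0xfe000000 value is only an illustration, not taken from this merge:

    #include <stdio.h>

    /* Hypothetical platform value, chosen only for illustration. */
    #define EXAMPLE_VMALLOC_END 0xfe000000UL
    #define SZ_2M               0x00200000UL

    int main(void)
    {
            unsigned long end = EXAMPLE_VMALLOC_END;

            /* VMALLOC_END must sit on a 2MB boundary. */
            if (end & (SZ_2M - 1))
                    printf("0x%lx is NOT 2MB aligned\n", end);
            else
                    printf("0x%lx is 2MB aligned\n", end);
            return 0;
    }
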
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(__arch_strncpy_from_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);

 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);

@@ -48,8 +48,7 @@ work_pending:
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
 	bl	do_notify_resume
-	disable_irq				@ disable interrupts
-	b	no_work_pending
+	b	ret_slow_syscall		@ Check work again

 work_resched:
 	bl	schedule

@@ -595,23 +595,22 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 	 */
 	ret |= !valid_user_regs(regs);

-	/*
-	 * Block the signal if we were unsuccessful.
-	 */
 	if (ret != 0) {
-		spin_lock_irq(&tsk->sighand->siglock);
-		sigorsets(&tsk->blocked, &tsk->blocked,
-			  &ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&tsk->blocked, sig);
-		recalc_sigpending();
-		spin_unlock_irq(&tsk->sighand->siglock);
+		force_sigsegv(sig, tsk);
+		return;
 	}

-	if (ret == 0)
-		return;
+	/*
+	 * Block the signal if we were successful.
+	 */
+	spin_lock_irq(&tsk->sighand->siglock);
+	sigorsets(&tsk->blocked, &tsk->blocked,
+		  &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&tsk->blocked, sig);
+	recalc_sigpending();
+	spin_unlock_irq(&tsk->sighand->siglock);

-	force_sigsegv(sig, tsk);
 }

 /*

@@ -172,6 +172,10 @@ SECTIONS
 	.comment 0 : { *(.comment) }
 }

-/* those must never be empty */
+/*
+ * These must never be empty
+ * If you have to comment these two assert statements out, your
+ * binutils is too old (for other reasons as well)
+ */
 ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
 ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

@@ -54,15 +54,6 @@ __get_user_4:
 	mov	r0, #0
 	mov	pc, lr

-	.global	__get_user_8
-__get_user_8:
-5:	ldrt	r2, [r0], #4
-6:	ldrt	r3, [r0]
-	mov	r0, #0
-	mov	pc, lr
-
-__get_user_bad_8:
-	mov	r3, #0
 __get_user_bad:
 	mov	r2, #0
 	mov	r0, #-EFAULT

@@ -73,6 +64,4 @@ __get_user_bad:
 	.long	2b, __get_user_bad
 	.long	3b, __get_user_bad
 	.long	4b, __get_user_bad
-	.long	5b, __get_user_bad_8
-	.long	6b, __get_user_bad_8
 	.previous

@@ -51,4 +51,4 @@ obj-$(CONFIG_CPU_ARM1026)	+= proc-arm1026.o
 obj-$(CONFIG_CPU_SA110)		+= proc-sa110.o
 obj-$(CONFIG_CPU_SA1100)	+= proc-sa1100.o
 obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
-obj-$(CONFIG_CPU_V6)		+= proc-v6.o blockops.o
+obj-$(CONFIG_CPU_V6)		+= proc-v6.o

@@ -1,185 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <asm/memory.h>
-#include <asm/ptrace.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-
-extern struct cpu_cache_fns blk_cache_fns;
-
-#define HARVARD_CACHE
-
-/*
- * blk_flush_kern_dcache_page(kaddr)
- *
- * Ensure that the data held in the page kaddr is written back
- * to the page in question.
- *
- * - kaddr - kernel address (guaranteed to be page aligned)
- */
-static void __attribute__((naked))
-blk_flush_kern_dcache_page(void *kaddr)
-{
-	asm(
-	"add	r1, r0, %0						\n\
-	sub	r1, r1, %1						\n\
-1:	.word	0xec401f0e	@ mcrr	p15, 0, r0, r1, c14, 0	@ blocking	\n\
-	mov	r0, #0							\n\
-	mcr	p15, 0, r0, c7, c5, 0					\n\
-	mcr	p15, 0, r0, c7, c10, 4					\n\
-	mov	pc, lr"
-	:
-	: "I" (PAGE_SIZE), "I" (L1_CACHE_BYTES));
-}
-
-/*
- * blk_dma_inv_range(start,end)
- *
- * Invalidate the data cache within the specified region; we will
- * be performing a DMA operation in this region and we want to
- * purge old data in the cache.
- *
- * - start - virtual start address of region
- * - end - virtual end address of region
- */
-static void __attribute__((naked))
-blk_dma_inv_range_unified(unsigned long start, unsigned long end)
-{
-	asm(
-	"tst	r0, %0							\n\
-	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line	\n\
-	tst	r1, %0							\n\
-	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line\n\
-	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
-	mov	r0, #0							\n\
-	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
-	mov	pc, lr"
-	:
-	: "I" (L1_CACHE_BYTES - 1));
-}
-
-static void __attribute__((naked))
-blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
-{
-	asm(
-	"tst	r0, %0							\n\
-	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line		\n\
-	tst	r1, %0							\n\
-	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line	\n\
-	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
-	mov	r0, #0							\n\
-	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
-	mov	pc, lr"
-	:
-	: "I" (L1_CACHE_BYTES - 1));
-}
-
-/*
- * blk_dma_clean_range(start,end)
- * - start - virtual start address of region
- * - end - virtual end address of region
- */
-static void __attribute__((naked))
-blk_dma_clean_range(unsigned long start, unsigned long end)
-{
-	asm(
-	".word	0xec401f0c	@ mcrr	p15, 0, r1, r0, c12, 0	@ blocking	\n\
-	mov	r0, #0							\n\
-	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
-	mov	pc, lr");
-}
-
-/*
- * blk_dma_flush_range(start,end)
- * - start - virtual start address of region
- * - end - virtual end address of region
- */
-static void __attribute__((naked))
-blk_dma_flush_range(unsigned long start, unsigned long end)
-{
-	asm(
-	".word	0xec401f0e	@ mcrr	p15, 0, r1, r0, c14, 0	@ blocking	\n\
-	mov	pc, lr");
-}
-
-static int blockops_trap(struct pt_regs *regs, unsigned int instr)
-{
-	regs->ARM_r4 |= regs->ARM_r2;
-	regs->ARM_pc += 4;
-	return 0;
-}
-
-static char *func[] = {
-	"Prefetch data range",
-	"Clean+Invalidate data range",
-	"Clean data range",
-	"Invalidate data range",
-	"Invalidate instr range"
-};
-
-static struct undef_hook blockops_hook __initdata = {
-	.instr_mask	= 0x0fffffd0,
-	.instr_val	= 0x0c401f00,
-	.cpsr_mask	= PSR_T_BIT,
-	.cpsr_val	= 0,
-	.fn		= blockops_trap,
-};
-
-static int __init blockops_check(void)
-{
-	register unsigned int err asm("r4") = 0;
-	unsigned int err_pos = 1;
-	unsigned int cache_type;
-	int i;
-
-	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type));
-
-	printk("Checking V6 block cache operations:\n");
-	register_undef_hook(&blockops_hook);
-
-	__asm__ ("mov	r0, %0\n\t"
-		"mov	r1, %1\n\t"
-		"mov	r2, #1\n\t"
-		".word	0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
-		"mov	r2, #2\n\t"
-		".word	0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
-		"mov	r2, #4\n\t"
-		".word	0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
-		"mov	r2, #8\n\t"
-		".word	0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
-		"mov	r2, #16\n\t"
-		".word	0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
-		:
-		: "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
-		: "r0", "r1", "r2");
-
-	unregister_undef_hook(&blockops_hook);
-
-	for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
-		printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");
-
-	if ((err & 8) == 0) {
-		printk(" --> Using %s block cache invalidate\n",
-		       cache_type & (1 << 24) ? "harvard" : "unified");
-		if (cache_type & (1 << 24))
-			cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
-		else
-			cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
-	}
-	if ((err & 4) == 0) {
-		printk(" --> Using block cache clean\n");
-		cpu_cache.dma_clean_range = blk_dma_clean_range;
-	}
-	if ((err & 2) == 0) {
-		printk(" --> Using block cache clean+invalidate\n");
-		cpu_cache.dma_flush_range = blk_dma_flush_range;
-		cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
-	}
-
-	return 0;
-}
-
-__initcall(blockops_check);

@@ -420,7 +420,8 @@ static void __init bootmem_init(struct meminfo *mi)
  * Set up device the mappings.  Since we clear out the page tables for all
  * mappings above VMALLOC_END, we will remove any debug device mappings.
  * This means you have to be careful how you debug this function, or any
- * called function.  (Do it by code inspection!)
+ * called function.  This means you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
  */
 static void __init devicemaps_init(struct machine_desc *mdesc)
 {

@@ -428,6 +429,12 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	unsigned long addr;
 	void *vectors;

+	/*
+	 * Allocate the vector page early.
+	 */
+	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+	BUG_ON(!vectors);
+
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));

@@ -461,12 +468,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	create_mapping(&map);
 #endif

-	flush_cache_all();
-	local_flush_tlb_all();
-
-	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-	BUG_ON(!vectors);
-
 	/*
 	 * Create a mapping for the machine vectors at the high-vectors
 	 * location (0xffff0000).  If we aren't using high-vectors, also

@@ -491,12 +492,13 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	mdesc->map_io();

 	/*
-	 * Finally flush the tlb again - this ensures that we're in a
-	 * consistent state wrt the writebuffer if the writebuffer needs
-	 * draining.  After this point, we can start to touch devices
-	 * again.
+	 * Finally flush the caches and tlb to ensure that we're in a
+	 * consistent state wrt the writebuffer.  This also ensures that
+	 * any write-allocated cache lines in the vector page are written
+	 * back.  After this point, we can start to touch devices again.
 	 */
 	local_flush_tlb_all();
+	flush_cache_all();
 }

 /*

@@ -130,8 +130,7 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
  * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
  */
 void __iomem *
-__ioremap(unsigned long phys_addr, size_t size, unsigned long flags,
-	  unsigned long align)
+__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
 {
 	void * addr;
 	struct vm_struct * area;

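With the align argument dropped from __ioremap(), callers are expected to go through the ioremap()/iounmap() wrappers instead of passing extra arguments to __ioremap() directly, which is exactly what the MTD and NAND driver hunks below do. A hedged sketch of the calling pattern after this change; the device base address, window size and function names are placeholders rather than values from this merge, and the fragment only builds inside a kernel tree of this vintage:

    #include <linux/errno.h>
    #include <asm/io.h>

    #define EXAMPLE_PHYS 0x08000000    /* hypothetical device window base */
    #define EXAMPLE_SIZE 0x1000        /* hypothetical window size */

    static void __iomem *example_base;

    static int example_map(void)
    {
            /* expands to __ioremap(cookie, size, 0) on the generic path */
            example_base = ioremap(EXAMPLE_PHYS, EXAMPLE_SIZE);
            if (!example_base)
                    return -ENOMEM;
            return 0;
    }

    static void example_unmap(void)
    {
            iounmap(example_base);
    }
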
@@ -246,7 +246,7 @@ int __init ipaq_mtd_init(void)
 			ipaq_map[i].size = h3xxx_max_flash_size;
 			ipaq_map[i].set_vpp = h3xxx_set_vpp;
 			ipaq_map[i].phys = cs_phys[i];
-			ipaq_map[i].virt = __ioremap(cs_phys[i], 0x04000000, 0, 1);
+			ipaq_map[i].virt = ioremap(cs_phys[i], 0x04000000);
 			if (machine_is_h3100 () || machine_is_h1900())
 				ipaq_map[i].bankwidth = 2;
 		}

@@ -280,7 +280,7 @@ int __init ipaq_mtd_init(void)
 		nb_parts = ARRAY_SIZE(jornada_partitions);
 		ipaq_map[0].size = jornada_max_flash_size;
 		ipaq_map[0].set_vpp = jornada56x_set_vpp;
-		ipaq_map[0].virt = (__u32)__ioremap(0x0, 0x04000000, 0, 1);
+		ipaq_map[0].virt = (__u32)ioremap(0x0, 0x04000000);
 	}
 #endif
 #ifdef CONFIG_SA1100_JORNADA720

@@ -442,7 +442,7 @@ static int __init h1900_special_case(void)
 	ipaq_map[0].size = 0x80000;
 	ipaq_map[0].set_vpp = h3xxx_set_vpp;
 	ipaq_map[0].phys = 0x0;
-	ipaq_map[0].virt = __ioremap(0x0, 0x04000000, 0, 1);
+	ipaq_map[0].virt = ioremap(0x0, 0x04000000);
 	ipaq_map[0].bankwidth = 2;

 	printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);

@@ -112,7 +112,7 @@ static int __init h1910_init (void)
 	if (!machine_is_h1900())
 		return -ENODEV;

-	nandaddr = __ioremap(0x08000000, 0x1000, 0, 1);
+	nandaddr = ioremap(0x08000000, 0x1000);
 	if (!nandaddr) {
 		printk("Failed to ioremap nand flash.\n");
 		return -ENOMEM;

@@ -59,11 +59,10 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
 * fallback to the default.
 */
 static inline void __iomem *
-__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned long align)
+__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags)
 {
-	extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
 	if((addr < 0x48000000) || (addr > 0x4fffffff))
-		return __ioremap(addr, size, flags, align);
+		return __ioremap(addr, size, flags);

 	return (void *)addr;
 }

@@ -71,13 +70,11 @@ __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned
 static inline void
 __ixp4xx_iounmap(void __iomem *addr)
 {
-	extern void __iounmap(void __iomem *addr);
-
 	if ((u32)addr >= VMALLOC_START)
 		__iounmap(addr);
 }

-#define	__arch_ioremap(a, s, f, x)	__ixp4xx_ioremap(a, s, f, x)
+#define	__arch_ioremap(a, s, f)		__ixp4xx_ioremap(a, s, f)
 #define	__arch_iounmap(a)		__ixp4xx_iounmap(a)

 #define	writeb(v, p)			__ixp4xx_writeb(v, p)

@@ -54,6 +54,12 @@ extern void __raw_readsl(void __iomem *addr, void *data, int longlen);
 #define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
 #define __raw_readl(a)		(__chk_io_ptr(a), *(volatile unsigned int __force *)(a))

+/*
+ * Architecture ioremap implementation.
+ */
+extern void __iomem * __ioremap(unsigned long, size_t, unsigned long);
+extern void __iounmap(void __iomem *addr);
+
 /*
  * Bad read/write accesses...
  */

@@ -256,18 +262,15 @@ check_signature(void __iomem *io_addr, const unsigned char *signature,
 * ioremap takes a PCI memory address, as specified in
 * Documentation/IO-mapping.txt.
 */
-extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
-extern void __iounmap(void __iomem *addr);
-
 #ifndef __arch_ioremap
-#define ioremap(cookie,size)		__ioremap(cookie,size,0,1)
-#define ioremap_nocache(cookie,size)	__ioremap(cookie,size,0,1)
-#define ioremap_cached(cookie,size)	__ioremap(cookie,size,L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size)		__ioremap(cookie,size,0)
+#define ioremap_nocache(cookie,size)	__ioremap(cookie,size,0)
+#define ioremap_cached(cookie,size)	__ioremap(cookie,size,L_PTE_CACHEABLE)
 #define iounmap(cookie)			__iounmap(cookie)
 #else
-#define ioremap(cookie,size)		__arch_ioremap((cookie),(size),0,1)
-#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie),(size),0,1)
-#define ioremap_cached(cookie,size)	__arch_ioremap((cookie),(size),L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size)		__arch_ioremap((cookie),(size),0)
+#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie),(size),0)
+#define ioremap_cached(cookie,size)	__arch_ioremap((cookie),(size),L_PTE_CACHEABLE)
 #define iounmap(cookie)			__arch_iounmap(cookie)
 #endif

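The #ifndef __arch_ioremap block above is the hook the ixp4xx header earlier in this merge relies on: a machine header that defines __arch_ioremap/__arch_iounmap has its own helpers substituted into ioremap() and friends, which now take three arguments instead of four. A sketch of what such an override could look like after this merge, modelled on the ixp4xx version; the names and the flat-mapped address window are invented for illustration, and the usual asm/io.h declarations of __ioremap/__iounmap are assumed to be in scope:

    /* Hypothetical machine-specific io.h fragment, for illustration only. */
    static inline void __iomem *
    __example_ioremap(unsigned long addr, size_t size, unsigned long flags)
    {
            /* registers in this made-up window are already flat-mapped */
            if (addr >= 0x40000000 && addr < 0x50000000)
                    return (void __iomem *)addr;

            return __ioremap(addr, size, flags);
    }

    static inline void __example_iounmap(void __iomem *addr)
    {
            /* only unmap what really went through vmalloc space */
            if ((unsigned long)addr >= VMALLOC_START)
                    __iounmap(addr);
    }

    #define __arch_ioremap(a, s, f)  __example_ioremap(a, s, f)
    #define __arch_iounmap(a)        __example_iounmap(a)
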
@@ -100,7 +100,6 @@ static inline void set_fs (mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_8(void *);
 extern int __get_user_bad(void);

 #define __get_user_x(__r2,__p,__e,__s,__i...)			\

@@ -114,7 +113,7 @@ extern int __get_user_bad(void);
 #define get_user(x,p)						\
 	({							\
 		const register typeof(*(p)) __user *__p asm("r0") = (p);\
-		register typeof(*(p)) __r2 asm("r2");		\
+		register unsigned int __r2 asm("r2");		\
 		register int __e asm("r0");			\
 		switch (sizeof(*(__p))) {			\
 		case 1:						\

@@ -126,12 +125,9 @@ extern int __get_user_bad(void);
 		case 4:						\
 			__get_user_x(__r2, __p, __e, 4, "lr");	\
 			break;					\
-		case 8:						\
-			__get_user_x(__r2, __p, __e, 8, "lr");	\
-			break;					\
 		default: __e = __get_user_bad(); break;		\
 		}						\
-		x = __r2;					\
+		x = (typeof(*(p))) __r2;			\
 		__e;						\
 	})

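After this merge, get_user() on ARM handles 1-, 2- and 4-byte objects only; the 8-byte case and __get_user_8 are gone, so a 64-bit fetch now falls through to __get_user_bad() and fails at link time. A small, hedged sketch of typical use under the updated macro; the helper name is made up for illustration and the fragment assumes a kernel tree of this vintage:

    #include <linux/errno.h>
    #include <asm/uaccess.h>

    /* Hypothetical helper: fetch one 32-bit word from user space. */
    static int example_read_word(unsigned int __user *uptr, unsigned int *out)
    {
            unsigned int val;

            if (get_user(val, uptr))        /* non-zero (-EFAULT) on a bad pointer */
                    return -EFAULT;

            *out = val;
            return 0;
    }
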