linux_old1/arch/arm/mm/mm.h

#ifdef CONFIG_MMU

ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area

In the current implementation we use an ARM-specific flag, VM_ARM_STATIC_MAPPING,
to distinguish ARM-specific statically mapped areas. The purpose of a static
mapped area is to be re-used when the entire physical address range of an
ioremap request is covered by that area.

This implementation causes needless overhead in some cases. For example, assume
there is only one static mapped area and vmlist has 300 areas. Every time we
call ioremap, we check all 300 areas to decide whether one matches. Moreover,
even if there is no static mapped area at all, every ioremap call still walks
the 300 entries of vmlist.

If we build a separate list for static mapped areas, we can eliminate this
overhead: with such a list, if there is one static mapped area we check just
that one entry and move on. This is not a critical problem, because ioremap is
not called frequently, but reducing the overhead is still worthwhile.

Another reason for this work is to remove an architecture dependency on the
vmalloc layer. vmlist and vmlist_lock are internal data structures of the
vmalloc layer; some debugging and statistics code inevitably uses them, but it
is preferable that they are used as little as possible outside of vmalloc.c.

This patch introduces an ARM-specific infrastructure for static mapped areas.
A following patch will use it to resolve the problem described above.

Reviewed-by: Nicolas Pitre <nico@linaro.org>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

2013-02-09 13:28:05 +08:00

#include <linux/list.h>
#include <linux/vmalloc.h>

/* the upper-most page table pointer */
extern pmd_t *top_pmd;

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently, while 0xffff4000
 * is reserved for VIPT aliasing flushing by generic code.
 *
 * Note that we don't allow VIPT aliasing caches with SMP.
 */
#define COPYPAGE_MINICACHE 0xffff8000
#define COPYPAGE_V6_FROM 0xffff8000
#define COPYPAGE_V6_TO 0xffffc000

/* PFN alias flushing, for VIPT caches */
#define FLUSH_ALIAS_START 0xffff4000
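
/*
 * Install 'pte' at the kernel virtual address 'va' (one of the fixed
 * addresses above, all covered by top_pmd) and flush the now-stale TLB
 * entry for that page on the local CPU.
 */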
static inline void set_top_pte(unsigned long va, pte_t pte)
{
	pte_t *ptep = pte_offset_kernel(top_pmd, va);
	set_pte_ext(ptep, pte, 0);
	local_flush_tlb_kernel_page(va);
}
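
/* Read back the pte currently installed at 'va' in the top_pmd region. */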
static inline pte_t get_top_pte(unsigned long va)
{
	pte_t *ptep = pte_offset_kernel(top_pmd, va);
	return *ptep;
}
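
/*
 * Walk the kernel (init_mm) page tables and return the pmd entry covering
 * the virtual address 'virt'.
 */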
static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}
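
/*
 * Protection and domain values used when creating a mapping of a given
 * memory type (MT_*): prot_pte for page table entries, prot_l1 for
 * first-level entries pointing to a page table, prot_sect for
 * (super)section mappings.
 */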
struct mem_type {
	pteval_t prot_pte;
	pmdval_t prot_l1;
	pmdval_t prot_sect;
	unsigned int domain;
};
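
/* Look up the mem_type table entry for an MT_* memory type index. */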
const struct mem_type *get_mem_type(unsigned int type);
extern void __flush_dcache_page(struct address_space *mapping, struct page *page);

/*
 * ARM specific vm_struct->flags bits.
 */
/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
#define VM_ARM_SECTION_MAPPING 0x80000000
/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000

ARM: Fix ioremap() of address zero

Murali Nalajala reports a regression that ioremapping address zero
results in an oops dump:

Unable to handle kernel paging request at virtual address fa200000
pgd = d4f80000
[fa200000] *pgd=00000000
Internal error: Oops: 5 [#1] PREEMPT SMP ARM
Modules linked in:
CPU: 0 Tainted: G W (3.4.0-g3b5f728-00009-g638207a #13)
PC is at msm_pm_config_rst_vector_before_pc+0x8/0x30
LR is at msm_pm_boot_config_before_pc+0x18/0x20
pc : [<c0078f84>] lr : [<c007903c>] psr: a0000093
sp : c0837ef0 ip : cfe00000 fp : 0000000d
r10: da7efc17 r9 : 225c4278 r8 : 00000006
r7 : 0003c000 r6 : c085c824 r5 : 00000001 r4 : fa101000
r3 : fa200000 r2 : c095080c r1 : 002250fc r0 : 00000000
Flags: NzCv IRQs off FIQs on Mode SVC_32 ISA ARM Segment kernel
Control: 10c5387d Table: 25180059 DAC: 00000015
[<c0078f84>] (msm_pm_config_rst_vector_before_pc+0x8/0x30) from [<c007903c>] (msm_pm_boot_config_before_pc+0x18/0x20)
[<c007903c>] (msm_pm_boot_config_before_pc+0x18/0x20) from [<c007a55c>] (msm_pm_power_collapse+0x410/0xb04)
[<c007a55c>] (msm_pm_power_collapse+0x410/0xb04) from [<c007b17c>] (arch_idle+0x294/0x3e0)
[<c007b17c>] (arch_idle+0x294/0x3e0) from [<c000eed8>] (default_idle+0x18/0x2c)
[<c000eed8>] (default_idle+0x18/0x2c) from [<c000f254>] (cpu_idle+0x90/0xe4)
[<c000f254>] (cpu_idle+0x90/0xe4) from [<c057231c>] (rest_init+0x88/0xa0)
[<c057231c>] (rest_init+0x88/0xa0) from [<c07ff890>] (start_kernel+0x3a8/0x40c)
Code: c0704256 e12fff1e e59f2020 e5923000 (e5930000)

This is caused by the 'reserved' entries which we insert (see
19b52abe3c5d7 - ARM: 7438/1: fill possible PMD empty section gaps)
which get matched for physical address zero.

Resolve this by marking these reserved entries with a different flag.

Cc: <stable@vger.kernel.org>
Tested-by: Murali Nalajala <mnalajal@codeaurora.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

2012-08-25 16:03:15 +08:00

/* empty mapping */
#define VM_ARM_EMPTY_MAPPING 0x20000000
/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
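/*
 * Note: the MT_* index of a static mapping is stored in bits 20-24 of
 * vm_struct->flags so that a later ioremap() of the same region can check
 * that the requested memory type is compatible before reusing the mapping.
 */
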
/* consistent regions used by dma_alloc_attrs() */
#define VM_ARM_DMA_CONSISTENT 0x20000000
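
/*
 * An ARM-private record of a permanent static mapping created by
 * iotable_init().  Each entry wraps the underlying vm_struct and links it
 * into static_vmlist, so ioremap() can find and reuse static mappings
 * without walking the generic vmalloc lists.  find_static_vm_vaddr() looks
 * up the entry containing a given virtual address; add_static_vm_early()
 * registers an entry during early boot.
 */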
struct static_vm {
	struct vm_struct vm;
	struct list_head list;
};

extern struct list_head static_vmlist;
extern struct static_vm *find_static_vm_vaddr(void *vaddr);
extern __init void add_static_vm_early(struct static_vm *svm);
#endif
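
/*
 * Highest physical address (and corresponding page frame number) that
 * ZONE_DMA allocations may use.  Without CONFIG_ZONE_DMA there is no
 * restriction.
 */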
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
extern unsigned long arm_dma_pfn_limit;
#else
#define arm_dma_limit ((phys_addr_t)~0)
#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
#endif
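
/* Highest physical address covered by the kernel's direct (lowmem) mapping. */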
extern phys_addr_t arm_lowmem_limit;
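
/*
 * Early set-up hooks implemented under arch/arm/mm/: bootmem_init() sizes
 * the memory zones from memblock, arm_mm_memblock_reserve() reserves the
 * kernel's initial page tables, and dma_contiguous_remap() re-maps the
 * contiguous DMA (CMA) regions reserved at boot.
 */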
void __init bootmem_init(void);
void arm_mm_memblock_reserve(void);
void dma_contiguous_remap(void);