linux_old1/arch/mips/mm/init.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else
#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)
#endif /* CONFIG_MIPS_MT_SMTC */
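
/*
 * ENTER_CRITICAL() and EXIT_CRITICAL() must be used as a brace-matched
 * pair within the same function: the SMTC variants open and close a C
 * block so that the mvpflags temporary declared by ENTER_CRITICAL()
 * stays in scope until the matching EXIT_CRITICAL().
 */
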
/*
 * We have up to 8 empty zeroed pages so we can map one of the right
 * colour when needed.  This is necessary only on R4000 / R4400 SC and
 * MC versions where we have to avoid VCED / VECI exceptions for good
 * performance at any price.  Since the page is never written to after
 * initialization, we don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
/*
 * Not static inline because it is used by the IP27 special magic
 * initialization code.
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
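
/*
 * How callers typically pick a correctly coloured zero page using the
 * mask computed above (a sketch; the real ZERO_PAGE() definition lives
 * in asm/pgtable.h):
 *
 *	page = virt_to_page((void *)(empty_zero_page +
 *				     ((unsigned long)vaddr & zero_page_mask)));
 */
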
#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;

static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif
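
/*
 * Map @page at a fixmap virtual address whose cache colour matches the
 * user address @addr, via a wired (or, on SMTC, preloaded) TLB entry.
 * This lets the kernel access the page without creating a dcache alias
 * against the existing user mapping.
 */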
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id() +
		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
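
/*
 * UNIQUE_ENTRYHI() yields a distinct CKSEG0 virtual address for each
 * TLB index.  kunmap_coherent() writes it into the wired entry being
 * released so the stale translation can never match: CKSEG0 is an
 * unmapped segment, so accesses there never consult the TLB.
 */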

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}
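
/*
 * kmap_coherent() and kunmap_coherent() pair like kmap_atomic():
 * preemption is disabled in between and the mapping must be released
 * on the same CPU.  Typical use, as in copy_to_user_page() below:
 *
 *	void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 *	memcpy(vto, src, len);
 *	kunmap_coherent();
 */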

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure the copy is visible to other CPUs before the page is used. */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
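
/*
 * Populate kernel page tables for the fixed-address range [start, end)
 * under pgd_base: allocate one bootmem page of PTEs for every PMD entry
 * that is still empty, so that later fixmap/kmap code can install PTEs
 * directly instead of allocating at map time.
 */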
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
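
/*
 * A sketch of the typical caller (assuming the 32-bit pagetable_init()
 * in arch/mips/mm/pgtable-32.c of this era), which covers the whole
 * fixmap range:
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
 */
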
#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp) && pfn_valid(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and
 * PGD_ORDER are constants, so we use the variants from asm-offsets.h
 * until those gcc versions are officially retired.
 *
 * Aligning swapper_pg_dir to 64K allows its address to be loaded with
 * a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment size,
 * wasting space.  So we place it in its own section and align it in
 * the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
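
/*
 * With the 64K alignment above, the low 16 bits of &swapper_pg_dir are
 * zero, so a TLB refill handler can materialise the pointer with a
 * single instruction instead of a lui/addiu pair (a sketch, assuming a
 * 32-bit kernel segment address):
 *
 *	lui	k1, %hi(swapper_pg_dir)	# low half is already zero
 */
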
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;