linux/arch/mips/kernel/head.S

/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995 Waldorf Electronics
* Written by Ralf Baechle and Andreas Busse
* Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle
* Copyright (C) 1996 Paul M. Antoine
* Modified for DECStation and hence R3000 support by Paul M. Antoine
* Further modifications by David S. Miller and Harald Koerfgen
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/threads.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/page.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <kernel-entry-init.h>
.macro ARC64_TWIDDLE_PC
#if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL)
/* We get launched at an XKPHYS address but the kernel is linked to
   run at a KSEG0 address, so jump there. */
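/*
* \@ expands to the assembler's macro invocation counter, giving a
* unique numeric local label per expansion; PTR_LA yields its link-time
* (KSEG0/mapped) address and the jr moves the PC there.
*/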
PTR_LA t0, \@f
jr t0
\@:
#endif
.endm
/*
* Set up a wired TLB entry mapping the kernel. Inputs are the text
* nasid in t1 and the data nasid in t2.
*/
.macro MAPPED_KERNEL_SETUP_TLB
#ifdef CONFIG_MAPPED_KERNEL
/*
* This should read the nasid from the hardware - assume nasid 0 for
* now. Drop 0xffffffffc0000000 into EntryHi, 0+VG into EntryLo0 and
* 0+DVG into EntryLo1.
*/
dli t0, 0xffffffffc0000000
dmtc0 t0, CP0_ENTRYHI
li t0, 0x1c000 # Offset of text into node memory
dsll t1, NASID_SHFT # Shift text nasid into place
dsll t2, NASID_SHFT # Same for data nasid
or t1, t1, t0 # Physical load address of kernel text
or t2, t2, t0 # Physical load address of kernel data
dsrl t1, 12 # 4K pfn
dsrl t2, 12 # 4K pfn
dsll t1, 6 # Get pfn into place
dsll t2, 6 # Get pfn into place
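/*
* EntryLo layout: the PFN occupies bits 6 and up, the cache attribute
* bits 5..3 and D/V/G bits 2..0. The 4K PFNs above and the software
* _PAGE_*/_CACHE_* flags below are therefore both shifted right by a
* net 6 bits to land on the hardware bit positions.
*/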
li t0, ((_PAGE_GLOBAL | _PAGE_VALID | _CACHE_CACHABLE_COW) >> 6)
or t0, t0, t1
mtc0 t0, CP0_ENTRYLO0 # physaddr, V, G, cacheable coherent exclusive on write
li t0, ((_PAGE_GLOBAL | _PAGE_VALID | _PAGE_DIRTY | _CACHE_CACHABLE_COW) >> 6)
or t0, t0, t2
mtc0 t0, CP0_ENTRYLO1 # physaddr, D, V, G, cacheable coherent exclusive on write
li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M
mtc0 t0, CP0_PAGEMASK
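/*
* Write the mapping at index 0 and set Wired to 1 so that the random
* replacement used by tlbwr can never evict it.
*/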
li t0, 0 # KMAP_INX
mtc0 t0, CP0_INDEX
li t0, 1
mtc0 t0, CP0_WIRED
tlbwi
#else
mtc0 zero, CP0_WIRED
#endif
.endm
/*
* For the moment disable interrupts, mark the kernel mode and
* set ST0_KX so that the CPU does not spit fire when using
* 64-bit addresses. A full initialization of the CPU's status
* register is done later in per_cpu_trap_init().
*/
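/*
* Both variants below use the same OR-then-XOR trick: every bit that
* should end up cleared is first forced to 1 by the OR and then flipped
* back to 0 by the XOR, so its final state does not depend on its
* previous value, while CU0 and the \set bits are only ORed in and
* therefore stay set.
*/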
.macro setup_c0_status set clr
.set push
#ifdef CONFIG_MIPS_MT_SMTC
/*
* For SMTC, we need to set privilege and disable interrupts only for
* the current TC, using the TCStatus register.
*/
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU, leave IXMT */
xori t0, 0x00001800
mtc0 t0, CP0_TCSTATUS
_ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
xor t0, ST0_EXL | ST0_ERL | \clr
mtc0 t0, CP0_STATUS
#else
mfc0 t0, CP0_STATUS
or t0, ST0_CU0|\set|0x1f|\clr
xor t0, 0x1f|\clr
mtc0 t0, CP0_STATUS
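/*
* "sll zero, 3" assembles to sll $0, $0, 3, the MIPS32r2 EHB encoding,
* spelled as a shift so that older assemblers accept it and pre-R2 CPUs
* execute it as a nop. It clears the execution hazard left by the mtc0
* to CP0_STATUS above.
*/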
.set noreorder
sll zero,3 # ehb
#endif
.set pop
.endm
.macro setup_c0_status_pri
#ifdef CONFIG_64BIT
setup_c0_status ST0_KX 0
#else
setup_c0_status 0 0
#endif
.endm
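/*
* Secondary CPUs additionally clear ST0_BEV: by the time they come up
* the boot CPU has set up the exception vectors in RAM, so the
* bootstrap (ROM) vectors are no longer wanted.
*/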
.macro setup_c0_status_sec
#ifdef CONFIG_64BIT
setup_c0_status ST0_KX ST0_BEV
#else
setup_c0_status 0 ST0_BEV
#endif
.endm
/*
* Reserved space for exception handlers.
* Necessary for machines which link their kernels at KSEG0.
*/
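/*
* 0x400 bytes covers the R4000-style vectors relative to the kernel's
* load address: TLB refill at 0x000, XTLB refill at 0x080, cache error
* at 0x100, general exception at 0x180 and the interrupt vector area
* from 0x200.
*/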
.fill 0x400
EXPORT(stext) # used for profiling
EXPORT(_stext)
#ifdef CONFIG_MIPS_SIM
/*
* Give us a fighting chance of running if execution begins at the
* kernel load address. This is needed because this platform does
* not have an ELF loader yet.
*/
j kernel_entry
#endif
__INIT
NESTED(kernel_entry, 16, sp) # kernel entry point
kernel_entry_setup # cpu specific setup
setup_c0_status_pri
ARC64_TWIDDLE_PC
#ifdef CONFIG_MIPS_MT_SMTC
/*
* In an SMTC kernel, "CLI" is thread-specific and lives in TCStatus.
* We still need to enable interrupts globally in Status,
* and clear EXL/ERL.
*
* TCContext is used to track interrupt levels under service in an
* SMTC kernel. Clear it for the boot TC before allowing any
* interrupts.
*/
mtc0 zero, CP0_TCCONTEXT
mfc0 t0, CP0_STATUS
ori t0, t0, 0xff1f
xori t0, t0, 0x001e
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
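/*
* Clear .bss one LONGSIZE word per store. The first word is zeroed
* before the loop because the loop increments t0 before storing; t1
* holds the address of the last word, so the bne falls through once
* that word has been zeroed. __bss_start and __bss_stop are assumed to
* be LONGSIZE aligned.
*/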
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
1:
PTR_ADDIU t0, LONGSIZE
LONG_S zero, (t0)
bne t0, t1, 1b
LONG_S a0, fw_arg0 # firmware arguments
LONG_S a1, fw_arg1
LONG_S a2, fw_arg2
LONG_S a3, fw_arg3
MTC0 zero, CP0_CONTEXT # clear context register
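/*
* Point gp ($28) at init_thread_union and sp at the top of its stack
* (minus 32 bytes of padding), record that value via set_saved_sp so
* exception entry can find this CPU's kernel stack, then reserve an
* argument save area before jumping to start_kernel.
*/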
PTR_LA $28, init_thread_union
PTR_LI sp, _THREAD_SIZE - 32
PTR_ADDU sp, $28
set_saved_sp sp, t0, t1
PTR_SUBU sp, 4 * SZREG # init stack pointer
j start_kernel
END(kernel_entry)
#ifdef CONFIG_QEMU
__INIT
#endif
#ifdef CONFIG_SMP
/*
* SMP slave CPU entry point. Board-specific bootstrap code calls this
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Read-modify-writes of Status must be atomic, and this is one case
* where CLI is invoked without EXL necessarily being set. The CLI and
* setup_c0_status will in fact be redundant for all but the first TC
* of each VPE being booted.
*/
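/*
* "dmt t2" clears the TE bit of VPEControl, disabling multi-threading
* on this VPE, and leaves the previous VPEControl value in t2;
* mips_ihb drains the resulting instruction hazard. The EMT below then
* re-enables threading only if TE was set to begin with.
*/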
DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
setup_c0_status_sec
smp_slave_setup
#ifdef CONFIG_MIPS_MT_SMTC
andi t2, t2, VPECONTROL_TE
beqz t2, 2f
EMT # emt
2:
#endif /* CONFIG_MIPS_MT_SMTC */
j start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */
__FINIT
.comm kernelsp, NR_CPUS * 8, 8
.comm pgd_current, NR_CPUS * 8, 8
.comm fw_arg0, SZREG, SZREG # firmware arguments
.comm fw_arg1, SZREG, SZREG
.comm fw_arg2, SZREG, SZREG
.comm fw_arg3, SZREG, SZREG
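/*
* Reserve (_PAGE_SIZE << order) bytes of page-table storage in .bss,
* aligned to its own size.
*/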
.macro page name, order
.comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order)
.endm
/*
* On 64-bit we've got three-level pagetables with a slightly
* different layout ...
*/
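/*
* swapper_pg_dir is the kernel's top-level page table. The invalid_*
* tables below are zero-filled placeholders which empty upper-level
* entries point at, so a walk of an unmapped address still reaches a
* valid lower-level table containing only invalid entries.
*/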
page swapper_pg_dir, _PGD_ORDER
#ifdef CONFIG_64BIT
#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
page module_pg_dir, _PGD_ORDER
#endif
page invalid_pmd_table, _PMD_ORDER
#endif
page invalid_pte_table, _PTE_ORDER