powerpc: Move most remaining ppc64 files over to arch/powerpc

Also deletes files in arch/ppc64 that are no longer used now that
we don't compile with ARCH=ppc64 any more.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Paul Mackerras 2005-11-14 17:30:17 +11:00
parent c55377ee73
commit 7568cb4ef6
29 changed files with 19 additions and 9886 deletions

View File

@@ -932,6 +932,7 @@ source "arch/powerpc/oprofile/Kconfig"
 config KPROBES
 	bool "Kprobes (EXPERIMENTAL)"
+	depends on PPC64
 	help
 	  Kprobes allows you to trap at almost any kernel address and
 	  execute a callback function. register_kprobe() establishes

View File

@@ -49,12 +49,23 @@ extra-y += vmlinux.lds
 obj-y += process.o init_task.o time.o \
          prom.o traps.o setup-common.o
 obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o
-obj-$(CONFIG_PPC64) += misc_64.o
+obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
 obj-$(CONFIG_PPC_OF) += prom_init.o
 obj-$(CONFIG_MODULES) += ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT) += btext.o
 obj-$(CONFIG_6xx) += idle_6xx.o
 obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+module-$(CONFIG_PPC64) += module_64.o
+obj-$(CONFIG_MODULES) += $(module-y)
+pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \
+                         pci_direct_iommu.o iomap.o
+obj-$(CONFIG_PCI) += $(pci64-y)
+kexec64-$(CONFIG_PPC64) += machine_kexec_64.o
+obj-$(CONFIG_KEXEC) += $(kexec64-y)
 ifeq ($(CONFIG_PPC_ISERIES),y)
 $(obj)/head_64.o: $(obj)/lparmap.s
@@ -62,11 +73,8 @@ AFLAGS_head_64.o += -I$(obj)
 endif
 else
-# stuff used from here for ARCH=ppc or ARCH=ppc64
+# stuff used from here for ARCH=ppc
 smpobj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
-                       setup-common.o $(smpobj-y)
 endif

View File

@@ -5,3 +5,6 @@ obj-$(CONFIG_IBMVIO) += vio.o
 obj-$(CONFIG_XICS) += xics.o
 obj-$(CONFIG_SCANLOG) += scanlog.o
 obj-$(CONFIG_EEH) += eeh.o eeh_event.o
+obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
+obj-$(CONFIG_HVCS) += hvcserver.o

View File

@@ -1,520 +0,0 @@
#
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
#
config 64BIT
def_bool y
config MMU
bool
default y
config PPC_STD_MMU
def_bool y
config UID16
bool
config RWSEM_GENERIC_SPINLOCK
bool
config RWSEM_XCHGADD_ALGORITHM
bool
default y
config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_ISA_DMA
bool
default y
config EARLY_PRINTK
bool
default y
config COMPAT
bool
default y
config SCHED_NO_NO_OMIT_FRAME_POINTER
bool
default y
config ARCH_MAY_HAVE_PC_FDC
bool
default y
config PPC_STD_MMU
bool
default y
# We optimistically allocate largepages from the VM, so make the limit
# large enough (16MB). This badly named config option is actually
# max order + 1
config FORCE_MAX_ZONEORDER
int
default "9" if PPC_64K_PAGES
default "13"
source "init/Kconfig"
config SYSVIPC_COMPAT
bool
depends on COMPAT && SYSVIPC
default y
menu "Platform support"
choice
prompt "Platform Type"
default PPC_MULTIPLATFORM
config PPC_ISERIES
bool "IBM Legacy iSeries"
config PPC_MULTIPLATFORM
bool "Generic"
endchoice
config PPC_PSERIES
depends on PPC_MULTIPLATFORM
bool " IBM pSeries & new iSeries"
default y
config PPC_BPA
bool " Broadband Processor Architecture"
depends on PPC_MULTIPLATFORM
config PPC_PMAC
depends on PPC_MULTIPLATFORM
bool " Apple G5 based machines"
default y
select U3_DART
select GENERIC_TBSYNC
config PPC_MAPLE
depends on PPC_MULTIPLATFORM
bool " Maple 970FX Evaluation Board"
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
default n
help
This option enables support for the Maple 970FX Evaluation Board.
For more information, refer to <http://www.970eval.com>
config PPC
bool
default y
config PPC64
bool
default y
config PPC_OF
depends on PPC_MULTIPLATFORM
bool
default y
config XICS
depends on PPC_PSERIES
bool
default y
config MPIC
depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
bool
default y
config PPC_I8259
depends on PPC_PSERIES
bool
default y
config BPA_IIC
depends on PPC_BPA
bool
default y
# VMX is pSeries only for now until somebody writes the iSeries
# exception vectors for it
config ALTIVEC
bool "Support for VMX (Altivec) vector unit"
depends on PPC_MULTIPLATFORM
default y
config PPC_SPLPAR
depends on PPC_PSERIES
bool "Support for shared-processor logical partitions"
default n
help
Enabling this option will make the kernel run more efficiently
on logically-partitioned pSeries systems which use shared
processors, that is, which share physical processors between
two or more partitions.
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on PPC_MULTIPLATFORM && EXPERIMENTAL
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
initially work for you. It may help to enable device hotplugging
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
source "drivers/cpufreq/Kconfig"
config CPU_FREQ_PMAC64
bool "Support for some Apple G5s"
depends on CPU_FREQ && PMAC_SMU && PPC64
select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple iMac G5,
and some of the more recent desktop G5 machines as well.
config IBMVIO
depends on PPC_PSERIES || PPC_ISERIES
bool
default y
config U3_DART
bool
depends on PPC_MULTIPLATFORM
default n
config MPIC_BROKEN_U3
bool
depends on PPC_MAPLE
default y
config GENERIC_TBSYNC
def_bool n
config PPC_PMAC64
bool
depends on PPC_PMAC
default y
config BOOTX_TEXT
bool "Support for early boot text console"
depends PPC_OF
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires an Open Firmware compatible video card.
config POWER4
def_bool y
config PPC_FPU
def_bool y
config POWER4_ONLY
bool "Optimize for POWER4"
default n
---help---
Cause the compiler to optimize for POWER4 processors. The resulting
binary will not work on POWER3 or RS64 processors when compiled with
binutils 2.15 or later.
config IOMMU_VMERGE
bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
depends on EXPERIMENTAL
default n
help
Cause IO segments sent to a device for DMA to be merged virtually
by the IOMMU when they happen to have been allocated contiguously.
This doesn't add pressure to the IOMMU allocator. However, some
drivers don't support getting large merged segments coming back
from *_map_sg(). Say Y if you know the drivers you are using are
properly handling this case.
config SMP
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
than one CPU, say Y.
If you say N here, the kernel will run on single and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on single-processor machines.
On a single-processor machine, the kernel will run faster if you say
N here.
If you don't know what to do here, say Y.
config NR_CPUS
int "Maximum number of CPUs (2-128)"
range 2 128
depends on SMP
default "32"
config HMT
bool "Hardware multithreading"
depends on SMP && PPC_PSERIES && BROKEN
help
This option enables hardware multithreading on RS64 cpus.
pSeries systems p620 and p660 have such a cpu type.
config NUMA
bool "NUMA support"
default y if SMP && PPC_PSERIES
config ARCH_SELECT_MEMORY_MODEL
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
depends on !NUMA
config ARCH_SPARSEMEM_ENABLE
def_bool y
config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on NUMA
source "mm/Kconfig"
config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
# between a node's start and end pfns, it may not
# reside on that node.
#
# This is a relatively temporary hack that should
# be able to go away when sparsemem is fully in
# place
config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
config PPC_64K_PAGES
bool "64k page size"
help
This option changes the kernel logical page size to 64k. On machines
without processor support for 64k pages, the kernel will simulate
them by loading each individual 4k page on demand transparently,
while on hardware with such support, it will be used to map
normal application pages.
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on SMP
default off
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with POWER5 cpus at a cost of slightly increased
overhead in some places. If unsure say N here.
source "kernel/Kconfig.preempt"
source kernel/Kconfig.hz
config EEH
bool "PCI Extended Error Handling (EEH)" if EMBEDDED
depends on PPC_PSERIES
default y if !EMBEDDED
#
# Use the generic interrupt handling code in kernel/irq/:
#
config GENERIC_HARDIRQS
bool
default y
config PPC_RTAS
bool
depends on PPC_PSERIES || PPC_BPA
default y
config RTAS_ERROR_LOGGING
bool
depends on PPC_RTAS
default y
config RTAS_PROC
bool "Proc interface to RTAS"
depends on PPC_RTAS
default y
config RTAS_FLASH
tristate "Firmware flash interface"
depends on RTAS_PROC
config SCANLOG
tristate "Scanlog dump interface"
depends on RTAS_PROC && PPC_PSERIES
config LPARCFG
tristate "LPAR Configuration Data"
depends on PPC_PSERIES || PPC_ISERIES
help
Provide system capacity information via human readable
<key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
default y
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via /proc/<pid>/seccomp, it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded should say N here.
source "fs/Kconfig.binfmt"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
select HOTPLUG
---help---
Say Y here to be able to turn CPUs off and on.
Say N if you are unsure.
config PROC_DEVICETREE
bool "Support for Open Firmware device tree in /proc"
help
This option adds a device-tree directory under /proc which contains
an image of the device tree that the kernel copies from Open
Firmware. If unsure, say Y here.
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
depends on !PPC_ISERIES
config CMDLINE
string "Initial kernel command string"
depends on CMDLINE_BOOL
default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
help
On some platforms, there is currently no way for the boot loader to
pass arguments to the kernel. For these platforms, you can supply
some command-line options at build time by entering them here. In
most cases you will need to specify the root device here.
endmenu
config ISA_DMA_API
bool
default y
menu "Bus Options"
config ISA
bool
help
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. If you have an Apple machine, say N here; if you
have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
you have an embedded board, consult your board documentation.
config SBUS
bool
config MCA
bool
config EISA
bool
config PCI
bool "support for PCI devices" if (EMBEDDED && PPC_ISERIES)
default y
help
Find out whether your system includes a PCI bus. PCI is the name of
a bus system, i.e. the way the CPU talks to the other stuff inside
your box. If you say Y here, the kernel will include drivers and
infrastructure code to support PCI bus devices.
config PCI_DOMAINS
bool
default PCI
source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
endmenu
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
menu "iSeries device drivers"
depends on PPC_ISERIES
config VIOCONS
tristate "iSeries Virtual Console Support"
config VIODASD
tristate "iSeries Virtual I/O disk support"
help
If you are running on an iSeries system and you want to use
virtual disks created and managed by OS/400, say Y.
config VIOCD
tristate "iSeries Virtual I/O CD support"
help
If you are running Linux on an IBM iSeries system and you want to
read a CD drive owned by OS/400, say Y here.
config VIOTAPE
tristate "iSeries Virtual Tape Support"
help
If you are running Linux on an iSeries system and you want Linux
to read and/or write a tape drive owned by OS/400, say Y here.
endmenu
config VIOPATH
bool
depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
default y
source "arch/powerpc/oprofile/Kconfig"
source "arch/ppc64/Kconfig.debug"
source "security/Kconfig"
config KEYS_COMPAT
bool
depends on COMPAT && KEYS
default y
source "crypto/Kconfig"
source "lib/Kconfig"

View File

@@ -2,44 +2,6 @@
 # Makefile for the linux ppc64 kernel.
 #
-ifneq ($(CONFIG_PPC_MERGE),y)
-EXTRA_CFLAGS += -mno-minimal-toc
-extra-y := head.o vmlinux.lds
-obj-y := misc.o prom.o
-endif
-obj-y += idle.o dma.o \
-        align.o \
-        iommu.o
-pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
-obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y)
+obj-y += idle.o align.o
 obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
-endif
-obj-$(CONFIG_KEXEC) += machine_kexec.o
-obj-$(CONFIG_MODULES) += module.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-obj-$(CONFIG_MODULES) += ppc_ksyms.o
-endif
-obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-obj-$(CONFIG_BOOTX_TEXT) += btext.o
-endif
-obj-$(CONFIG_HVCS) += hvcserver.o
-obj-$(CONFIG_KPROBES) += kprobes.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-ifeq ($(CONFIG_PPC_ISERIES),y)
-arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
-AFLAGS_head.o += -Iarch/powerpc/kernel
-endif
-endif

View File

@@ -1,195 +0,0 @@
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/hardirq.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/rtas.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/systemcfg.h>
#include <asm/compat.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
int main(void)
{
/* thread struct on stack */
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
/* task_struct->thread */
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#ifdef CONFIG_ALTIVEC
DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_64K_PAGES
DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
#endif
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
#endif /* CONFIG_HUGETLB_PAGE */
DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
/* RTAS */
DEFINE(RTASBASE, offsetof(struct rtas_t, base));
DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
/* 288 = # of volatile regs, int & fp, for leaf routines */
/* which do not stack a frame. See the PPC64 ABI. */
DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
/*
* Note: these symbols include _ because they overlap with special
* register names
*/
DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
/* About the CPU features table */
DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
/* systemcfg offsets for use by vdso */
DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
/* timeval/timezone offsets for use by vdso */
DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
return 0;
}
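
The asm-offsets file shown above is never linked into the kernel; it exists only to be compiled to assembly. The DEFINE() macro emits a "->SYMBOL value" marker into that assembly output, and a kbuild sed rule turns each marker into a #define that hand-written assembly can include, so structure offsets never have to be maintained by hand. A minimal standalone sketch of the same technique, with an invented structure and symbol names:

/*
 * Sketch of the asm-offsets technique (the demo names are made up).
 * Compile with "cc -S asm-offsets-demo.c": the generated assembly
 * contains marker lines starting with "->DEMO_FIELD" followed by the
 * constant, which a small sed script can rewrite into
 * "#define DEMO_FIELD 8" for use from .S files.
 */
#include <stddef.h>

struct demo {
	long	count;
	long	field;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )

int main(void)
{
	DEFINE(DEMO_SIZE, sizeof(struct demo));
	DEFINE(DEMO_FIELD, offsetof(struct demo, field));
	BLANK();
	return 0;
}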

View File

@@ -1,792 +0,0 @@
/*
* Procedures for drawing on the screen early on in the boot process.
*
* Benjamin Herrenschmidt <benh@kernel.crashing.org>
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/lmb.h>
#include <asm/processor.h>
#include <asm/udbg.h>
#undef NO_SCROLL
#ifndef NO_SCROLL
static void scrollscreen(void);
#endif
static void draw_byte(unsigned char c, long locX, long locY);
static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
static int g_loc_X;
static int g_loc_Y;
static int g_max_loc_X;
static int g_max_loc_Y;
static int dispDeviceRowBytes;
static int dispDeviceDepth;
static int dispDeviceRect[4];
static unsigned char *dispDeviceBase, *logicalDisplayBase;
unsigned long disp_BAT[2] __initdata = {0, 0};
#define cmapsz (16*256)
static unsigned char vga_font[cmapsz];
int boot_text_mapped;
int force_printk_to_btext = 0;
/* Here's a small text engine to use during early boot
* or for debugging purposes
*
* todo:
*
* - build some kind of vgacon with it to enable early printk
* - move to a separate file
* - add a few video driver hooks to keep in sync with display
* changes.
*/
void map_boot_text(void)
{
unsigned long base, offset, size;
unsigned char *vbase;
/* By default, we are no longer mapped */
boot_text_mapped = 0;
if (dispDeviceBase == 0)
return;
base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
offset = ((unsigned long) dispDeviceBase) - base;
size = dispDeviceRowBytes * dispDeviceRect[3] + offset
+ dispDeviceRect[0];
vbase = __ioremap(base, size, _PAGE_NO_CACHE);
if (vbase == 0)
return;
logicalDisplayBase = vbase + offset;
boot_text_mapped = 1;
}
int btext_initialize(struct device_node *np)
{
unsigned int width, height, depth, pitch;
unsigned long address = 0;
u32 *prop;
prop = (u32 *)get_property(np, "width", NULL);
if (prop == NULL)
return -EINVAL;
width = *prop;
prop = (u32 *)get_property(np, "height", NULL);
if (prop == NULL)
return -EINVAL;
height = *prop;
prop = (u32 *)get_property(np, "depth", NULL);
if (prop == NULL)
return -EINVAL;
depth = *prop;
pitch = width * ((depth + 7) / 8);
prop = (u32 *)get_property(np, "linebytes", NULL);
if (prop)
pitch = *prop;
if (pitch == 1)
pitch = 0x1000;
prop = (u32 *)get_property(np, "address", NULL);
if (prop)
address = *prop;
/* FIXME: Add support for PCI reg properties */
if (address == 0)
return -EINVAL;
g_loc_X = 0;
g_loc_Y = 0;
g_max_loc_X = width / 8;
g_max_loc_Y = height / 16;
logicalDisplayBase = (unsigned char *)address;
dispDeviceBase = (unsigned char *)address;
dispDeviceRowBytes = pitch;
dispDeviceDepth = depth;
dispDeviceRect[0] = dispDeviceRect[1] = 0;
dispDeviceRect[2] = width;
dispDeviceRect[3] = height;
map_boot_text();
return 0;
}
static void btext_putc(unsigned char c)
{
btext_drawchar(c);
}
void __init init_boot_display(void)
{
char *name;
struct device_node *np = NULL;
int rc = -ENODEV;
printk("trying to initialize btext ...\n");
name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
if (name != NULL) {
np = of_find_node_by_path(name);
if (np != NULL) {
if (strcmp(np->type, "display") != 0) {
printk("boot stdout isn't a display !\n");
of_node_put(np);
np = NULL;
}
}
}
if (np)
rc = btext_initialize(np);
if (rc) {
for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
if (get_property(np, "linux,opened", NULL)) {
printk("trying %s ...\n", np->full_name);
rc = btext_initialize(np);
printk("result: %d\n", rc);
}
if (rc == 0)
break;
}
}
if (rc == 0 && udbg_putc == NULL)
udbg_putc = btext_putc;
}
/* Calc the base address of a given point (x,y) */
static unsigned char * calc_base(int x, int y)
{
unsigned char *base;
base = logicalDisplayBase;
if (base == 0)
base = dispDeviceBase;
base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
return base;
}
/* Adjust the display to a new resolution */
void btext_update_display(unsigned long phys, int width, int height,
int depth, int pitch)
{
if (dispDeviceBase == 0)
return;
/* check it's the same frame buffer (within 256MB) */
if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
return;
dispDeviceBase = (__u8 *) phys;
dispDeviceRect[0] = 0;
dispDeviceRect[1] = 0;
dispDeviceRect[2] = width;
dispDeviceRect[3] = height;
dispDeviceDepth = depth;
dispDeviceRowBytes = pitch;
if (boot_text_mapped) {
iounmap(logicalDisplayBase);
boot_text_mapped = 0;
}
map_boot_text();
g_loc_X = 0;
g_loc_Y = 0;
g_max_loc_X = width / 8;
g_max_loc_Y = height / 16;
}
void btext_clearscreen(void)
{
unsigned long *base = (unsigned long *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 3;
int i,j;
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
{
unsigned long *ptr = base;
for(j=width; j; --j)
*(ptr++) = 0;
base += (dispDeviceRowBytes >> 3);
}
}
#ifndef NO_SCROLL
static void scrollscreen(void)
{
unsigned long *src = (unsigned long *)calc_base(0,16);
unsigned long *dst = (unsigned long *)calc_base(0,0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 3;
int i,j;
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
{
unsigned long *src_ptr = src;
unsigned long *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = *(src_ptr++);
src += (dispDeviceRowBytes >> 3);
dst += (dispDeviceRowBytes >> 3);
}
for (i=0; i<16; i++)
{
unsigned long *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = 0;
dst += (dispDeviceRowBytes >> 3);
}
}
#endif /* ndef NO_SCROLL */
void btext_drawchar(char c)
{
int cline = 0;
#ifdef NO_SCROLL
int x;
#endif
if (!boot_text_mapped)
return;
switch (c) {
case '\b':
if (g_loc_X > 0)
--g_loc_X;
break;
case '\t':
g_loc_X = (g_loc_X & -8) + 8;
break;
case '\r':
g_loc_X = 0;
break;
case '\n':
g_loc_X = 0;
g_loc_Y++;
cline = 1;
break;
default:
draw_byte(c, g_loc_X++, g_loc_Y);
}
if (g_loc_X >= g_max_loc_X) {
g_loc_X = 0;
g_loc_Y++;
cline = 1;
}
#ifndef NO_SCROLL
while (g_loc_Y >= g_max_loc_Y) {
scrollscreen();
g_loc_Y--;
}
#else
/* wrap around from bottom to top of screen so we don't
waste time scrolling each line. -- paulus. */
if (g_loc_Y >= g_max_loc_Y)
g_loc_Y = 0;
if (cline) {
for (x = 0; x < g_max_loc_X; ++x)
draw_byte(' ', x, g_loc_Y);
}
#endif
}
void btext_drawstring(const char *c)
{
if (!boot_text_mapped)
return;
while (*c)
btext_drawchar(*c++);
}
void btext_drawhex(unsigned long v)
{
char *hex_table = "0123456789abcdef";
if (!boot_text_mapped)
return;
btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 8) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 4) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 0) & 0x0000000FUL]);
btext_drawchar(' ');
}
static void draw_byte(unsigned char c, long locX, long locY)
{
unsigned char *base = calc_base(locX << 3, locY << 4);
unsigned char *font = &vga_font[((unsigned int)c) * 16];
int rb = dispDeviceRowBytes;
switch(dispDeviceDepth) {
case 24:
case 32:
draw_byte_32(font, (unsigned int *)base, rb);
break;
case 15:
case 16:
draw_byte_16(font, (unsigned int *)base, rb);
break;
case 8:
draw_byte_8(font, (unsigned int *)base, rb);
break;
}
}
static unsigned int expand_bits_8[16] = {
0x00000000,
0x000000ff,
0x0000ff00,
0x0000ffff,
0x00ff0000,
0x00ff00ff,
0x00ffff00,
0x00ffffff,
0xff000000,
0xff0000ff,
0xff00ff00,
0xff00ffff,
0xffff0000,
0xffff00ff,
0xffffff00,
0xffffffff
};
static unsigned int expand_bits_16[4] = {
0x00000000,
0x0000ffff,
0xffff0000,
0xffffffff
};
static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
int bg = 0x00000000UL;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (-(bits >> 7) & fg) ^ bg;
base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
base[7] = (-(bits & 1) & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
int bg = 0x00000000UL;
unsigned int *eb = (int *)expand_bits_16;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (eb[bits >> 6] & fg) ^ bg;
base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
base[3] = (eb[bits & 3] & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0x0F0F0F0FUL;
int bg = 0x00000000UL;
unsigned int *eb = (int *)expand_bits_8;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (eb[bits >> 4] & fg) ^ bg;
base[1] = (eb[bits & 0xf] & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static unsigned char vga_font[cmapsz] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
};

File diff suppressed because it is too large

View File

@ -1,940 +0,0 @@
/*
* arch/ppc64/kernel/misc.S
*
*
*
* This file contains miscellaneous low-level functions.
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
* Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
.text
/*
* Returns (address we were linked at) - (address we are running at)
* for use before the text and data are mapped to KERNELBASE.
*/
_GLOBAL(reloc_offset)
mflr r0
bl 1f
1: mflr r3
LOADADDR(r4,1b)
sub r3,r4,r3
mtlr r0
blr
_GLOBAL(get_msr)
mfmsr r3
blr
_GLOBAL(get_dar)
mfdar r3
blr
_GLOBAL(get_srr0)
mfsrr0 r3
blr
_GLOBAL(get_srr1)
mfsrr1 r3
blr
_GLOBAL(get_sp)
mr r3,r1
blr
#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
stdu r1,THREAD_SIZE-112(r3)
mr r1,r3
bl .__do_softirq
ld r1,0(r1)
ld r0,16(r1)
mtlr r0
blr
_GLOBAL(call___do_IRQ)
mflr r0
std r0,16(r1)
stdu r1,THREAD_SIZE-112(r5)
mr r1,r5
bl .__do_IRQ
ld r1,0(r1)
ld r0,16(r1)
mtlr r0
blr
#endif /* CONFIG_IRQSTACKS */
/*
* To be called by C code which needs to do some operations with MMU
* disabled. Note that interrupts have to be disabled by the caller
* prior to calling us. The code called _MUST_ be in the RMO of course
* and part of the linear mapping as we don't attempt to translate the
* stack pointer at all. The function is called with the stack switched
* to this CPU's emergency stack.
*
* prototype is void *call_with_mmu_off(void *func, void *data);
*
* the called function is expected to be of the form
*
* void *called(void *data);
*/
_GLOBAL(call_with_mmu_off)
mflr r0 /* get link, save it on stackframe */
std r0,16(r1)
mr r1,r5 /* save old stack ptr */
ld r1,PACAEMERGSP(r13) /* get emerg. stack */
subi r1,r1,STACK_FRAME_OVERHEAD
std r0,16(r1) /* save link on emerg. stack */
std r5,0(r1) /* save old stack ptr in backchain */
ld r3,0(r3) /* get to real function ptr (assume same TOC) */
bl 2f /* we need LR to return, continue at label 2 */
ld r0,16(r1) /* we return here from the call, get LR and */
ld r1,0(r1) /* .. old stack ptr */
mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
mfmsr r4
ori r4,r4,MSR_IR|MSR_DR
mtspr SPRN_SRR1,r4
rfid
2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
mr r3,r4 /* get parameter */
mfmsr r0
ori r0,r0,MSR_IR|MSR_DR
xori r0,r0,MSR_IR|MSR_DR
mtspr SPRN_SRR1,r0
rfid
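/*
* A minimal usage sketch (illustrative, not from the original file): the
* callee and the helper name my_realmode_op below are assumptions; the
* callee must live in the RMO/linear mapping and the caller must have
* disabled interrupts beforehand, as noted above.
*
*	static void *my_realmode_op(void *data)
*	{
*		... poke hardware with translation off ...
*		return data;
*	}
*
*	local_irq_disable();
*	ret = call_with_mmu_off(my_realmode_op, data);
*	local_irq_enable();
*/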
.section ".toc","aw"
PPC64_CACHES:
.tc ppc64_caches[TC],ppc64_caches
.section ".text"
/*
* Write any modified data cache blocks out to memory
* and invalidate the corresponding instruction cache blocks.
*
* flush_icache_range(unsigned long start, unsigned long stop)
*
* flush all bytes from start through stop-1 inclusive
*/
_KPROBE(__flush_icache_range)
/*
* Flush the data cache to memory
*
* Different systems have different cache line sizes
* and in some cases i-cache and d-cache line sizes differ from
* each other.
*/
ld r10,PPC64_CACHES@toc(r2)
lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
mtctr r8
1: dcbst 0,r6
add r6,r6,r7
bdnz 1b
sync
/* Now invalidate the instruction cache */
lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5
lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
mtctr r8
2: icbi 0,r6
add r6,r6,r7
bdnz 2b
isync
blr
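/*
* A hedged usage sketch (illustrative, not from the original file): after
* writing or patching executable code, a caller would typically do
* something like the following, where addr and len describe the modified
* region:
*
*	memcpy(addr, new_insns, len);
*	__flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
*/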
.text
/*
* Like above, but only do the D-cache.
*
* flush_dcache_range(unsigned long start, unsigned long stop)
*
* flush all bytes from start to stop-1 inclusive
*/
_GLOBAL(flush_dcache_range)
/*
* Flush the data cache to memory
*
* Different systems have different cache line sizes
*/
ld r10,PPC64_CACHES@toc(r2)
lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
mtctr r8
0: dcbst 0,r6
add r6,r6,r7
bdnz 0b
sync
blr
/*
* Like above, but works on non-mapped physical addresses.
* Use only for non-LPAR setups! It also assumes real mode
* is cacheable. Used for flushing out the DART before using
* it as uncacheable memory.
*
* flush_dcache_phys_range(unsigned long start, unsigned long stop)
*
* flush all bytes from start to stop-1 inclusive
*/
_GLOBAL(flush_dcache_phys_range)
ld r10,PPC64_CACHES@toc(r2)
lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
mfmsr r5 /* Disable MMU Data Relocation */
ori r0,r5,MSR_DR
xori r0,r0,MSR_DR
sync
mtmsr r0
sync
isync
mtctr r8
0: dcbst 0,r6
add r6,r6,r7
bdnz 0b
sync
isync
mtmsr r5 /* Re-enable MMU Data Relocation */
sync
isync
blr
_GLOBAL(flush_inval_dcache_range)
ld r10,PPC64_CACHES@toc(r2)
lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
sync
isync
mtctr r8
0: dcbf 0,r6
add r6,r6,r7
bdnz 0b
sync
isync
blr
/*
* Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*
* snoop from the data cache.
*
* void __flush_dcache_icache(void *page)
*/
_GLOBAL(__flush_dcache_icache)
/*
* Flush the data cache to memory
*
* Different systems have different cache line sizes
*/
/* Flush the dcache */
ld r7,PPC64_CACHES@toc(r2)
clrrdi r3,r3,PAGE_SHIFT /* Page align */
lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
mr r6,r3
mtctr r4
0: dcbst 0,r6
add r6,r6,r5
bdnz 0b
sync
/* Now invalidate the icache */
lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
mtctr r4
1: icbi 0,r3
add r3,r3,r5
bdnz 1b
isync
blr
/*
* I/O string operations
*
* insb(port, buf, len)
* outsb(port, buf, len)
* insw(port, buf, len)
* outsw(port, buf, len)
* insl(port, buf, len)
* outsl(port, buf, len)
* insw_ns(port, buf, len)
* outsw_ns(port, buf, len)
* insl_ns(port, buf, len)
* outsl_ns(port, buf, len)
*
* The *_ns versions don't do byte-swapping.
*/
_GLOBAL(_insb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbz r5,0(r3)
eieio
stbu r5,1(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbzu r5,1(r4)
stb r5,0(r3)
bdnz 00b
sync
blr
_GLOBAL(_insw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhbrx r5,0,r3
eieio
sthu r5,2(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
sthbrx r5,0,r3
bdnz 00b
sync
blr
_GLOBAL(_insl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwbrx r5,0,r3
eieio
stwu r5,4(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stwbrx r5,0,r3
bdnz 00b
sync
blr
/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
_GLOBAL(_insw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhz r5,0(r3)
eieio
sthu r5,2(r4)
bdnz 00b
twi 0,r5,0
isync
blr
/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
_GLOBAL(_outsw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
sth r5,0(r3)
bdnz 00b
sync
blr
_GLOBAL(_insl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwz r5,0(r3)
eieio
stwu r5,4(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stw r5,0(r3)
bdnz 00b
sync
blr
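/*
* A hedged usage sketch (illustrative, not from the original file): a
* driver reading a block of 16-bit values from a device port could do
* something like the following; port and the count of 64 are assumptions,
* and most drivers reach these routines through the insw()/outsw() style
* wrappers rather than calling them directly:
*
*	u16 buf[64];
*	_insw(port, buf, 64);		(byte-swapped on the way in)
*	_insw_ns(port, buf, 64);	(native byte order, no swap)
*/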
/*
* identify_cpu: identify the CPU from its PVR and call setup_cpu
* In: r3 = base of the cpu_specs array
* r4 = address of cur_cpu_spec
* r5 = relocation offset
*/
_GLOBAL(identify_cpu)
mfpvr r7
1:
lwz r8,CPU_SPEC_PVR_MASK(r3)
and r8,r8,r7
lwz r9,CPU_SPEC_PVR_VALUE(r3)
cmplw 0,r9,r8
beq 1f
addi r3,r3,CPU_SPEC_ENTRY_SIZE
b 1b
1:
add r0,r3,r5
std r0,0(r4)
ld r4,CPU_SPEC_SETUP(r3)
sub r4,r4,r5
ld r4,0(r4)
sub r4,r4,r5
mtctr r4
/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
mr r4,r3
mr r3,r5
bctr
/*
* do_cpu_ftr_fixups - goes through the list of CPU feature fixups
* and writes nops over sections of code that don't apply for this cpu.
* r3 = data offset (not changed)
*/
_GLOBAL(do_cpu_ftr_fixups)
/* Get CPU 0 features */
LOADADDR(r6,cur_cpu_spec)
sub r6,r6,r3
ld r4,0(r6)
sub r4,r4,r3
ld r4,CPU_SPEC_FEATURES(r4)
/* Get the fixup table */
LOADADDR(r6,__start___ftr_fixup)
sub r6,r6,r3
LOADADDR(r7,__stop___ftr_fixup)
sub r7,r7,r3
/* Do the fixup */
1: cmpld r6,r7
bgelr
addi r6,r6,32
ld r8,-32(r6) /* mask */
and r8,r8,r4
ld r9,-24(r6) /* value */
cmpld r8,r9
beq 1b
ld r8,-16(r6) /* section begin */
ld r9,-8(r6) /* section end */
subf. r9,r8,r9
beq 1b
/* write nops over the section of code */
/* todo: if large section, add a branch at the start of it */
srwi r9,r9,2
mtctr r9
sub r8,r8,r3
lis r0,0x60000000@h /* nop */
3: stw r0,0(r8)
andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
beq 2f
dcbst 0,r8 /* suboptimal, but simpler */
sync
icbi 0,r8
2: addi r8,r8,4
bdnz 3b
sync /* additional sync needed on g4 */
isync
b 1b
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
* Do an IO access in real mode
*/
_GLOBAL(real_readb)
mfmsr r7
ori r0,r7,MSR_DR
xori r0,r0,MSR_DR
sync
mtmsrd r0
sync
isync
mfspr r6,SPRN_HID4
rldicl r5,r6,32,0
ori r5,r5,0x100
rldicl r5,r5,32,0
sync
mtspr SPRN_HID4,r5
isync
slbia
isync
lbz r3,0(r3)
sync
mtspr SPRN_HID4,r6
isync
slbia
isync
mtmsrd r7
sync
isync
blr
/*
* Do an IO access in real mode
*/
_GLOBAL(real_writeb)
mfmsr r7
ori r0,r7,MSR_DR
xori r0,r0,MSR_DR
sync
mtmsrd r0
sync
isync
mfspr r6,SPRN_HID4
rldicl r5,r6,32,0
ori r5,r5,0x100
rldicl r5,r5,32,0
sync
mtspr SPRN_HID4,r5
isync
slbia
isync
stb r3,0(r4)
sync
mtspr SPRN_HID4,r6
isync
slbia
isync
mtmsrd r7
sync
isync
blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
/*
* SCOM access functions for 970 (FX only for now)
*
* unsigned long scom970_read(unsigned int address);
* void scom970_write(unsigned int address, unsigned long value);
*
* The address passed in is the 24-bit register address. This code
* is 970 specific and will not check the status bits, so you should
* know what you are doing.
*/
_GLOBAL(scom970_read)
/* interrupts off */
mfmsr r4
ori r0,r4,MSR_EE
xori r0,r0,MSR_EE
mtmsrd r0,1
/* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
* (including parity). On current CPUs they must be 0'd,
* and finally OR in the RW bit
*/
rlwinm r3,r3,8,0,15
ori r3,r3,0x8000
/* do the actual scom read */
sync
mtspr SPRN_SCOMC,r3
isync
mfspr r3,SPRN_SCOMD
isync
mfspr r0,SPRN_SCOMC
isync
/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
* that's the best we can do). Not implemented yet as we don't use
* the scom on any of the bogus CPUs yet, but may have to be done
* ultimately
*/
/* restore interrupts */
mtmsrd r4,1
blr
_GLOBAL(scom970_write)
/* interrupts off */
mfmsr r5
ori r0,r5,MSR_EE
xori r0,r0,MSR_EE
mtmsrd r0,1
/* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
* (including parity). On current CPUs they must be 0'd.
*/
rlwinm r3,r3,8,0,15
sync
mtspr SPRN_SCOMD,r4 /* write data */
isync
mtspr SPRN_SCOMC,r3 /* write command */
isync
mfspr 3,SPRN_SCOMC
isync
/* restore interrupts */
mtmsrd r5,1
blr
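/*
* A hedged usage sketch (illustrative, not from the original file):
* SCOM_ADDR below is a hypothetical 24-bit SCOM register address for the
* 970, and the read-modify-write pattern is only an example:
*
*	unsigned long val = scom970_read(SCOM_ADDR);
*	scom970_write(SCOM_ADDR, val | 0x1);
*/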
/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
*/
_GLOBAL(kernel_thread)
std r29,-24(r1)
std r30,-16(r1)
stdu r1,-STACK_FRAME_OVERHEAD(r1)
mr r29,r3
mr r30,r4
ori r3,r5,CLONE_VM /* flags */
oris r3,r3,(CLONE_UNTRACED>>16)
li r4,0 /* new sp (unused) */
li r0,__NR_clone
sc
cmpdi 0,r3,0 /* parent or child? */
bne 1f /* return if parent */
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
ld r2,8(r29)
ld r29,0(r29)
mtlr r29 /* fn addr in lr */
mr r3,r30 /* load arg and call fn */
blrl
li r0,__NR_exit /* exit after child exits */
li r3,0
sc
1: addi r1,r1,STACK_FRAME_OVERHEAD
ld r29,-24(r1)
ld r30,-16(r1)
blr
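/*
* A minimal usage sketch (illustrative, not from the original file): the
* thread function and the flags below are assumptions; kernel_thread()
* returns the new pid (or a negative errno) to the parent:
*
*	static int my_thread(void *arg)
*	{
*		...
*		return 0;
*	}
*
*	pid = kernel_thread(my_thread, NULL, CLONE_FS | CLONE_FILES);
*/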
/*
* disable_kernel_fp()
* Disable the FPU.
*/
_GLOBAL(disable_kernel_fp)
mfmsr r3
rldicl r0,r3,(63-MSR_FP_LG),1
rldicl r3,r0,(MSR_FP_LG+1),0
mtmsrd r3 /* disable use of fpu now */
isync
blr
#ifdef CONFIG_ALTIVEC
#if 0 /* this has no callers for now */
/*
* disable_kernel_altivec()
* Disable the VMX.
*/
_GLOBAL(disable_kernel_altivec)
mfmsr r3
rldicl r0,r3,(63-MSR_VEC_LG),1
rldicl r3,r0,(MSR_VEC_LG+1),0
mtmsrd r3 /* disable use of VMX now */
isync
blr
#endif /* 0 */
/*
* giveup_altivec(tsk)
* Disable VMX for the task given as the argument,
* and save the vector registers in its thread_struct.
* Enables the VMX for use in the kernel on return.
*/
_GLOBAL(giveup_altivec)
mfmsr r5
oris r5,r5,MSR_VEC@h
mtmsrd r5 /* enable use of VMX now */
isync
cmpdi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
ld r5,PT_REGS(r3)
cmpdi 0,r5,0
SAVE_32VRS(0,r4,r3)
mfvscr vr0
li r4,THREAD_VSCR
stvx vr0,r4,r3
beq 1f
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_VEC@h
andc r4,r4,r3 /* disable VMX for previous task */
std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
ld r4,last_task_used_altivec@got(r2)
std r5,0(r4)
#endif /* CONFIG_SMP */
blr
#endif /* CONFIG_ALTIVEC */
_GLOBAL(__setup_cpu_power3)
blr
_GLOBAL(execve)
li r0,__NR_execve
sc
bnslr
neg r3,r3
blr
/* kexec_wait(phys_cpu)
*
* wait for the flag to change, indicating this kernel is going away but
* the slave code for the next one is at addresses 0 to 0x100.
*
* This is used by all slaves.
*
* Physical (hardware) cpu id should be in r3.
*/
_GLOBAL(kexec_wait)
bl 1f
1: mflr r5
addi r5,r5,kexec_flag-1b
99: HMT_LOW
#ifdef CONFIG_KEXEC /* use no memory without kexec */
lwz r4,0(r5)
cmpwi 0,r4,0
bnea 0x60
#endif
b 99b
/* this can be in text because we won't change it until we are
* running in real mode anyway
*/
kexec_flag:
.long 0
#ifdef CONFIG_KEXEC
/* kexec_smp_wait(void)
*
* call with interrupts off
* note: this is a terminal routine, it does not save lr
*
* get phys id from paca
* set paca id to -1 to say we got here
* switch to real mode
* join other cpus in kexec_wait(phys_id)
*/
_GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
li r4,-1
sth r4,PACAHWCPUID(r13) /* let others know we left */
bl real_mode
b .kexec_wait
/*
* switch to real mode (turn mmu off)
* we use the early kernel trick that the hardware ignores bits
* 0 and 1 (big endian) of the effective address in real mode
*
* don't overwrite r3 here, it is live for kexec_wait above.
*/
real_mode: /* assume normal blr return */
1: li r9,MSR_RI
li r10,MSR_DR|MSR_IR
mflr r11 /* return address to SRR0 */
mfmsr r12
andc r9,r12,r9
andc r10,r12,r10
mtmsrd r9,1
mtspr SPRN_SRR1,r10
mtspr SPRN_SRR0,r11
rfid
/*
* kexec_sequence(newstack, start, image, control, clear_all())
*
* does the grungy work with stack switching and real mode switches
* also does simple calls to other code
*/
_GLOBAL(kexec_sequence)
mflr r0
std r0,16(r1)
/* switch stacks to newstack -- &kexec_stack.stack */
stdu r1,THREAD_SIZE-112(r3)
mr r1,r3
li r0,0
std r0,16(r1)
/* save regs for local vars on new stack.
* yes, we won't go back, but ...
*/
std r31,-8(r1)
std r30,-16(r1)
std r29,-24(r1)
std r28,-32(r1)
std r27,-40(r1)
std r26,-48(r1)
std r25,-56(r1)
stdu r1,-112-64(r1)
/* save args into preserved regs */
mr r31,r3 /* newstack (both) */
mr r30,r4 /* start (real) */
mr r29,r5 /* image (virt) */
mr r28,r6 /* control, unused */
mr r27,r7 /* clear_all() fn desc */
mr r26,r8 /* spare */
lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
/* disable interrupts, we are overwriting kernel data next */
mfmsr r3
rlwinm r3,r3,0,17,15
mtmsrd r3,1
/* copy dest pages, flush whole dest image */
mr r3,r29
bl .kexec_copy_flush /* (image) */
/* turn off mmu */
bl real_mode
/* clear out hardware hash page table and tlb */
ld r5,0(r27) /* deref function descriptor */
mtctr r5
bctrl /* ppc_md.hash_clear_all(void); */
/*
* kexec image calling is:
* the first 0x100 bytes of the entry point are copied to 0
*
* all slaves branch to slave = 0x60 (absolute)
* slave(phys_cpu_id);
*
* master goes to start = entry point
* start(phys_cpu_id, start, 0);
*
*
* a wrapper is needed to call existing kernels, here is an approximate
* description of one method:
*
* v2: (2.6.10)
* start will be near the boot_block (maybe 0x100 bytes before it?)
* it will have a 0x60, which will b to boot_block, where it will wait
* and 0 will store phys into struct boot-block and load r3 from there,
* copy kernel 0-0x100 and tell slaves to back down to 0x60 again
*
* v1: (2.6.9)
* boot block will have all cpus scanning device tree to see if they
* are the boot cpu ?????
* other device tree differences (prop sizes, va vs pa, etc)...
*/
/* copy 0x100 bytes starting at start to 0 */
li r3,0
mr r4,r30
li r5,0x100
li r6,0
bl .copy_and_flush /* (dest, src, copy limit, start offset) */
1: /* assume normal blr return */
/* release other cpus to the new kernel secondary start at 0x60 */
mflr r5
li r6,1
stw r6,kexec_flag-1b(5)
mr r3,r25 # my phys cpu
mr r4,r30 # start, aka phys mem offset
mtlr 4
li r5,0
blr /* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */

View File

@ -1,76 +0,0 @@
/*
* c 2001 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/console.h>
#include <net/checksum.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/hw_irq.h>
#include <asm/abs_addr.h>
#include <asm/cacheflush.h>
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(reloc_offset);
EXPORT_SYMBOL(_insb);
EXPORT_SYMBOL(_outsb);
EXPORT_SYMBOL(_insw);
EXPORT_SYMBOL(_outsw);
EXPORT_SYMBOL(_insl);
EXPORT_SYMBOL(_outsl);
EXPORT_SYMBOL(_insw_ns);
EXPORT_SYMBOL(_outsw_ns);
EXPORT_SYMBOL(_insl_ns);
EXPORT_SYMBOL(_outsl_ns);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(giveup_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
#endif
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(console_drivers);

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,136 +0,0 @@
/*
*
*
* PowerPC-specific semaphore code.
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
* to eliminate the SMP races in the old version between the updates
* of `count' and `waking'. Now we use negative `count' values to
* indicate that some process(es) are waiting for the semaphore.
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
/*
* Atomically update sem->count.
* This does the equivalent of the following:
*
* old_count = sem->count;
* tmp = MAX(old_count, 0) + incr;
* sem->count = tmp;
* return old_count;
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_count, tmp;
__asm__ __volatile__("\n"
"1: lwarx %0,0,%3\n"
" srawi %1,%0,31\n"
" andc %1,%0,%1\n"
" add %1,%1,%4\n"
" stwcx. %1,0,%3\n"
" bne 1b"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (&sem->count), "r" (incr), "m" (sem->count)
: "cc");
return old_count;
}
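/*
* Worked example of the update rule above (illustrative, not from the
* original file): with count == 1, __sem_update_count(sem, -1) stores
* MAX(1, 0) - 1 == 0 and returns the old value 1, so a __down() caller has
* acquired the semaphore; with count == 0 it stores -1 (recording a
* sleeper) and returns 0, so the caller sleeps and retries.
*/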
void __up(struct semaphore *sem)
{
/*
* Note that we incremented count in up() before we came here,
* but that was ineffective since the result was <= 0, and
* any negative value of count is equivalent to 0.
* This ends up setting count to 1, unless count is now > 0
* (i.e. because some other cpu has called up() in the meantime),
* in which case we just increment count.
*/
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);
/*
* Note that when we come in to __down or __down_interruptible,
* we have already decremented count, but that decrement was
* ineffective since the result was < 0, and any negative value
* of count is equivalent to 0.
* Thus it is only when we decrement count from some value > 0
* that we have actually got the semaphore.
*/
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
/*
* Try to get the semaphore. If the count is > 0, then we've
* got the semaphore; we decrement count and exit the loop.
* If the count is 0 or negative, we set it to -1, indicating
* that we are asleep, and then sleep.
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
/*
* If there are any more sleepers, wake one of them up so
* that it can either get the semaphore, or set count to -1
* indicating that there are still processes sleeping.
*/
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__down);
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
/*
* A signal is pending - give up trying.
* Set sem->count to 0 if it is negative,
* since we are no longer sleeping.
*/
__sem_update_count(sem, 0);
retval = -EINTR;
break;
}
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
wake_up(&sem->wait);
return retval;
}
EXPORT_SYMBOL(__down_interruptible);

View File

@ -1,625 +0,0 @@
/*
* linux/arch/ppc64/kernel/vdso.c
*
* Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/systemcfg.h>
#include <asm/vdso.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
/*
* The vDSOs themselves are here
*/
extern char vdso64_start, vdso64_end;
extern char vdso32_start, vdso32_end;
static void *vdso64_kbase = &vdso64_start;
static void *vdso32_kbase = &vdso32_start;
unsigned int vdso64_pages;
unsigned int vdso32_pages;
/* Signal trampolines user addresses */
unsigned long vdso64_rt_sigtramp;
unsigned long vdso32_sigtramp;
unsigned long vdso32_rt_sigtramp;
/* Format of the patch table */
struct vdso_patch_def
{
u32 pvr_mask, pvr_value;
const char *gen_name;
const char *fix_name;
};
/* Table of functions to patch based on the CPU type/revision
*
* TODO: Improve by adding whole lists for each entry
*/
static struct vdso_patch_def vdso_patches[] = {
{
0xffff0000, 0x003a0000, /* POWER5 */
"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
},
{
0xffff0000, 0x003b0000, /* POWER5 */
"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
},
};
/*
* Some information carried around for each of them during parsing at
* boot time.
*/
struct lib32_elfinfo
{
Elf32_Ehdr *hdr; /* ptr to ELF */
Elf32_Sym *dynsym; /* ptr to .dynsym section */
unsigned long dynsymsize; /* size of .dynsym section */
char *dynstr; /* ptr to .dynstr section */
unsigned long text; /* offset of .text section in .so */
};
struct lib64_elfinfo
{
Elf64_Ehdr *hdr;
Elf64_Sym *dynsym;
unsigned long dynsymsize;
char *dynstr;
unsigned long text;
};
#ifdef __DEBUG
static void dump_one_vdso_page(struct page *pg, struct page *upg)
{
printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
page_count(pg),
pg->flags);
if (upg/* && pg != upg*/) {
printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg) << PAGE_SHIFT),
page_count(upg),
upg->flags);
}
printk("\n");
}
static void dump_vdso_pages(struct vm_area_struct * vma)
{
int i;
if (!vma || test_thread_flag(TIF_32BIT)) {
printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
for (i=0; i<vdso32_pages; i++) {
struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
struct page *upg = (vma && vma->vm_mm) ?
follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
: NULL;
dump_one_vdso_page(pg, upg);
}
}
if (!vma || !test_thread_flag(TIF_32BIT)) {
printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
for (i=0; i<vdso64_pages; i++) {
struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
struct page *upg = (vma && vma->vm_mm) ?
follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
: NULL;
dump_one_vdso_page(pg, upg);
}
}
}
#endif /* DEBUG */
/*
* Keep a dummy vma_close for now, it will prevent VMA merging.
*/
static void vdso_vma_close(struct vm_area_struct * vma)
{
}
/*
* Our nopage() function maps in the actual vDSO kernel pages; they will
* be mapped read-only by do_no_page(), and eventually COW'ed, either
* right away for an initial write access, or by do_wp_page().
*/
static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
unsigned long address, int *type)
{
unsigned long offset = address - vma->vm_start;
struct page *pg;
void *vbase = test_thread_flag(TIF_32BIT) ? vdso32_kbase : vdso64_kbase;
DBG("vdso_vma_nopage(current: %s, address: %016lx, off: %lx)\n",
current->comm, address, offset);
if (address < vma->vm_start || address > vma->vm_end)
return NOPAGE_SIGBUS;
/*
* Last page is systemcfg.
*/
if ((vma->vm_end - address) <= PAGE_SIZE)
pg = virt_to_page(_systemcfg);
else
pg = virt_to_page(vbase + offset);
get_page(pg);
DBG(" ->page count: %d\n", page_count(pg));
return pg;
}
static struct vm_operations_struct vdso_vmops = {
.close = vdso_vma_close,
.nopage = vdso_vma_nopage,
};
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long vdso_pages;
unsigned long vdso_base;
if (test_thread_flag(TIF_32BIT)) {
vdso_pages = vdso32_pages;
vdso_base = VDSO32_MBASE;
} else {
vdso_pages = vdso64_pages;
vdso_base = VDSO64_MBASE;
}
current->thread.vdso_base = 0;
/* vDSO has a problem and was disabled, just don't "enable" it for the
* process
*/
if (vdso_pages == 0)
return 0;
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (vma == NULL)
return -ENOMEM;
memset(vma, 0, sizeof(*vma));
/*
* pick a base address for the vDSO in process space. We try to put it
* at vdso_base which is the "natural" base for it, but we might fail
* and end up putting it elsewhere.
*/
vdso_base = get_unmapped_area(NULL, vdso_base,
vdso_pages << PAGE_SHIFT, 0, 0);
if (vdso_base & ~PAGE_MASK) {
kmem_cache_free(vm_area_cachep, vma);
return (int)vdso_base;
}
current->thread.vdso_base = vdso_base;
vma->vm_mm = mm;
vma->vm_start = current->thread.vdso_base;
/*
* the VMA size is one page more than the vDSO since systemcfg
* is mapped in the last one
*/
vma->vm_end = vma->vm_start + ((vdso_pages + 1) << PAGE_SHIFT);
/*
* our vma flags don't have VM_WRITE, so by default the process isn't allowed
* to write those pages.
* gdb can break that via the ptrace interface and thus trigger COW on those
* pages, but it is then your responsibility never to do that on the "data" page
* of the vDSO, or you'll stop getting kernel updates and your nice userland
* gettimeofday will be totally dead. It's fine to use that for setting
* breakpoints in the vDSO code pages, though.
*/
vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_RESERVED;
vma->vm_flags |= mm->def_flags;
vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
vma->vm_ops = &vdso_vmops;
down_write(&mm->mmap_sem);
if (insert_vm_struct(mm, vma)) {
up_write(&mm->mmap_sem);
kmem_cache_free(vm_area_cachep, vma);
return -ENOMEM;
}
mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
up_write(&mm->mmap_sem);
return 0;
}
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
unsigned long *size)
{
Elf32_Shdr *sechdrs;
unsigned int i;
char *secnames;
/* Grab section headers and strings so we can tell who is who */
sechdrs = (void *)ehdr + ehdr->e_shoff;
secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
/* Find the section they want */
for (i = 1; i < ehdr->e_shnum; i++) {
if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
if (size)
*size = sechdrs[i].sh_size;
return (void *)ehdr + sechdrs[i].sh_offset;
}
}
*size = 0;
return NULL;
}
static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
unsigned long *size)
{
Elf64_Shdr *sechdrs;
unsigned int i;
char *secnames;
/* Grab section headers and strings so we can tell who is who */
sechdrs = (void *)ehdr + ehdr->e_shoff;
secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
/* Find the section they want */
for (i = 1; i < ehdr->e_shnum; i++) {
if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
if (size)
*size = sechdrs[i].sh_size;
return (void *)ehdr + sechdrs[i].sh_offset;
}
}
if (size)
*size = 0;
return NULL;
}
static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib, const char *symname)
{
unsigned int i;
char name[32], *c;
for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
if (lib->dynsym[i].st_name == 0)
continue;
strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 32);
c = strchr(name, '@');
if (c)
*c = 0;
if (strcmp(symname, name) == 0)
return &lib->dynsym[i];
}
return NULL;
}
static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib, const char *symname)
{
unsigned int i;
char name[32], *c;
for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
if (lib->dynsym[i].st_name == 0)
continue;
strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 32);
c = strchr(name, '@');
if (c)
*c = 0;
if (strcmp(symname, name) == 0)
return &lib->dynsym[i];
}
return NULL;
}
/* Note that we assume the section is .text and the symbol is relative to
* the library base
*/
static unsigned long __init find_function32(struct lib32_elfinfo *lib, const char *symname)
{
Elf32_Sym *sym = find_symbol32(lib, symname);
if (sym == NULL) {
printk(KERN_WARNING "vDSO32: function %s not found !\n", symname);
return 0;
}
return sym->st_value - VDSO32_LBASE;
}
/* Note that we assume the section is .text and the symbol is relative to
* the library base
*/
static unsigned long __init find_function64(struct lib64_elfinfo *lib, const char *symname)
{
Elf64_Sym *sym = find_symbol64(lib, symname);
if (sym == NULL) {
printk(KERN_WARNING "vDSO64: function %s not found !\n", symname);
return 0;
}
#ifdef VDS64_HAS_DESCRIPTORS
return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) - VDSO64_LBASE;
#else
return sym->st_value - VDSO64_LBASE;
#endif
}
static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
void *sect;
/*
* Locate symbol tables & text section
*/
v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
if (v32->dynsym == NULL || v32->dynstr == NULL) {
printk(KERN_ERR "vDSO32: a required symbol section was not found\n");
return -1;
}
sect = find_section32(v32->hdr, ".text", NULL);
if (sect == NULL) {
printk(KERN_ERR "vDSO32: the .text section was not found\n");
return -1;
}
v32->text = sect - vdso32_kbase;
v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
if (v64->dynsym == NULL || v64->dynstr == NULL) {
printk(KERN_ERR "vDSO64: a required symbol section was not found\n");
return -1;
}
sect = find_section64(v64->hdr, ".text", NULL);
if (sect == NULL) {
printk(KERN_ERR "vDSO64: the .text section was not found\n");
return -1;
}
v64->text = sect - vdso64_kbase;
return 0;
}
static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
/*
* Find signal trampolines
*/
vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64");
vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32");
vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
}
static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
Elf32_Sym *sym32;
Elf64_Sym *sym64;
sym32 = find_symbol32(v32, "__kernel_datapage_offset");
if (sym32 == NULL) {
printk(KERN_ERR "vDSO32: Can't find symbol __kernel_datapage_offset !\n");
return -1;
}
*((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
(vdso32_pages << PAGE_SHIFT) - (sym32->st_value - VDSO32_LBASE);
sym64 = find_symbol64(v64, "__kernel_datapage_offset");
if (sym64 == NULL) {
printk(KERN_ERR "vDSO64: Can't find symbol __kernel_datapage_offset !\n");
return -1;
}
*((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
(vdso64_pages << PAGE_SHIFT) - (sym64->st_value - VDSO64_LBASE);
return 0;
}
static int vdso_do_func_patch32(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64,
const char *orig, const char *fix)
{
Elf32_Sym *sym32_gen, *sym32_fix;
sym32_gen = find_symbol32(v32, orig);
if (sym32_gen == NULL) {
printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
return -1;
}
sym32_fix = find_symbol32(v32, fix);
if (sym32_fix == NULL) {
printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
return -1;
}
sym32_gen->st_value = sym32_fix->st_value;
sym32_gen->st_size = sym32_fix->st_size;
sym32_gen->st_info = sym32_fix->st_info;
sym32_gen->st_other = sym32_fix->st_other;
sym32_gen->st_shndx = sym32_fix->st_shndx;
return 0;
}
static int vdso_do_func_patch64(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64,
const char *orig, const char *fix)
{
Elf64_Sym *sym64_gen, *sym64_fix;
sym64_gen = find_symbol64(v64, orig);
if (sym64_gen == NULL) {
printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
return -1;
}
sym64_fix = find_symbol64(v64, fix);
if (sym64_fix == NULL) {
printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
return -1;
}
sym64_gen->st_value = sym64_fix->st_value;
sym64_gen->st_size = sym64_fix->st_size;
sym64_gen->st_info = sym64_fix->st_info;
sym64_gen->st_other = sym64_fix->st_other;
sym64_gen->st_shndx = sym64_fix->st_shndx;
return 0;
}
static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
u32 pvr;
int i;
pvr = mfspr(SPRN_PVR);
for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
struct vdso_patch_def *patch = &vdso_patches[i];
int match = (pvr & patch->pvr_mask) == patch->pvr_value;
DBG("patch %d (mask: %x, pvr: %x) : %s\n",
i, patch->pvr_mask, patch->pvr_value, match ? "match" : "skip");
if (!match)
continue;
DBG("replacing %s with %s...\n", patch->gen_name, patch->fix_name);
/*
* Patch the 32 bits and 64 bits symbols. Note that we do not patch
* the "." symbol on 64 bits. It would be easy to do, but doesn't
* seem to be necessary; patching the OPD symbol is enough.
*/
vdso_do_func_patch32(v32, v64, patch->gen_name, patch->fix_name);
vdso_do_func_patch64(v32, v64, patch->gen_name, patch->fix_name);
}
return 0;
}
static __init int vdso_setup(void)
{
struct lib32_elfinfo v32;
struct lib64_elfinfo v64;
v32.hdr = vdso32_kbase;
v64.hdr = vdso64_kbase;
if (vdso_do_find_sections(&v32, &v64))
return -1;
if (vdso_fixup_datapage(&v32, &v64))
return -1;
if (vdso_fixup_alt_funcs(&v32, &v64))
return -1;
vdso_setup_trampolines(&v32, &v64);
return 0;
}
void __init vdso_init(void)
{
int i;
vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
DBG("vdso64_kbase: %p, 0x%x pages, vdso32_kbase: %p, 0x%x pages\n",
vdso64_kbase, vdso64_pages, vdso32_kbase, vdso32_pages);
/*
* Initialize the vDSO images in memory, that is do necessary
* fixups of vDSO symbols, locate trampolines, etc...
*/
if (vdso_setup()) {
printk(KERN_ERR "vDSO setup failure, not enabled !\n");
/* XXX should free pages here ? */
vdso64_pages = vdso32_pages = 0;
return;
}
/* Make sure pages are in the correct state */
for (i = 0; i < vdso64_pages; i++) {
struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
ClearPageReserved(pg);
get_page(pg);
}
for (i = 0; i < vdso32_pages; i++) {
struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
ClearPageReserved(pg);
get_page(pg);
}
get_page(virt_to_page(_systemcfg));
}
int in_gate_area_no_task(unsigned long addr)
{
return 0;
}
int in_gate_area(struct task_struct *task, unsigned long addr)
{
return 0;
}
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
return NULL;
}

View File

@ -1,151 +0,0 @@
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
SECTIONS
{
/* Sections to be discarded. */
/DISCARD/ : {
*(.exitcall.exit)
}
/* Read-only sections, merged into text segment: */
.text : {
*(.text .text.*)
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
. = ALIGN(PAGE_SIZE);
_etext = .;
}
__ex_table : {
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
__bug_table : {
__start___bug_table = .;
*(__bug_table)
__stop___bug_table = .;
}
__ftr_fixup : {
__start___ftr_fixup = .;
*(__ftr_fixup)
__stop___ftr_fixup = .;
}
RODATA
/* will be freed after init */
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init.text : {
_sinittext = .;
*(.init.text)
_einittext = .;
}
.init.data : {
*(.init.data)
}
. = ALIGN(16);
.init.setup : {
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
.initcall.init : {
__initcall_start = .;
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
*(.initcall4.init)
*(.initcall5.init)
*(.initcall6.init)
*(.initcall7.init)
__initcall_end = .;
}
.con_initcall.init : {
__con_initcall_start = .;
*(.con_initcall.init)
__con_initcall_end = .;
}
SECURITY_INIT
. = ALIGN(PAGE_SIZE);
.init.ramfs : {
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
}
.data.percpu : {
__per_cpu_start = .;
*(.data.percpu)
__per_cpu_end = .;
}
. = ALIGN(PAGE_SIZE);
. = ALIGN(16384);
__init_end = .;
/* freed after init ends here */
/* Read/write sections */
. = ALIGN(PAGE_SIZE);
. = ALIGN(16384);
_sdata = .;
/* The initial task and kernel stack */
.data.init_task : {
*(.data.init_task)
}
. = ALIGN(PAGE_SIZE);
.data.page_aligned : {
*(.data.page_aligned)
}
.data.cacheline_aligned : {
*(.data.cacheline_aligned)
}
.data : {
*(.data .data.rel* .toc1)
*(.branch_lt)
}
.opd : {
*(.opd)
}
.got : {
__toc_start = .;
*(.got)
*(.toc)
. = ALIGN(PAGE_SIZE);
_edata = .;
}
. = ALIGN(PAGE_SIZE);
.bss : {
__bss_start = .;
*(.bss)
__bss_stop = .;
}
. = ALIGN(PAGE_SIZE);
_end = . ;
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 1996 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define GETREG(reg) \
static inline unsigned long get_ ## reg (void) \
{ unsigned long ret; asm volatile ("mf" #reg " %0" : "=r" (ret) :); return ret; }
#define SETREG(reg) \
static inline void set_ ## reg (unsigned long val) \
{ asm volatile ("mt" #reg " %0" : : "r" (val)); }
GETREG(msr)
SETREG(msrd)
GETREG(cr)
#define GSETSPR(n, name) \
static inline long get_ ## name (void) \
{ long ret; asm volatile ("mfspr %0," #n : "=r" (ret) : ); return ret; } \
static inline void set_ ## name (long val) \
{ asm volatile ("mtspr " #n ",%0" : : "r" (val)); }
GSETSPR(0, mq)
GSETSPR(1, xer)
GSETSPR(4, rtcu)
GSETSPR(5, rtcl)
GSETSPR(8, lr)
GSETSPR(9, ctr)
GSETSPR(18, dsisr)
GSETSPR(19, dar)
GSETSPR(22, dec)
GSETSPR(25, sdr1)
GSETSPR(26, srr0)
GSETSPR(27, srr1)
GSETSPR(272, sprg0)
GSETSPR(273, sprg1)
GSETSPR(274, sprg2)
GSETSPR(275, sprg3)
GSETSPR(282, ear)
GSETSPR(287, pvr)
GSETSPR(1008, hid0)
GSETSPR(1009, hid1)
GSETSPR(1010, iabr)
GSETSPR(1023, pir)
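/*
* Illustrative expansion (not from the original header): GSETSPR(287, pvr),
* for instance, generates roughly this pair of accessors:
*
*	static inline long get_pvr(void)
*	{ long ret; asm volatile ("mfspr %0,287" : "=r" (ret) : ); return ret; }
*	static inline void set_pvr(long val)
*	{ asm volatile ("mtspr 287,%0" : : "r" (val)); }
*/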
static inline void store_inst(void *p)
{
asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
}
static inline void cflush(void *p)
{
asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
}
static inline void cinval(void *p)
{
asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
}

View File

@ -1,328 +0,0 @@
#ifndef _PPC64_PAGE_H
#define _PPC64_PAGE_H
/*
* Copyright (C) 2001 PPC64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <asm/asm-compat.h>
/*
* We support either a 4k or a 64k software page size. When using 64k pages,
* however, whether we are really supporting 64k pages in HW or not is
* irrelevant to these definitions. We always define HW_PAGE_SHIFT to 12,
* as the use of 64k pages remains Linux-kernel specific; every notion of a
* page number shared with the firmware, TCEs, iommu, etc. still assumes
* a page size of 4096.
*/
#ifdef CONFIG_PPC_64K_PAGES
#define PAGE_SHIFT 16
#else
#define PAGE_SHIFT 12
#endif
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
/* HW_PAGE_SHIFT is always 4k pages */
#define HW_PAGE_SHIFT 12
#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
/* PAGE_FACTOR is the difference in bits between PAGE_SHIFT and
* HW_PAGE_SHIFT, i.e. the shift between kernel pages and 4k hardware pages
*/
#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
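/*
* Worked example (illustrative, not from the original header): with
* CONFIG_PPC_64K_PAGES, PAGE_SHIFT is 16, so PAGE_FACTOR is 4 and one
* kernel page covers 1 << PAGE_FACTOR == 16 hardware 4k pages; e.g. the
* first hardware pfn of a kernel pfn is
*
*	hw_pfn = linux_pfn << PAGE_FACTOR;
*/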
/* Segment size */
#define SID_SHIFT 28
#define SID_MASK 0xfffffffffUL
#define ESID_MASK 0xfffffffff0000000UL
#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
/* Large pages size */
#ifndef __ASSEMBLY__
extern unsigned int HPAGE_SHIFT;
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_HUGETLB_PAGE
#define HTLB_AREA_SHIFT 40
#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
- (1U << GET_ESID(addr))) & 0xffff)
#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
- (1U << GET_HTLB_AREA(addr))) & 0xffff)
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define touches_hugepage_low_range(mm, addr, len) \
(LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
#define touches_hugepage_high_range(mm, addr, len) \
(HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
#define __within_hugepage_low_range(addr, len, segmask) \
((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
#define within_hugepage_low_range(addr, len) \
__within_hugepage_low_range((addr), (len), \
current->mm->context.low_htlb_areas)
#define __within_hugepage_high_range(addr, len, zonemask) \
((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
#define within_hugepage_high_range(addr, len) \
__within_hugepage_high_range((addr), (len), \
current->mm->context.high_htlb_areas)
#define is_hugepage_only_range(mm, addr, len) \
(touches_hugepage_high_range((mm), (addr), (len)) || \
touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define in_hugepage_area(context, addr) \
(cpu_has_feature(CPU_FTR_16M_PAGE) && \
( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
( ((addr) < 0x100000000L) && \
((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
#else /* !CONFIG_HUGETLB_PAGE */
#define in_hugepage_area(mm, addr) 0
#endif /* !CONFIG_HUGETLB_PAGE */
/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
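/*
* Worked example (illustrative, not from the original header): with 4k
* pages, _ALIGN_UP(0x1234, 0x1000) == 0x2000, _ALIGN_DOWN(0x1234, 0x1000)
* == 0x1000 and PAGE_ALIGN(0x1234) == 0x2000; an already aligned value is
* unchanged, e.g. _ALIGN_UP(0x2000, 0x1000) == 0x2000.
*/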
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/cache.h>
#undef STRICT_MM_TYPECHECKS
#define REGION_SIZE 4UL
#define REGION_SHIFT 60UL
#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
static __inline__ void clear_page(void *addr)
{
unsigned long lines, line_size;
line_size = ppc64_caches.dline_size;
lines = ppc64_caches.dlines_per_page;
__asm__ __volatile__(
"mtctr %1 # clear_page\n\
1: dcbz 0,%0\n\
add %0,%0,%3\n\
bdnz+ 1b"
: "=r" (addr)
: "r" (lines), "0" (addr), "r" (line_size)
: "ctr", "memory");
}
extern void copy_4K_page(void *to, void *from);
#ifdef CONFIG_PPC_64K_PAGES
static inline void copy_page(void *to, void *from)
{
unsigned int i;
for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
copy_4K_page(to, from);
to += 4096;
from += 4096;
}
}
#else /* CONFIG_PPC_64K_PAGES */
static inline void copy_page(void *to, void *from)
{
copy_4K_page(to, from);
}
#endif /* CONFIG_PPC_64K_PAGES */
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);
#ifdef STRICT_MM_TYPECHECKS
/*
* These are used to make use of C type-checking.
* Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
*/
/* PTE level */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) })
/* 64k pages additionally define a bigger "real PTE" type that gathers
* the "second half" part of the PTE for pseudo 64k pages
*/
#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif
/* PMD level */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) })
/* PUD level exists only on 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) })
#endif
/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#define __pgd(x) ((pgd_t) { (x) })
/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) })
#else
/*
* .. while these make it easier on the compiler
*/
typedef unsigned long pte_t;
#define pte_val(x) (x)
#define __pte(x) (x)
#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef unsigned long real_pte_t;
#endif
typedef unsigned long pmd_t;
#define pmd_val(x) (x)
#define __pmd(x) (x)
#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x) (x)
#define __pud(x) (x)
#endif
typedef unsigned long pgd_t;
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
typedef unsigned long pgprot_t;
#define __pgd(x) (x)
#define __pgprot(x) (x)
#endif
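/*
* Illustrative example (not from the original header): with
* STRICT_MM_TYPECHECKS defined, the wrapper structs turn mixed-up page
* table types into compile errors:
*
*	pte_t pte = __pte(0x1234);
*	unsigned long raw = pte_val(pte);	(must unwrap explicitly)
*	pgd_t pgd = pte;			(compile error, distinct types)
*
* Without it the types collapse to unsigned long and such mistakes compile
* silently.
*/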
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
extern int page_is_ram(unsigned long pfn);
extern u64 ppc64_pft_size; /* Log 2 of page table size */
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
#endif /* __ASSEMBLY__ */
#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned \
__attribute__((__aligned__(PAGE_SIZE), \
__section__(".data.page_aligned")))
#endif
/* This must match the -Ttext linker address */
/* Note: tophys & tovirt make assumptions about how */
/* KERNELBASE is defined for performance reasons. */
/* When KERNELBASE moves, those macros may have */
/* to change! */
#define PAGE_OFFSET ASM_CONST(0xC000000000000000)
#define KERNELBASE PAGE_OFFSET
#define VMALLOCBASE ASM_CONST(0xD000000000000000)
#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
#define USER_REGION_ID (0UL)
#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
#ifdef CONFIG_FLATMEM
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/*
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
* and needs to be executable. This means the whole heap ends
* up being executable.
*/
#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS \
(test_thread_flag(TIF_32BIT) ? \
VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
/*
* This is the default if a program doesn't have a PT_GNU_STACK
* program header entry. The PPC64 ELF ABI has a non-executable stack
* by default, so in the absence of a PT_GNU_STACK program header
* we turn execute permission off.
*/
#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_STACK_DEFAULT_FLAGS \
(test_thread_flag(TIF_32BIT) ? \
VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
#endif /* __KERNEL__ */
#include <asm-generic/page.h>
#endif /* _PPC64_PAGE_H */