/* linux/include/asm-generic/vmlinux.lds.h */


#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif
#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif
/* Align . to an 8 byte boundary, equal to the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec) *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec) *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
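/*
 * Sketch of how these pair with the annotations in <linux/init.h>:
 * data marked __devinitdata is emitted into .devinit.data, so with
 * CONFIG_HOTPLUG=y it is pulled into the kept data via
 * DEV_KEEP(init.data); without it, DEV_DISCARD(init.data) lets the
 * architecture linker script route it to /DISCARD/. For example
 * (hypothetical variable):
 *
 *	static int probe_order __devinitdata = 1;
 *
 * The CPU_* and MEM_* pairs work the same way for the __cpuinit* and
 * __meminit* annotations under CONFIG_HOTPLUG_CPU and
 * CONFIG_MEMORY_HOTPLUG.
 */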
/* .data section */
#define DATA_DATA \
*(.data) \
*(.data.init.refok) \
*(.ref.data) \
DEV_KEEP(init.data) \
DEV_KEEP(exit.data) \
CPU_KEEP(init.data) \
CPU_KEEP(exit.data) \
MEM_KEEP(init.data) \
MEM_KEEP(exit.data) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___markers) = .; \
*(__markers) \
VMLINUX_SYMBOL(__stop___markers) = .;
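/*
 * __start___markers/__stop___markers bound the array of struct marker
 * entries generated by <linux/marker.h>; kernel/marker.c walks that
 * range roughly like this (sketch):
 *
 *	extern struct marker __start___markers[], __stop___markers[];
 *	struct marker *iter;
 *	for (iter = __start___markers; iter < __stop___markers; iter++)
 *		...;
 *
 * .ref.data above collects variables annotated __refdata, which may
 * intentionally reference __init/__exit objects.
 */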
#define RO_DATA(align) \
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
*(__vermagic) /* Kernel version magic */ \
*(__markers_strings) /* Markers: strings */ \
} \
\
.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
*(.rodata1) \
} \
\
BUG_TABLE \
\
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
*(.pci_fixup_early) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
*(.pci_fixup_header) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
*(.pci_fixup_final) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
*(.pci_fixup_enable) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
*(.pci_fixup_resume) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
*(.pci_fixup_resume_early) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
*(.pci_fixup_suspend) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
*(.builtin_fw) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
/* RapidIO route ops */ \
.rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
*(.rio_route_ops) \
VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
} \
\
TRACEDATA \
\
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
*(__ksymtab) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
*(__ksymtab_gpl) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
*(__ksymtab_unused) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
*(__ksymtab_unused_gpl) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
*(__ksymtab_gpl_future) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
*(__kcrctab) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
*(__kcrctab_gpl) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
*(__kcrctab_unused) \
VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
*(__kcrctab_unused_gpl) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
*(__kcrctab_gpl_future) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
*(__ksymtab_strings) \
} \
\
/* __*init sections */ \
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
*(.ref.rodata) \
DEV_KEEP(init.rodata) \
DEV_KEEP(exit.rodata) \
CPU_KEEP(init.rodata) \
CPU_KEEP(exit.rodata) \
MEM_KEEP(init.rodata) \
MEM_KEEP(exit.rodata) \
} \
\
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
*(__param) \
VMLINUX_SYMBOL(__stop___param) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
} \
. = ALIGN((align));
/* RODATA is provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA RO_DATA(4096)
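/*
 * The tables gathered inside RO_DATA are filled in by registration
 * macros elsewhere; the start/stop symbols let core code iterate each
 * one as a plain array. A few examples (sketches, with hypothetical
 * driver-side names):
 *
 *	DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FOO, 0x0001, foo_quirk);
 *		=> entry in .pci_fixup_header, run by pci_fixup_device()
 *	EXPORT_SYMBOL(foo_reset);
 *		=> struct kernel_symbol in __ksymtab (CRCs land in the
 *		   matching __kcrctab* section under CONFIG_MODVERSIONS)
 *	EXPORT_SYMBOL_GPL(foo_init);
 *		=> __ksymtab_gpl
 *	module_param(foo_debug, int, 0644);
 *		=> struct kernel_param in __param, parsed by parse_args()
 */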
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
*(.security_initcall.init) \
VMLINUX_SYMBOL(__security_initcall_end) = .; \
}
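/*
 * security_initcall(fn) in <linux/init.h> drops fn's address into
 * .security_initcall.init; security_init() then calls everything
 * between __security_initcall_start and __security_initcall_end.
 * SELinux, for instance, registers itself with:
 *
 *	security_initcall(selinux_init);
 */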
/* .text section. Map to function alignment to avoid address changes
 * during the second ld run when generating System.map */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text.hot) \
*(.text) \
*(.ref.text) \
*(.text.init.refok) \
*(.exit.text.refok) \
DEV_KEEP(init.text) \
DEV_KEEP(exit.text) \
CPU_KEEP(init.text) \
CPU_KEEP(exit.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
*(.text.unlikely)
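/*
 * .ref.text collects functions annotated __ref, which may deliberately
 * call __init/__exit code; .text.init.refok and .exit.text.refok are
 * the older __init_refok/__exit_refok spellings kept for
 * compatibility. Sketch (hypothetical callback):
 *
 *	static int __ref foo_cpu_callback(struct notifier_block *nfb,
 *					  unsigned long action, void *hcpu);
 */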
/* sched.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map */
#define SCHED_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__sched_text_start) = .; \
*(.sched.text) \
VMLINUX_SYMBOL(__sched_text_end) = .;
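/*
 * The bracketing symbols let in_sched_functions() (kernel/sched.c)
 * classify an address so stack walkers can skip scheduler internals,
 * roughly:
 *
 *	return addr >= (unsigned long)__sched_text_start &&
 *	       addr <  (unsigned long)__sched_text_end;
 */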
/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map */
#define LOCK_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__lock_text_start) = .; \
*(.spinlock.text) \
VMLINUX_SYMBOL(__lock_text_end) = .;
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__kprobes_text_start) = .; \
*(.kprobes.text) \
VMLINUX_SYMBOL(__kprobes_text_end) = .;
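/*
 * Functions marked __kprobes (<linux/kprobes.h>) land in .kprobes.text;
 * register_kprobe() rejects any probe whose address falls between
 * __kprobes_text_start and __kprobes_text_end, so the probe machinery
 * cannot recurse into itself. Sketch:
 *
 *	static void __kprobes trap_handler(struct pt_regs *regs);
 */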
/* Section used for early init (in .S files) */
#define HEAD_TEXT *(.head.text)
/* init and exit section handling */
#define INIT_DATA \
*(.init.data) \
DEV_DISCARD(init.data) \
DEV_DISCARD(init.rodata) \
CPU_DISCARD(init.data) \
CPU_DISCARD(init.rodata) \
MEM_DISCARD(init.data) \
MEM_DISCARD(init.rodata)
#define INIT_TEXT \
*(.init.text) \
DEV_DISCARD(init.text) \
CPU_DISCARD(init.text) \
MEM_DISCARD(init.text)
#define EXIT_DATA \
*(.exit.data) \
DEV_DISCARD(exit.data) \
DEV_DISCARD(exit.rodata) \
CPU_DISCARD(exit.data) \
CPU_DISCARD(exit.rodata) \
MEM_DISCARD(exit.data) \
MEM_DISCARD(exit.rodata)
#define EXIT_TEXT \
*(.exit.text) \
DEV_DISCARD(exit.text) \
CPU_DISCARD(exit.text) \
MEM_DISCARD(exit.text)
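/*
 * For built-in code, architecture linker scripts normally send
 * EXIT_TEXT/EXIT_DATA to /DISCARD/, since exit paths can never run;
 * the *_KEEP() entries in TEXT_TEXT/DATA_DATA claim the hotplug-
 * relevant subsections first when the matching CONFIG option is set,
 * so only truly dead code is dropped. Sketch of an arch vmlinux.lds.S
 * fragment:
 *
 *	/DISCARD/ : {
 *		EXIT_TEXT
 *		EXIT_DATA
 *		*(.exitcall.exit)
 *	}
 */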
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to
the beginning of the section so we begin them at 0. */
#define DWARF_DEBUG \
/* DWARF 1 */ \
.debug 0 : { *(.debug) } \
.line 0 : { *(.line) } \
/* GNU DWARF 1 extensions */ \
.debug_srcinfo 0 : { *(.debug_srcinfo) } \
.debug_sfnames 0 : { *(.debug_sfnames) } \
/* DWARF 1.1 and DWARF 2 */ \
.debug_aranges 0 : { *(.debug_aranges) } \
.debug_pubnames 0 : { *(.debug_pubnames) } \
/* DWARF 2 */ \
.debug_info 0 : { *(.debug_info \
.gnu.linkonce.wi.*) } \
.debug_abbrev 0 : { *(.debug_abbrev) } \
.debug_line 0 : { *(.debug_line) } \
.debug_frame 0 : { *(.debug_frame) } \
.debug_str 0 : { *(.debug_str) } \
.debug_loc 0 : { *(.debug_loc) } \
.debug_macinfo 0 : { *(.debug_macinfo) } \
/* SGI/MIPS DWARF 2 extensions */ \
.debug_weaknames 0 : { *(.debug_weaknames) } \
.debug_funcnames 0 : { *(.debug_funcnames) } \
.debug_typenames 0 : { *(.debug_typenames) } \
.debug_varnames 0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG \
.stab 0 : { *(.stab) } \
.stabstr 0 : { *(.stabstr) } \
.stab.excl 0 : { *(.stab.excl) } \
.stab.exclstr 0 : { *(.stab.exclstr) } \
.stab.index 0 : { *(.stab.index) } \
.stab.indexstr 0 : { *(.stab.indexstr) } \
.comment 0 : { *(.comment) }
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
__start___bug_table = .; \
*(__bug_table) \
__stop___bug_table = .; \
}
#else
#define BUG_TABLE
#endif
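/*
 * On a BUG trap, report_bug() (lib/bug.c) scans the struct bug_entry
 * array between these symbols for the faulting address; with
 * CONFIG_DEBUG_BUGVERBOSE it prints the recorded file/line. Roughly:
 *
 *	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
 *		if (bug->bug_addr == bugaddr)
 *			return bug;
 *
 * Modules carry their own __bug_table, registered through
 * module_bug_finalize().
 */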
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
__tracedata_start = .; \
*(.tracedata) \
__tracedata_end = .; \
}
#else
#define TRACEDATA
#endif
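/*
 * With CONFIG_PM_TRACE, the TRACE_DEVICE()/TRACE_RESUME() hooks (see
 * drivers/base/power/trace.c) keep their bookkeeping entries in
 * .tracedata, letting the kernel report after a reboot which device
 * hung during a failed suspend/resume.
 */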
#define NOTES \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_notes) = .; \
*(.note.*) \
VMLINUX_SYMBOL(__stop_notes) = .; \
}
#define INITCALLS \
*(.initcallearly.init) \
__early_initcall_end = .; \
*(.initcall0.init) \
*(.initcall0s.init) \
*(.initcall1.init) \
*(.initcall1s.init) \
*(.initcall2.init) \
*(.initcall2s.init) \
*(.initcall3.init) \
*(.initcall3s.init) \
*(.initcall4.init) \
*(.initcall4s.init) \
*(.initcall5.init) \
*(.initcall5s.init) \
*(.initcallrootfs.init) \
*(.initcall6.init) \
*(.initcall6s.init) \
*(.initcall7.init) \
*(.initcall7s.init)
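/*
 * Each level collects pointers registered through the *_initcall()
 * macros in <linux/init.h>, e.g.:
 *
 *	core_initcall(fn)	=> .initcall1.init
 *	subsys_initcall(fn)	=> .initcall4.init
 *	device_initcall(fn)	=> .initcall6.init (module_init() when
 *					built in)
 *	rootfs_initcall(fn)	=> .initcallrootfs.init
 *
 * The "s" sections hold the *_initcall_sync() variants; do_initcalls()
 * in init/main.c runs the whole range in the order laid out here.
 */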
#define PERCPU(align) \
. = ALIGN(align); \
__per_cpu_start = .; \
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
*(.data.percpu) \
*(.data.percpu.shared_aligned) \
} \
__per_cpu_end = .;
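/*
 * DEFINE_PER_CPU(type, name) places the prototype copy of "name" in
 * .data.percpu; at boot, setup_per_cpu_areas() allocates one copy of
 * the __per_cpu_start..__per_cpu_end image per possible CPU, and
 * per_cpu(name, cpu) resolves into that copy via the CPU's per-cpu
 * offset. Sketch (hypothetical counter):
 *
 *	DEFINE_PER_CPU(int, foo_count);
 *	per_cpu(foo_count, smp_processor_id())++;
 */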