Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (35 commits)
  x86: Add HPET force support for MCP55 (nForce 5) chipsets
  x86: Force enable HPET for CK804 (nForce 4) chipsets
  x86: clean up setup.h and the boot code
  x86: Save registers in saved_context during suspend and hibernation
  x86: merge setup_32/64.h
  x86: merge signal_32/64.h
  x86: merge required-features.h
  x86: merge sigcontext_32/64.h
  x86: merge msr_32/64.h
  x86: merge mttr_32/64.h
  x86: merge statfs_32/64.h
  x86: merge stat_32/64.h
  x86: merge shmbuf_32/64.h
  x86: merge ptrace_32/64.h
  x86: merge msgbuf_32/64.h
  x86: merge elf_32/64.h
  x86: merge byteorder_32/64.h
  x86: whitespace cleanup of mce_64.c
  x86: consolidate the cpu/ related code usage
  x86: prepare consolidation of cpu/ related code usage
  ...
commit a7aed1c2dc
@@ -422,7 +422,8 @@ and is between 256 and 4096 characters. It is defined in the file
	hpet=		[X86-32,HPET] option to control HPET usage
			Format: { enable (default) | disable | force }
			disable: disable HPET and use PIT instead
			force: allow force enabled of undocumented chips (ICH4, VIA)
			force: allow force enabled of undocumented chips (ICH4,
				VIA, nVidia)

	com20020=	[HW,NET] ARCnet - COM20020 chipset
			Format:
@@ -1270,6 +1270,8 @@ source "drivers/Kconfig"

source "fs/Kconfig"

source "kernel/Kconfig.instrumentation"

source "arch/i386/Kconfig.debug"

source "security/Kconfig"
@@ -20,6 +20,12 @@
# Fill in SRCARCH
SRCARCH := x86

# BITS is used as extension for files which are available in a 32 bit
# and a 64 bit version to simplify shared Makefiles.
# e.g.: obj-y += foo_$(BITS).o
BITS := 32
export BITS

HAS_BIARCH := $(call cc-option-yn, -m32)
ifeq ($(HAS_BIARCH),y)
	AS := $(AS) --32
@@ -23,7 +23,7 @@
#include <linux/types.h>
#include <linux/edd.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
#include <asm/setup.h>

/* Useful macros */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
@@ -38,11 +38,9 @@ static const char* safe_abs_relocs[] = {

static int is_safe_abs_reloc(const char* sym_name)
{
	int i, array_size;
	int i;

	array_size = sizeof(safe_abs_relocs)/sizeof(char*);

	for(i = 0; i < array_size; i++) {
	for(i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
		if (!strcmp(sym_name, safe_abs_relocs[i]))
			/* Match found */
			return 1;
@@ -26,8 +26,6 @@ char *heap_end = _end;	/* Default end of heap = no heap */
 * screws up the old-style command line protocol, adjust by
 * filling in the new-style command line pointer instead.
 */
#define OLD_CL_MAGIC	0xA33F
#define OLD_CL_ADDRESS	0x20

static void copy_boot_params(void)
{
@@ -1,5 +1,15 @@
ifeq ($(CONFIG_X86_32),y)
include ${srctree}/arch/x86/crypto/Makefile_32
else
include ${srctree}/arch/x86/crypto/Makefile_64
endif
#
# Arch-specific CryptoAPI modules.
#

obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o

obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o

aes-i586-y := aes-i586-asm_32.o aes_32.o
twofish-i586-y := twofish-i586-asm_32.o twofish_32.o

aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
@@ -1,12 +0,0 @@
#
# x86/crypto/Makefile
#
# Arch-specific CryptoAPI modules.
#

obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o

aes-i586-y := aes-i586-asm_32.o aes_32.o
twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
@@ -1,12 +0,0 @@
#
# x86/crypto/Makefile
#
# Arch-specific CryptoAPI modules.
#

obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o

aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
@@ -26,7 +26,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse_32.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic_32.o nmi_32.o
obj-$(CONFIG_X86_IO_APIC) += io_apic_32.o
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash_32.o
obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_32.o
obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o
@@ -9,25 +9,21 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
		x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \
		setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \
		pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \
		perfctr-watchdog.o i8253.o
		i8253.o

obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_X86_MCE) += mce_64.o therm_throt.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o
obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
obj-$(CONFIG_MTRR) += cpu/mtrr/
obj-$(CONFIG_ACPI) += acpi/
obj-y += cpu/
obj-y += acpi/
obj-$(CONFIG_X86_MSR) += msr.o
obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_X86_CPUID) += cpuid.o
obj-$(CONFIG_SMP) += smp_64.o smpboot_64.o trampoline_64.o tsc_sync.o
obj-y += apic_64.o nmi_64.o
obj-y += io_apic_64.o mpparse_64.o genapic_64.o genapic_flat_64.o
obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash_64.o
obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_64.o
obj-$(CONFIG_PM) += suspend_64.o
obj-$(CONFIG_HIBERNATION) += suspend_asm_64.o
obj-$(CONFIG_CPU_FREQ) += cpu/cpufreq/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_IOMMU) += pci-gart_64.o aperture_64.o
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
@@ -42,13 +38,6 @@ obj-$(CONFIG_MODULES) += module_64.o
obj-$(CONFIG_PCI) += early-quirks.o

obj-y += topology.o
obj-y += intel_cacheinfo.o
obj-y += addon_cpuid_features.o
obj-y += pcspeaker.o

CFLAGS_vsyscall_64.o := $(PROFILING) -g0

therm_throt-y += cpu/mcheck/therm_throt.o
intel_cacheinfo-y += cpu/intel_cacheinfo.o
addon_cpuid_features-y += cpu/addon_cpuid_features.o
perfctr-watchdog-y += cpu/perfctr-watchdog.o
@@ -1,5 +1,7 @@
ifeq ($(CONFIG_X86_32),y)
include ${srctree}/arch/x86/kernel/acpi/Makefile_32
else
include ${srctree}/arch/x86/kernel/acpi/Makefile_64
obj-$(CONFIG_ACPI) += boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep_$(BITS).o wakeup_$(BITS).o

ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += cstate.o processor.o
endif
@@ -1,7 +0,0 @@
obj-$(CONFIG_ACPI) += boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o

ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += cstate.o processor.o
endif
@@ -1,7 +0,0 @@
obj-y := boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep_64.o wakeup_64.o

ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o cstate.o
endif
@@ -4,6 +4,7 @@
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>

# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
#
@@ -342,31 +343,32 @@ do_suspend_lowlevel:
	xorl	%eax, %eax
	call	save_processor_state

	movq	%rsp, saved_context_esp(%rip)
	movq	%rax, saved_context_eax(%rip)
	movq	%rbx, saved_context_ebx(%rip)
	movq	%rcx, saved_context_ecx(%rip)
	movq	%rdx, saved_context_edx(%rip)
	movq	%rbp, saved_context_ebp(%rip)
	movq	%rsi, saved_context_esi(%rip)
	movq	%rdi, saved_context_edi(%rip)
	movq	%r8, saved_context_r08(%rip)
	movq	%r9, saved_context_r09(%rip)
	movq	%r10, saved_context_r10(%rip)
	movq	%r11, saved_context_r11(%rip)
	movq	%r12, saved_context_r12(%rip)
	movq	%r13, saved_context_r13(%rip)
	movq	%r14, saved_context_r14(%rip)
	movq	%r15, saved_context_r15(%rip)
	pushfq ; popq saved_context_eflags(%rip)
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_rsp(%rax)
	movq	%rbp, pt_regs_rbp(%rax)
	movq	%rsi, pt_regs_rsi(%rax)
	movq	%rdi, pt_regs_rdi(%rax)
	movq	%rbx, pt_regs_rbx(%rax)
	movq	%rcx, pt_regs_rcx(%rax)
	movq	%rdx, pt_regs_rdx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	pushfq
	popq	pt_regs_eflags(%rax)

	movq	$.L97, saved_rip(%rip)

	movq	%rsp,saved_rsp
	movq	%rbp,saved_rbp
	movq	%rbx,saved_rbx
	movq	%rdi,saved_rdi
	movq	%rsi,saved_rsi
	movq	%rsp, saved_rsp
	movq	%rbp, saved_rbp
	movq	%rbx, saved_rbx
	movq	%rdi, saved_rdi
	movq	%rsi, saved_rsi

	addq	$8, %rsp
	movl	$3, %edi
@@ -377,32 +379,35 @@ do_suspend_lowlevel:
.L99:
	.align 4
	movl	$24, %eax
	movw	%ax, %ds
	movq	saved_context+58(%rip), %rax
	movq	%rax, %cr4
	movq	saved_context+50(%rip), %rax
	movq	%rax, %cr3
	movq	saved_context+42(%rip), %rax
	movq	%rax, %cr2
	movq	saved_context+34(%rip), %rax
	movq	%rax, %cr0
	pushq	saved_context_eflags(%rip) ; popfq
	movq	saved_context_esp(%rip), %rsp
	movq	saved_context_ebp(%rip), %rbp
	movq	saved_context_eax(%rip), %rax
	movq	saved_context_ebx(%rip), %rbx
	movq	saved_context_ecx(%rip), %rcx
	movq	saved_context_edx(%rip), %rdx
	movq	saved_context_esi(%rip), %rsi
	movq	saved_context_edi(%rip), %rdi
	movq	saved_context_r08(%rip), %r8
	movq	saved_context_r09(%rip), %r9
	movq	saved_context_r10(%rip), %r10
	movq	saved_context_r11(%rip), %r11
	movq	saved_context_r12(%rip), %r12
	movq	saved_context_r13(%rip), %r13
	movq	saved_context_r14(%rip), %r14
	movq	saved_context_r15(%rip), %r15
	movw	%ax, %ds

	/* We don't restore %rax, it must be 0 anyway */
	movq	$saved_context, %rax
	movq	saved_context_cr4(%rax), %rbx
	movq	%rbx, %cr4
	movq	saved_context_cr3(%rax), %rbx
	movq	%rbx, %cr3
	movq	saved_context_cr2(%rax), %rbx
	movq	%rbx, %cr2
	movq	saved_context_cr0(%rax), %rbx
	movq	%rbx, %cr0
	pushq	pt_regs_eflags(%rax)
	popfq
	movq	pt_regs_rsp(%rax), %rsp
	movq	pt_regs_rbp(%rax), %rbp
	movq	pt_regs_rsi(%rax), %rsi
	movq	pt_regs_rdi(%rax), %rdi
	movq	pt_regs_rbx(%rax), %rbx
	movq	pt_regs_rcx(%rax), %rcx
	movq	pt_regs_rdx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15

	xorl	%eax, %eax
	addq	$8, %rsp
@@ -287,6 +287,20 @@ void disable_local_APIC(void)
	apic_write(APIC_SPIV, value);
}

void lapic_shutdown(void)
{
	unsigned long flags;

	if (!cpu_has_apic)
		return;

	local_irq_save(flags);

	disable_local_APIC();

	local_irq_restore(flags);
}

/*
 * This is to verify that we're looking at a real local APIC.
 * Check these against your board if the CPUs aren't getting
@@ -76,6 +76,34 @@ int main(void)
	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
	DEFINE(pbe_next, offsetof(struct pbe, next));
	BLANK();
#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
	ENTRY(rbx);
	ENTRY(rbx);
	ENTRY(rcx);
	ENTRY(rdx);
	ENTRY(rsp);
	ENTRY(rbp);
	ENTRY(rsi);
	ENTRY(rdi);
	ENTRY(r8);
	ENTRY(r9);
	ENTRY(r10);
	ENTRY(r11);
	ENTRY(r12);
	ENTRY(r13);
	ENTRY(r14);
	ENTRY(r15);
	ENTRY(eflags);
	BLANK();
#undef ENTRY
#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
	ENTRY(cr0);
	ENTRY(cr2);
	ENTRY(cr3);
	ENTRY(cr4);
	ENTRY(cr8);
	BLANK();
#undef ENTRY
	DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
	BLANK();
	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
@@ -2,19 +2,19 @@
# Makefile for x86-compatible CPU details and quirks
#

obj-y := common.o proc.o bugs.o
obj-y := intel_cacheinfo.o addon_cpuid_features.o

obj-y += amd.o
obj-y += cyrix.o
obj-y += centaur.o
obj-y += transmeta.o
obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o
obj-y += nexgen.o
obj-y += umc.o
obj-$(CONFIG_X86_32) += common.o proc.o bugs.o
obj-$(CONFIG_X86_32) += amd.o
obj-$(CONFIG_X86_32) += cyrix.o
obj-$(CONFIG_X86_32) += centaur.o
obj-$(CONFIG_X86_32) += transmeta.o
obj-$(CONFIG_X86_32) += intel.o
obj-$(CONFIG_X86_32) += nexgen.o
obj-$(CONFIG_X86_32) += umc.o

obj-$(CONFIG_X86_MCE) += mcheck/

obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_CPU_FREQ) += cpufreq/

obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
@@ -1,2 +1,6 @@
obj-y = mce.o k7.o p4.o p5.o p6.o winchip.o therm_throt.o
obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
obj-y = mce_$(BITS).o therm_throt.o

obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o
obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
@@ -111,8 +111,7 @@ static void print_mce(struct mce *m)
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	if (m->rip) {
		printk(KERN_EMERG
		       "RIP%s %02x:<%016Lx> ",
		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->rip);
		if (m->cs == __KERNEL_CS)
@@ -126,8 +125,8 @@ static void print_mce(struct mce *m)
		printk("MISC %Lx ", m->misc);
	printk("\n");
	printk(KERN_EMERG "This is not a software problem!\n");
	printk(KERN_EMERG
	       "Run through mcelog --ascii to decode and contact your hardware vendor\n");
	printk(KERN_EMERG "Run through mcelog --ascii to decode "
	       "and contact your hardware vendor\n");
}

static void mce_panic(char *msg, struct mce *backup, unsigned long start)
@@ -137,6 +136,7 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
	oops_begin();
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;

		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
@@ -173,7 +173,6 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
/*
 * The actual machine check handler
 */

void do_machine_check(struct pt_regs * regs, long error_code)
{
	struct mce m, panicm;
@@ -194,7 +193,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
	atomic_inc(&mce_entry);

	if (regs)
		notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL);
		notify_die(DIE_NMI, "machine check", regs, error_code, 18,
			   SIGKILL);
	if (!banks)
		goto out2;

@@ -372,7 +372,7 @@ static void mcheck_timer(struct work_struct *work)
	if (mce_notify_user()) {
		next_interval = max(next_interval/2, HZ/100);
	} else {
		next_interval = min(next_interval*2,
		next_interval = min(next_interval * 2,
				(int)round_jiffies_relative(check_interval*HZ));
	}
@@ -556,10 +556,12 @@ static int mce_release(struct inode *inode, struct file *file)
static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off)
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	unsigned long *cpu_tsc;
	static DECLARE_MUTEX(mce_read_sem);
@@ -584,6 +586,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff
	err = 0;
	for (i = 0; i < next; i++) {
		unsigned long start = jiffies;

		while (!mcelog.entry[i].finished) {
			if (time_after_eq(jiffies, start + 2)) {
				memset(mcelog.entry + i,0, sizeof(struct mce));
@@ -603,13 +606,16 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff

	synchronize_sched();

	/* Collect entries that were still getting written before the synchronize. */

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce));
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
@@ -628,9 +634,11 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
	return 0;
}

static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg)
static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd,
		     unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
@@ -640,6 +648,7 @@ static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
@@ -681,7 +690,6 @@ void __init restart_mce(void)
/*
 * Old style boot options parsing. Only for compatibility.
 */

static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
@@ -744,17 +752,17 @@ DEFINE_PER_CPU(struct sys_device, device_mce);

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
		return sprintf(buf, "%lx\n", (unsigned long)var); \
	} \
	static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
		return sprintf(buf, "%lx\n", (unsigned long)var); \
	} \
	static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \
		char *end; \
		unsigned long new = simple_strtoul(buf, &end, 0); \
		if (end == buf) return -EINVAL; \
		var = new; \
		start; \
		return end-buf; \
	} \
		char *end; \
		unsigned long new = simple_strtoul(buf, &end, 0); \
		if (end == buf) return -EINVAL; \
		var = new; \
		start; \
		return end-buf; \
	} \
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);

/* TBD should generate these dynamically based on number of available banks */
@@ -1,5 +1,5 @@
/*
 * Architecture specific (i386) functions for kexec based crash dumps.
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
@@ -25,8 +25,11 @@
#include <linux/kdebug.h>
#include <asm/smp.h>

#ifdef X86_32
#include <mach_ipi.h>

#else
#include <asm/mach_apic.h>
#endif

/* This keeps a track of which one is crashing cpu. */
static int crashing_cpu;
@@ -38,7 +41,9 @@ static int crash_nmi_callback(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct pt_regs *regs;
#ifdef X86_32
	struct pt_regs fixed_regs;
#endif
	int cpu;

	if (val != DIE_NMI_IPI)
@@ -55,10 +60,12 @@ static int crash_nmi_callback(struct notifier_block *self,
		return NOTIFY_STOP;
	local_irq_disable();

#ifdef X86_32
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
@@ -1,135 +0,0 @@
/*
 * Architecture specific (x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kdebug.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/mach_apic.h>

/* This keeps a track of which one is crashing cpu. */
static int crashing_cpu;

#ifdef CONFIG_SMP
static atomic_t waiting_for_crash_ipi;

static int crash_nmi_callback(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct pt_regs *regs;
	int cpu;

	if (val != DIE_NMI_IPI)
		return NOTIFY_OK;

	regs = ((struct die_args *)data)->regs;
	cpu = raw_smp_processor_id();

	/*
	 * Don't do anything if this handler is invoked on crashing cpu.
	 * Otherwise, system will completely hang. Crashing cpu can get
	 * an NMI if system was initially booted with nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return NOTIFY_STOP;
	local_irq_disable();

	crash_save_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	for(;;)
		halt();

	return 1;
}

static void smp_send_nmi_allbutself(void)
{
	send_IPI_allbutself(NMI_VECTOR);
}

/*
 * This code is a best effort heuristic to get the
 * other cpus to stop executing. So races with
 * cpu hotplug shouldn't matter.
 */

static struct notifier_block crash_nmi_nb = {
	.notifier_call = crash_nmi_callback,
};

static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	if (register_die_notifier(&crash_nmi_nb))
		return;		/* return what? */

	/*
	 * Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}
	/* Leave the nmi callback set */
	disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif

void machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of crashing cpu. Will be used in NMI callback.*/
	crashing_cpu = smp_processor_id();
	nmi_shootdown_cpus();

	if(cpu_has_apic)
		disable_local_APIC();

	disable_IO_APIC();

	crash_save_cpu(regs, smp_processor_id());
}
@@ -124,12 +124,7 @@ ENTRY(startup_32)
	movsl
	movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jnz 2f			# New command line protocol
	cmpw $(OLD_CL_MAGIC),OLD_CL_MAGIC_ADDR
	jne 1f
	movzwl OLD_CL_OFFSET,%esi
	addl $(OLD_CL_BASE_ADDR),%esi
2:
	jz 1f			# No comand line
	movl $(boot_command_line - __PAGE_OFFSET),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
@@ -60,7 +60,8 @@ static enum {
	NONE_FORCE_HPET_RESUME,
	OLD_ICH_FORCE_HPET_RESUME,
	ICH_FORCE_HPET_RESUME,
	VT8237_FORCE_HPET_RESUME
	VT8237_FORCE_HPET_RESUME,
	NVIDIA_FORCE_HPET_RESUME,
} force_hpet_resume_type;

static void __iomem *rcba_base;
@@ -321,6 +322,55 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 vt8237_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
	u32 uninitialized_var(val);

	if (!hpet_force_user || hpet_address || force_hpet_address)
		return;

	pci_write_config_dword(dev, 0x44, 0xfed00001);
	pci_read_config_dword(dev, 0x44, &val);
	force_hpet_address = val & 0xfffffffe;
	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
	printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
		force_hpet_address);
	cached_dev = dev;
	return;
}

/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
			 nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			 nvidia_force_enable_hpet);

void force_hpet_resume(void)
{
@@ -334,6 +384,9 @@ void force_hpet_resume(void)
	case VT8237_FORCE_HPET_RESUME:
		return vt8237_force_hpet_resume();

	case NVIDIA_FORCE_HPET_RESUME:
		return nvidia_force_hpet_resume();

	default:
		break;
	}
@@ -388,7 +388,7 @@ static void inquire_remote_apic(int apicid)

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
@@ -19,12 +19,6 @@ extern const void __nosave_begin, __nosave_end;

struct saved_context saved_context;

unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
unsigned long saved_context_eflags;

void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();
@@ -17,24 +17,24 @@
#include <asm/asm-offsets.h>

ENTRY(swsusp_arch_suspend)

	movq	%rsp, saved_context_esp(%rip)
	movq	%rax, saved_context_eax(%rip)
	movq	%rbx, saved_context_ebx(%rip)
	movq	%rcx, saved_context_ecx(%rip)
	movq	%rdx, saved_context_edx(%rip)
	movq	%rbp, saved_context_ebp(%rip)
	movq	%rsi, saved_context_esi(%rip)
	movq	%rdi, saved_context_edi(%rip)
	movq	%r8, saved_context_r08(%rip)
	movq	%r9, saved_context_r09(%rip)
	movq	%r10, saved_context_r10(%rip)
	movq	%r11, saved_context_r11(%rip)
	movq	%r12, saved_context_r12(%rip)
	movq	%r13, saved_context_r13(%rip)
	movq	%r14, saved_context_r14(%rip)
	movq	%r15, saved_context_r15(%rip)
	pushfq ; popq saved_context_eflags(%rip)
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_rsp(%rax)
	movq	%rbp, pt_regs_rbp(%rax)
	movq	%rsi, pt_regs_rsi(%rax)
	movq	%rdi, pt_regs_rdi(%rax)
	movq	%rbx, pt_regs_rbx(%rax)
	movq	%rcx, pt_regs_rcx(%rax)
	movq	%rdx, pt_regs_rdx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	pushfq
	popq	pt_regs_eflags(%rax)

	/* save the address of restore_registers */
	movq	$restore_registers, %rax
@@ -113,23 +113,25 @@ ENTRY(restore_registers)
	movq	%rcx, %cr3
	movq	%rax, %cr4;  # turn PGE back on

	movq	saved_context_esp(%rip), %rsp
	movq	saved_context_ebp(%rip), %rbp
	/* restore GPRs (we don't restore %rax, it must be 0 anyway) */
	movq	saved_context_ebx(%rip), %rbx
	movq	saved_context_ecx(%rip), %rcx
	movq	saved_context_edx(%rip), %rdx
	movq	saved_context_esi(%rip), %rsi
	movq	saved_context_edi(%rip), %rdi
	movq	saved_context_r08(%rip), %r8
	movq	saved_context_r09(%rip), %r9
	movq	saved_context_r10(%rip), %r10
	movq	saved_context_r11(%rip), %r11
	movq	saved_context_r12(%rip), %r12
	movq	saved_context_r13(%rip), %r13
	movq	saved_context_r14(%rip), %r14
	movq	saved_context_r15(%rip), %r15
	pushq	saved_context_eflags(%rip) ; popfq
	/* We don't restore %rax, it must be 0 anyway */
	movq	$saved_context, %rax
	movq	pt_regs_rsp(%rax), %rsp
	movq	pt_regs_rbp(%rax), %rbp
	movq	pt_regs_rsi(%rax), %rsi
	movq	pt_regs_rdi(%rax), %rdi
	movq	pt_regs_rbx(%rax), %rbx
	movq	pt_regs_rcx(%rax), %rcx
	movq	pt_regs_rdx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15
	pushq	pt_regs_eflags(%rax)
	popfq

	xorq	%rax, %rax
@@ -131,38 +131,43 @@ unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64;
	u64 delta64 = (u64)ULLONG_MAX;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm */
	/* run 3 times to ensure the cache is warm and to get an accurate reading */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);

		/*
		 * Error: ECTCNEVERSET
		 * The CTC wasn't reliable: we got a hit on the very first read,
		 * or the CPU was so fast/slow that the quotient wouldn't fit in
		 * 32 bits..
		 */
		if (count <= 1)
			continue;

		/* cpu freq too slow: */
		if ((end - start) <= CALIBRATE_TIME_MSEC)
			continue;

		/*
		 * We want the minimum time of all runs in case one of them
		 * is inaccurate due to SMI or other delay
		 */
		delta64 = min(delta64, (end - start));
	}
	/*
	 * Error: ECTCNEVERSET
	 * The CTC wasn't reliable: we got a hit on the very first read,
	 * or the CPU was so fast/slow that the quotient wouldn't fit in
	 * 32 bits..
	 */
	if (count <= 1)
		goto err;

	delta64 = end - start;

	/* cpu freq too fast: */
	/* cpu freq too fast (or every run was bad): */
	if (delta64 > (1ULL<<32))
		goto err;

	/* cpu freq too slow: */
	if (delta64 <= CALIBRATE_TIME_MSEC)
		goto err;

	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64,CALIBRATE_TIME_MSEC);
@@ -1,17 +0,0 @@
config PROFILING
	bool "Profiling support (EXPERIMENTAL)"
	help
	  Say Y here to enable the extended profiling support mechanisms used
	  by profilers such as OProfile.


config OPROFILE
	tristate "OProfile system profiling (EXPERIMENTAL)"
	depends on PROFILING
	help
	  OProfile is a profiling system capable of profiling the
	  whole system, include the kernel, kernel modules, libraries,
	  and applications.

	  If unsure, say N.
@@ -833,6 +833,8 @@ source "drivers/firmware/Kconfig"

source fs/Kconfig

source "kernel/Kconfig.instrumentation"

source "arch/x86_64/Kconfig.debug"

source "security/Kconfig"
@@ -24,6 +24,12 @@
# Fill in SRCARCH
SRCARCH := x86

# BITS is used as extension for files which are available in a 32 bit
# and a 64 bit version to simplify shared Makefiles.
# e.g.: obj-y += foo_$(BITS).o
BITS := 64
export BITS

LDFLAGS := -m elf_x86_64
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
LDFLAGS_vmlinux :=
@@ -11,41 +11,16 @@ header-y += sigcontext32.h
header-y += ucontext.h
header-y += vsyscall32.h

unifdef-y += a.out_32.h
unifdef-y += a.out_64.h
unifdef-y += byteorder_32.h
unifdef-y += byteorder_64.h
unifdef-y += e820.h
unifdef-y += elf_32.h
unifdef-y += elf_64.h
unifdef-y += ist.h
unifdef-y += mce.h
unifdef-y += msgbuf_32.h
unifdef-y += msgbuf_64.h
unifdef-y += msr_32.h
unifdef-y += msr_64.h
unifdef-y += msr.h
unifdef-y += mtrr_32.h
unifdef-y += mtrr_64.h
unifdef-y += mtrr.h
unifdef-y += page_32.h
unifdef-y += page_64.h
unifdef-y += posix_types_32.h
unifdef-y += posix_types_64.h
unifdef-y += ptrace_32.h
unifdef-y += ptrace_64.h
unifdef-y += setup_32.h
unifdef-y += setup_64.h
unifdef-y += shmbuf_32.h
unifdef-y += shmbuf_64.h
unifdef-y += sigcontext_32.h
unifdef-y += sigcontext_64.h
unifdef-y += signal_32.h
unifdef-y += signal_64.h
unifdef-y += stat_32.h
unifdef-y += stat_64.h
unifdef-y += statfs_32.h
unifdef-y += statfs_64.h
unifdef-y += ptrace.h
unifdef-y += unistd_32.h
unifdef-y += unistd_64.h
unifdef-y += user_32.h
@@ -1,13 +1,30 @@
#ifndef _ASM_X86_A_OUT_H
#define _ASM_X86_A_OUT_H

struct exec
{
	unsigned int a_info;	/* Use macros N_MAGIC, etc for access */
	unsigned a_text;	/* length of text, in bytes */
	unsigned a_data;	/* length of data, in bytes */
	unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
	unsigned a_syms;	/* length of symbol table data in file, in bytes */
	unsigned a_entry;	/* start address */
	unsigned a_trsize;	/* length of relocation info for text, in bytes */
	unsigned a_drsize;	/* length of relocation info for data, in bytes */
};

#define N_TRSIZE(a)	((a).a_trsize)
#define N_DRSIZE(a)	((a).a_drsize)
#define N_SYMSIZE(a)	((a).a_syms)

#ifdef __KERNEL__
# include <linux/thread_info.h>
# define STACK_TOP	TASK_SIZE
# ifdef CONFIG_X86_32
# include "a.out_32.h"
# define STACK_TOP_MAX	STACK_TOP
# else
# include "a.out_64.h"
# endif
#else
# ifdef __i386__
# include "a.out_32.h"
# else
# include "a.out_64.h"
# define STACK_TOP_MAX	TASK_SIZE64
# endif
#endif

#endif /* _ASM_X86_A_OUT_H */
@@ -1,27 +0,0 @@
#ifndef __I386_A_OUT_H__
#define __I386_A_OUT_H__

struct exec
{
	unsigned long a_info;	/* Use macros N_MAGIC, etc for access */
	unsigned a_text;	/* length of text, in bytes */
	unsigned a_data;	/* length of data, in bytes */
	unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
	unsigned a_syms;	/* length of symbol table data in file, in bytes */
	unsigned a_entry;	/* start address */
	unsigned a_trsize;	/* length of relocation info for text, in bytes */
	unsigned a_drsize;	/* length of relocation info for data, in bytes */
};

#define N_TRSIZE(a)	((a).a_trsize)
#define N_DRSIZE(a)	((a).a_drsize)
#define N_SYMSIZE(a)	((a).a_syms)

#ifdef __KERNEL__

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

#endif

#endif /* __A_OUT_GNU_H__ */
@@ -1,28 +0,0 @@
#ifndef __X8664_A_OUT_H__
#define __X8664_A_OUT_H__

/* 32bit a.out */

struct exec
{
	unsigned int a_info;	/* Use macros N_MAGIC, etc for access */
	unsigned a_text;	/* length of text, in bytes */
	unsigned a_data;	/* length of data, in bytes */
	unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
	unsigned a_syms;	/* length of symbol table data in file, in bytes */
	unsigned a_entry;	/* start address */
	unsigned a_trsize;	/* length of relocation info for text, in bytes */
	unsigned a_drsize;	/* length of relocation info for data, in bytes */
};

#define N_TRSIZE(a)	((a).a_trsize)
#define N_DRSIZE(a)	((a).a_drsize)
#define N_SYMSIZE(a)	((a).a_syms)

#ifdef __KERNEL__
#include <linux/thread_info.h>
#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE64
#endif

#endif /* __A_OUT_GNU_H__ */
@@ -69,6 +69,7 @@ extern void clear_local_APIC (void);
extern void connect_bsp_APIC (void);
extern void disconnect_bsp_APIC (int virt_wire_setup);
extern void disable_local_APIC (void);
extern void lapic_shutdown (void);
extern int verify_local_APIC (void);
extern void cache_APIC_registers (void);
extern void sync_Arb_IDs (void);
@@ -80,6 +80,20 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
		:"Ir" (nr));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__(
@@ -87,6 +101,25 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
		:"+m" (ADDR)
		:"Ir" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

@@ -145,6 +178,15 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
	return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86
 */
#define test_and_set_bit_lock test_and_set_bit

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
@@ -406,7 +448,6 @@ static inline int fls(int x)
}

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#endif /* __KERNEL__ */

@@ -72,6 +72,20 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
		:"dIr" (nr));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
@@ -80,6 +94,24 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
		:"dIr" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

@@ -136,6 +168,15 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
	return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86
 */
#define test_and_set_bit_lock test_and_set_bit

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
@@ -412,7 +453,6 @@ static __inline__ int fls(int x)
#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#endif /* __KERNEL__ */

@@ -1,13 +1,72 @@
#ifdef __KERNEL__
# ifdef CONFIG_X86_32
# include "byteorder_32.h"
# else
# include "byteorder_64.h"
# endif
#ifndef _ASM_X86_BYTEORDER_H
#define _ASM_X86_BYTEORDER_H

#include <asm/types.h>
#include <linux/compiler.h>

#ifdef __GNUC__

#ifdef __i386__

static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
#ifdef CONFIG_X86_BSWAP
	__asm__("bswap %0" : "=r" (x) : "0" (x));
#else
# ifdef __i386__
# include "byteorder_32.h"
# else
# include "byteorder_64.h"
# endif
	__asm__("xchgb %b0,%h0\n\t"	/* swap lower bytes */
		"rorl $16,%0\n\t"	/* swap words */
		"xchgb %b0,%h0"		/* swap higher bytes */
		:"=q" (x)
		: "0" (x));
#endif
	return x;
}

static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
{
	union {
		struct { __u32 a,b; } s;
		__u64 u;
	} v;
	v.u = val;
#ifdef CONFIG_X86_BSWAP
	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
#else
	v.s.a = ___arch__swab32(v.s.a);
	v.s.b = ___arch__swab32(v.s.b);
	asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
#endif
	return v.u;
}

#else /* __i386__ */

static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
	__asm__("bswapq %0" : "=r" (x) : "0" (x));
	return x;
}

static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
	__asm__("bswapl %0" : "=r" (x) : "0" (x));
	return x;
}

#endif

/* Do not define swab16. Gcc is smart enough to recognize "C" version and
   convert it into rotation or exhange. */

#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab32(x) ___arch__swab32(x)

#define __BYTEORDER_HAS_U64__

#endif /* __GNUC__ */

#include <linux/byteorder/little_endian.h>

#endif /* _ASM_X86_BYTEORDER_H */
@@ -1,58 +0,0 @@
#ifndef _I386_BYTEORDER_H
#define _I386_BYTEORDER_H

#include <asm/types.h>
#include <linux/compiler.h>

#ifdef __GNUC__

/* For avoiding bswap on i386 */
#ifdef __KERNEL__
#endif

static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
#ifdef CONFIG_X86_BSWAP
	__asm__("bswap %0" : "=r" (x) : "0" (x));
#else
	__asm__("xchgb %b0,%h0\n\t"	/* swap lower bytes */
		"rorl $16,%0\n\t"	/* swap words */
		"xchgb %b0,%h0"		/* swap higher bytes */
		:"=q" (x)
		: "0" (x));
#endif
	return x;
}

static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
{
	union {
		struct { __u32 a,b; } s;
		__u64 u;
	} v;
	v.u = val;
#ifdef CONFIG_X86_BSWAP
	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
#else
	v.s.a = ___arch__swab32(v.s.a);
	v.s.b = ___arch__swab32(v.s.b);
	asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
#endif
	return v.u;
}

/* Do not define swab16. Gcc is smart enough to recognize "C" version and
   convert it into rotation or exhange. */

#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab32(x) ___arch__swab32(x)

#define __BYTEORDER_HAS_U64__

#endif /* __GNUC__ */

#include <linux/byteorder/little_endian.h>

#endif /* _I386_BYTEORDER_H */
@@ -1,33 +0,0 @@
#ifndef _X86_64_BYTEORDER_H
#define _X86_64_BYTEORDER_H

#include <asm/types.h>
#include <linux/compiler.h>

#ifdef __GNUC__

static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
	__asm__("bswapq %0" : "=r" (x) : "0" (x));
	return x;
}

static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
	__asm__("bswapl %0" : "=r" (x) : "0" (x));
	return x;
}

/* Do not define swab16. Gcc is smart enough to recognize "C" version and
   convert it into rotation or exhange. */

#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab64(x) ___arch__swab64(x)

#endif /* __GNUC__ */

#define __BYTEORDER_HAS_U64__

#include <linux/byteorder/little_endian.h>

#endif /* _X86_64_BYTEORDER_H */
@@ -1,5 +1,59 @@
#ifndef _ASM_X86_DIV64_H
#define _ASM_X86_DIV64_H

#ifdef CONFIG_X86_32
# include "div64_32.h"

#include <linux/types.h>

/*
 * do_div() is NOT a C function. It wants to return
 * two values (the quotient and the remainder), but
 * since that doesn't work very well in C, what it
 * does is:
 *
 * - modifies the 64-bit dividend _in_place_
 * - returns the 32-bit remainder
 *
 * This ends up being the most efficient "calling
 * convention" on x86.
 */
#define do_div(n,base) ({ \
	unsigned long __upper, __low, __high, __mod, __base; \
	__base = (base); \
	asm("":"=a" (__low), "=d" (__high):"A" (n)); \
	__upper = __high; \
	if (__high) { \
		__upper = __high % (__base); \
		__high = __high / (__base); \
	} \
	asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
	asm("":"=A" (n):"a" (__low),"d" (__high)); \
	__mod; \
})

/*
 * (long)X = ((long long)divs) / (long)div
 * (long)rem = ((long long)divs) % (long)div
 *
 * Warning, this will do an exception if X overflows.
 */
#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)

static inline long
div_ll_X_l_rem(long long divs, long div, long *rem)
{
	long dum2;
	__asm__("divl %2":"=a"(dum2), "=d"(*rem)
		: "rm"(div), "A"(divs));

	return dum2;

}

extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);

#else
# include "div64_64.h"
#endif
# include <asm-generic/div64.h>
#endif /* CONFIG_X86_32 */

#endif /* _ASM_X86_DIV64_H */
@@ -1,52 +0,0 @@
#ifndef __I386_DIV64
#define __I386_DIV64

#include <linux/types.h>

/*
 * do_div() is NOT a C function. It wants to return
 * two values (the quotient and the remainder), but
 * since that doesn't work very well in C, what it
 * does is:
 *
 * - modifies the 64-bit dividend _in_place_
 * - returns the 32-bit remainder
 *
 * This ends up being the most efficient "calling
 * convention" on x86.
 */
#define do_div(n,base) ({ \
	unsigned long __upper, __low, __high, __mod, __base; \
	__base = (base); \
	asm("":"=a" (__low), "=d" (__high):"A" (n)); \
	__upper = __high; \
	if (__high) { \
		__upper = __high % (__base); \
		__high = __high / (__base); \
	} \
	asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
	asm("":"=A" (n):"a" (__low),"d" (__high)); \
	__mod; \
})

/*
 * (long)X = ((long long)divs) / (long)div
 * (long)rem = ((long long)divs) % (long)div
 *
 * Warning, this will do an exception if X overflows.
 */
#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)

static inline long
div_ll_X_l_rem(long long divs, long div, long *rem)
{
	long dum2;
	__asm__("divl %2":"=a"(dum2), "=d"(*rem)
		: "rm"(div), "A"(divs));

	return dum2;

}

extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
#endif
@@ -1 +0,0 @@
#include <asm-generic/div64.h>
@@ -1,13 +1,290 @@
#ifdef __KERNEL__
# ifdef CONFIG_X86_32
# include "elf_32.h"
# else
# include "elf_64.h"
# endif
#ifndef _ASM_X86_ELF_H
#define _ASM_X86_ELF_H

/*
 * ELF register definitions..
 */

#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/auxvec.h>

typedef unsigned long elf_greg_t;

#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

typedef struct user_i387_struct elf_fpregset_t;

#ifdef __i386__

typedef struct user_fxsr_struct elf_fpxregset_t;

#define R_386_NONE	0
#define R_386_32	1
#define R_386_PC32	2
#define R_386_GOT32	3
#define R_386_PLT32	4
#define R_386_COPY	5
#define R_386_GLOB_DAT	6
#define R_386_JMP_SLOT	7
#define R_386_RELATIVE	8
#define R_386_GOTOFF	9
#define R_386_GOTPC	10
#define R_386_NUM	11

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS32
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_386

#else
# ifdef __i386__
# include "elf_32.h"
# else
# include "elf_64.h"
# endif

/* x86-64 relocation types */
#define R_X86_64_NONE		0	/* No reloc */
#define R_X86_64_64		1	/* Direct 64 bit */
#define R_X86_64_PC32		2	/* PC relative 32 bit signed */
#define R_X86_64_GOT32		3	/* 32 bit GOT entry */
#define R_X86_64_PLT32		4	/* 32 bit PLT address */
#define R_X86_64_COPY		5	/* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT	6	/* Create GOT entry */
#define R_X86_64_JUMP_SLOT	7	/* Create PLT entry */
#define R_X86_64_RELATIVE	8	/* Adjust by program base */
#define R_X86_64_GOTPCREL	9	/* 32 bit signed pc relative
					   offset to GOT */
#define R_X86_64_32		10	/* Direct 32 bit zero extended */
#define R_X86_64_32S		11	/* Direct 32 bit sign extended */
#define R_X86_64_16		12	/* Direct 16 bit zero extended */
#define R_X86_64_PC16		13	/* 16 bit sign extended pc relative */
#define R_X86_64_8		14	/* Direct 8 bit sign extended */
#define R_X86_64_PC8		15	/* 8 bit sign extended pc relative */

#define R_X86_64_NUM		16

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_X86_64

#endif

#ifdef __KERNEL__

#ifdef CONFIG_X86_32
#include <asm/processor.h>
#include <asm/system.h>		/* for savesegment */
#include <asm/desc.h>

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) \
	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))

/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
   contains a pointer to a function which might be registered using `atexit'.
   This provides a mean for the dynamic linker to call DT_FINI functions for
   shared libraries that have been loaded before the code runs.

   A value of 0 tells we have no such handler.

   We might as well make sure everything else is cleared too (except for %esp),
   just to make things more deterministic.
 */
#define ELF_PLAT_INIT(_r, load_addr) do { \
	_r->ebx = 0; _r->ecx = 0; _r->edx = 0; \
	_r->esi = 0; _r->edi = 0; _r->ebp = 0; \
	_r->eax = 0; \
} while (0)

/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
   now struct_user_regs, they are different) */

#define ELF_CORE_COPY_REGS(pr_reg, regs) \
	pr_reg[0] = regs->ebx; \
	pr_reg[1] = regs->ecx; \
	pr_reg[2] = regs->edx; \
	pr_reg[3] = regs->esi; \
	pr_reg[4] = regs->edi; \
	pr_reg[5] = regs->ebp; \
	pr_reg[6] = regs->eax; \
	pr_reg[7] = regs->xds & 0xffff; \
	pr_reg[8] = regs->xes & 0xffff; \
	pr_reg[9] = regs->xfs & 0xffff; \
	savesegment(gs,pr_reg[10]); \
	pr_reg[11] = regs->orig_eax; \
	pr_reg[12] = regs->eip; \
	pr_reg[13] = regs->xcs & 0xffff; \
	pr_reg[14] = regs->eflags; \
	pr_reg[15] = regs->esp; \
	pr_reg[16] = regs->xss & 0xffff;

#define ELF_PLATFORM	(utsname()->machine)
#define set_personality_64bit()	do { } while (0)
extern unsigned int vdso_enabled;

#else /* CONFIG_X86_32 */

#include <asm/processor.h>

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) \
	((x)->e_machine == EM_X86_64)

#define ELF_PLAT_INIT(_r, load_addr) do { \
	struct task_struct *cur = current; \
	(_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
	(_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
	(_r)->rax = 0; \
	(_r)->r8 = 0; \
	(_r)->r9 = 0; \
	(_r)->r10 = 0; \
	(_r)->r11 = 0; \
	(_r)->r12 = 0; \
	(_r)->r13 = 0; \
	(_r)->r14 = 0; \
	(_r)->r15 = 0; \
	cur->thread.fs = 0; cur->thread.gs = 0; \
	cur->thread.fsindex = 0; cur->thread.gsindex = 0; \
	cur->thread.ds = 0; cur->thread.es = 0; \
	clear_thread_flag(TIF_IA32); \
} while (0)

/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
|
||||
now struct_user_regs, they are different). Assumes current is the process
|
||||
getting dumped. */
|
||||
|
||||
#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
|
||||
unsigned v; \
|
||||
(pr_reg)[0] = (regs)->r15; \
|
||||
(pr_reg)[1] = (regs)->r14; \
|
||||
(pr_reg)[2] = (regs)->r13; \
|
||||
(pr_reg)[3] = (regs)->r12; \
|
||||
(pr_reg)[4] = (regs)->rbp; \
|
||||
(pr_reg)[5] = (regs)->rbx; \
|
||||
(pr_reg)[6] = (regs)->r11; \
|
||||
(pr_reg)[7] = (regs)->r10; \
|
||||
(pr_reg)[8] = (regs)->r9; \
|
||||
(pr_reg)[9] = (regs)->r8; \
|
||||
(pr_reg)[10] = (regs)->rax; \
|
||||
(pr_reg)[11] = (regs)->rcx; \
|
||||
(pr_reg)[12] = (regs)->rdx; \
|
||||
(pr_reg)[13] = (regs)->rsi; \
|
||||
(pr_reg)[14] = (regs)->rdi; \
|
||||
(pr_reg)[15] = (regs)->orig_rax; \
|
||||
(pr_reg)[16] = (regs)->rip; \
|
||||
(pr_reg)[17] = (regs)->cs; \
|
||||
(pr_reg)[18] = (regs)->eflags; \
|
||||
(pr_reg)[19] = (regs)->rsp; \
|
||||
(pr_reg)[20] = (regs)->ss; \
|
||||
(pr_reg)[21] = current->thread.fs; \
|
||||
(pr_reg)[22] = current->thread.gs; \
|
||||
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
|
||||
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
|
||||
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
|
||||
asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
|
||||
} while(0);
|
||||
|
||||
/* I'm not sure if we can use '-' here */
|
||||
#define ELF_PLATFORM ("x86_64")
|
||||
extern void set_personality_64bit(void);
|
||||
extern int vdso_enabled;
|
||||
|
||||
#endif /* !CONFIG_X86_32 */
|
||||
|
||||
#define USE_ELF_CORE_DUMP
|
||||
#define ELF_EXEC_PAGESIZE 4096
|
||||
|
||||
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
|
||||
use of this is to invoke "./ld.so someprog" to test out a new version of
|
||||
the loader. We need to make sure that it is out of the way of the program
|
||||
that it will "exec", and that there is sufficient room for the brk. */
|
||||
|
||||
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
|
||||
|
||||
/* This yields a mask that user programs can use to figure out what
|
||||
instruction set this CPU supports. This could be done in user space,
|
||||
but it's not easy, and we've already done it here. */
|
||||
|
||||
#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
|
||||
|
||||
/* This yields a string that ld.so will use to load implementation
|
||||
specific libraries for optimization. This is more specific in
|
||||
intent than poking at uname or /proc/cpuinfo.
|
||||
|
||||
For the moment, we have only optimizations for the Intel generations,
|
||||
but that could change... */
|
||||
|
||||
#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
|
||||
|
||||
/*
|
||||
* An executable for which elf_read_implies_exec() returns TRUE will
|
||||
* have the READ_IMPLIES_EXEC personality flag set automatically.
|
||||
*/
|
||||
#define elf_read_implies_exec(ex, executable_stack) \
|
||||
(executable_stack != EXSTACK_DISABLE_X)
|
||||
|
||||
struct task_struct;
|
||||
|
||||
extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
|
||||
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
|
||||
|
||||
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
|
||||
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
extern int dump_task_extended_fpu (struct task_struct *,
|
||||
struct user_fxsr_struct *);
|
||||
#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) \
|
||||
dump_task_extended_fpu(tsk, elf_xfpregs)
|
||||
#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
|
||||
|
||||
#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
|
||||
#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
|
||||
#define VDSO_PRELINK 0
|
||||
|
||||
#define VDSO_SYM(x) \
|
||||
(VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK)
|
||||
|
||||
#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE)
|
||||
#define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE)
|
||||
|
||||
extern void __kernel_vsyscall;
|
||||
|
||||
#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall)
|
||||
|
||||
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
|
||||
|
||||
#define ARCH_DLINFO \
|
||||
do if (vdso_enabled) { \
|
||||
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
|
||||
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
|
||||
} while (0)
|
||||
|
||||
#else /* CONFIG_X86_32 */
|
||||
|
||||
/* 1GB for 64bit, 8MB for 32bit */
|
||||
#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
|
||||
|
||||
#define ARCH_DLINFO \
|
||||
do if (vdso_enabled) { \
|
||||
NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
|
||||
} while (0)
|
||||
|
||||
#endif /* !CONFIG_X86_32 */
|
||||
|
||||
struct linux_binprm;
|
||||
|
||||
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
|
||||
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
||||
int executable_stack);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif
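A worked example of the ELF_ET_DYN_BASE formula above (editorial note, not part of the patch): with the conventional 32-bit TASK_SIZE of 0xC0000000 (3GB), TASK_SIZE / 3 * 2 evaluates to 0x80000000, so an ET_DYN object invoked directly ("./ld.so someprog") is mapped at the 2GB mark, clear of the usual fixed ELF_EXEC load address near 0x08048000 and leaving room below for the brk.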
@@ -1,165 +0,0 @@
#ifndef __ASMi386_ELF_H
|
||||
#define __ASMi386_ELF_H
|
||||
|
||||
/*
|
||||
* ELF register definitions..
|
||||
*/
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/user.h>
|
||||
#include <asm/auxvec.h>
|
||||
|
||||
#define R_386_NONE 0
|
||||
#define R_386_32 1
|
||||
#define R_386_PC32 2
|
||||
#define R_386_GOT32 3
|
||||
#define R_386_PLT32 4
|
||||
#define R_386_COPY 5
|
||||
#define R_386_GLOB_DAT 6
|
||||
#define R_386_JMP_SLOT 7
|
||||
#define R_386_RELATIVE 8
|
||||
#define R_386_GOTOFF 9
|
||||
#define R_386_GOTPC 10
|
||||
#define R_386_NUM 11
|
||||
|
||||
typedef unsigned long elf_greg_t;
|
||||
|
||||
#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
|
||||
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
|
||||
|
||||
typedef struct user_i387_struct elf_fpregset_t;
|
||||
typedef struct user_fxsr_struct elf_fpxregset_t;
|
||||
|
||||
/*
|
||||
* This is used to ensure we don't load something for the wrong architecture.
|
||||
*/
|
||||
#define elf_check_arch(x) \
|
||||
(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
|
||||
|
||||
/*
|
||||
* These are used to set parameters in the core dumps.
|
||||
*/
|
||||
#define ELF_CLASS ELFCLASS32
|
||||
#define ELF_DATA ELFDATA2LSB
|
||||
#define ELF_ARCH EM_386
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/system.h> /* for savesegment */
|
||||
#include <asm/desc.h>
|
||||
|
||||
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
|
||||
contains a pointer to a function which might be registered using `atexit'.
|
||||
This provides a mean for the dynamic linker to call DT_FINI functions for
|
||||
shared libraries that have been loaded before the code runs.
|
||||
|
||||
A value of 0 tells we have no such handler.
|
||||
|
||||
We might as well make sure everything else is cleared too (except for %esp),
|
||||
just to make things more deterministic.
|
||||
*/
|
||||
#define ELF_PLAT_INIT(_r, load_addr) do { \
|
||||
_r->ebx = 0; _r->ecx = 0; _r->edx = 0; \
|
||||
_r->esi = 0; _r->edi = 0; _r->ebp = 0; \
|
||||
_r->eax = 0; \
|
||||
} while (0)
|
||||
|
||||
#define USE_ELF_CORE_DUMP
|
||||
#define ELF_EXEC_PAGESIZE 4096
|
||||
|
||||
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
|
||||
use of this is to invoke "./ld.so someprog" to test out a new version of
|
||||
the loader. We need to make sure that it is out of the way of the program
|
||||
that it will "exec", and that there is sufficient room for the brk. */
|
||||
|
||||
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
|
||||
|
||||
/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
|
||||
now struct_user_regs, they are different) */
|
||||
|
||||
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
|
||||
pr_reg[0] = regs->ebx; \
|
||||
pr_reg[1] = regs->ecx; \
|
||||
pr_reg[2] = regs->edx; \
|
||||
pr_reg[3] = regs->esi; \
|
||||
pr_reg[4] = regs->edi; \
|
||||
pr_reg[5] = regs->ebp; \
|
||||
pr_reg[6] = regs->eax; \
|
||||
pr_reg[7] = regs->xds & 0xffff; \
|
||||
pr_reg[8] = regs->xes & 0xffff; \
|
||||
pr_reg[9] = regs->xfs & 0xffff; \
|
||||
savesegment(gs,pr_reg[10]); \
|
||||
pr_reg[11] = regs->orig_eax; \
|
||||
pr_reg[12] = regs->eip; \
|
||||
pr_reg[13] = regs->xcs & 0xffff; \
|
||||
pr_reg[14] = regs->eflags; \
|
||||
pr_reg[15] = regs->esp; \
|
||||
pr_reg[16] = regs->xss & 0xffff;
|
||||
|
||||
/* This yields a mask that user programs can use to figure out what
|
||||
instruction set this CPU supports. This could be done in user space,
|
||||
but it's not easy, and we've already done it here. */
|
||||
|
||||
#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
|
||||
|
||||
/* This yields a string that ld.so will use to load implementation
|
||||
specific libraries for optimization. This is more specific in
|
||||
intent than poking at uname or /proc/cpuinfo.
|
||||
|
||||
For the moment, we have only optimizations for the Intel generations,
|
||||
but that could change... */
|
||||
|
||||
#define ELF_PLATFORM (utsname()->machine)
|
||||
|
||||
#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
|
||||
|
||||
/*
|
||||
* An executable for which elf_read_implies_exec() returns TRUE will
|
||||
* have the READ_IMPLIES_EXEC personality flag set automatically.
|
||||
*/
|
||||
#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
|
||||
|
||||
struct task_struct;
|
||||
|
||||
extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
|
||||
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
|
||||
extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *);
|
||||
|
||||
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
|
||||
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
|
||||
#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
|
||||
#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
|
||||
|
||||
#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
|
||||
#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
|
||||
#define VDSO_PRELINK 0
|
||||
|
||||
#define VDSO_SYM(x) \
|
||||
(VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK)
|
||||
|
||||
#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE)
|
||||
#define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE)
|
||||
|
||||
extern void __kernel_vsyscall;
|
||||
|
||||
#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall)
|
||||
|
||||
struct linux_binprm;
|
||||
|
||||
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
|
||||
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
||||
int executable_stack);
|
||||
|
||||
extern unsigned int vdso_enabled;
|
||||
|
||||
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
|
||||
#define ARCH_DLINFO \
|
||||
do if (vdso_enabled) { \
|
||||
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
|
||||
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
|
||||
} while (0)
|
||||
|
||||
#endif
|
||||
|
||||
#endif
@@ -1,180 +0,0 @@
#ifndef __ASM_X86_64_ELF_H
|
||||
#define __ASM_X86_64_ELF_H
|
||||
|
||||
/*
|
||||
* ELF register definitions..
|
||||
*/
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/user.h>
|
||||
|
||||
/* x86-64 relocation types */
|
||||
#define R_X86_64_NONE 0 /* No reloc */
|
||||
#define R_X86_64_64 1 /* Direct 64 bit */
|
||||
#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
|
||||
#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
|
||||
#define R_X86_64_PLT32 4 /* 32 bit PLT address */
|
||||
#define R_X86_64_COPY 5 /* Copy symbol at runtime */
|
||||
#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
|
||||
#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
|
||||
#define R_X86_64_RELATIVE 8 /* Adjust by program base */
|
||||
#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
|
||||
offset to GOT */
|
||||
#define R_X86_64_32 10 /* Direct 32 bit zero extended */
|
||||
#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
|
||||
#define R_X86_64_16 12 /* Direct 16 bit zero extended */
|
||||
#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
|
||||
#define R_X86_64_8 14 /* Direct 8 bit sign extended */
|
||||
#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
|
||||
|
||||
#define R_X86_64_NUM 16
|
||||
|
||||
typedef unsigned long elf_greg_t;
|
||||
|
||||
#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
|
||||
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
|
||||
|
||||
typedef struct user_i387_struct elf_fpregset_t;
|
||||
|
||||
/*
|
||||
* These are used to set parameters in the core dumps.
|
||||
*/
|
||||
#define ELF_CLASS ELFCLASS64
|
||||
#define ELF_DATA ELFDATA2LSB
|
||||
#define ELF_ARCH EM_X86_64
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <asm/processor.h>
|
||||
|
||||
/*
|
||||
* This is used to ensure we don't load something for the wrong architecture.
|
||||
*/
|
||||
#define elf_check_arch(x) \
|
||||
((x)->e_machine == EM_X86_64)
|
||||
|
||||
|
||||
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
|
||||
contains a pointer to a function which might be registered using `atexit'.
|
||||
This provides a mean for the dynamic linker to call DT_FINI functions for
|
||||
shared libraries that have been loaded before the code runs.
|
||||
|
||||
A value of 0 tells we have no such handler.
|
||||
|
||||
We might as well make sure everything else is cleared too (except for %esp),
|
||||
just to make things more deterministic.
|
||||
*/
|
||||
#define ELF_PLAT_INIT(_r, load_addr) do { \
|
||||
struct task_struct *cur = current; \
|
||||
(_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
|
||||
(_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
|
||||
(_r)->rax = 0; \
|
||||
(_r)->r8 = 0; \
|
||||
(_r)->r9 = 0; \
|
||||
(_r)->r10 = 0; \
|
||||
(_r)->r11 = 0; \
|
||||
(_r)->r12 = 0; \
|
||||
(_r)->r13 = 0; \
|
||||
(_r)->r14 = 0; \
|
||||
(_r)->r15 = 0; \
|
||||
cur->thread.fs = 0; cur->thread.gs = 0; \
|
||||
cur->thread.fsindex = 0; cur->thread.gsindex = 0; \
|
||||
cur->thread.ds = 0; cur->thread.es = 0; \
|
||||
clear_thread_flag(TIF_IA32); \
|
||||
} while (0)
|
||||
|
||||
#define USE_ELF_CORE_DUMP
|
||||
#define ELF_EXEC_PAGESIZE 4096
|
||||
|
||||
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
|
||||
use of this is to invoke "./ld.so someprog" to test out a new version of
|
||||
the loader. We need to make sure that it is out of the way of the program
|
||||
that it will "exec", and that there is sufficient room for the brk. */
|
||||
|
||||
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
|
||||
|
||||
/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
|
||||
now struct_user_regs, they are different). Assumes current is the process
|
||||
getting dumped. */
|
||||
|
||||
#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
|
||||
unsigned v; \
|
||||
(pr_reg)[0] = (regs)->r15; \
|
||||
(pr_reg)[1] = (regs)->r14; \
|
||||
(pr_reg)[2] = (regs)->r13; \
|
||||
(pr_reg)[3] = (regs)->r12; \
|
||||
(pr_reg)[4] = (regs)->rbp; \
|
||||
(pr_reg)[5] = (regs)->rbx; \
|
||||
(pr_reg)[6] = (regs)->r11; \
|
||||
(pr_reg)[7] = (regs)->r10; \
|
||||
(pr_reg)[8] = (regs)->r9; \
|
||||
(pr_reg)[9] = (regs)->r8; \
|
||||
(pr_reg)[10] = (regs)->rax; \
|
||||
(pr_reg)[11] = (regs)->rcx; \
|
||||
(pr_reg)[12] = (regs)->rdx; \
|
||||
(pr_reg)[13] = (regs)->rsi; \
|
||||
(pr_reg)[14] = (regs)->rdi; \
|
||||
(pr_reg)[15] = (regs)->orig_rax; \
|
||||
(pr_reg)[16] = (regs)->rip; \
|
||||
(pr_reg)[17] = (regs)->cs; \
|
||||
(pr_reg)[18] = (regs)->eflags; \
|
||||
(pr_reg)[19] = (regs)->rsp; \
|
||||
(pr_reg)[20] = (regs)->ss; \
|
||||
(pr_reg)[21] = current->thread.fs; \
|
||||
(pr_reg)[22] = current->thread.gs; \
|
||||
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
|
||||
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
|
||||
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
|
||||
asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
|
||||
} while(0);
|
||||
|
||||
/* This yields a mask that user programs can use to figure out what
|
||||
instruction set this CPU supports. This could be done in user space,
|
||||
but it's not easy, and we've already done it here. */
|
||||
|
||||
#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
|
||||
|
||||
/* This yields a string that ld.so will use to load implementation
|
||||
specific libraries for optimization. This is more specific in
|
||||
intent than poking at uname or /proc/cpuinfo.
|
||||
|
||||
For the moment, we have only optimizations for the Intel generations,
|
||||
but that could change... */
|
||||
|
||||
/* I'm not sure if we can use '-' here */
|
||||
#define ELF_PLATFORM ("x86_64")
|
||||
|
||||
extern void set_personality_64bit(void);
|
||||
#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
|
||||
/*
|
||||
* An executable for which elf_read_implies_exec() returns TRUE will
|
||||
* have the READ_IMPLIES_EXEC personality flag set automatically.
|
||||
*/
|
||||
#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
|
||||
|
||||
struct task_struct;
|
||||
|
||||
extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
|
||||
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
|
||||
|
||||
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
|
||||
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
|
||||
|
||||
/* 1GB for 64bit, 8MB for 32bit */
|
||||
#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
|
||||
|
||||
|
||||
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
|
||||
struct linux_binprm;
|
||||
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
||||
int executable_stack);
|
||||
|
||||
extern int vdso_enabled;
|
||||
|
||||
#define ARCH_DLINFO \
|
||||
do if (vdso_enabled) { \
|
||||
NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
|
||||
} while (0)
|
||||
|
||||
#endif
|
||||
|
||||
#endif
@@ -1,5 +1,23 @@
#ifdef CONFIG_X86_32
# include "mmu_32.h"
#else
# include "mmu_64.h"
#ifndef _ASM_X86_MMU_H
#define _ASM_X86_MMU_H

#include <linux/spinlock.h>
#include <linux/mutex.h>

/*
 * The x86 doesn't have a mmu context, but
 * we put the segment information here.
 *
 * cpu_vm_mask is used to optimize ldt flushing.
 */
typedef struct {
	void *ldt;
#ifdef CONFIG_X86_64
	rwlock_t ldtlock;
#endif
	int size;
	struct mutex lock;
	void *vdso;
} mm_context_t;

#endif /* _ASM_X86_MMU_H */
@@ -1,18 +0,0 @@
#ifndef __i386_MMU_H
#define __i386_MMU_H

#include <linux/mutex.h>
/*
 * The i386 doesn't have a mmu context, but
 * we put the segment information here.
 *
 * cpu_vm_mask is used to optimize ldt flushing.
 */
typedef struct {
	int size;
	struct mutex lock;
	void *ldt;
	void *vdso;
} mm_context_t;

#endif
@@ -1,21 +0,0 @@
#ifndef __x86_64_MMU_H
#define __x86_64_MMU_H

#include <linux/spinlock.h>
#include <linux/mutex.h>

/*
 * The x86_64 doesn't have a mmu context, but
 * we put the segment information here.
 *
 * cpu_vm_mask is used to optimize ldt flushing.
 */
typedef struct {
	void *ldt;
	rwlock_t ldtlock;
	int size;
	struct mutex lock;
	void *vdso;
} mm_context_t;

#endif
@@ -1,13 +1,39 @@
#ifdef __KERNEL__
# ifdef CONFIG_X86_32
# include "msgbuf_32.h"
# else
# include "msgbuf_64.h"
# endif
#else
# ifdef __i386__
# include "msgbuf_32.h"
# else
# include "msgbuf_64.h"
# endif
#ifndef _ASM_X86_MSGBUF_H
#define _ASM_X86_MSGBUF_H

/*
 * The msqid64_ds structure for i386 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space on i386 is left for:
 * - 64-bit time_t to solve y2038 problem
 * - 2 miscellaneous 32-bit values
 *
 * Pad space on x8664 is left for:
 * - 2 miscellaneous 64-bit values
 */
struct msqid64_ds {
	struct ipc64_perm msg_perm;
	__kernel_time_t msg_stime;	/* last msgsnd time */
#ifdef __i386__
	unsigned long __unused1;
#endif
	__kernel_time_t msg_rtime;	/* last msgrcv time */
#ifdef __i386__
	unsigned long __unused2;
#endif
	__kernel_time_t msg_ctime;	/* last change time */
#ifdef __i386__
	unsigned long __unused3;
#endif
	unsigned long msg_cbytes;	/* current number of bytes on queue */
	unsigned long msg_qnum;		/* number of messages in queue */
	unsigned long msg_qbytes;	/* max number of bytes on queue */
	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
	__kernel_pid_t msg_lrpid;	/* last receive pid */
	unsigned long __unused4;
	unsigned long __unused5;
};

#endif /* _ASM_X86_MSGBUF_H */
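One way to read the padding comment above (editorial note, not part of the patch): on i386 each 32-bit __kernel_time_t is immediately followed by an unsigned long of reserved space, so the pair already occupies the eight bytes a future 64-bit time_t would need. The fragment below is illustrative only and does not appear in the kernel.

/* Illustrative only: a 32-bit timestamp plus its reserved slot, laid out
 * so the field can later widen to 64 bits without moving its neighbours. */
struct example_padded_stamp {
	__kernel_time_t stamp;		/* 32 bits today on i386 */
	unsigned long __reserved;	/* room to grow to 64 bits */
};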
@@ -1,31 +0,0 @@
#ifndef _I386_MSGBUF_H
#define _I386_MSGBUF_H

/*
 * The msqid64_ds structure for i386 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space is left for:
 * - 64-bit time_t to solve y2038 problem
 * - 2 miscellaneous 32-bit values
 */

struct msqid64_ds {
	struct ipc64_perm msg_perm;
	__kernel_time_t msg_stime;	/* last msgsnd time */
	unsigned long __unused1;
	__kernel_time_t msg_rtime;	/* last msgrcv time */
	unsigned long __unused2;
	__kernel_time_t msg_ctime;	/* last change time */
	unsigned long __unused3;
	unsigned long msg_cbytes;	/* current number of bytes on queue */
	unsigned long msg_qnum;		/* number of messages in queue */
	unsigned long msg_qbytes;	/* max number of bytes on queue */
	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
	__kernel_pid_t msg_lrpid;	/* last receive pid */
	unsigned long __unused4;
	unsigned long __unused5;
};

#endif /* _I386_MSGBUF_H */
@@ -1,27 +0,0 @@
#ifndef _X8664_MSGBUF_H
#define _X8664_MSGBUF_H

/*
 * The msqid64_ds structure for x86-64 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space is left for:
 * - 2 miscellaneous 64-bit values
 */

struct msqid64_ds {
	struct ipc64_perm msg_perm;
	__kernel_time_t msg_stime;	/* last msgsnd time */
	__kernel_time_t msg_rtime;	/* last msgrcv time */
	__kernel_time_t msg_ctime;	/* last change time */
	unsigned long msg_cbytes;	/* current number of bytes on queue */
	unsigned long msg_qnum;		/* number of messages in queue */
	unsigned long msg_qbytes;	/* max number of bytes on queue */
	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
	__kernel_pid_t msg_lrpid;	/* last receive pid */
	unsigned long __unused4;
	unsigned long __unused5;
};

#endif
@@ -1,13 +1,350 @@
#ifndef __ASM_X86_MSR_H_
|
||||
#define __ASM_X86_MSR_H_
|
||||
|
||||
#include <asm/msr-index.h>
|
||||
|
||||
#ifdef __i386__
|
||||
|
||||
#ifdef __KERNEL__
|
||||
# ifdef CONFIG_X86_32
|
||||
# include "msr_32.h"
|
||||
# else
|
||||
# include "msr_64.h"
|
||||
# endif
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <asm/errno.h>
|
||||
|
||||
static inline unsigned long long native_read_msr(unsigned int msr)
|
||||
{
|
||||
unsigned long long val;
|
||||
|
||||
asm volatile("rdmsr" : "=A" (val) : "c" (msr));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline unsigned long long native_read_msr_safe(unsigned int msr,
|
||||
int *err)
|
||||
{
|
||||
unsigned long long val;
|
||||
|
||||
asm volatile("2: rdmsr ; xorl %0,%0\n"
|
||||
"1:\n\t"
|
||||
".section .fixup,\"ax\"\n\t"
|
||||
"3: movl %3,%0 ; jmp 1b\n\t"
|
||||
".previous\n\t"
|
||||
".section __ex_table,\"a\"\n"
|
||||
" .align 4\n\t"
|
||||
" .long 2b,3b\n\t"
|
||||
".previous"
|
||||
: "=r" (*err), "=A" (val)
|
||||
: "c" (msr), "i" (-EFAULT));
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void native_write_msr(unsigned int msr, unsigned long long val)
|
||||
{
|
||||
asm volatile("wrmsr" : : "c" (msr), "A"(val));
|
||||
}
|
||||
|
||||
static inline int native_write_msr_safe(unsigned int msr,
|
||||
unsigned long long val)
|
||||
{
|
||||
int err;
|
||||
asm volatile("2: wrmsr ; xorl %0,%0\n"
|
||||
"1:\n\t"
|
||||
".section .fixup,\"ax\"\n\t"
|
||||
"3: movl %4,%0 ; jmp 1b\n\t"
|
||||
".previous\n\t"
|
||||
".section __ex_table,\"a\"\n"
|
||||
" .align 4\n\t"
|
||||
" .long 2b,3b\n\t"
|
||||
".previous"
|
||||
: "=a" (err)
|
||||
: "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
|
||||
"i" (-EFAULT));
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline unsigned long long native_read_tsc(void)
|
||||
{
|
||||
unsigned long long val;
|
||||
asm volatile("rdtsc" : "=A" (val));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline unsigned long long native_read_pmc(void)
|
||||
{
|
||||
unsigned long long val;
|
||||
asm volatile("rdpmc" : "=A" (val));
|
||||
return val;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PARAVIRT
|
||||
#include <asm/paravirt.h>
|
||||
#else
|
||||
# ifdef __i386__
|
||||
# include "msr_32.h"
|
||||
# else
|
||||
# include "msr_64.h"
|
||||
# endif
|
||||
#include <linux/errno.h>
|
||||
/*
|
||||
* Access to machine-specific registers (available on 586 and better only)
|
||||
* Note: the rd* operations modify the parameters directly (without using
|
||||
* pointer indirection), this allows gcc to optimize better
|
||||
*/
|
||||
|
||||
#define rdmsr(msr,val1,val2) \
|
||||
do { \
|
||||
u64 __val = native_read_msr(msr); \
|
||||
(val1) = (u32)__val; \
|
||||
(val2) = (u32)(__val >> 32); \
|
||||
} while(0)
|
||||
|
||||
static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
|
||||
{
|
||||
native_write_msr(__msr, ((u64)__high << 32) | __low);
|
||||
}
|
||||
|
||||
#define rdmsrl(msr,val) \
|
||||
((val) = native_read_msr(msr))
|
||||
|
||||
#define wrmsrl(msr,val) native_write_msr(msr, val)
|
||||
|
||||
/* wrmsr with exception handling */
|
||||
static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
|
||||
{
|
||||
return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
|
||||
}
|
||||
|
||||
/* rdmsr with exception handling */
|
||||
#define rdmsr_safe(msr,p1,p2) \
|
||||
({ \
|
||||
int __err; \
|
||||
u64 __val = native_read_msr_safe(msr, &__err); \
|
||||
(*p1) = (u32)__val; \
|
||||
(*p2) = (u32)(__val >> 32); \
|
||||
__err; \
|
||||
})
|
||||
|
||||
#define rdtscl(low) \
|
||||
((low) = (u32)native_read_tsc())
|
||||
|
||||
#define rdtscll(val) \
|
||||
((val) = native_read_tsc())
|
||||
|
||||
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
|
||||
|
||||
#define rdpmc(counter,low,high) \
|
||||
do { \
|
||||
u64 _l = native_read_pmc(); \
|
||||
(low) = (u32)_l; \
|
||||
(high) = (u32)(_l >> 32); \
|
||||
} while(0)
|
||||
#endif /* !CONFIG_PARAVIRT */
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
|
||||
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
|
||||
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
|
||||
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
|
||||
#else /* CONFIG_SMP */
|
||||
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
rdmsr(msr_no, *l, *h);
|
||||
}
|
||||
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
wrmsr(msr_no, l, h);
|
||||
}
|
||||
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
return rdmsr_safe(msr_no, l, h);
|
||||
}
|
||||
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
return wrmsr_safe(msr_no, l, h);
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
#endif /* ! __ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#else /* __i386__ */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/errno.h>
|
||||
/*
|
||||
* Access to machine-specific registers (available on 586 and better only)
|
||||
* Note: the rd* operations modify the parameters directly (without using
|
||||
* pointer indirection), this allows gcc to optimize better
|
||||
*/
|
||||
|
||||
#define rdmsr(msr,val1,val2) \
|
||||
__asm__ __volatile__("rdmsr" \
|
||||
: "=a" (val1), "=d" (val2) \
|
||||
: "c" (msr))
|
||||
|
||||
|
||||
#define rdmsrl(msr,val) do { unsigned long a__,b__; \
|
||||
__asm__ __volatile__("rdmsr" \
|
||||
: "=a" (a__), "=d" (b__) \
|
||||
: "c" (msr)); \
|
||||
val = a__ | (b__<<32); \
|
||||
} while(0)
|
||||
|
||||
#define wrmsr(msr,val1,val2) \
|
||||
__asm__ __volatile__("wrmsr" \
|
||||
: /* no outputs */ \
|
||||
: "c" (msr), "a" (val1), "d" (val2))
|
||||
|
||||
#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
|
||||
|
||||
/* wrmsr with exception handling */
|
||||
#define wrmsr_safe(msr,a,b) ({ int ret__; \
|
||||
asm volatile("2: wrmsr ; xorl %0,%0\n" \
|
||||
"1:\n\t" \
|
||||
".section .fixup,\"ax\"\n\t" \
|
||||
"3: movl %4,%0 ; jmp 1b\n\t" \
|
||||
".previous\n\t" \
|
||||
".section __ex_table,\"a\"\n" \
|
||||
" .align 8\n\t" \
|
||||
" .quad 2b,3b\n\t" \
|
||||
".previous" \
|
||||
: "=a" (ret__) \
|
||||
: "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
|
||||
ret__; })
|
||||
|
||||
#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
|
||||
|
||||
#define rdmsr_safe(msr,a,b) \
|
||||
({ int ret__; \
|
||||
asm volatile ("1: rdmsr\n" \
|
||||
"2:\n" \
|
||||
".section .fixup,\"ax\"\n" \
|
||||
"3: movl %4,%0\n" \
|
||||
" jmp 2b\n" \
|
||||
".previous\n" \
|
||||
".section __ex_table,\"a\"\n" \
|
||||
" .align 8\n" \
|
||||
" .quad 1b,3b\n" \
|
||||
".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
|
||||
:"c"(msr), "i"(-EIO), "0"(0)); \
|
||||
ret__; })
|
||||
|
||||
#define rdtsc(low,high) \
|
||||
__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
|
||||
|
||||
#define rdtscl(low) \
|
||||
__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
|
||||
|
||||
#define rdtscp(low,high,aux) \
|
||||
asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
|
||||
|
||||
#define rdtscll(val) do { \
|
||||
unsigned int __a,__d; \
|
||||
asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
|
||||
(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
|
||||
} while(0)
|
||||
|
||||
#define rdtscpll(val, aux) do { \
|
||||
unsigned long __a, __d; \
|
||||
asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
|
||||
(val) = (__d << 32) | __a; \
|
||||
} while (0)
|
||||
|
||||
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
|
||||
|
||||
#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
|
||||
|
||||
#define rdpmc(counter,low,high) \
|
||||
__asm__ __volatile__("rdpmc" \
|
||||
: "=a" (low), "=d" (high) \
|
||||
: "c" (counter))
|
||||
|
||||
static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
|
||||
unsigned int *ecx, unsigned int *edx)
|
||||
{
|
||||
__asm__("cpuid"
|
||||
: "=a" (*eax),
|
||||
"=b" (*ebx),
|
||||
"=c" (*ecx),
|
||||
"=d" (*edx)
|
||||
: "0" (op));
|
||||
}
|
||||
|
||||
/* Some CPUID calls want 'count' to be placed in ecx */
|
||||
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
|
||||
int *edx)
|
||||
{
|
||||
__asm__("cpuid"
|
||||
: "=a" (*eax),
|
||||
"=b" (*ebx),
|
||||
"=c" (*ecx),
|
||||
"=d" (*edx)
|
||||
: "0" (op), "c" (count));
|
||||
}
|
||||
|
||||
/*
|
||||
* CPUID functions returning a single datum
|
||||
*/
|
||||
static inline unsigned int cpuid_eax(unsigned int op)
|
||||
{
|
||||
unsigned int eax;
|
||||
|
||||
__asm__("cpuid"
|
||||
: "=a" (eax)
|
||||
: "0" (op)
|
||||
: "bx", "cx", "dx");
|
||||
return eax;
|
||||
}
|
||||
static inline unsigned int cpuid_ebx(unsigned int op)
|
||||
{
|
||||
unsigned int eax, ebx;
|
||||
|
||||
__asm__("cpuid"
|
||||
: "=a" (eax), "=b" (ebx)
|
||||
: "0" (op)
|
||||
: "cx", "dx" );
|
||||
return ebx;
|
||||
}
|
||||
static inline unsigned int cpuid_ecx(unsigned int op)
|
||||
{
|
||||
unsigned int eax, ecx;
|
||||
|
||||
__asm__("cpuid"
|
||||
: "=a" (eax), "=c" (ecx)
|
||||
: "0" (op)
|
||||
: "bx", "dx" );
|
||||
return ecx;
|
||||
}
|
||||
static inline unsigned int cpuid_edx(unsigned int op)
|
||||
{
|
||||
unsigned int eax, edx;
|
||||
|
||||
__asm__("cpuid"
|
||||
: "=a" (eax), "=d" (edx)
|
||||
: "0" (op)
|
||||
: "bx", "cx");
|
||||
return edx;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
|
||||
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
|
||||
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
|
||||
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
|
||||
#else /* CONFIG_SMP */
|
||||
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
rdmsr(msr_no, *l, *h);
|
||||
}
|
||||
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
wrmsr(msr_no, l, h);
|
||||
}
|
||||
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
return rdmsr_safe(msr_no, l, h);
|
||||
}
|
||||
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
return wrmsr_safe(msr_no, l, h);
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* !__i386__ */
|
||||
|
||||
#endif
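A hedged usage sketch (editorial addition, not from the patch) of the error-checked MSR accessors declared in the merged header above. The function name is made up; the MSR index is supplied by the caller.

/* Illustrative only: read an MSR with fault handling and write the same
 * value back. Returns 0 on success or the accessor's error code. */
static int example_rewrite_msr(u32 msr)
{
	u32 lo, hi;
	int err;

	err = rdmsr_safe(msr, &lo, &hi);	/* non-zero if the read faults */
	if (err)
		return err;

	return wrmsr_safe(msr, lo, hi);		/* non-zero if the write faults */
}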
@@ -1,161 +0,0 @@
#ifndef __ASM_MSR_H
|
||||
#define __ASM_MSR_H
|
||||
|
||||
#include <asm/msr-index.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <asm/errno.h>
|
||||
|
||||
static inline unsigned long long native_read_msr(unsigned int msr)
|
||||
{
|
||||
unsigned long long val;
|
||||
|
||||
asm volatile("rdmsr" : "=A" (val) : "c" (msr));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline unsigned long long native_read_msr_safe(unsigned int msr,
|
||||
int *err)
|
||||
{
|
||||
unsigned long long val;
|
||||
|
||||
asm volatile("2: rdmsr ; xorl %0,%0\n"
|
||||
"1:\n\t"
|
||||
".section .fixup,\"ax\"\n\t"
|
||||
"3: movl %3,%0 ; jmp 1b\n\t"
|
||||
".previous\n\t"
|
||||
".section __ex_table,\"a\"\n"
|
||||
" .align 4\n\t"
|
||||
" .long 2b,3b\n\t"
|
||||
".previous"
|
||||
: "=r" (*err), "=A" (val)
|
||||
: "c" (msr), "i" (-EFAULT));
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void native_write_msr(unsigned int msr, unsigned long long val)
|
||||
{
|
||||
asm volatile("wrmsr" : : "c" (msr), "A"(val));
|
||||
}
|
||||
|
||||
static inline int native_write_msr_safe(unsigned int msr,
|
||||
unsigned long long val)
|
||||
{
|
||||
int err;
|
||||
asm volatile("2: wrmsr ; xorl %0,%0\n"
|
||||
"1:\n\t"
|
||||
".section .fixup,\"ax\"\n\t"
|
||||
"3: movl %4,%0 ; jmp 1b\n\t"
|
||||
".previous\n\t"
|
||||
".section __ex_table,\"a\"\n"
|
||||
" .align 4\n\t"
|
||||
" .long 2b,3b\n\t"
|
||||
".previous"
|
||||
: "=a" (err)
|
||||
: "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
|
||||
"i" (-EFAULT));
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline unsigned long long native_read_tsc(void)
|
||||
{
|
||||
unsigned long long val;
|
||||
asm volatile("rdtsc" : "=A" (val));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline unsigned long long native_read_pmc(void)
|
||||
{
|
||||
unsigned long long val;
|
||||
asm volatile("rdpmc" : "=A" (val));
|
||||
return val;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PARAVIRT
|
||||
#include <asm/paravirt.h>
|
||||
#else
|
||||
#include <linux/errno.h>
|
||||
/*
|
||||
* Access to machine-specific registers (available on 586 and better only)
|
||||
* Note: the rd* operations modify the parameters directly (without using
|
||||
* pointer indirection), this allows gcc to optimize better
|
||||
*/
|
||||
|
||||
#define rdmsr(msr,val1,val2) \
|
||||
do { \
|
||||
u64 __val = native_read_msr(msr); \
|
||||
(val1) = (u32)__val; \
|
||||
(val2) = (u32)(__val >> 32); \
|
||||
} while(0)
|
||||
|
||||
static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
|
||||
{
|
||||
native_write_msr(__msr, ((u64)__high << 32) | __low);
|
||||
}
|
||||
|
||||
#define rdmsrl(msr,val) \
|
||||
((val) = native_read_msr(msr))
|
||||
|
||||
#define wrmsrl(msr,val) native_write_msr(msr, val)
|
||||
|
||||
/* wrmsr with exception handling */
|
||||
static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
|
||||
{
|
||||
return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
|
||||
}
|
||||
|
||||
/* rdmsr with exception handling */
|
||||
#define rdmsr_safe(msr,p1,p2) \
|
||||
({ \
|
||||
int __err; \
|
||||
u64 __val = native_read_msr_safe(msr, &__err); \
|
||||
(*p1) = (u32)__val; \
|
||||
(*p2) = (u32)(__val >> 32); \
|
||||
__err; \
|
||||
})
|
||||
|
||||
#define rdtscl(low) \
|
||||
((low) = (u32)native_read_tsc())
|
||||
|
||||
#define rdtscll(val) \
|
||||
((val) = native_read_tsc())
|
||||
|
||||
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
|
||||
|
||||
#define rdpmc(counter,low,high) \
|
||||
do { \
|
||||
u64 _l = native_read_pmc(); \
|
||||
(low) = (u32)_l; \
|
||||
(high) = (u32)(_l >> 32); \
|
||||
} while(0)
|
||||
#endif /* !CONFIG_PARAVIRT */
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
|
||||
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
|
||||
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
|
||||
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
|
||||
#else /* CONFIG_SMP */
|
||||
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
rdmsr(msr_no, *l, *h);
|
||||
}
|
||||
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
wrmsr(msr_no, l, h);
|
||||
}
|
||||
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
return rdmsr_safe(msr_no, l, h);
|
||||
}
|
||||
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
return wrmsr_safe(msr_no, l, h);
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
#endif
|
||||
#endif
|
||||
#endif /* __ASM_MSR_H */
@@ -1,187 +0,0 @@
#ifndef X86_64_MSR_H
|
||||
#define X86_64_MSR_H 1
|
||||
|
||||
#include <asm/msr-index.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/errno.h>
|
||||
/*
|
||||
* Access to machine-specific registers (available on 586 and better only)
|
||||
* Note: the rd* operations modify the parameters directly (without using
|
||||
* pointer indirection), this allows gcc to optimize better
|
||||
*/
|
||||
|
||||
#define rdmsr(msr,val1,val2) \
|
||||
__asm__ __volatile__("rdmsr" \
|
||||
: "=a" (val1), "=d" (val2) \
|
||||
: "c" (msr))
|
||||
|
||||
|
||||
#define rdmsrl(msr,val) do { unsigned long a__,b__; \
|
||||
__asm__ __volatile__("rdmsr" \
|
||||
: "=a" (a__), "=d" (b__) \
|
||||
: "c" (msr)); \
|
||||
val = a__ | (b__<<32); \
|
||||
} while(0)
|
||||
|
||||
#define wrmsr(msr,val1,val2) \
|
||||
__asm__ __volatile__("wrmsr" \
|
||||
: /* no outputs */ \
|
||||
: "c" (msr), "a" (val1), "d" (val2))
|
||||
|
||||
#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
|
||||
|
||||
/* wrmsr with exception handling */
|
||||
#define wrmsr_safe(msr,a,b) ({ int ret__; \
|
||||
asm volatile("2: wrmsr ; xorl %0,%0\n" \
|
||||
"1:\n\t" \
|
||||
".section .fixup,\"ax\"\n\t" \
|
||||
"3: movl %4,%0 ; jmp 1b\n\t" \
|
||||
".previous\n\t" \
|
||||
".section __ex_table,\"a\"\n" \
|
||||
" .align 8\n\t" \
|
||||
" .quad 2b,3b\n\t" \
|
||||
".previous" \
|
||||
: "=a" (ret__) \
|
||||
: "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
|
||||
ret__; })
|
||||
|
||||
#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
|
||||
|
||||
#define rdmsr_safe(msr,a,b) \
|
||||
({ int ret__; \
|
||||
asm volatile ("1: rdmsr\n" \
|
||||
"2:\n" \
|
||||
".section .fixup,\"ax\"\n" \
|
||||
"3: movl %4,%0\n" \
|
||||
" jmp 2b\n" \
|
||||
".previous\n" \
|
||||
".section __ex_table,\"a\"\n" \
|
||||
" .align 8\n" \
|
||||
" .quad 1b,3b\n" \
|
||||
".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
|
||||
:"c"(msr), "i"(-EIO), "0"(0)); \
|
||||
ret__; })
|
||||
|
||||
#define rdtsc(low,high) \
|
||||
__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
|
||||
|
||||
#define rdtscl(low) \
|
||||
__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
|
||||
|
||||
#define rdtscp(low,high,aux) \
|
||||
asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
|
||||
|
||||
#define rdtscll(val) do { \
|
||||
unsigned int __a,__d; \
|
||||
asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
|
||||
(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
|
||||
} while(0)
|
||||
|
||||
#define rdtscpll(val, aux) do { \
|
||||
unsigned long __a, __d; \
|
||||
asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
|
||||
(val) = (__d << 32) | __a; \
|
||||
} while (0)
|
||||
|
||||
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
|
||||
|
||||
#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
|
||||
|
||||
#define rdpmc(counter,low,high) \
|
||||
__asm__ __volatile__("rdpmc" \
|
||||
: "=a" (low), "=d" (high) \
|
||||
: "c" (counter))
|
||||
|
||||
static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
|
||||
unsigned int *ecx, unsigned int *edx)
|
||||
{
|
||||
__asm__("cpuid"
|
||||
: "=a" (*eax),
|
||||
"=b" (*ebx),
|
||||
"=c" (*ecx),
|
||||
"=d" (*edx)
|
||||
: "0" (op));
|
||||
}
|
||||
|
||||
/* Some CPUID calls want 'count' to be placed in ecx */
|
||||
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
|
||||
int *edx)
|
||||
{
|
||||
__asm__("cpuid"
|
||||
: "=a" (*eax),
|
||||
"=b" (*ebx),
|
||||
"=c" (*ecx),
|
||||
"=d" (*edx)
|
||||
: "0" (op), "c" (count));
|
||||
}
|
||||
|
||||
/*
|
||||
* CPUID functions returning a single datum
|
||||
*/
|
||||
static inline unsigned int cpuid_eax(unsigned int op)
|
||||
{
|
||||
unsigned int eax;
|
||||
|
||||
__asm__("cpuid"
|
||||
: "=a" (eax)
|
||||
: "0" (op)
|
||||
: "bx", "cx", "dx");
|
||||
return eax;
|
||||
}
|
||||
static inline unsigned int cpuid_ebx(unsigned int op)
|
||||
{
|
||||
unsigned int eax, ebx;
|
||||
|
||||
__asm__("cpuid"
|
||||
: "=a" (eax), "=b" (ebx)
|
||||
: "0" (op)
|
||||
: "cx", "dx" );
|
||||
return ebx;
|
||||
}
|
||||
static inline unsigned int cpuid_ecx(unsigned int op)
|
||||
{
|
||||
unsigned int eax, ecx;
|
||||
|
||||
__asm__("cpuid"
|
||||
: "=a" (eax), "=c" (ecx)
|
||||
: "0" (op)
|
||||
: "bx", "dx" );
|
||||
return ecx;
|
||||
}
|
||||
static inline unsigned int cpuid_edx(unsigned int op)
|
||||
{
|
||||
unsigned int eax, edx;
|
||||
|
||||
__asm__("cpuid"
|
||||
: "=a" (eax), "=d" (edx)
|
||||
: "0" (op)
|
||||
: "bx", "cx");
|
||||
return edx;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
|
||||
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
|
||||
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
|
||||
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
|
||||
#else /* CONFIG_SMP */
|
||||
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
rdmsr(msr_no, *l, *h);
|
||||
}
|
||||
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
wrmsr(msr_no, l, h);
|
||||
}
|
||||
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
return rdmsr_safe(msr_no, l, h);
|
||||
}
|
||||
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
return wrmsr_safe(msr_no, l, h);
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* X86_64_MSR_H */
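A small sketch (editorial addition, not part of the patch) of the cpuid_*() convenience helpers shown above. Leaf 1, ECX bit 0 corresponds to SSE3 on Intel/AMD parts and is used here purely as an example feature bit.

/* Illustrative only: probe one CPUID feature bit via cpuid_ecx(). */
static inline int example_cpu_has_sse3(void)
{
	return (cpuid_ecx(1) & 1) != 0;		/* CPUID.01H:ECX[0] = SSE3 */
}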
@@ -1,13 +1,164 @@
/* Generic MTRR (Memory Type Range Register) ioctls.
|
||||
|
||||
Copyright (C) 1997-1999 Richard Gooch
|
||||
|
||||
This library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Library General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2 of the License, or (at your option) any later version.
|
||||
|
||||
This library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Library General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Library General Public
|
||||
License along with this library; if not, write to the Free
|
||||
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
|
||||
Richard Gooch may be reached by email at rgooch@atnf.csiro.au
|
||||
The postal address is:
|
||||
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
|
||||
*/
|
||||
#ifndef _ASM_X86_MTRR_H
|
||||
#define _ASM_X86_MTRR_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/errno.h>
|
||||
|
||||
#define MTRR_IOCTL_BASE 'M'
|
||||
|
||||
struct mtrr_sentry
|
||||
{
|
||||
unsigned long base; /* Base address */
|
||||
unsigned int size; /* Size of region */
|
||||
unsigned int type; /* Type of region */
|
||||
};
|
||||
|
||||
/* Warning: this structure has a different order from i386
|
||||
on x86-64. The 32bit emulation code takes care of that.
|
||||
But you need to use this for 64bit, otherwise your X server
|
||||
will break. */
|
||||
|
||||
#ifdef __i386__
|
||||
struct mtrr_gentry
|
||||
{
|
||||
unsigned int regnum; /* Register number */
|
||||
unsigned long base; /* Base address */
|
||||
unsigned int size; /* Size of region */
|
||||
unsigned int type; /* Type of region */
|
||||
};
|
||||
|
||||
#else /* __i386__ */
|
||||
|
||||
struct mtrr_gentry
|
||||
{
|
||||
unsigned long base; /* Base address */
|
||||
unsigned int size; /* Size of region */
|
||||
unsigned int regnum; /* Register number */
|
||||
unsigned int type; /* Type of region */
|
||||
};
|
||||
#endif /* !__i386__ */
|
||||
|
||||
/* These are the various ioctls */
|
||||
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
|
||||
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
|
||||
#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
|
||||
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
|
||||
#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
|
||||
#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
|
||||
#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
|
||||
#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
|
||||
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
|
||||
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
|
||||
|
||||
/* These are the region types */
|
||||
#define MTRR_TYPE_UNCACHABLE 0
|
||||
#define MTRR_TYPE_WRCOMB 1
|
||||
/*#define MTRR_TYPE_ 2*/
|
||||
/*#define MTRR_TYPE_ 3*/
|
||||
#define MTRR_TYPE_WRTHROUGH 4
|
||||
#define MTRR_TYPE_WRPROT 5
|
||||
#define MTRR_TYPE_WRBACK 6
|
||||
#define MTRR_NUM_TYPES 7
|
||||
|
||||
#ifdef __KERNEL__
|
||||
# ifdef CONFIG_X86_32
|
||||
# include "mtrr_32.h"
|
||||
# else
|
||||
# include "mtrr_64.h"
|
||||
# endif
|
||||
#else
|
||||
# ifdef __i386__
|
||||
# include "mtrr_32.h"
|
||||
# else
|
||||
# include "mtrr_64.h"
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* The following functions are for use by other drivers */
|
||||
# ifdef CONFIG_MTRR
|
||||
extern void mtrr_save_fixed_ranges(void *);
|
||||
extern void mtrr_save_state(void);
|
||||
extern int mtrr_add (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment);
|
||||
extern int mtrr_add_page (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment);
|
||||
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
|
||||
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
|
||||
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
|
||||
extern void mtrr_ap_init(void);
|
||||
extern void mtrr_bp_init(void);
|
||||
# else
|
||||
#define mtrr_save_fixed_ranges(arg) do {} while (0)
|
||||
#define mtrr_save_state() do {} while (0)
|
||||
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static __inline__ int mtrr_del (int reg, unsigned long base,
|
||||
unsigned long size)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static __inline__ int mtrr_del_page (int reg, unsigned long base,
|
||||
unsigned long size)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
|
||||
|
||||
#define mtrr_ap_init() do {} while (0)
|
||||
#define mtrr_bp_init() do {} while (0)
|
||||
# endif
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
#include <linux/compat.h>
|
||||
|
||||
struct mtrr_sentry32
|
||||
{
|
||||
compat_ulong_t base; /* Base address */
|
||||
compat_uint_t size; /* Size of region */
|
||||
compat_uint_t type; /* Type of region */
|
||||
};
|
||||
|
||||
struct mtrr_gentry32
|
||||
{
|
||||
compat_ulong_t regnum; /* Register number */
|
||||
compat_uint_t base; /* Base address */
|
||||
compat_uint_t size; /* Size of region */
|
||||
compat_uint_t type; /* Type of region */
|
||||
};
|
||||
|
||||
#define MTRR_IOCTL_BASE 'M'
|
||||
|
||||
#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
|
||||
#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
|
||||
#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _ASM_X86_MTRR_H */
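A hedged sketch (editorial addition, not part of the patch) of the driver-facing mtrr_add()/mtrr_del() interface declared above, in the usual framebuffer write-combining pattern. The base and size come from the caller and the names are made up.

/* Illustrative only: request write-combining for a framebuffer range and
 * release it again. mtrr_add() returns a register number (>= 0) or a
 * negative errno; keep it for the matching mtrr_del(). */
static int example_wc_cookie = -1;

static void example_fb_set_wc(unsigned long base, unsigned long size)
{
	example_wc_cookie = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
}

static void example_fb_clear_wc(unsigned long base, unsigned long size)
{
	if (example_wc_cookie >= 0)
		mtrr_del(example_wc_cookie, base, size);
}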
@@ -1,115 +0,0 @@
/* Generic MTRR (Memory Type Range Register) ioctls.
|
||||
|
||||
Copyright (C) 1997-1999 Richard Gooch
|
||||
|
||||
This library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Library General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2 of the License, or (at your option) any later version.
|
||||
|
||||
This library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Library General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Library General Public
|
||||
License along with this library; if not, write to the Free
|
||||
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
|
||||
Richard Gooch may be reached by email at rgooch@atnf.csiro.au
|
||||
The postal address is:
|
||||
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
|
||||
*/
|
||||
#ifndef _LINUX_MTRR_H
|
||||
#define _LINUX_MTRR_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/errno.h>
|
||||
|
||||
#define MTRR_IOCTL_BASE 'M'
|
||||
|
||||
struct mtrr_sentry
|
||||
{
|
||||
unsigned long base; /* Base address */
|
||||
unsigned int size; /* Size of region */
|
||||
unsigned int type; /* Type of region */
|
||||
};
|
||||
|
||||
struct mtrr_gentry
|
||||
{
|
||||
unsigned int regnum; /* Register number */
|
||||
unsigned long base; /* Base address */
|
||||
unsigned int size; /* Size of region */
|
||||
unsigned int type; /* Type of region */
|
||||
};
|
||||
|
||||
/* These are the various ioctls */
|
||||
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
|
||||
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
|
||||
#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
|
||||
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
|
||||
#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
|
||||
#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
|
||||
#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
|
||||
#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
|
||||
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
|
||||
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
|
||||
|
||||
/* These are the region types */
|
||||
#define MTRR_TYPE_UNCACHABLE 0
|
||||
#define MTRR_TYPE_WRCOMB 1
|
||||
/*#define MTRR_TYPE_ 2*/
|
||||
/*#define MTRR_TYPE_ 3*/
|
||||
#define MTRR_TYPE_WRTHROUGH 4
|
||||
#define MTRR_TYPE_WRPROT 5
|
||||
#define MTRR_TYPE_WRBACK 6
|
||||
#define MTRR_NUM_TYPES 7
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/* The following functions are for use by other drivers */
|
||||
# ifdef CONFIG_MTRR
|
||||
extern void mtrr_save_fixed_ranges(void *);
|
||||
extern void mtrr_save_state(void);
|
||||
extern int mtrr_add (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment);
|
||||
extern int mtrr_add_page (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment);
|
||||
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
|
||||
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
|
||||
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
|
||||
extern void mtrr_ap_init(void);
|
||||
extern void mtrr_bp_init(void);
|
||||
# else
|
||||
#define mtrr_save_fixed_ranges(arg) do {} while (0)
|
||||
#define mtrr_save_state() do {} while (0)
|
||||
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static __inline__ int mtrr_del (int reg, unsigned long base,
|
||||
unsigned long size)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static __inline__ int mtrr_del_page (int reg, unsigned long base,
|
||||
unsigned long size)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
|
||||
|
||||
#define mtrr_ap_init() do {} while (0)
|
||||
#define mtrr_bp_init() do {} while (0)
|
||||
# endif
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_MTRR_H */
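
The extern declarations in the removed header above ("for use by other drivers") are the in-kernel side of the same facility, and they survive unchanged in the merged asm-x86/mtrr.h. A minimal, hedged sketch of how a framebuffer-style driver would typically use them (illustrative function names, error handling trimmed):

/* Hedged sketch: a driver covering its aperture with a write-combining MTRR. */
#include <linux/kernel.h>
#include <asm/mtrr.h>

static int example_mtrr_reg = -1;

static void example_enable_wc(unsigned long aperture_base, unsigned long aperture_size)
{
	/* increment == 1 asks the MTRR core to keep a usage count for this region */
	example_mtrr_reg = mtrr_add(aperture_base, aperture_size, MTRR_TYPE_WRCOMB, 1);
	if (example_mtrr_reg < 0)
		printk(KERN_INFO "example: could not set WC MTRR (%d)\n", example_mtrr_reg);
}

static void example_disable_wc(unsigned long aperture_base, unsigned long aperture_size)
{
	if (example_mtrr_reg >= 0)
		mtrr_del(example_mtrr_reg, aperture_base, aperture_size);
}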
|
|
@ -1,152 +0,0 @@
|
|||
/* Generic MTRR (Memory Type Range Register) ioctls.
|
||||
|
||||
Copyright (C) 1997-1999 Richard Gooch
|
||||
|
||||
This library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Library General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2 of the License, or (at your option) any later version.
|
||||
|
||||
This library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Library General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Library General Public
|
||||
License along with this library; if not, write to the Free
|
||||
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
|
||||
Richard Gooch may be reached by email at rgooch@atnf.csiro.au
|
||||
The postal address is:
|
||||
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
|
||||
*/
|
||||
#ifndef _LINUX_MTRR_H
|
||||
#define _LINUX_MTRR_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
#define MTRR_IOCTL_BASE 'M'
|
||||
|
||||
struct mtrr_sentry
|
||||
{
|
||||
unsigned long base; /* Base address */
|
||||
unsigned int size; /* Size of region */
|
||||
unsigned int type; /* Type of region */
|
||||
};
|
||||
|
||||
/* Warning: this structure has a different order from i386
|
||||
on x86-64. The 32bit emulation code takes care of that.
|
||||
But you need to use this for 64bit, otherwise your X server
|
||||
will break. */
|
||||
struct mtrr_gentry
|
||||
{
|
||||
unsigned long base; /* Base address */
|
||||
unsigned int size; /* Size of region */
|
||||
unsigned int regnum; /* Register number */
|
||||
unsigned int type; /* Type of region */
|
||||
};
|
||||
|
||||
/* These are the various ioctls */
|
||||
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
|
||||
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
|
||||
#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
|
||||
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
|
||||
#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
|
||||
#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
|
||||
#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
|
||||
#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
|
||||
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
|
||||
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
|
||||
|
||||
/* These are the region types */
|
||||
#define MTRR_TYPE_UNCACHABLE 0
|
||||
#define MTRR_TYPE_WRCOMB 1
|
||||
/*#define MTRR_TYPE_ 2*/
|
||||
/*#define MTRR_TYPE_ 3*/
|
||||
#define MTRR_TYPE_WRTHROUGH 4
|
||||
#define MTRR_TYPE_WRPROT 5
|
||||
#define MTRR_TYPE_WRBACK 6
|
||||
#define MTRR_NUM_TYPES 7
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/* The following functions are for use by other drivers */
|
||||
# ifdef CONFIG_MTRR
|
||||
extern int mtrr_add (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment);
|
||||
extern int mtrr_add_page (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment);
|
||||
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
|
||||
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
|
||||
# else
|
||||
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
|
||||
unsigned int type, char increment)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static __inline__ int mtrr_del (int reg, unsigned long base,
|
||||
unsigned long size)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static __inline__ int mtrr_del_page (int reg, unsigned long base,
|
||||
unsigned long size)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_MTRR */
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
#include <linux/compat.h>
|
||||
|
||||
struct mtrr_sentry32
|
||||
{
|
||||
compat_ulong_t base; /* Base address */
|
||||
compat_uint_t size; /* Size of region */
|
||||
compat_uint_t type; /* Type of region */
|
||||
};
|
||||
|
||||
struct mtrr_gentry32
|
||||
{
|
||||
compat_ulong_t regnum; /* Register number */
|
||||
compat_uint_t base; /* Base address */
|
||||
compat_uint_t size; /* Size of region */
|
||||
compat_uint_t type; /* Type of region */
|
||||
};
|
||||
|
||||
#define MTRR_IOCTL_BASE 'M'
|
||||
|
||||
#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
|
||||
#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
|
||||
#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
|
||||
#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
|
||||
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
#ifdef CONFIG_MTRR
|
||||
extern void mtrr_ap_init(void);
|
||||
extern void mtrr_bp_init(void);
|
||||
extern void mtrr_save_fixed_ranges(void *);
|
||||
extern void mtrr_save_state(void);
|
||||
#else
|
||||
#define mtrr_ap_init() do {} while (0)
|
||||
#define mtrr_bp_init() do {} while (0)
|
||||
#define mtrr_save_fixed_ranges(arg) do {} while (0)
|
||||
#define mtrr_save_state() do {} while (0)
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _LINUX_MTRR_H */
|
|
@ -1,13 +1,142 @@
|
|||
#ifndef _ASM_X86_PTRACE_H
|
||||
#define _ASM_X86_PTRACE_H
|
||||
|
||||
#include <linux/compiler.h> /* For __user */
|
||||
#include <asm/ptrace-abi.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#ifdef __i386__
|
||||
/* this struct defines the way the registers are stored on the
|
||||
stack during a system call. */
|
||||
|
||||
struct pt_regs {
|
||||
long ebx;
|
||||
long ecx;
|
||||
long edx;
|
||||
long esi;
|
||||
long edi;
|
||||
long ebp;
|
||||
long eax;
|
||||
int xds;
|
||||
int xes;
|
||||
int xfs;
|
||||
/* int xgs; */
|
||||
long orig_eax;
|
||||
long eip;
|
||||
int xcs;
|
||||
long eflags;
|
||||
long esp;
|
||||
int xss;
|
||||
};
|
||||
|
||||
#ifdef __KERNEL__
|
||||
# ifdef CONFIG_X86_32
|
||||
# include "ptrace_32.h"
|
||||
# else
|
||||
# include "ptrace_64.h"
|
||||
# endif
|
||||
#else
|
||||
# ifdef __i386__
|
||||
# include "ptrace_32.h"
|
||||
# else
|
||||
# include "ptrace_64.h"
|
||||
# endif
|
||||
|
||||
#include <asm/vm86.h>
|
||||
#include <asm/segment.h>
|
||||
|
||||
struct task_struct;
|
||||
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
|
||||
|
||||
/*
|
||||
* user_mode_vm(regs) determines whether a register set came from user mode.
|
||||
* This is true if V8086 mode was enabled OR if the register set was from
|
||||
* protected mode with RPL-3 CS value. This tricky test checks that with
|
||||
* one comparison. Many places in the kernel can bypass this full check
|
||||
* if they have already ruled out V8086 mode, so user_mode(regs) can be used.
|
||||
*/
|
||||
static inline int user_mode(struct pt_regs *regs)
|
||||
{
|
||||
return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
|
||||
}
|
||||
static inline int user_mode_vm(struct pt_regs *regs)
|
||||
{
|
||||
return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
|
||||
}
|
||||
static inline int v8086_mode(struct pt_regs *regs)
|
||||
{
|
||||
return (regs->eflags & VM_MASK);
|
||||
}
|
||||
|
||||
#define instruction_pointer(regs) ((regs)->eip)
|
||||
#define frame_pointer(regs) ((regs)->ebp)
|
||||
#define stack_pointer(regs) ((regs)->esp)
|
||||
#define regs_return_value(regs) ((regs)->eax)
|
||||
|
||||
extern unsigned long profile_pc(struct pt_regs *regs);
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#else /* __i386__ */
|
||||
|
||||
struct pt_regs {
|
||||
unsigned long r15;
|
||||
unsigned long r14;
|
||||
unsigned long r13;
|
||||
unsigned long r12;
|
||||
unsigned long rbp;
|
||||
unsigned long rbx;
|
||||
/* arguments: non interrupts/non tracing syscalls only save upto here*/
|
||||
unsigned long r11;
|
||||
unsigned long r10;
|
||||
unsigned long r9;
|
||||
unsigned long r8;
|
||||
unsigned long rax;
|
||||
unsigned long rcx;
|
||||
unsigned long rdx;
|
||||
unsigned long rsi;
|
||||
unsigned long rdi;
|
||||
unsigned long orig_rax;
|
||||
/* end of arguments */
|
||||
/* cpu exception frame or undefined */
|
||||
unsigned long rip;
|
||||
unsigned long cs;
|
||||
unsigned long eflags;
|
||||
unsigned long rsp;
|
||||
unsigned long ss;
|
||||
/* top of stack page */
|
||||
};
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#define user_mode(regs) (!!((regs)->cs & 3))
|
||||
#define user_mode_vm(regs) user_mode(regs)
|
||||
#define instruction_pointer(regs) ((regs)->rip)
|
||||
#define frame_pointer(regs) ((regs)->rbp)
|
||||
#define stack_pointer(regs) ((regs)->rsp)
|
||||
#define regs_return_value(regs) ((regs)->rax)
|
||||
|
||||
extern unsigned long profile_pc(struct pt_regs *regs);
|
||||
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
|
||||
|
||||
struct task_struct;
|
||||
|
||||
extern unsigned long
|
||||
convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
|
||||
|
||||
enum {
|
||||
EF_CF = 0x00000001,
|
||||
EF_PF = 0x00000004,
|
||||
EF_AF = 0x00000010,
|
||||
EF_ZF = 0x00000040,
|
||||
EF_SF = 0x00000080,
|
||||
EF_TF = 0x00000100,
|
||||
EF_IE = 0x00000200,
|
||||
EF_DF = 0x00000400,
|
||||
EF_OF = 0x00000800,
|
||||
EF_IOPL = 0x00003000,
|
||||
EF_IOPL_RING0 = 0x00000000,
|
||||
EF_IOPL_RING1 = 0x00001000,
|
||||
EF_IOPL_RING2 = 0x00002000,
|
||||
EF_NT = 0x00004000, /* nested task */
|
||||
EF_RF = 0x00010000, /* resume */
|
||||
EF_VM = 0x00020000, /* virtual mode */
|
||||
EF_AC = 0x00040000, /* alignment */
|
||||
EF_VIF = 0x00080000, /* virtual interrupt */
|
||||
EF_VIP = 0x00100000, /* virtual interrupt pending */
|
||||
EF_ID = 0x00200000, /* id */
|
||||
};
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* !__i386__ */
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif
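
The user_mode()/user_mode_vm() helpers and the accessor macros in the merged ptrace.h above are how the rest of the kernel inspects a trap frame without caring which register layout it received. A hedged sketch of the typical pattern, loosely modeled on what a profiler or fault handler does (the function name is invented for illustration):

/* Hedged sketch: classify where a sample or trap came from using pt_regs. */
#include <asm/ptrace.h>

static unsigned long example_sample_pc(struct pt_regs *regs)
{
	/*
	 * user_mode_vm() also catches V8086 mode on 32-bit; on 64-bit it is
	 * simply an alias for user_mode(), as defined above.
	 */
	if (user_mode_vm(regs))
		return instruction_pointer(regs);	/* eip or rip, chosen by the macro */

	/* In-kernel hit: profile_pc() can look past locking code, etc. */
	return profile_pc(regs);
}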
|
||||
|
|
|
@ -1,65 +0,0 @@
|
|||
#ifndef _I386_PTRACE_H
|
||||
#define _I386_PTRACE_H
|
||||
|
||||
#include <asm/ptrace-abi.h>
|
||||
|
||||
/* this struct defines the way the registers are stored on the
|
||||
stack during a system call. */
|
||||
|
||||
struct pt_regs {
|
||||
long ebx;
|
||||
long ecx;
|
||||
long edx;
|
||||
long esi;
|
||||
long edi;
|
||||
long ebp;
|
||||
long eax;
|
||||
int xds;
|
||||
int xes;
|
||||
int xfs;
|
||||
/* int xgs; */
|
||||
long orig_eax;
|
||||
long eip;
|
||||
int xcs;
|
||||
long eflags;
|
||||
long esp;
|
||||
int xss;
|
||||
};
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <asm/vm86.h>
|
||||
#include <asm/segment.h>
|
||||
|
||||
struct task_struct;
|
||||
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
|
||||
|
||||
/*
|
||||
* user_mode_vm(regs) determines whether a register set came from user mode.
|
||||
* This is true if V8086 mode was enabled OR if the register set was from
|
||||
* protected mode with RPL-3 CS value. This tricky test checks that with
|
||||
* one comparison. Many places in the kernel can bypass this full check
|
||||
* if they have already ruled out V8086 mode, so user_mode(regs) can be used.
|
||||
*/
|
||||
static inline int user_mode(struct pt_regs *regs)
|
||||
{
|
||||
return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
|
||||
}
|
||||
static inline int user_mode_vm(struct pt_regs *regs)
|
||||
{
|
||||
return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
|
||||
}
|
||||
static inline int v8086_mode(struct pt_regs *regs)
|
||||
{
|
||||
return (regs->eflags & VM_MASK);
|
||||
}
|
||||
|
||||
#define instruction_pointer(regs) ((regs)->eip)
|
||||
#define frame_pointer(regs) ((regs)->ebp)
|
||||
#define stack_pointer(regs) ((regs)->esp)
|
||||
#define regs_return_value(regs) ((regs)->eax)
|
||||
|
||||
extern unsigned long profile_pc(struct pt_regs *regs);
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif
|
|
@ -1,80 +0,0 @@
|
|||
#ifndef _X86_64_PTRACE_H
|
||||
#define _X86_64_PTRACE_H
|
||||
|
||||
#include <linux/compiler.h> /* For __user */
|
||||
#include <asm/ptrace-abi.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
struct pt_regs {
|
||||
unsigned long r15;
|
||||
unsigned long r14;
|
||||
unsigned long r13;
|
||||
unsigned long r12;
|
||||
unsigned long rbp;
|
||||
unsigned long rbx;
|
||||
/* arguments: non interrupts/non tracing syscalls only save upto here*/
|
||||
unsigned long r11;
|
||||
unsigned long r10;
|
||||
unsigned long r9;
|
||||
unsigned long r8;
|
||||
unsigned long rax;
|
||||
unsigned long rcx;
|
||||
unsigned long rdx;
|
||||
unsigned long rsi;
|
||||
unsigned long rdi;
|
||||
unsigned long orig_rax;
|
||||
/* end of arguments */
|
||||
/* cpu exception frame or undefined */
|
||||
unsigned long rip;
|
||||
unsigned long cs;
|
||||
unsigned long eflags;
|
||||
unsigned long rsp;
|
||||
unsigned long ss;
|
||||
/* top of stack page */
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
|
||||
#define user_mode(regs) (!!((regs)->cs & 3))
|
||||
#define user_mode_vm(regs) user_mode(regs)
|
||||
#define instruction_pointer(regs) ((regs)->rip)
|
||||
#define frame_pointer(regs) ((regs)->rbp)
|
||||
#define stack_pointer(regs) ((regs)->rsp)
|
||||
#define regs_return_value(regs) ((regs)->rax)
|
||||
|
||||
extern unsigned long profile_pc(struct pt_regs *regs);
|
||||
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
|
||||
|
||||
struct task_struct;
|
||||
|
||||
extern unsigned long
|
||||
convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
|
||||
|
||||
enum {
|
||||
EF_CF = 0x00000001,
|
||||
EF_PF = 0x00000004,
|
||||
EF_AF = 0x00000010,
|
||||
EF_ZF = 0x00000040,
|
||||
EF_SF = 0x00000080,
|
||||
EF_TF = 0x00000100,
|
||||
EF_IE = 0x00000200,
|
||||
EF_DF = 0x00000400,
|
||||
EF_OF = 0x00000800,
|
||||
EF_IOPL = 0x00003000,
|
||||
EF_IOPL_RING0 = 0x00000000,
|
||||
EF_IOPL_RING1 = 0x00001000,
|
||||
EF_IOPL_RING2 = 0x00002000,
|
||||
EF_NT = 0x00004000, /* nested task */
|
||||
EF_RF = 0x00010000, /* resume */
|
||||
EF_VM = 0x00020000, /* virtual mode */
|
||||
EF_AC = 0x00040000, /* alignment */
|
||||
EF_VIF = 0x00080000, /* virtual interrupt */
|
||||
EF_VIP = 0x00100000, /* virtual interrupt pending */
|
||||
EF_ID = 0x00200000, /* id */
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
|
@ -1,5 +1,72 @@
|
|||
#ifdef CONFIG_X86_32
|
||||
# include "required-features_32.h"
|
||||
#ifndef _ASM_REQUIRED_FEATURES_H
|
||||
#define _ASM_REQUIRED_FEATURES_H 1
|
||||
|
||||
/* Define minimum CPUID feature set for kernel These bits are checked
|
||||
really early to actually display a visible error message before the
|
||||
kernel dies. Make sure to assign features to the proper mask!
|
||||
|
||||
Some requirements that are not in CPUID yet are also in the
|
||||
CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
|
||||
|
||||
The real information is in arch/x86/Kconfig.cpu, this just converts
|
||||
the CONFIGs into a bitmask */
|
||||
|
||||
#ifndef CONFIG_MATH_EMULATION
|
||||
# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
|
||||
#else
|
||||
# include "required-features_64.h"
|
||||
# define NEED_FPU 0
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
|
||||
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
|
||||
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
|
||||
#else
|
||||
# define NEED_PAE 0
|
||||
# define NEED_CX8 0
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
|
||||
# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
|
||||
#else
|
||||
# define NEED_CMOV 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_USE_3DNOW
|
||||
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
|
||||
#else
|
||||
# define NEED_3DNOW 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
|
||||
#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
|
||||
#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
|
||||
#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
|
||||
#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
|
||||
#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
|
||||
#define NEED_LM (1<<(X86_FEATURE_LM & 31))
|
||||
#else
|
||||
#define NEED_PSE 0
|
||||
#define NEED_MSR 0
|
||||
#define NEED_PGE 0
|
||||
#define NEED_FXSR 0
|
||||
#define NEED_XMM 0
|
||||
#define NEED_XMM2 0
|
||||
#define NEED_LM 0
|
||||
#endif
|
||||
|
||||
#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
|
||||
NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
|
||||
NEED_XMM|NEED_XMM2)
|
||||
#define SSE_MASK (NEED_XMM|NEED_XMM2)
|
||||
|
||||
#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
|
||||
|
||||
#define REQUIRED_MASK2 0
|
||||
#define REQUIRED_MASK3 0
|
||||
#define REQUIRED_MASK4 0
|
||||
#define REQUIRED_MASK5 0
|
||||
#define REQUIRED_MASK6 0
|
||||
#define REQUIRED_MASK7 0
|
||||
|
||||
#endif
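
REQUIRED_MASK0/REQUIRED_MASK1 exist so that very early boot code can compare them directly against the CPUID capability words and refuse to run on hardware missing a compiled-in requirement. A simplified, hedged sketch of that check (the real test lives in the early boot/verify path; the function and parameter names here are illustrative, and feature word 1 is assumed to map to the extended CPUID leaf as usual):

/* Hedged sketch: reject a CPU whose CPUID words lack a required feature. */
static int example_check_required_features(unsigned int cpuid_1_edx,
					   unsigned int cpuid_ext_edx)
{
	/*
	 * cpuid_1_edx:   CPUID(1).EDX          -> feature word 0
	 * cpuid_ext_edx: CPUID(0x80000001).EDX -> feature word 1
	 */
	if ((cpuid_1_edx & REQUIRED_MASK0) != REQUIRED_MASK0)
		return -1;	/* e.g. PAE/CMOV/SSE missing where the kernel needs it */
	if ((cpuid_ext_edx & REQUIRED_MASK1) != REQUIRED_MASK1)
		return -1;	/* e.g. no long mode under a 64-bit kernel */
	return 0;
}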
|
||||
|
|
|
@ -1,55 +0,0 @@
|
|||
#ifndef _ASM_REQUIRED_FEATURES_H
|
||||
#define _ASM_REQUIRED_FEATURES_H 1
|
||||
|
||||
/* Define minimum CPUID feature set for kernel These bits are checked
|
||||
really early to actually display a visible error message before the
|
||||
kernel dies. Make sure to assign features to the proper mask!
|
||||
|
||||
Some requirements that are not in CPUID yet are also in the
|
||||
CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
|
||||
|
||||
The real information is in arch/i386/Kconfig.cpu, this just converts
|
||||
the CONFIGs into a bitmask */
|
||||
|
||||
#ifndef CONFIG_MATH_EMULATION
|
||||
# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
|
||||
#else
|
||||
# define NEED_FPU 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
|
||||
#else
|
||||
# define NEED_PAE 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_CMOV
|
||||
# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
|
||||
#else
|
||||
# define NEED_CMOV 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
|
||||
#else
|
||||
# define NEED_CX8 0
|
||||
#endif
|
||||
|
||||
#define REQUIRED_MASK0 (NEED_FPU|NEED_PAE|NEED_CMOV|NEED_CX8)
|
||||
|
||||
#ifdef CONFIG_X86_USE_3DNOW
|
||||
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
|
||||
#else
|
||||
# define NEED_3DNOW 0
|
||||
#endif
|
||||
|
||||
#define REQUIRED_MASK1 (NEED_3DNOW)
|
||||
|
||||
#define REQUIRED_MASK2 0
|
||||
#define REQUIRED_MASK3 0
|
||||
#define REQUIRED_MASK4 0
|
||||
#define REQUIRED_MASK5 0
|
||||
#define REQUIRED_MASK6 0
|
||||
#define REQUIRED_MASK7 0
|
||||
|
||||
#endif
|
|
@ -1,46 +0,0 @@
|
|||
#ifndef _ASM_REQUIRED_FEATURES_H
|
||||
#define _ASM_REQUIRED_FEATURES_H 1
|
||||
|
||||
/* Define minimum CPUID feature set for kernel These bits are checked
|
||||
really early to actually display a visible error message before the
|
||||
kernel dies. Make sure to assign features to the proper mask!
|
||||
|
||||
The real information is in arch/x86_64/Kconfig.cpu, this just converts
|
||||
the CONFIGs into a bitmask */
|
||||
|
||||
/* x86-64 baseline features */
|
||||
#define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
|
||||
#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
|
||||
#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
|
||||
#define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
|
||||
#define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
|
||||
#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
|
||||
#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
|
||||
#define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
|
||||
#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
|
||||
#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
|
||||
|
||||
#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
|
||||
NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
|
||||
NEED_XMM|NEED_XMM2)
|
||||
#define SSE_MASK (NEED_XMM|NEED_XMM2)
|
||||
|
||||
/* x86-64 baseline features */
|
||||
#define NEED_LM (1<<(X86_FEATURE_LM & 31))
|
||||
|
||||
#ifdef CONFIG_X86_USE_3DNOW
|
||||
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
|
||||
#else
|
||||
# define NEED_3DNOW 0
|
||||
#endif
|
||||
|
||||
#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
|
||||
|
||||
#define REQUIRED_MASK2 0
|
||||
#define REQUIRED_MASK3 0
|
||||
#define REQUIRED_MASK4 0
|
||||
#define REQUIRED_MASK5 0
|
||||
#define REQUIRED_MASK6 0
|
||||
#define REQUIRED_MASK7 0
|
||||
|
||||
#endif
|
|
@ -1,13 +1,63 @@
|
|||
#ifndef _ASM_X86_SETUP_H
|
||||
#define _ASM_X86_SETUP_H
|
||||
|
||||
#define COMMAND_LINE_SIZE 2048
|
||||
|
||||
#ifdef __KERNEL__
|
||||
# ifdef CONFIG_X86_32
|
||||
# include "setup_32.h"
|
||||
# else
|
||||
# include "setup_64.h"
|
||||
# endif
|
||||
#else
|
||||
# ifdef __i386__
|
||||
# include "setup_32.h"
|
||||
# else
|
||||
# include "setup_64.h"
|
||||
# endif
|
||||
|
||||
#ifdef __i386__
|
||||
|
||||
#include <linux/pfn.h>
|
||||
/*
|
||||
* Reserved space for vmalloc and iomap - defined in asm/page.h
|
||||
*/
|
||||
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
|
||||
#define MAX_NONPAE_PFN (1 << 20)
|
||||
|
||||
#endif /* __i386__ */
|
||||
|
||||
#define PARAM_SIZE 4096 /* sizeof(struct boot_params) */
|
||||
|
||||
#define OLD_CL_MAGIC 0xA33F
|
||||
#define OLD_CL_ADDRESS 0x020 /* Relative to real mode data */
|
||||
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <asm/bootparam.h>
|
||||
|
||||
#ifndef _SETUP
|
||||
|
||||
/*
|
||||
* This is set up by the setup-routine at boot-time
|
||||
*/
|
||||
extern struct boot_params boot_params;
|
||||
|
||||
#ifdef __i386__
|
||||
/*
|
||||
* Do NOT EVER look at the BIOS memory size location.
|
||||
* It does not work on many machines.
|
||||
*/
|
||||
#define LOWMEMSIZE() (0x9f000)
|
||||
|
||||
struct e820entry;
|
||||
|
||||
char * __init machine_specific_memory_setup(void);
|
||||
char *memory_setup(void);
|
||||
|
||||
int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
|
||||
int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
|
||||
void __init add_memory_region(unsigned long long start,
|
||||
unsigned long long size, int type);
|
||||
|
||||
extern unsigned long init_pg_tables_end;
|
||||
|
||||
#ifndef CONFIG_PARAVIRT
|
||||
#define paravirt_post_allocator_init() do {} while (0)
|
||||
#endif
|
||||
|
||||
#endif /* __i386__ */
|
||||
#endif /* _SETUP */
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _ASM_X86_SETUP_H */
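
OLD_CL_MAGIC, OLD_CL_ADDRESS and NEW_CL_POINTER encode the two command-line handoff protocols: old-style loaders store a magic word and a 16-bit offset in the real-mode data, new-style loaders store a 32-bit pointer at offset 0x228. A hedged sketch of how boot code decides between them, along the lines of the boot/main.c change earlier in this series (plain pointer arithmetic stands in for real-mode segment addressing):

/* Hedged sketch: locate the kernel command line from the real-mode data. */
static unsigned long example_find_cmdline(const unsigned char *real_mode_data)
{
	unsigned long cmdline = *(const unsigned int *)(real_mode_data + NEW_CL_POINTER);

	if (!cmdline) {
		/* Fall back to the old protocol: magic word plus 16-bit offset. */
		const unsigned short *old =
			(const unsigned short *)(real_mode_data + OLD_CL_ADDRESS);

		if (old[0] == OLD_CL_MAGIC)
			cmdline = (unsigned long)real_mode_data + old[1];
	}
	return cmdline;		/* 0 means no command line was passed */
}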
|
||||
|
|
|
@ -1,63 +0,0 @@
|
|||
/*
|
||||
* Just a place holder. We don't want to have to test x86 before
|
||||
* we include stuff
|
||||
*/
|
||||
|
||||
#ifndef _i386_SETUP_H
|
||||
#define _i386_SETUP_H
|
||||
|
||||
#define COMMAND_LINE_SIZE 2048
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/pfn.h>
|
||||
|
||||
/*
|
||||
* Reserved space for vmalloc and iomap - defined in asm/page.h
|
||||
*/
|
||||
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
|
||||
#define MAX_NONPAE_PFN (1 << 20)
|
||||
|
||||
#define PARAM_SIZE 4096
|
||||
|
||||
#define OLD_CL_MAGIC_ADDR 0x90020
|
||||
#define OLD_CL_MAGIC 0xA33F
|
||||
#define OLD_CL_BASE_ADDR 0x90000
|
||||
#define OLD_CL_OFFSET 0x90022
|
||||
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <asm/bootparam.h>
|
||||
|
||||
/*
|
||||
* This is set up by the setup-routine at boot-time
|
||||
*/
|
||||
extern struct boot_params boot_params;
|
||||
|
||||
/*
|
||||
* Do NOT EVER look at the BIOS memory size location.
|
||||
* It does not work on many machines.
|
||||
*/
|
||||
#define LOWMEMSIZE() (0x9f000)
|
||||
|
||||
struct e820entry;
|
||||
|
||||
char * __init machine_specific_memory_setup(void);
|
||||
char *memory_setup(void);
|
||||
|
||||
int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
|
||||
int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
|
||||
void __init add_memory_region(unsigned long long start,
|
||||
unsigned long long size, int type);
|
||||
|
||||
extern unsigned long init_pg_tables_end;
|
||||
|
||||
#ifndef CONFIG_PARAVIRT
|
||||
#define paravirt_post_allocator_init() do {} while (0)
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _i386_SETUP_H */
|
|
@ -1,19 +0,0 @@
|
|||
#ifndef _x8664_SETUP_H
|
||||
#define _x8664_SETUP_H
|
||||
|
||||
#define COMMAND_LINE_SIZE 2048
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <asm/bootparam.h>
|
||||
|
||||
/*
|
||||
* This is set up by the setup-routine at boot-time
|
||||
*/
|
||||
extern struct boot_params boot_params;
|
||||
|
||||
#endif /* not __ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif
|
|
@ -1,13 +1,51 @@
|
|||
#ifdef __KERNEL__
|
||||
# ifdef CONFIG_X86_32
|
||||
# include "shmbuf_32.h"
|
||||
# else
|
||||
# include "shmbuf_64.h"
|
||||
# endif
|
||||
#else
|
||||
# ifdef __i386__
|
||||
# include "shmbuf_32.h"
|
||||
# else
|
||||
# include "shmbuf_64.h"
|
||||
# endif
|
||||
#ifndef _ASM_X86_SHMBUF_H
|
||||
#define _ASM_X86_SHMBUF_H
|
||||
|
||||
/*
|
||||
* The shmid64_ds structure for x86 architecture.
|
||||
* Note extra padding because this structure is passed back and forth
|
||||
* between kernel and user space.
|
||||
*
|
||||
* Pad space on 32 bit is left for:
|
||||
* - 64-bit time_t to solve y2038 problem
|
||||
* - 2 miscellaneous 32-bit values
|
||||
*
|
||||
* Pad space on 64 bit is left for:
|
||||
* - 2 miscellaneous 64-bit values
|
||||
*/
|
||||
|
||||
struct shmid64_ds {
|
||||
struct ipc64_perm shm_perm; /* operation perms */
|
||||
size_t shm_segsz; /* size of segment (bytes) */
|
||||
__kernel_time_t shm_atime; /* last attach time */
|
||||
#ifdef __i386__
|
||||
unsigned long __unused1;
|
||||
#endif
|
||||
__kernel_time_t shm_dtime; /* last detach time */
|
||||
#ifdef __i386__
|
||||
unsigned long __unused2;
|
||||
#endif
|
||||
__kernel_time_t shm_ctime; /* last change time */
|
||||
#ifdef __i386__
|
||||
unsigned long __unused3;
|
||||
#endif
|
||||
__kernel_pid_t shm_cpid; /* pid of creator */
|
||||
__kernel_pid_t shm_lpid; /* pid of last operator */
|
||||
unsigned long shm_nattch; /* no. of current attaches */
|
||||
unsigned long __unused4;
|
||||
unsigned long __unused5;
|
||||
};
|
||||
|
||||
struct shminfo64 {
|
||||
unsigned long shmmax;
|
||||
unsigned long shmmin;
|
||||
unsigned long shmmni;
|
||||
unsigned long shmseg;
|
||||
unsigned long shmall;
|
||||
unsigned long __unused1;
|
||||
unsigned long __unused2;
|
||||
unsigned long __unused3;
|
||||
unsigned long __unused4;
|
||||
};
|
||||
|
||||
#endif /* _ASM_X86_SHMBUF_H */
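
The shmid64_ds layout above is what sys_shmctl() copies out to user space; C libraries expose it through their own struct shmid_ds. A hedged user-space sketch showing the round trip (standard SysV shared-memory API, nothing specific to this patch):

/* Hedged example: create a segment and read back its shmid64_ds-derived stats. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	struct shmid_ds ds;

	if (id < 0 || shmctl(id, IPC_STAT, &ds) < 0) {
		perror("shm");
		return 1;
	}
	printf("segsz=%zu cpid=%d nattch=%lu\n",
	       (size_t)ds.shm_segsz, (int)ds.shm_cpid,
	       (unsigned long)ds.shm_nattch);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}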
|
||||
|
|
|
@ -1,42 +0,0 @@
|
|||
#ifndef _I386_SHMBUF_H
|
||||
#define _I386_SHMBUF_H
|
||||
|
||||
/*
|
||||
* The shmid64_ds structure for i386 architecture.
|
||||
* Note extra padding because this structure is passed back and forth
|
||||
* between kernel and user space.
|
||||
*
|
||||
* Pad space is left for:
|
||||
* - 64-bit time_t to solve y2038 problem
|
||||
* - 2 miscellaneous 32-bit values
|
||||
*/
|
||||
|
||||
struct shmid64_ds {
|
||||
struct ipc64_perm shm_perm; /* operation perms */
|
||||
size_t shm_segsz; /* size of segment (bytes) */
|
||||
__kernel_time_t shm_atime; /* last attach time */
|
||||
unsigned long __unused1;
|
||||
__kernel_time_t shm_dtime; /* last detach time */
|
||||
unsigned long __unused2;
|
||||
__kernel_time_t shm_ctime; /* last change time */
|
||||
unsigned long __unused3;
|
||||
__kernel_pid_t shm_cpid; /* pid of creator */
|
||||
__kernel_pid_t shm_lpid; /* pid of last operator */
|
||||
unsigned long shm_nattch; /* no. of current attaches */
|
||||
unsigned long __unused4;
|
||||
unsigned long __unused5;
|
||||
};
|
||||
|
||||
struct shminfo64 {
|
||||
unsigned long shmmax;
|
||||
unsigned long shmmin;
|
||||
unsigned long shmmni;
|
||||
unsigned long shmseg;
|
||||
unsigned long shmall;
|
||||
unsigned long __unused1;
|
||||
unsigned long __unused2;
|
||||
unsigned long __unused3;
|
||||
unsigned long __unused4;
|
||||
};
|
||||
|
||||
#endif /* _I386_SHMBUF_H */
|
|
@ -1,38 +0,0 @@
|
|||
#ifndef _X8664_SHMBUF_H
|
||||
#define _X8664_SHMBUF_H
|
||||
|
||||
/*
|
||||
* The shmid64_ds structure for x8664 architecture.
|
||||
* Note extra padding because this structure is passed back and forth
|
||||
* between kernel and user space.
|
||||
*
|
||||
* Pad space is left for:
|
||||
* - 2 miscellaneous 64-bit values
|
||||
*/
|
||||
|
||||
struct shmid64_ds {
|
||||
struct ipc64_perm shm_perm; /* operation perms */
|
||||
size_t shm_segsz; /* size of segment (bytes) */
|
||||
__kernel_time_t shm_atime; /* last attach time */
|
||||
__kernel_time_t shm_dtime; /* last detach time */
|
||||
__kernel_time_t shm_ctime; /* last change time */
|
||||
__kernel_pid_t shm_cpid; /* pid of creator */
|
||||
__kernel_pid_t shm_lpid; /* pid of last operator */
|
||||
unsigned long shm_nattch; /* no. of current attaches */
|
||||
unsigned long __unused4;
|
||||
unsigned long __unused5;
|
||||
};
|
||||
|
||||
struct shminfo64 {
|
||||
unsigned long shmmax;
|
||||
unsigned long shmmin;
|
||||
unsigned long shmmni;
|
||||
unsigned long shmseg;
|
||||
unsigned long shmall;
|
||||
unsigned long __unused1;
|
||||
unsigned long __unused2;
|
||||
unsigned long __unused3;
|
||||
unsigned long __unused4;
|
||||
};
|
||||
|
||||
#endif
|
|
@ -1,13 +1,138 @@
|
|||
#ifdef __KERNEL__
|
||||
# ifdef CONFIG_X86_32
|
||||
# include "sigcontext_32.h"
|
||||
# else
|
||||
# include "sigcontext_64.h"
|
||||
# endif
|
||||
#else
|
||||
# ifdef __i386__
|
||||
# include "sigcontext_32.h"
|
||||
# else
|
||||
# include "sigcontext_64.h"
|
||||
# endif
|
||||
#ifndef _ASM_X86_SIGCONTEXT_H
|
||||
#define _ASM_X86_SIGCONTEXT_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/types.h>
|
||||
|
||||
#ifdef __i386__
|
||||
/*
|
||||
* As documented in the iBCS2 standard..
|
||||
*
|
||||
* The first part of "struct _fpstate" is just the normal i387
|
||||
* hardware setup, the extra "status" word is used to save the
|
||||
* coprocessor status word before entering the handler.
|
||||
*
|
||||
* Pentium III FXSR, SSE support
|
||||
* Gareth Hughes <gareth@valinux.com>, May 2000
|
||||
*
|
||||
* The FPU state data structure has had to grow to accommodate the
|
||||
* extended FPU state required by the Streaming SIMD Extensions.
|
||||
* There is no documented standard to accomplish this at the moment.
|
||||
*/
|
||||
struct _fpreg {
|
||||
unsigned short significand[4];
|
||||
unsigned short exponent;
|
||||
};
|
||||
|
||||
struct _fpxreg {
|
||||
unsigned short significand[4];
|
||||
unsigned short exponent;
|
||||
unsigned short padding[3];
|
||||
};
|
||||
|
||||
struct _xmmreg {
|
||||
unsigned long element[4];
|
||||
};
|
||||
|
||||
struct _fpstate {
|
||||
/* Regular FPU environment */
|
||||
unsigned long cw;
|
||||
unsigned long sw;
|
||||
unsigned long tag;
|
||||
unsigned long ipoff;
|
||||
unsigned long cssel;
|
||||
unsigned long dataoff;
|
||||
unsigned long datasel;
|
||||
struct _fpreg _st[8];
|
||||
unsigned short status;
|
||||
unsigned short magic; /* 0xffff = regular FPU data only */
|
||||
|
||||
/* FXSR FPU environment */
|
||||
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
|
||||
unsigned long mxcsr;
|
||||
unsigned long reserved;
|
||||
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
|
||||
struct _xmmreg _xmm[8];
|
||||
unsigned long padding[56];
|
||||
};
|
||||
|
||||
#define X86_FXSR_MAGIC 0x0000
|
||||
|
||||
struct sigcontext {
|
||||
unsigned short gs, __gsh;
|
||||
unsigned short fs, __fsh;
|
||||
unsigned short es, __esh;
|
||||
unsigned short ds, __dsh;
|
||||
unsigned long edi;
|
||||
unsigned long esi;
|
||||
unsigned long ebp;
|
||||
unsigned long esp;
|
||||
unsigned long ebx;
|
||||
unsigned long edx;
|
||||
unsigned long ecx;
|
||||
unsigned long eax;
|
||||
unsigned long trapno;
|
||||
unsigned long err;
|
||||
unsigned long eip;
|
||||
unsigned short cs, __csh;
|
||||
unsigned long eflags;
|
||||
unsigned long esp_at_signal;
|
||||
unsigned short ss, __ssh;
|
||||
struct _fpstate __user * fpstate;
|
||||
unsigned long oldmask;
|
||||
unsigned long cr2;
|
||||
};
|
||||
|
||||
#else /* __i386__ */
|
||||
|
||||
/* FXSAVE frame */
|
||||
/* Note: reserved1/2 may someday contain valuable data. Always save/restore
|
||||
them when you change signal frames. */
|
||||
struct _fpstate {
|
||||
__u16 cwd;
|
||||
__u16 swd;
|
||||
__u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
|
||||
__u16 fop;
|
||||
__u64 rip;
|
||||
__u64 rdp;
|
||||
__u32 mxcsr;
|
||||
__u32 mxcsr_mask;
|
||||
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */
|
||||
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
|
||||
__u32 reserved2[24];
|
||||
};
|
||||
|
||||
struct sigcontext {
|
||||
unsigned long r8;
|
||||
unsigned long r9;
|
||||
unsigned long r10;
|
||||
unsigned long r11;
|
||||
unsigned long r12;
|
||||
unsigned long r13;
|
||||
unsigned long r14;
|
||||
unsigned long r15;
|
||||
unsigned long rdi;
|
||||
unsigned long rsi;
|
||||
unsigned long rbp;
|
||||
unsigned long rbx;
|
||||
unsigned long rdx;
|
||||
unsigned long rax;
|
||||
unsigned long rcx;
|
||||
unsigned long rsp;
|
||||
unsigned long rip;
|
||||
unsigned long eflags; /* RFLAGS */
|
||||
unsigned short cs;
|
||||
unsigned short gs;
|
||||
unsigned short fs;
|
||||
unsigned short __pad0;
|
||||
unsigned long err;
|
||||
unsigned long trapno;
|
||||
unsigned long oldmask;
|
||||
unsigned long cr2;
|
||||
struct _fpstate __user *fpstate; /* zero when no FPU context */
|
||||
unsigned long reserved1[8];
|
||||
};
|
||||
|
||||
#endif /* !__i386__ */
|
||||
|
||||
#endif
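
struct sigcontext above is what ends up inside the ucontext handed to SA_SIGINFO handlers; user code normally reaches it through the C library's <ucontext.h> view rather than this raw layout. A hedged sketch (glibc-style gregs indexing assumed; REG_EIP vs REG_RIP depends on the ABI):

/* Hedged example: print the faulting instruction pointer from a SIGSEGV handler. */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <ucontext.h>

static void segv_handler(int sig, siginfo_t *info, void *uc_void)
{
	ucontext_t *uc = uc_void;
#ifdef __x86_64__
	unsigned long pc = uc->uc_mcontext.gregs[REG_RIP];
#else
	unsigned long pc = uc->uc_mcontext.gregs[REG_EIP];
#endif
	fprintf(stderr, "SIGSEGV at address %p, pc=%#lx\n", info->si_addr, pc);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv_handler,
				.sa_flags = SA_SIGINFO };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	*(volatile int *)0 = 0;		/* deliberately fault */
	return 0;
}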
|
||||
|
|
|
@ -1,85 +0,0 @@
|
|||
#ifndef _ASMi386_SIGCONTEXT_H
|
||||
#define _ASMi386_SIGCONTEXT_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/*
|
||||
* As documented in the iBCS2 standard..
|
||||
*
|
||||
* The first part of "struct _fpstate" is just the normal i387
|
||||
* hardware setup, the extra "status" word is used to save the
|
||||
* coprocessor status word before entering the handler.
|
||||
*
|
||||
* Pentium III FXSR, SSE support
|
||||
* Gareth Hughes <gareth@valinux.com>, May 2000
|
||||
*
|
||||
* The FPU state data structure has had to grow to accommodate the
|
||||
* extended FPU state required by the Streaming SIMD Extensions.
|
||||
* There is no documented standard to accomplish this at the moment.
|
||||
*/
|
||||
struct _fpreg {
|
||||
unsigned short significand[4];
|
||||
unsigned short exponent;
|
||||
};
|
||||
|
||||
struct _fpxreg {
|
||||
unsigned short significand[4];
|
||||
unsigned short exponent;
|
||||
unsigned short padding[3];
|
||||
};
|
||||
|
||||
struct _xmmreg {
|
||||
unsigned long element[4];
|
||||
};
|
||||
|
||||
struct _fpstate {
|
||||
/* Regular FPU environment */
|
||||
unsigned long cw;
|
||||
unsigned long sw;
|
||||
unsigned long tag;
|
||||
unsigned long ipoff;
|
||||
unsigned long cssel;
|
||||
unsigned long dataoff;
|
||||
unsigned long datasel;
|
||||
struct _fpreg _st[8];
|
||||
unsigned short status;
|
||||
unsigned short magic; /* 0xffff = regular FPU data only */
|
||||
|
||||
/* FXSR FPU environment */
|
||||
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
|
||||
unsigned long mxcsr;
|
||||
unsigned long reserved;
|
||||
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
|
||||
struct _xmmreg _xmm[8];
|
||||
unsigned long padding[56];
|
||||
};
|
||||
|
||||
#define X86_FXSR_MAGIC 0x0000
|
||||
|
||||
struct sigcontext {
|
||||
unsigned short gs, __gsh;
|
||||
unsigned short fs, __fsh;
|
||||
unsigned short es, __esh;
|
||||
unsigned short ds, __dsh;
|
||||
unsigned long edi;
|
||||
unsigned long esi;
|
||||
unsigned long ebp;
|
||||
unsigned long esp;
|
||||
unsigned long ebx;
|
||||
unsigned long edx;
|
||||
unsigned long ecx;
|
||||
unsigned long eax;
|
||||
unsigned long trapno;
|
||||
unsigned long err;
|
||||
unsigned long eip;
|
||||
unsigned short cs, __csh;
|
||||
unsigned long eflags;
|
||||
unsigned long esp_at_signal;
|
||||
unsigned short ss, __ssh;
|
||||
struct _fpstate __user * fpstate;
|
||||
unsigned long oldmask;
|
||||
unsigned long cr2;
|
||||
};
|
||||
|
||||
|
||||
#endif
|
|
@ -1,55 +0,0 @@
|
|||
#ifndef _ASM_X86_64_SIGCONTEXT_H
|
||||
#define _ASM_X86_64_SIGCONTEXT_H
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/* FXSAVE frame */
|
||||
/* Note: reserved1/2 may someday contain valuable data. Always save/restore
|
||||
them when you change signal frames. */
|
||||
struct _fpstate {
|
||||
__u16 cwd;
|
||||
__u16 swd;
|
||||
__u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
|
||||
__u16 fop;
|
||||
__u64 rip;
|
||||
__u64 rdp;
|
||||
__u32 mxcsr;
|
||||
__u32 mxcsr_mask;
|
||||
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */
|
||||
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
|
||||
__u32 reserved2[24];
|
||||
};
|
||||
|
||||
struct sigcontext {
|
||||
unsigned long r8;
|
||||
unsigned long r9;
|
||||
unsigned long r10;
|
||||
unsigned long r11;
|
||||
unsigned long r12;
|
||||
unsigned long r13;
|
||||
unsigned long r14;
|
||||
unsigned long r15;
|
||||
unsigned long rdi;
|
||||
unsigned long rsi;
|
||||
unsigned long rbp;
|
||||
unsigned long rbx;
|
||||
unsigned long rdx;
|
||||
unsigned long rax;
|
||||
unsigned long rcx;
|
||||
unsigned long rsp;
|
||||
unsigned long rip;
|
||||
unsigned long eflags; /* RFLAGS */
|
||||
unsigned short cs;
|
||||
unsigned short gs;
|
||||
unsigned short fs;
|
||||
unsigned short __pad0;
|
||||
unsigned long err;
|
||||
unsigned long trapno;
|
||||
unsigned long oldmask;
|
||||
unsigned long cr2;
|
||||
struct _fpstate __user *fpstate; /* zero when no FPU context */
|
||||
unsigned long reserved1[8];
|
||||
};
|
||||
|
||||
#endif
|
|
@ -1,13 +1,266 @@
|
|||
#ifndef _ASM_X86_SIGNAL_H
|
||||
#define _ASM_X86_SIGNAL_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/types.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/* Avoid too many header ordering problems. */
|
||||
struct siginfo;
|
||||
|
||||
#ifdef __KERNEL__
|
||||
# ifdef CONFIG_X86_32
|
||||
# include "signal_32.h"
|
||||
# else
|
||||
# include "signal_64.h"
|
||||
# endif
|
||||
#include <linux/linkage.h>
|
||||
|
||||
/* Most things should be clean enough to redefine this at will, if care
|
||||
is taken to make libc match. */
|
||||
|
||||
#define _NSIG 64
|
||||
|
||||
#ifdef __i386__
|
||||
# define _NSIG_BPW 32
|
||||
#else
|
||||
# ifdef __i386__
|
||||
# include "signal_32.h"
|
||||
# else
|
||||
# include "signal_64.h"
|
||||
# endif
|
||||
# define _NSIG_BPW 64
|
||||
#endif
|
||||
|
||||
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
|
||||
|
||||
typedef unsigned long old_sigset_t; /* at least 32 bits */
|
||||
|
||||
typedef struct {
|
||||
unsigned long sig[_NSIG_WORDS];
|
||||
} sigset_t;
|
||||
|
||||
#else
|
||||
/* Here we must cater to libcs that poke about in kernel headers. */
|
||||
|
||||
#define NSIG 32
|
||||
typedef unsigned long sigset_t;
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#define SIGHUP 1
|
||||
#define SIGINT 2
|
||||
#define SIGQUIT 3
|
||||
#define SIGILL 4
|
||||
#define SIGTRAP 5
|
||||
#define SIGABRT 6
|
||||
#define SIGIOT 6
|
||||
#define SIGBUS 7
|
||||
#define SIGFPE 8
|
||||
#define SIGKILL 9
|
||||
#define SIGUSR1 10
|
||||
#define SIGSEGV 11
|
||||
#define SIGUSR2 12
|
||||
#define SIGPIPE 13
|
||||
#define SIGALRM 14
|
||||
#define SIGTERM 15
|
||||
#define SIGSTKFLT 16
|
||||
#define SIGCHLD 17
|
||||
#define SIGCONT 18
|
||||
#define SIGSTOP 19
|
||||
#define SIGTSTP 20
|
||||
#define SIGTTIN 21
|
||||
#define SIGTTOU 22
|
||||
#define SIGURG 23
|
||||
#define SIGXCPU 24
|
||||
#define SIGXFSZ 25
|
||||
#define SIGVTALRM 26
|
||||
#define SIGPROF 27
|
||||
#define SIGWINCH 28
|
||||
#define SIGIO 29
|
||||
#define SIGPOLL SIGIO
|
||||
/*
|
||||
#define SIGLOST 29
|
||||
*/
|
||||
#define SIGPWR 30
|
||||
#define SIGSYS 31
|
||||
#define SIGUNUSED 31
|
||||
|
||||
/* These should not be considered constants from userland. */
|
||||
#define SIGRTMIN 32
|
||||
#define SIGRTMAX _NSIG
|
||||
|
||||
/*
|
||||
* SA_FLAGS values:
|
||||
*
|
||||
* SA_ONSTACK indicates that a registered stack_t will be used.
|
||||
* SA_RESTART flag to get restarting signals (which were the default long ago)
|
||||
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
|
||||
* SA_RESETHAND clears the handler when the signal is delivered.
|
||||
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
|
||||
* SA_NODEFER prevents the current signal from being masked in the handler.
|
||||
*
|
||||
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
|
||||
* Unix names RESETHAND and NODEFER respectively.
|
||||
*/
|
||||
#define SA_NOCLDSTOP 0x00000001u
|
||||
#define SA_NOCLDWAIT 0x00000002u
|
||||
#define SA_SIGINFO 0x00000004u
|
||||
#define SA_ONSTACK 0x08000000u
|
||||
#define SA_RESTART 0x10000000u
|
||||
#define SA_NODEFER 0x40000000u
|
||||
#define SA_RESETHAND 0x80000000u
|
||||
|
||||
#define SA_NOMASK SA_NODEFER
|
||||
#define SA_ONESHOT SA_RESETHAND
|
||||
|
||||
#define SA_RESTORER 0x04000000
|
||||
|
||||
/*
|
||||
* sigaltstack controls
|
||||
*/
|
||||
#define SS_ONSTACK 1
|
||||
#define SS_DISABLE 2
|
||||
|
||||
#define MINSIGSTKSZ 2048
|
||||
#define SIGSTKSZ 8192
|
||||
|
||||
#include <asm-generic/signal.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#ifdef __i386__
|
||||
# ifdef __KERNEL__
|
||||
struct old_sigaction {
|
||||
__sighandler_t sa_handler;
|
||||
old_sigset_t sa_mask;
|
||||
unsigned long sa_flags;
|
||||
__sigrestore_t sa_restorer;
|
||||
};
|
||||
|
||||
struct sigaction {
|
||||
__sighandler_t sa_handler;
|
||||
unsigned long sa_flags;
|
||||
__sigrestore_t sa_restorer;
|
||||
sigset_t sa_mask; /* mask last for extensibility */
|
||||
};
|
||||
|
||||
struct k_sigaction {
|
||||
struct sigaction sa;
|
||||
};
|
||||
# else /* __KERNEL__ */
|
||||
/* Here we must cater to libcs that poke about in kernel headers. */
|
||||
|
||||
struct sigaction {
|
||||
union {
|
||||
__sighandler_t _sa_handler;
|
||||
void (*_sa_sigaction)(int, struct siginfo *, void *);
|
||||
} _u;
|
||||
sigset_t sa_mask;
|
||||
unsigned long sa_flags;
|
||||
void (*sa_restorer)(void);
|
||||
};
|
||||
|
||||
#define sa_handler _u._sa_handler
|
||||
#define sa_sigaction _u._sa_sigaction
|
||||
|
||||
# endif /* ! __KERNEL__ */
|
||||
#else /* __i386__ */
|
||||
|
||||
struct sigaction {
|
||||
__sighandler_t sa_handler;
|
||||
unsigned long sa_flags;
|
||||
__sigrestore_t sa_restorer;
|
||||
sigset_t sa_mask; /* mask last for extensibility */
|
||||
};
|
||||
|
||||
struct k_sigaction {
|
||||
struct sigaction sa;
|
||||
};
|
||||
|
||||
#endif /* !__i386__ */
|
||||
|
||||
typedef struct sigaltstack {
|
||||
void __user *ss_sp;
|
||||
int ss_flags;
|
||||
size_t ss_size;
|
||||
} stack_t;
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <asm/sigcontext.h>
|
||||
|
||||
#ifdef __i386__

#define __HAVE_ARCH_SIG_BITOPS

#define sigaddset(set,sig)		   \
	(__builtin_constant_p(sig) ?	   \
	 __const_sigaddset((set),(sig)) :  \
	 __gen_sigaddset((set),(sig)))
|
||||
|
||||
static __inline__ void __gen_sigaddset(sigset_t *set, int _sig)
|
||||
{
|
||||
__asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
|
||||
}
|
||||
|
||||
static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
|
||||
{
|
||||
unsigned long sig = _sig - 1;
|
||||
set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
|
||||
}
|
||||
|
||||
#define sigdelset(set,sig) \
|
||||
(__builtin_constant_p(sig) ? \
|
||||
__const_sigdelset((set),(sig)) : \
|
||||
__gen_sigdelset((set),(sig)))
|
||||
|
||||
|
||||
static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
|
||||
{
|
||||
__asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
|
||||
}
|
||||
|
||||
static __inline__ void __const_sigdelset(sigset_t *set, int _sig)
|
||||
{
|
||||
unsigned long sig = _sig - 1;
|
||||
set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
|
||||
}
|
||||
|
||||
static __inline__ int __const_sigismember(sigset_t *set, int _sig)
|
||||
{
|
||||
unsigned long sig = _sig - 1;
|
||||
return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
|
||||
}
|
||||
|
||||
static __inline__ int __gen_sigismember(sigset_t *set, int _sig)
|
||||
{
|
||||
int ret;
|
||||
__asm__("btl %2,%1\n\tsbbl %0,%0"
|
||||
: "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define sigismember(set,sig) \
|
||||
(__builtin_constant_p(sig) ? \
|
||||
__const_sigismember((set),(sig)) : \
|
||||
__gen_sigismember((set),(sig)))
|
||||
|
||||
static __inline__ int sigfindinword(unsigned long word)
|
||||
{
|
||||
__asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
|
||||
return word;
|
||||
}
|
||||
|
||||
struct pt_regs;
|
||||
|
||||
#define ptrace_signal_deliver(regs, cookie) \
|
||||
do { \
|
||||
if (current->ptrace & PT_DTRACE) { \
|
||||
current->ptrace &= ~PT_DTRACE; \
|
||||
(regs)->eflags &= ~TF_MASK; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#else /* __i386__ */
|
||||
|
||||
#undef __HAVE_ARCH_SIG_BITOPS
|
||||
|
||||
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
|
||||
|
||||
#endif /* !__i386__ */
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif
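
The SA_* flags and sigset helpers above are consumed through the familiar sigaction()/sigprocmask() calls. A small hedged user-space example tying them together (user space gets its sigaddset() from libc; the kernel uses the btsl-based helpers defined above):

/* Hedged example: install a restarting SIGUSR1 handler and block SIGUSR2. */
#include <signal.h>
#include <unistd.h>

static void on_usr1(int sig)
{
	(void)sig;	/* async-signal-safe: nothing to do here */
}

int main(void)
{
	struct sigaction sa = { .sa_handler = on_usr1, .sa_flags = SA_RESTART };
	sigset_t block;

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR2);
	sigprocmask(SIG_BLOCK, &block, NULL);

	pause();	/* wait for SIGUSR1 */
	return 0;
}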
|
||||
|
|
|
@ -1,232 +0,0 @@
|
|||
#ifndef _ASMi386_SIGNAL_H
|
||||
#define _ASMi386_SIGNAL_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/* Avoid too many header ordering problems. */
|
||||
struct siginfo;
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
/* Most things should be clean enough to redefine this at will, if care
|
||||
is taken to make libc match. */
|
||||
|
||||
#define _NSIG 64
|
||||
#define _NSIG_BPW 32
|
||||
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
|
||||
|
||||
typedef unsigned long old_sigset_t; /* at least 32 bits */
|
||||
|
||||
typedef struct {
|
||||
unsigned long sig[_NSIG_WORDS];
|
||||
} sigset_t;
|
||||
|
||||
#else
|
||||
/* Here we must cater to libcs that poke about in kernel headers. */
|
||||
|
||||
#define NSIG 32
|
||||
typedef unsigned long sigset_t;
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#define SIGHUP 1
|
||||
#define SIGINT 2
|
||||
#define SIGQUIT 3
|
||||
#define SIGILL 4
|
||||
#define SIGTRAP 5
|
||||
#define SIGABRT 6
|
||||
#define SIGIOT 6
|
||||
#define SIGBUS 7
|
||||
#define SIGFPE 8
|
||||
#define SIGKILL 9
|
||||
#define SIGUSR1 10
|
||||
#define SIGSEGV 11
|
||||
#define SIGUSR2 12
|
||||
#define SIGPIPE 13
|
||||
#define SIGALRM 14
|
||||
#define SIGTERM 15
|
||||
#define SIGSTKFLT 16
|
||||
#define SIGCHLD 17
|
||||
#define SIGCONT 18
|
||||
#define SIGSTOP 19
|
||||
#define SIGTSTP 20
|
||||
#define SIGTTIN 21
|
||||
#define SIGTTOU 22
|
||||
#define SIGURG 23
|
||||
#define SIGXCPU 24
|
||||
#define SIGXFSZ 25
|
||||
#define SIGVTALRM 26
|
||||
#define SIGPROF 27
|
||||
#define SIGWINCH 28
|
||||
#define SIGIO 29
|
||||
#define SIGPOLL SIGIO
|
||||
/*
|
||||
#define SIGLOST 29
|
||||
*/
|
||||
#define SIGPWR 30
|
||||
#define SIGSYS 31
|
||||
#define SIGUNUSED 31
|
||||
|
||||
/* These should not be considered constants from userland. */
|
||||
#define SIGRTMIN 32
|
||||
#define SIGRTMAX _NSIG
|
||||
|
||||
/*
|
||||
* SA_FLAGS values:
|
||||
*
|
||||
* SA_ONSTACK indicates that a registered stack_t will be used.
|
||||
* SA_RESTART flag to get restarting signals (which were the default long ago)
|
||||
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
|
||||
* SA_RESETHAND clears the handler when the signal is delivered.
|
||||
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
|
||||
* SA_NODEFER prevents the current signal from being masked in the handler.
|
||||
*
|
||||
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
|
||||
* Unix names RESETHAND and NODEFER respectively.
|
||||
*/
|
||||
#define SA_NOCLDSTOP 0x00000001u
|
||||
#define SA_NOCLDWAIT 0x00000002u
|
||||
#define SA_SIGINFO 0x00000004u
|
||||
#define SA_ONSTACK 0x08000000u
|
||||
#define SA_RESTART 0x10000000u
|
||||
#define SA_NODEFER 0x40000000u
|
||||
#define SA_RESETHAND 0x80000000u
|
||||
|
||||
#define SA_NOMASK SA_NODEFER
|
||||
#define SA_ONESHOT SA_RESETHAND
|
||||
|
||||
#define SA_RESTORER 0x04000000
|
||||
|
||||
/*
|
||||
* sigaltstack controls
|
||||
*/
|
||||
#define SS_ONSTACK 1
|
||||
#define SS_DISABLE 2
|
||||
|
||||
#define MINSIGSTKSZ 2048
|
||||
#define SIGSTKSZ 8192
|
||||
|
||||
#include <asm-generic/signal.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
struct old_sigaction {
|
||||
__sighandler_t sa_handler;
|
||||
old_sigset_t sa_mask;
|
||||
unsigned long sa_flags;
|
||||
__sigrestore_t sa_restorer;
|
||||
};
|
||||
|
||||
struct sigaction {
|
||||
__sighandler_t sa_handler;
|
||||
unsigned long sa_flags;
|
||||
__sigrestore_t sa_restorer;
|
||||
sigset_t sa_mask; /* mask last for extensibility */
|
||||
};
|
||||
|
||||
struct k_sigaction {
|
||||
struct sigaction sa;
|
||||
};
|
||||
#else
|
||||
/* Here we must cater to libcs that poke about in kernel headers. */
|
||||
|
||||
struct sigaction {
|
||||
union {
|
||||
__sighandler_t _sa_handler;
|
||||
void (*_sa_sigaction)(int, struct siginfo *, void *);
|
||||
} _u;
|
||||
sigset_t sa_mask;
|
||||
unsigned long sa_flags;
|
||||
void (*sa_restorer)(void);
|
||||
};
|
||||
|
||||
#define sa_handler _u._sa_handler
|
||||
#define sa_sigaction _u._sa_sigaction
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
typedef struct sigaltstack {
|
||||
void __user *ss_sp;
|
||||
int ss_flags;
|
||||
size_t ss_size;
|
||||
} stack_t;
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <asm/sigcontext.h>
|
||||
|
||||
#define __HAVE_ARCH_SIG_BITOPS
|
||||
|
||||
#define sigaddset(set,sig) \
|
||||
(__builtin_constant_p(sig) ? \
|
||||
__const_sigaddset((set),(sig)) : \
|
||||
__gen_sigaddset((set),(sig)))
|
||||
|
||||
static __inline__ void __gen_sigaddset(sigset_t *set, int _sig)
|
||||
{
|
||||
__asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
|
||||
}
|
||||
|
||||
static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
|
||||
{
|
||||
unsigned long sig = _sig - 1;
|
||||
set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
|
||||
}
|
||||
|
||||
#define sigdelset(set,sig) \
|
||||
(__builtin_constant_p(sig) ? \
|
||||
__const_sigdelset((set),(sig)) : \
|
||||
__gen_sigdelset((set),(sig)))
|
||||
|
||||
|
||||
static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
|
||||
{
|
||||
__asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
|
||||
}
|
||||
|
||||
static __inline__ void __const_sigdelset(sigset_t *set, int _sig)
|
||||
{
|
||||
unsigned long sig = _sig - 1;
|
||||
set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
|
||||
}
|
||||
|
||||
static __inline__ int __const_sigismember(sigset_t *set, int _sig)
|
||||
{
|
||||
unsigned long sig = _sig - 1;
|
||||
return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
|
||||
}
|
||||
|
||||
static __inline__ int __gen_sigismember(sigset_t *set, int _sig)
|
||||
{
|
||||
int ret;
|
||||
__asm__("btl %2,%1\n\tsbbl %0,%0"
|
||||
: "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define sigismember(set,sig) \
|
||||
(__builtin_constant_p(sig) ? \
|
||||
__const_sigismember((set),(sig)) : \
|
||||
__gen_sigismember((set),(sig)))
|
||||
|
||||
static __inline__ int sigfindinword(unsigned long word)
|
||||
{
|
||||
__asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
|
||||
return word;
|
||||
}
|
||||
|
||||
struct pt_regs;
|
||||
|
||||
#define ptrace_signal_deliver(regs, cookie) \
|
||||
do { \
|
||||
if (current->ptrace & PT_DTRACE) { \
|
||||
current->ptrace &= ~PT_DTRACE; \
|
||||
(regs)->eflags &= ~TF_MASK; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif
|
|
@ -1,181 +0,0 @@
|
|||
#ifndef _ASMx8664_SIGNAL_H
#define _ASMx8664_SIGNAL_H

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/time.h>

/* Avoid too many header ordering problems. */
struct siginfo;

#ifdef __KERNEL__
#include <linux/linkage.h>
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */

#define _NSIG 64
#define _NSIG_BPW 64
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)

typedef unsigned long old_sigset_t; /* at least 32 bits */

typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;


#else
/* Here we must cater to libcs that poke about in kernel headers. */

#define NSIG 32
typedef unsigned long sigset_t;

#endif /* __KERNEL__ */
#endif

#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/*
#define SIGLOST 29
*/
#define SIGPWR 30
#define SIGSYS 31
#define SIGUNUSED 31

/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX _NSIG

/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001
#define SA_NOCLDWAIT 0x00000002
#define SA_SIGINFO 0x00000004
#define SA_ONSTACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_NODEFER 0x40000000
#define SA_RESETHAND 0x80000000

#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND

#define SA_RESTORER 0x04000000

/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2

#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192

#include <asm-generic/signal.h>

#ifndef __ASSEMBLY__

struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
__sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};

struct k_sigaction {
struct sigaction sa;
};

typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;

#ifdef __KERNEL__
#include <asm/sigcontext.h>

#undef __HAVE_ARCH_SIG_BITOPS
#if 0

static inline void sigaddset(sigset_t *set, int _sig)
{
__asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
}

static inline void sigdelset(sigset_t *set, int _sig)
{
__asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
}

static inline int __const_sigismember(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1)));
}

static inline int __gen_sigismember(sigset_t *set, int _sig)
{
int ret;
__asm__("btq %2,%1\n\tsbbq %0,%0"
: "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
return ret;
}

#define sigismember(set,sig) \
(__builtin_constant_p(sig) ? \
__const_sigismember((set),(sig)) : \
__gen_sigismember((set),(sig)))

static inline int sigfindinword(unsigned long word)
{
__asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc");
return word;
}
#endif
#endif

#define ptrace_signal_deliver(regs, cookie) do { } while (0)

#endif /* __KERNEL__ */

#endif

@ -76,6 +76,8 @@ extern unsigned __cpuinitdata disabled_cpus;

#endif /* CONFIG_SMP */

#define safe_smp_processor_id() smp_processor_id()

static inline int hard_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */

@ -1,13 +1,114 @@
#ifdef __KERNEL__
# ifdef CONFIG_X86_32
# include "stat_32.h"
# else
# include "stat_64.h"
# endif
#else
# ifdef __i386__
# include "stat_32.h"
# else
# include "stat_64.h"
# endif
#ifndef _ASM_X86_STAT_H
#define _ASM_X86_STAT_H

#define STAT_HAVE_NSEC 1

#ifdef __i386__
struct stat {
unsigned long st_dev;
unsigned long st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned long st_rdev;
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};

#define STAT64_HAS_BROKEN_ST_INO 1

/* This matches struct stat64 in glibc2.1, hence the absolutely
* insane amounts of padding around dev_t's.
*/
struct stat64 {
unsigned long long st_dev;
unsigned char __pad0[4];

unsigned long __st_ino;

unsigned int st_mode;
unsigned int st_nlink;

unsigned long st_uid;
unsigned long st_gid;

unsigned long long st_rdev;
unsigned char __pad3[4];

long long st_size;
unsigned long st_blksize;

/* Number 512-byte blocks allocated. */
unsigned long long st_blocks;

unsigned long st_atime;
unsigned long st_atime_nsec;

unsigned long st_mtime;
unsigned int st_mtime_nsec;

unsigned long st_ctime;
unsigned long st_ctime_nsec;

unsigned long long st_ino;
};

#else /* __i386__ */

struct stat {
unsigned long st_dev;
unsigned long st_ino;
unsigned long st_nlink;

unsigned int st_mode;
unsigned int st_uid;
unsigned int st_gid;
unsigned int __pad0;
unsigned long st_rdev;
long st_size;
long st_blksize;
long st_blocks; /* Number 512-byte blocks allocated. */

unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
long __unused[3];
};
#endif

/* for 32bit emulation and 32 bit kernels */
struct __old_kernel_stat {
unsigned short st_dev;
unsigned short st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
#ifdef __i386__
unsigned long st_size;
unsigned long st_atime;
unsigned long st_mtime;
unsigned long st_ctime;
#else
unsigned int st_size;
unsigned int st_atime;
unsigned int st_mtime;
unsigned int st_ctime;
#endif
};

#endif

@ -1,77 +0,0 @@
#ifndef _I386_STAT_H
#define _I386_STAT_H

struct __old_kernel_stat {
unsigned short st_dev;
unsigned short st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
unsigned long st_size;
unsigned long st_atime;
unsigned long st_mtime;
unsigned long st_ctime;
};

struct stat {
unsigned long st_dev;
unsigned long st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned long st_rdev;
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};

/* This matches struct stat64 in glibc2.1, hence the absolutely
* insane amounts of padding around dev_t's.
*/
struct stat64 {
unsigned long long st_dev;
unsigned char __pad0[4];

#define STAT64_HAS_BROKEN_ST_INO 1
unsigned long __st_ino;

unsigned int st_mode;
unsigned int st_nlink;

unsigned long st_uid;
unsigned long st_gid;

unsigned long long st_rdev;
unsigned char __pad3[4];

long long st_size;
unsigned long st_blksize;

unsigned long long st_blocks; /* Number 512-byte blocks allocated. */

unsigned long st_atime;
unsigned long st_atime_nsec;

unsigned long st_mtime;
unsigned int st_mtime_nsec;

unsigned long st_ctime;
unsigned long st_ctime_nsec;

unsigned long long st_ino;
};

#define STAT_HAVE_NSEC 1

#endif

@ -1,44 +0,0 @@
#ifndef _ASM_X86_64_STAT_H
#define _ASM_X86_64_STAT_H

#define STAT_HAVE_NSEC 1

struct stat {
unsigned long st_dev;
unsigned long st_ino;
unsigned long st_nlink;

unsigned int st_mode;
unsigned int st_uid;
unsigned int st_gid;
unsigned int __pad0;
unsigned long st_rdev;
long st_size;
long st_blksize;
long st_blocks; /* Number 512-byte blocks allocated. */

unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
long __unused[3];
};

/* For 32bit emulation */
struct __old_kernel_stat {
unsigned short st_dev;
unsigned short st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
unsigned int st_size;
unsigned int st_atime;
unsigned int st_mtime;
unsigned int st_ctime;
};

#endif

@ -1,13 +1,63 @@
#ifdef __KERNEL__
# ifdef CONFIG_X86_32
# include "statfs_32.h"
# else
# include "statfs_64.h"
# endif
#ifndef _ASM_X86_STATFS_H
#define _ASM_X86_STATFS_H

#ifdef __i386__
#include <asm-generic/statfs.h>
#else
# ifdef __i386__
# include "statfs_32.h"
# else
# include "statfs_64.h"
# endif

#ifndef __KERNEL_STRICT_NAMES

#include <linux/types.h>

typedef __kernel_fsid_t fsid_t;

#endif

/*
* This is ugly -- we're already 64-bit clean, so just duplicate the
* definitions.
*/
struct statfs {
long f_type;
long f_bsize;
long f_blocks;
long f_bfree;
long f_bavail;
long f_files;
long f_ffree;
__kernel_fsid_t f_fsid;
long f_namelen;
long f_frsize;
long f_spare[5];
};

struct statfs64 {
long f_type;
long f_bsize;
long f_blocks;
long f_bfree;
long f_bavail;
long f_files;
long f_ffree;
__kernel_fsid_t f_fsid;
long f_namelen;
long f_frsize;
long f_spare[5];
};

struct compat_statfs64 {
__u32 f_type;
__u32 f_bsize;
__u64 f_blocks;
__u64 f_bfree;
__u64 f_bavail;
__u64 f_files;
__u64 f_ffree;
__kernel_fsid_t f_fsid;
__u32 f_namelen;
__u32 f_frsize;
__u32 f_spare[5];
} __attribute__((packed));

#endif /* !__i386__ */
#endif

@ -1,6 +0,0 @@
#ifndef _I386_STATFS_H
#define _I386_STATFS_H

#include <asm-generic/statfs.h>

#endif

@ -1,58 +0,0 @@
#ifndef _X86_64_STATFS_H
#define _X86_64_STATFS_H

#ifndef __KERNEL_STRICT_NAMES

#include <linux/types.h>

typedef __kernel_fsid_t fsid_t;

#endif

/*
* This is ugly -- we're already 64-bit clean, so just duplicate the
* definitions.
*/
struct statfs {
long f_type;
long f_bsize;
long f_blocks;
long f_bfree;
long f_bavail;
long f_files;
long f_ffree;
__kernel_fsid_t f_fsid;
long f_namelen;
long f_frsize;
long f_spare[5];
};

struct statfs64 {
long f_type;
long f_bsize;
long f_blocks;
long f_bfree;
long f_bavail;
long f_files;
long f_ffree;
__kernel_fsid_t f_fsid;
long f_namelen;
long f_frsize;
long f_spare[5];
};

struct compat_statfs64 {
__u32 f_type;
__u32 f_bsize;
__u64 f_blocks;
__u64 f_bfree;
__u64 f_bavail;
__u64 f_files;
__u64 f_ffree;
__kernel_fsid_t f_fsid;
__u32 f_namelen;
__u32 f_frsize;
__u32 f_spare[5];
} __attribute__((packed));

#endif

@ -3,6 +3,9 @@
* Based on code
* Copyright 2001 Patrick Mochel <mochel@osdl.org>
*/
#ifndef __ASM_X86_64_SUSPEND_H
#define __ASM_X86_64_SUSPEND_H

#include <asm/desc.h>
#include <asm/i387.h>

@ -12,8 +15,9 @@ arch_prepare_suspend(void)
return 0;
}

/* Image of the saved processor state. If you touch this, fix acpi_wakeup.S. */
/* Image of the saved processor state. If you touch this, fix acpi/wakeup.S. */
struct saved_context {
struct pt_regs regs;
u16 ds, es, fs, gs, ss;
unsigned long gs_base, gs_kernel_base, fs_base;
unsigned long cr0, cr2, cr3, cr4, cr8;

@ -29,29 +33,16 @@ struct saved_context {
unsigned long tr;
unsigned long safety;
unsigned long return_address;
unsigned long eflags;
} __attribute__((packed));

/* We'll access these from assembly, so we'd better have them outside struct */
extern unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
extern unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
extern unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
extern unsigned long saved_context_eflags;

#define loaddebug(thread,register) \
set_debugreg((thread)->debugreg##register, register)

extern void fix_processor_context(void);

extern unsigned long saved_rip;
extern unsigned long saved_rsp;
extern unsigned long saved_rbp;
extern unsigned long saved_rbx;
extern unsigned long saved_rsi;
extern unsigned long saved_rdi;

/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
extern char core_restore_code;
extern char restore_registers;

#endif /* __ASM_X86_64_SUSPEND_H */