/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>

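/*
 * Non-zero while NMI handling is suspended via stop_nmi(): do_nmi()
 * still counts NMIs but skips default_do_nmi() until restart_nmi().
 */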
static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 * only be used in NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

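/* Parse the "unknown_nmi_panic" kernel command-line option. */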
static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

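/*
 * Handle an NMI signalled through the PCI SERR# line: let EDAC claim
 * it if a handler is registered, otherwise panic (if
 * panic_on_unrecovered_nmi) or clear the SERR state and continue.
 */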
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}

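/*
 * Handle an I/O check (IOCHK) NMI: dump registers, optionally panic,
 * then clear the IOCHK condition and wait a few seconds (a udelay
 * loop) before re-enabling the line.
 */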
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_registers(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}

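/*
 * NMI with no identified reason: give the die notifier chain (and
 * MCA, if configured) a chance to claim it before panicking or
 * carrying on.
 */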
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
			NOTIFY_STOP)
		return;
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}

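/*
 * Dispatch an NMI: CPU-specific sources first via the die notifier
 * chain, then the legacy reason port (0x61) for SERR/IOCHK, and
 * finally unknown_nmi_error() if nothing claimed it.
 */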
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */
	if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	unknown_nmi_error(reason, regs);
}

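/*
 * Entry point from the NMI vector: bump the NMI count and, unless
 * NMIs are currently being ignored, run default_do_nmi().
 */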
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

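/* Suspend NMI handling; NMIs are still counted but otherwise ignored. */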
void stop_nmi(void)
{
	ignore_nmis++;
}

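/* Re-enable NMI handling after a matching stop_nmi(). */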
void restart_nmi(void)
{
	ignore_nmis--;
}