/*
 * linux/arch/i386/nmi.c
 *
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>
#include <linux/dmi.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/kdebug.h>
#include <asm/intel_arch_perfmon.h>

#include "mach_traps.h"
int unknown_nmi_panic;
int nmi_watchdog_enabled;

/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 *   - different performance counters/event selections may be reserved by
 *     different subsystems; this reservation system just tries to
 *     coordinate things a little
 */
static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);

static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
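/* Illustrative arithmetic behind the 66: with MSR_P4_BSU_ESCR0 at 0x3a0
 * and MSR_P4_CRU_ESCR5 at 0x3e2 (values as defined in asm/msr.h), the
 * highest reservation bit an ESCR can map to is 0x3e2 - 0x3a0 = 0x42 = 66.
 */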
/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

extern void show_registers(struct pt_regs *regs);
extern int unknown_nmi_panic;
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_PERFCTR0);

		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_PERFCTR0);
		case 15:
			return (msr - MSR_P4_BPU_PERFCTR0);
		}
	}
	return 0;
}
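/* Example mapping (MSR addresses illustrative, see asm/msr.h): on AMD,
 * MSR_K7_PERFCTR0 is 0xc0010004, so passing 0xc0010005 (PERFCTR1) here
 * yields reservation bit 1 in perfctr_nmi_owner.
 */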
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);

		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_EVNTSEL0);
		case 15:
			return (msr - MSR_P4_BSU_ESCR0);
		}
	}
	return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	int cpu;

	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	for_each_possible_cpu (cpu) {
		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
			return 0;
	}
	return 1;
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;
	int cpu;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	for_each_possible_cpu (cpu) {
		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
			return 0;
	}
	return 1;
}
static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{
	unsigned int counter;

	if (cpu < 0)
		cpu = smp_processor_id();

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
		return 1;
	return 0;
}
static void __release_perfctr_nmi(int cpu, unsigned int msr)
{
	unsigned int counter;

	if (cpu < 0)
		cpu = smp_processor_id();

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
}
int reserve_perfctr_nmi(unsigned int msr)
{
	int cpu, i;

	for_each_possible_cpu (cpu) {
		if (!__reserve_perfctr_nmi(cpu, msr)) {
			for_each_possible_cpu (i) {
				if (i >= cpu)
					break;
				__release_perfctr_nmi(i, msr);
			}
			return 0;
		}
	}
	return 1;
}
void release_perfctr_nmi(unsigned int msr)
{
	int cpu;

	for_each_possible_cpu (cpu) {
		__release_perfctr_nmi(cpu, msr);
	}
}
int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{
	unsigned int counter;

	if (cpu < 0)
		cpu = smp_processor_id();

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
		return 1;
	return 0;
}
static void __release_evntsel_nmi(int cpu, unsigned int msr)
{
	unsigned int counter;

	if (cpu < 0)
		cpu = smp_processor_id();

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
}
int reserve_evntsel_nmi(unsigned int msr)
{
	int cpu, i;

	for_each_possible_cpu (cpu) {
		if (!__reserve_evntsel_nmi(cpu, msr)) {
			for_each_possible_cpu (i) {
				if (i >= cpu)
					break;
				__release_evntsel_nmi(i, msr);
			}
			return 0;
		}
	}
	return 1;
}
void release_evntsel_nmi(unsigned int msr)
{
	int cpu;

	for_each_possible_cpu (cpu) {
		__release_evntsel_nmi(cpu, msr);
	}
}
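/* Hedged usage sketch for the reservation API above, the way an external
 * perfctr consumer (e.g. oprofile) might coordinate with the watchdog;
 * the MSR choice is illustrative:
 *
 *	if (reserve_perfctr_nmi(MSR_K7_PERFCTR0)) {
 *		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *			... program and use the counter ...
 *			release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *		}
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *	}
 */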
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6)
			|| (boot_cpu_data.x86 == 16));
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6));
	}
	return 0;
}
static int endflag __initdata = 0;

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (endflag == 0)
		mb();
}
#endif
static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
	u64 counter_val;
	unsigned int retval = hz;

	/*
	 * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
	 * are writable, with higher bits sign extending from bit 31.
	 * So we can only program the counter with 31 bit values, and the
	 * 32nd bit must be 1 so that bits 33..63 sign-extend to 1.
	 * Find the appropriate nmi_hz.
	 */
	counter_val = (u64)cpu_khz * 1000;
	do_div(counter_val, retval);
	if (counter_val > 0x7fffffffULL) {
		u64 count = (u64)cpu_khz * 1000;
		do_div(count, 0x7fffffffUL);
		retval = count + 1;
	}
	return retval;
}
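/* Worked example with illustrative numbers: on a 4 GHz CPU with nmi_hz == 1,
 * counter_val = 4,000,000,000, which exceeds 0x7fffffff (2,147,483,647), so
 * the watchdog rate is raised to 4,000,000,000 / 0x7fffffff + 1 = 2 Hz and
 * each programmed period then fits in the writable 31 bits.
 */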
static int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!prev_nmi_count)
		return -1;

	printk(KERN_INFO "Testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
	local_irq_enable();
	mdelay((20*1000)/nmi_hz);	/* wait 20 ticks */

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
		/* Check cpu_callin_map here because that is set
		   after the timer is started. */
		if (!cpu_isset(cpu, cpu_callin_map))
			continue;
#endif
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
				cpu,
				prev_nmi_count[cpu],
				nmi_count(cpu));
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC) {
		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

		nmi_hz = 1;

		if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
		    wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
		}
	}

	kfree(prev_nmi_count);
	return 0;
}
/* This needs to happen later in boot so counters are working */
late_initcall(check_nmi_watchdog);
static int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
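/* Boot-line usage (illustrative; the numeric values are those of the NMI_*
 * constants in include/asm-i386/nmi.h): "nmi_watchdog=2" selects
 * NMI_LOCAL_APIC, "nmi_watchdog=1" selects NMI_IO_APIC, and
 * "nmi_watchdog=0" disables the watchdog.
 */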
static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}
static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}
void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}
void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}
static void __acpi_nmi_disable(void *__unused)
{
	apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}

static void __acpi_nmi_enable(void *__unused)
{
	apic_write_around(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work. -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */
/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

static void write_watchdog_counter(unsigned int perfctr_msr, const char *descr)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsrl(perfctr_msr, 0 - count);
}

static void write_watchdog_counter32(unsigned int perfctr_msr,
			const char *descr)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsr(perfctr_msr, (u32)(-count), 0);
}
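/* Why the negation: the counter is programmed to -(cpu_khz * 1000 / nmi_hz),
 * i.e. that many ticks below overflow. Counting unhalted cycles at
 * cpu_khz * 1000 per second, it overflows (and raises the NMI) roughly
 * nmi_hz times a second.
 */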
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
		goto fail;

	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	write_watchdog_counter(perfctr_msr, "K7_PERFCTR0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL << 63;
	return 1;
fail1:
	__release_perfctr_nmi(-1, perfctr_msr);
fail:
	return 0;
}
static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	__release_evntsel_nmi(-1, wd->evntsel_msr);
	__release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define P6_EVNTSEL0_ENABLE	(1 << 22)
#define P6_EVNTSEL_INT		(1 << 20)
#define P6_EVNTSEL_OS		(1 << 17)
#define P6_EVNTSEL_USR		(1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
static int setup_p6_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_P6_PERFCTR0;
	evntsel_msr = MSR_P6_EVNTSEL0;
	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
		goto fail;

	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = P6_EVNTSEL_INT
		| P6_EVNTSEL_OS
		| P6_EVNTSEL_USR
		| P6_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= P6_EVNTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL << 39;
	return 1;
fail1:
	__release_perfctr_nmi(-1, perfctr_msr);
fail:
	return 0;
}
static void stop_p6_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	__release_evntsel_nmi(-1, wd->evntsel_msr);
	__release_perfctr_nmi(-1, wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
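/* Why a complemented max threshold behaves like a clock (our reading of
 * the manual section cited above): with COMPARE and COMPLEMENT set, the
 * CCCR increments the counter in every cycle where the per-cycle event
 * count is <= THRESHOLD; a threshold of 15 (the maximum) is always
 * satisfied, so the counter advances every cycle regardless of the
 * event selected.
 */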
static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
		goto fail;

	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);
	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL << 39;
	return 1;
fail1:
	__release_perfctr_nmi(-1, perfctr_msr);
fail:
	return 0;
}
static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	__release_evntsel_nmi(-1, wd->evntsel_msr);
	__release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
static int setup_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		goto fail;

	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;

	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
		goto fail;

	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
	return 1;
fail1:
	__release_perfctr_nmi(-1, perfctr_msr);
fail:
	return 0;
}
static void stop_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return;

	wrmsr(wd->evntsel_msr, 0, 0);
	__release_evntsel_nmi(-1, wd->evntsel_msr);
	__release_perfctr_nmi(-1, wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 1)
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active, neither should the other cpus be */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
			    boot_cpu_data.x86 != 16)
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				if (!setup_intel_arch_watchdog())
					return;
				break;
			}
			switch (boot_cpu_data.x86) {
			case 6:
				if (boot_cpu_data.x86_model > 0xd)
					return;

				if (!setup_p6_watchdog())
					return;
				break;
			case 15:
				if (boot_cpu_data.x86_model > 0x4)
					return;

				if (!setup_p4_watchdog())
					return;
				break;
			default:
				return;
			}
			break;
		default:
			return;
		}
	}
	wd->enabled = 1;
	atomic_inc(&nmi_active);
}
void stop_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 0)
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				stop_intel_arch_watchdog();
				break;
			}
			switch (boot_cpu_data.x86) {
			case 6:
				if (boot_cpu_data.x86_model > 0xd)
					break;
				stop_p6_watchdog();
				break;
			case 15:
				if (boot_cpu_data.x86_model > 0x4)
					break;
				stop_p4_watchdog();
				break;
			}
			break;
		default:
			return;
		}
	}
	wd->enabled = 0;
	atomic_dec(&nmi_active);
}
/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [when there will be more tty-related locks, break them up
 *  here too!]
 */

static unsigned int
	last_irq_sums [NR_CPUS],
	alert_counter [NR_CPUS];
void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Just reset the alert counters (other CPUs might be
		 * spinning on locks we hold):
		 */
		for_each_present_cpu (cpu)
			alert_counter[cpu] = 0;
	}

	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
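/* Usage note: any code that legitimately keeps a CPU busy for seconds at a
 * time (long polling loops, panic paths, slow console output) should call
 * touch_nmi_watchdog() periodically, so that alert_counter never reaches
 * the 5*nmi_hz lockup threshold used in nmi_watchdog_tick() below.
 */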
extern void die_nmi(struct pt_regs *, const char *msg);

__kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

	/*
	 * Take the local apic timer and PIT/HPET into account. We don't
	 * know which one is active, when we have highres/dyntick on
	 */
	sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_irqs(0);

	/* if none of the timers is firing, this cpu isn't doing much */
	if (!touched && last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel P4 uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
				/* start the cycle over again */
				write_watchdog_counter(wd->perfctr_msr, NULL);
			} else if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
				   wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
				/* P6 based Pentium M needs to re-unmask
				 * the apic vector, but it doesn't hurt
				 * other P6 variants.
				 * ArchPerfmon/Core Duo also needs this */
				apic_write(APIC_LVTPC, APIC_DM_NMI);
				/* P6/ARCH_PERFMON has 32 bit counter write */
				write_watchdog_counter32(wd->perfctr_msr, NULL);
			} else {
				/* start the cycle over again */
				write_watchdog_counter(wd->perfctr_msr, NULL);
			}
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt.
			 * This matches the old behaviour.
			 */
			rc = 1;
		}
	}
done:
	return rc;
}
int do_nmi_callback(struct pt_regs * regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(regs, buf);
	return 0;
}
/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	if (nmi_watchdog == NMI_DEFAULT) {
		if (nmi_known_cpu() > 0)
			nmi_watchdog = NMI_LOCAL_APIC;
		else
			nmi_watchdog = NMI_IO_APIC;
	}

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

#endif	/* CONFIG_SYSCTL */
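/* Runtime toggle via proc_nmi_enabled() above (illustrative shell usage;
 * path per the comment on the handler):
 *
 *	echo 1 > /proc/sys/kernel/nmi		# enable the lapic watchdog
 *	echo 0 > /proc/sys/kernel/nmi		# disable it
 *
 * A write only takes effect when the value actually changes, per the
 * !!old_state == !!nmi_watchdog_enabled check.
 */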
void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}
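/* How the pieces fit (summary, for reference): setting a cpu's bit in
 * backtrace_mask makes the next nmi_watchdog_tick() on that cpu print
 * "NMI backtrace for cpu N" plus a stack dump and clear its bit; the
 * loop above simply polls until every online cpu has responded or ten
 * seconds elapse.
 */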
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);