/*
 * arch/x86/kernel/irq_32.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code (e.g. i8259.c and
 * io_apic.c).
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW

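/*
 * When non-zero, panic on stack overflow instead of just warning;
 * toggled via the kernel.panic_on_stackoverflow sysctl.
 */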
int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

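	/*
	 * sp is now %esp's offset into the THREAD_SIZE-aligned stack
	 * region. Since the stack grows down, that offset is the number
	 * of bytes still free; struct thread_info lives at the bottom
	 * of the stack, hence the sizeof() term in the check below.
	 */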
	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack);

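/*
 * Switch to @stack and call @func there: xchgl leaves the old %esp in
 * %ebx, the call runs on the new stack, and the final movl restores
 * the original stack pointer.
 */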
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

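/* Mask off the low bits of %esp to get the base of the current stack */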
static inline void *current_stack(void)
{
	return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	struct irq_stack *curstk, *irqstk;
	u32 *isp, *prev_esp, arg1, arg2;

	curstk = (struct irq_stack *) current_stack();
	irqstk = __this_cpu_read(hardirq_stack);

	/*
	 * This is where we switch to the IRQ stack. However, if we are
	 * already running on the IRQ stack (because we interrupted a
	 * hardirq handler), we can't switch again and simply keep using
	 * the current stack, which is the IRQ stack already.
	 */
	if (unlikely(curstk == irqstk))
		return 0;

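	/* Start the frame at the top of the irq stack; x86 stacks grow down */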
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/*
	 * Save the current stack pointer in the first word of the irq
	 * stack so the unwinder can walk back to the interrupted stack.
	 */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer();

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

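	/*
	 * Open-coded call_on_stack(): with -mregparm=3 the handler takes
	 * irq in %eax and desc in %edx, so both are passed through the
	 * asm constraints.
	 */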
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     :  "0" (irq),   "1" (desc),  "2" (isp),
			"D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * Allocate per-CPU stacks for hardirq and softirq processing.
 */
void irq_ctx_init(int cpu)
{
	struct irq_stack *irqstk;

	if (per_cpu(hardirq_stack, cpu))
		return;

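	/*
	 * Allocate THREAD_SIZE-sized stacks on the CPU's local node,
	 * with the same GFP flags as task stacks (THREADINFO_GFP).
	 */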
	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	per_cpu(hardirq_stack, cpu) = irqstk;

	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	per_cpu(softirq_stack, cpu) = irqstk;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}

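/*
 * Run the softirq processing loop (__do_softirq) on the dedicated
 * per-CPU softirq stack rather than on the current task stack.
 */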
void do_softirq_own_stack(void)
{
	struct irq_stack *irqstk;
	u32 *isp, *prev_esp;

	irqstk = __this_cpu_read(softirq_stack);

	/* Build the call frame at the top of the softirq stack */
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Save the current esp at the bottom of the stack, for the unwinder */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer();

	call_on_stack(__do_softirq, isp);
}

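/*
 * Handle one interrupt: returns false if no irq_desc exists for @irq,
 * true once the handler has run.
 */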
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

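	/*
	 * Interrupts from user mode arrive with an almost-empty kernel
	 * stack, so run the handler directly on it; otherwise try to
	 * switch to the per-CPU hardirq stack first.
	 */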
	if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}