/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/hazards.h>

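/*
 * The IRQ enable/disable/save/restore sequences below are emitted once at
 * file scope as assembler macros and are then invoked by name from the
 * inline functions that follow, so each config-dependent multi-instruction
 * sequence is written down in a single place.
 */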
__asm__(
        "       .macro  arch_local_irq_enable                           \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "       mfc0    $1, $2, 1       # SMTC - clear TCStatus.IXMT    \n"
        "       ori     $1, 0x400                                       \n"
        "       xori    $1, 0x400                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
#elif defined(CONFIG_CPU_MIPSR2)
        "       ei                                                      \n"
#else
        "       mfc0    $1,$12                                          \n"
        "       ori     $1,0x1f                                         \n"
        "       xori    $1,0x1e                                         \n"
        "       mtc0    $1,$12                                          \n"
#endif
        "       irq_enable_hazard                                       \n"
        "       .set    pop                                             \n"
        "       .endm");

extern void smtc_ipi_replay(void);

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC kernel needs to do a software replay of queued
         * IPIs, at the cost of call overhead on each local_irq_enable()
         */
        smtc_ipi_replay();
#endif
        __asm__ __volatile__(
                "arch_local_irq_enable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}


/*
 * When disabling interrupts we have to insert nops to make sure that the
 * new value has actually arrived in the status register before the end of
 * this macro.
 * The R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
/*
 * For the TX49, toggling only the IE bit is not enough.
 *
 * If an mfc0 from $12 follows a store, the mfc0 is the last instruction of
 * a page, and fetching the next instruction causes a TLB miss, the result
 * of the mfc0 might wrongly have the EXL bit set.
 *
 * Errata: ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask the EXL bit of the result, or place a nop before the mfc0.
 */
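/*
 * In the non-MIPSR2 paths below, the ori/xori pair clears the low Status
 * bits of the mfc0 result (EXL among them) before it is written back, so
 * it also serves as the "mask the EXL bit of the result" workaround above.
 */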
__asm__(
        "       .macro  arch_local_irq_disable\n"
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "       mfc0    $1, $2, 1                                       \n"
        "       ori     $1, 0x400                                       \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
#elif defined(CONFIG_CPU_MIPSR2)
        "       di                                                      \n"
#else
        "       mfc0    $1,$12                                          \n"
        "       ori     $1,0x1f                                         \n"
        "       xori    $1,0x1f                                         \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1,$12                                          \n"
#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");

static inline void arch_local_irq_disable(void)
{
        __asm__ __volatile__(
                "arch_local_irq_disable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

__asm__(
        "       .macro  arch_local_save_flags flags                     \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "       mfc0    \\flags, $2, 1                                  \n"
#else
        "       mfc0    \\flags, $12                                    \n"
#endif
        "       .set    pop                                             \n"
        "       .endm                                                   \n");

static inline unsigned long arch_local_save_flags(void)
{
        unsigned long flags;
        asm volatile("arch_local_save_flags %0" : "=r" (flags));
        return flags;
}

__asm__(
        "       .macro  arch_local_irq_save result                      \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "       mfc0    \\result, $2, 1                                 \n"
        "       ori     $1, \\result, 0x400                             \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
        "       andi    \\result, \\result, 0x400                       \n"
#elif defined(CONFIG_CPU_MIPSR2)
        "       di      \\result                                        \n"
        "       andi    \\result, 1                                     \n"
#else
        "       mfc0    \\result, $12                                   \n"
        "       ori     $1, \\result, 0x1f                              \n"
        "       xori    $1, 0x1f                                        \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $12                                         \n"
#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
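
/*
 * The value returned below is the pre-disable state: the old Status
 * register (with CONFIG_CPU_MIPSR2 just its IE bit; with SMTC, the
 * TCStatus.IXMT bit).  Only the bit tested by arch_irqs_disabled_flags()
 * is meaningful to callers, which pass the value back unchanged to
 * arch_local_irq_restore().
 */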

static inline unsigned long arch_local_irq_save(void)
{
        unsigned long flags;
        asm volatile("arch_local_irq_save\t%0"
                     : "=r" (flags)
                     : /* no inputs */
                     : "memory");
        return flags;
}

__asm__(
        "       .macro  arch_local_irq_restore flags                    \n"
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "mfc0   $1, $2, 1                                               \n"
        "andi   \\flags, 0x400                                          \n"
        "ori    $1, 0x400                                               \n"
        "xori   $1, 0x400                                               \n"
        "or     \\flags, $1                                             \n"
        "mtc0   \\flags, $2, 1                                          \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
        /*
         * Slow, but doesn't suffer from the relatively unlikely race
         * condition we have had since day 1.
         */
        "       beqz    \\flags, 1f                                     \n"
        "        di                                                     \n"
        "       ei                                                      \n"
        "1:                                                             \n"
#elif defined(CONFIG_CPU_MIPSR2)
        /*
         * Fast, dangerous.  Life is fun, life is good.
         */
        "       mfc0    $1, $12                                         \n"
        "       ins     $1, \\flags, 0, 1                               \n"
        "       mtc0    $1, $12                                         \n"
#else
        "       mfc0    $1, $12                                         \n"
        "       andi    \\flags, 1                                      \n"
        "       ori     $1, 0x1f                                        \n"
        "       xori    $1, 0x1f                                        \n"
        "       or      \\flags, $1                                     \n"
        "       mtc0    \\flags, $12                                    \n"
#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");


static inline void arch_local_irq_restore(unsigned long flags)
{
        unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC kernel needs to do a software replay of queued
         * IPIs, at the cost of branch and call overhead on each
         * local_irq_restore()
         */
        if (unlikely(!(flags & 0x0400)))
                smtc_ipi_replay();
#endif

        __asm__ __volatile__(
                "arch_local_irq_restore\t%0"
                : "=r" (__tmp1)
                : "0" (flags)
                : "memory");
}
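
/*
 * Like arch_local_irq_restore(), but skips the SMTC software IPI replay;
 * it only writes the saved interrupt state back to the hardware.
 */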
static inline void __arch_local_irq_restore(unsigned long flags)
{
        unsigned long __tmp1;

        __asm__ __volatile__(
                "arch_local_irq_restore\t%0"
                : "=r" (__tmp1)
                : "0" (flags)
                : "memory");
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
         */
        return flags & 0x400;
#else
        return !(flags & 1);
#endif
}
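
/*
 * Typical usage, as a minimal sketch: callers normally reach these helpers
 * through the generic local_irq_save()/local_irq_restore() wrappers rather
 * than calling them directly.
 *
 *      unsigned long flags;
 *
 *      flags = arch_local_irq_save();          disable IRQs, keep old state
 *      ...critical section...
 *      arch_local_irq_restore(flags);          put the saved state back
 *
 * arch_irqs_disabled_flags(flags) tells whether the saved state had
 * interrupts disabled.
 */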

#endif /* !__ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS                                         \
        LONG_L  $11, PT_R11(sp);                                        \
        LONG_L  $10, PT_R10(sp);                                        \
        LONG_L  $9, PT_R9(sp);                                          \
        LONG_L  $8, PT_R8(sp);                                          \
        LONG_L  $7, PT_R7(sp);                                          \
        LONG_L  $6, PT_R6(sp);                                          \
        LONG_L  $5, PT_R5(sp);                                          \
        LONG_L  $4, PT_R4(sp);                                          \
        LONG_L  $2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS                                         \
        LONG_L  $7, PT_R7(sp);                                          \
        LONG_L  $6, PT_R6(sp);                                          \
        LONG_L  $5, PT_R5(sp);                                          \
        LONG_L  $4, PT_R4(sp);                                          \
        LONG_L  $2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON                                                  \
        CLI;    /* make sure trace_hardirqs_on() is called at kernel level */ \
        jal     trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD                                           \
        TRACE_IRQS_ON;                                                  \
        TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF                                                 \
        jal     trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif

#endif /* _ASM_IRQFLAGS_H */