drivers/irqchip/irq-mips-gic.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#include "irqchip.h"

unsigned int gic_present;

struct gic_pcpu_mask {
        DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);

static inline unsigned int gic_read(unsigned int reg)
{
        return __raw_readl(gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned int val)
{
        __raw_writel(val, gic_base + reg);
}

static inline void gic_update_bits(unsigned int reg, unsigned int mask,
                                   unsigned int val)
{
        unsigned int regval;

        regval = gic_read(reg);
        regval &= ~mask;
        regval |= val;
        gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
        gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
                  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
        gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
                  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
        gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
                        GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
                        pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
        gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
                        GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
                        trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
        gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
                        1 << GIC_INTR_BIT(intr),
                        dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
        gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
                  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
        gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
                  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
                  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
cycle_t gic_read_count(void)
{
        unsigned int hi, hi2, lo;

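        /*
         * Re-read the high word after reading the low word; if it changed,
         * the counter rolled over between the two reads and we must retry.
         */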
        do {
                hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
                lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
                hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
        } while (hi2 != hi);

        return (((cycle_t) hi) << 32) + lo;
}

unsigned int gic_get_count_width(void)
{
        unsigned int bits, config;

        config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
        bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
                         GIC_SH_CONFIG_COUNTBITS_SHF);

        return bits;
}

void gic_write_compare(cycle_t cnt)
{
        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
                                (int)(cnt >> 32));
        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
                                (int)(cnt & 0xffffffff));
}

void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
        unsigned long flags;

        local_irq_save(flags);

        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
                                (int)(cnt >> 32));
        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
                                (int)(cnt & 0xffffffff));

        local_irq_restore(flags);
}

cycle_t gic_read_compare(void)
{
        unsigned int hi, lo;

        hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
        lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

        return (((cycle_t) hi) << 32) + lo;
}
#endif

static bool gic_local_irq_is_routable(int intr)
{
        u32 vpe_ctl;

        /* All local interrupts are routable in EIC mode. */
        if (cpu_has_veic)
                return true;

        vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
        switch (intr) {
        case GIC_LOCAL_INT_TIMER:
                return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
        case GIC_LOCAL_INT_PERFCTR:
                return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
        case GIC_LOCAL_INT_FDC:
                return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
        case GIC_LOCAL_INT_SWINT0:
        case GIC_LOCAL_INT_SWINT1:
                return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
        default:
                return true;
        }
}

unsigned int gic_get_timer_pending(void)
{
        unsigned int vpe_pending;

        vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
        return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
}

static void gic_bind_eic_interrupt(int irq, int set)
{
        /* Convert irq vector # to hw int # */
        irq -= GIC_PIN_TO_VEC_OFFSET;

        /* Set irq to use shadow set */
        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
                  GIC_VPE_EIC_SS(irq), set);
}

void gic_send_ipi(unsigned int intr)
{
        gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}

int gic_get_c0_compare_int(void)
{
        if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
                return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
        return irq_create_mapping(gic_irq_domain,
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
        if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
                /* Is the performance counter shared with the timer? */
                if (cp0_perfcount_irq < 0)
                        return -1;
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        }
        return irq_create_mapping(gic_irq_domain,
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

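/*
 * Return the first shared interrupt that is pending, unmasked and routed to
 * the current CPU, or gic_shared_intrs if there is none.
 */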
static unsigned int gic_get_int(void)
{
        unsigned int i;
        unsigned long *pcpu_mask;
        unsigned long pending_reg, intrmask_reg;
        DECLARE_BITMAP(pending, GIC_MAX_INTRS);
        DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

        /* Get per-cpu bitmaps */
        pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

        pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
        intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

        for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
                pending[i] = gic_read(pending_reg);
                intrmask[i] = gic_read(intrmask_reg);
                pending_reg += 0x4;
                intrmask_reg += 0x4;
        }

        bitmap_and(pending, pending, intrmask, gic_shared_intrs);
        bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

        return find_first_bit(pending, gic_shared_intrs);
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
        unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

        gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

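/*
 * Program the polarity, trigger and dual-edge registers for a shared
 * interrupt and switch its flow handler between edge and level handling.
 */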
static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
        unsigned long flags;
        bool is_edge;

        spin_lock_irqsave(&gic_lock, flags);
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_FALLING:
                gic_set_polarity(irq, GIC_POL_NEG);
                gic_set_trigger(irq, GIC_TRIG_EDGE);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_EDGE_RISING:
                gic_set_polarity(irq, GIC_POL_POS);
                gic_set_trigger(irq, GIC_TRIG_EDGE);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_EDGE_BOTH:
                /* polarity is irrelevant in this case */
                gic_set_trigger(irq, GIC_TRIG_EDGE);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                gic_set_polarity(irq, GIC_POL_NEG);
                gic_set_trigger(irq, GIC_TRIG_LEVEL);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = false;
                break;
        case IRQ_TYPE_LEVEL_HIGH:
        default:
                gic_set_polarity(irq, GIC_POL_POS);
                gic_set_trigger(irq, GIC_TRIG_LEVEL);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = false;
                break;
        }

        if (is_edge) {
                __irq_set_chip_handler_name_locked(d->irq,
                                                   &gic_edge_irq_controller,
                                                   handle_edge_irq, NULL);
        } else {
                __irq_set_chip_handler_name_locked(d->irq,
                                                   &gic_level_irq_controller,
                                                   handle_level_irq, NULL);
        }
        spin_unlock_irqrestore(&gic_lock, flags);

        return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
                            bool force)
{
        unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
        cpumask_t       tmp = CPU_MASK_NONE;
        unsigned long   flags;
        int             i;

        cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpus_empty(tmp))
                return -EINVAL;

        /* Assumption: cpumask refers to a single CPU */
        spin_lock_irqsave(&gic_lock, flags);

        /* Re-route this IRQ */
        gic_map_to_vpe(irq, first_cpu(tmp));

        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(irq, pcpu_masks[i].pcpu_mask);
        set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

        cpumask_copy(d->affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);

        return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
        .name                   =       "MIPS GIC",
        .irq_mask               =       gic_mask_irq,
        .irq_unmask             =       gic_unmask_irq,
        .irq_set_type           =       gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       =       gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
        .name                   =       "MIPS GIC",
        .irq_ack                =       gic_ack_irq,
        .irq_mask               =       gic_mask_irq,
        .irq_unmask             =       gic_unmask_irq,
        .irq_set_type           =       gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       =       gic_set_affinity,
#endif
};

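/*
 * Return the first local interrupt that is pending and unmasked on the
 * current VPE, or GIC_NUM_LOCAL_INTRS if there is none.
 */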
static unsigned int gic_get_local_int(void)
{
        unsigned long pending, masked;

        pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
        masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

        bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

        return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
}

static void gic_mask_local_irq(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
        .name                   =       "MIPS GIC Local",
        .irq_mask               =       gic_mask_local_irq,
        .irq_unmask             =       gic_unmask_local_irq,
};

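/*
 * Mask/unmask a local interrupt on every VPE via the VPE-other alias
 * region; used for local interrupts that are handled identically on all
 * CPUs, i.e. the timer and performance counter.
 */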
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
        int i;
        unsigned long flags;

        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
                gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
        }
        spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
        int i;
        unsigned long flags;

        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
                gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
        }
        spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
        .name                   =       "MIPS GIC Local",
        .irq_mask               =       gic_mask_local_irq_all_vpes,
        .irq_unmask             =       gic_unmask_local_irq_all_vpes,
};

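/*
 * Dispatch all pending local interrupts, then all pending shared interrupts,
 * for the current CPU. Installed as the EIC vector handler or invoked from
 * the chained handler on the CPU interrupt pin.
 */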
static void __gic_irq_dispatch(void)
{
        unsigned int intr, virq;

        while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_LOCAL_TO_HWIRQ(intr));
                do_IRQ(virq);
        }

        while ((intr = gic_get_int()) != gic_shared_intrs) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_SHARED_TO_HWIRQ(intr));
                do_IRQ(virq);
        }
}

static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
        __gic_irq_dispatch();
}

#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
        return gic_resched_int_base + cpu;
}

unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
        return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
        .handler        = ipi_resched_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI resched"
};

static struct irqaction irq_call = {
        .handler        = ipi_call_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI call"
};

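/*
 * Set up one shared interrupt as an IPI: route it exclusively to @cpu,
 * mark it rising-edge triggered and attach the given irqaction.
 */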
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
                                    struct irqaction *action)
{
        int virq = irq_create_mapping(gic_irq_domain,
                                      GIC_SHARED_TO_HWIRQ(intr));
        int i;

        gic_map_to_vpe(intr, cpu);
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[cpu].pcpu_mask);

        irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

        irq_set_handler(virq, handle_percpu_irq);
        setup_irq(virq, action);
}

static __init void gic_ipi_init(void)
{
        int i;

        /* Use last 2 * NR_CPUS interrupts as IPIs */
        gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
        gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

        for (i = 0; i < nr_cpu_ids; i++) {
                gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
                gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
        }
}
#else
static inline void gic_ipi_init(void)
{
}
#endif

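/*
 * Bring the GIC into a known state: every shared interrupt defaults to
 * active-high, level-triggered and masked, and every routable local
 * interrupt is masked on every VPE.
 */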
static void __init gic_basic_init(void)
{
        unsigned int i;

        board_bind_eic_interrupt = &gic_bind_eic_interrupt;

        /* Setup defaults */
        for (i = 0; i < gic_shared_intrs; i++) {
                gic_set_polarity(i, GIC_POL_POS);
                gic_set_trigger(i, GIC_TRIG_LEVEL);
                gic_reset_mask(i);
        }

        for (i = 0; i < gic_vpes; i++) {
                unsigned int j;

                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
                for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
                        if (!gic_local_irq_is_routable(j))
                                continue;
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
                }
        }
}

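/*
 * Map a local (per-VPE) interrupt: choose the per-CPU irq_chip and route
 * the source to the GIC CPU pin on every VPE.
 */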
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                    irq_hw_number_t hw)
{
        int intr = GIC_HWIRQ_TO_LOCAL(hw);
        int ret = 0;
        int i;
        unsigned long flags;

        if (!gic_local_irq_is_routable(intr))
                return -EPERM;

        /*
         * HACK: These are all really percpu interrupts, but the rest
         * of the MIPS kernel code does not use the percpu IRQ API for
         * the CP0 timer and performance counter interrupts.
         */
        if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
                irq_set_chip_and_handler(virq,
                                         &gic_local_irq_controller,
                                         handle_percpu_devid_irq);
                irq_set_percpu_devid(virq);
        } else {
                irq_set_chip_and_handler(virq,
                                         &gic_all_vpes_local_irq_controller,
                                         handle_percpu_irq);
        }

        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

                switch (intr) {
                case GIC_LOCAL_INT_WD:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
                        break;
                case GIC_LOCAL_INT_COMPARE:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
                        break;
                case GIC_LOCAL_INT_TIMER:
                        /* CONFIG_MIPS_CMP workaround (see __gic_init) */
                        val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
                        break;
                case GIC_LOCAL_INT_PERFCTR:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
                        break;
                case GIC_LOCAL_INT_SWINT0:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
                        break;
                case GIC_LOCAL_INT_SWINT1:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
                        break;
                case GIC_LOCAL_INT_FDC:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
                        break;
                default:
                        pr_err("Invalid local IRQ %d\n", intr);
                        ret = -EINVAL;
                        break;
                }
        }
        spin_unlock_irqrestore(&gic_lock, flags);

        return ret;
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                     irq_hw_number_t hw)
{
        int intr = GIC_HWIRQ_TO_SHARED(hw);
        unsigned long flags;

        irq_set_chip_and_handler(virq, &gic_level_irq_controller,
                                 handle_level_irq);

        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
        /* Map to VPE 0 by default */
        gic_map_to_vpe(intr, 0);
        set_bit(intr, pcpu_masks[0].pcpu_mask);
        spin_unlock_irqrestore(&gic_lock, flags);

        return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
                              irq_hw_number_t hw)
{
        if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
                return gic_local_irq_domain_map(d, virq, hw);
        return gic_shared_irq_domain_map(d, virq, hw);
}

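/*
 * Translate a three-cell devicetree interrupt specifier: the first cell is
 * GIC_SHARED or GIC_LOCAL, the second is the interrupt number within that
 * space, and the third is the trigger type.
 */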
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
                                const u32 *intspec, unsigned int intsize,
                                irq_hw_number_t *out_hwirq,
                                unsigned int *out_type)
{
        if (intsize != 3)
                return -EINVAL;

        if (intspec[0] == GIC_SHARED)
                *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
        else if (intspec[0] == GIC_LOCAL)
                *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
        else
                return -EINVAL;
        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

        return 0;
}

static struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .xlate = gic_irq_domain_xlate,
};

static void __init __gic_init(unsigned long gic_base_addr,
                              unsigned long gic_addrspace_size,
                              unsigned int cpu_vec, unsigned int irqbase,
                              struct device_node *node)
{
        unsigned int gicconfig;

        gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

        gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
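        /*
         * NUMINTRS encodes (number of shared interrupts / 8) - 1 and
         * NUMVPES encodes the number of VPEs - 1, hence the adjustments
         * below.
         */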
        gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
                   GIC_SH_CONFIG_NUMINTRS_SHF;
        gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

        gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
                  GIC_SH_CONFIG_NUMVPES_SHF;
        gic_vpes = gic_vpes + 1;

        if (cpu_has_veic) {
                /* Always use vector 1 in EIC mode */
                gic_cpu_pin = 0;
                timer_cpu_pin = gic_cpu_pin;
                set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
                               __gic_irq_dispatch);
        } else {
                gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
                irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
                                        gic_irq_dispatch);
                /*
                 * With the CMP implementation of SMP (deprecated), other CPUs
                 * are started by the bootloader and put into a timer-based
                 * waiting poll loop. We must not re-route those CPUs' local
                 * timer interrupts as the wait instruction will never finish,
                 * so just handle whatever CPU interrupt it is routed to by
                 * default.
                 *
                 * This workaround should be removed when CMP support is
                 * dropped.
                 */
                if (IS_ENABLED(CONFIG_MIPS_CMP) &&
                    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
                        timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
                                                         GIC_VPE_TIMER_MAP)) &
                                        GIC_MAP_MSK;
                        irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
                                                GIC_CPU_PIN_OFFSET +
                                                timer_cpu_pin,
                                                gic_irq_dispatch);
                } else {
                        timer_cpu_pin = gic_cpu_pin;
                }
        }

        gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
                                               gic_shared_intrs, irqbase,
                                               &gic_irq_domain_ops, NULL);
        if (!gic_irq_domain)
                panic("Failed to add GIC IRQ domain");

        gic_basic_init();

        gic_ipi_init();
}

void __init gic_init(unsigned long gic_base_addr,
                     unsigned long gic_addrspace_size,
                     unsigned int cpu_vec, unsigned int irqbase)
{
        __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

static int __init gic_of_init(struct device_node *node,
                              struct device_node *parent)
{
        struct resource res;
        unsigned int cpu_vec, i = 0, reserved = 0;
        phys_addr_t gic_base;
        size_t gic_len;

        /* Find the first available CPU vector. */
        while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
                                           i++, &cpu_vec))
                reserved |= BIT(cpu_vec);
        for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
                if (!(reserved & BIT(cpu_vec)))
                        break;
        }
        if (cpu_vec == 8) {
                pr_err("No CPU vectors available for GIC\n");
                return -ENODEV;
        }

        if (of_address_to_resource(node, 0, &res)) {
                /*
                 * Probe the CM for the GIC base address if not specified
                 * in the device-tree.
                 */
                if (mips_cm_present()) {
                        gic_base = read_gcr_gic_base() &
                                ~CM_GCR_GIC_BASE_GICEN_MSK;
                        gic_len = 0x20000;
                } else {
                        pr_err("Failed to get GIC memory range\n");
                        return -ENODEV;
                }
        } else {
                gic_base = res.start;
                gic_len = resource_size(&res);
        }

        if (mips_cm_present())
                write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
        gic_present = true;

        __gic_init(gic_base, gic_len, cpu_vec, 0, node);

        return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);