/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#include "irqchip.h"

unsigned int gic_present;

struct gic_pcpu_mask {
        DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);

static inline unsigned int gic_read(unsigned int reg)
{
        return __raw_readl(gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned int val)
{
        __raw_writel(val, gic_base + reg);
}

static inline void gic_update_bits(unsigned int reg, unsigned int mask,
                                   unsigned int val)
{
        unsigned int regval;

        regval = gic_read(reg);
        regval &= ~mask;
        regval |= val;
        gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
        gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
                  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
        gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
                  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
        gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
                        GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
                        pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
        gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
                        GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
                        trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
        gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
                        1 << GIC_INTR_BIT(intr),
                        dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
        gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
                  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
        gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
                  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
                  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
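/*
 * Read the 64-bit shared counter as two 32-bit halves. The high word is
 * re-read after the low word and the sequence retried if it changed, so a
 * low-word rollover between the two reads cannot produce a torn value.
 */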
cycle_t gic_read_count(void)
{
        unsigned int hi, hi2, lo;

        do {
                hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
                lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
                hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
        } while (hi2 != hi);

        return (((cycle_t) hi) << 32) + lo;
}

unsigned int gic_get_count_width(void)
{
        unsigned int bits, config;

        config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
        bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
                         GIC_SH_CONFIG_COUNTBITS_SHF);

        return bits;
}

void gic_write_compare(cycle_t cnt)
{
        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
                                (int)(cnt >> 32));
        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
                                (int)(cnt & 0xffffffff));
}

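/*
 * Program another VPE's compare register by pointing the GIC_VPE_OTHER_ADDR
 * alias at it. Local interrupts are disabled so nothing else on this CPU can
 * re-target the alias in the middle of the sequence.
 */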
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
        unsigned long flags;

        local_irq_save(flags);

        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
                                (int)(cnt >> 32));
        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
                                (int)(cnt & 0xffffffff));

        local_irq_restore(flags);
}

cycle_t gic_read_compare(void)
{
        unsigned int hi, lo;

        hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
        lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

        return (((cycle_t) hi) << 32) + lo;
}

void gic_start_count(void)
{
        u32 gicconfig;

        /* Start the counter */
        gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
        gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
        gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
        u32 gicconfig;

        /* Stop the counter */
        gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
        gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
        gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif

static bool gic_local_irq_is_routable(int intr)
{
        u32 vpe_ctl;

        /* All local interrupts are routable in EIC mode. */
        if (cpu_has_veic)
                return true;

        vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
        switch (intr) {
        case GIC_LOCAL_INT_TIMER:
                return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
        case GIC_LOCAL_INT_PERFCTR:
                return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
        case GIC_LOCAL_INT_FDC:
                return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
        case GIC_LOCAL_INT_SWINT0:
        case GIC_LOCAL_INT_SWINT1:
                return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
        default:
                return true;
        }
}

static void gic_bind_eic_interrupt(int irq, int set)
{
        /* Convert irq vector # to hw int # */
        irq -= GIC_PIN_TO_VEC_OFFSET;

        /* Set irq to use shadow set */
        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
                  GIC_VPE_EIC_SS(irq), set);
}

void gic_send_ipi(unsigned int intr)
{
        gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}

int gic_get_c0_compare_int(void)
{
        if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
                return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
        return irq_create_mapping(gic_irq_domain,
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
        if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
                /* Is the performance counter shared with the timer? */
                if (cp0_perfcount_irq < 0)
                        return -1;
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        }
        return irq_create_mapping(gic_irq_domain,
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
        if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
                /* Is the FDC IRQ even present? */
                if (cp0_fdc_irq < 0)
                        return -1;
                return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
        }

        return irq_create_mapping(gic_irq_domain,
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

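/*
 * Dispatch the shared interrupts that are pending, unmasked and routed to
 * this CPU (per its pcpu_mask). When chained from another handler the virq
 * is handled directly; from the CPU vector it goes through do_IRQ().
 */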
static void gic_handle_shared_int(bool chained)
{
        unsigned int i, intr, virq;
        unsigned long *pcpu_mask;
        unsigned long pending_reg, intrmask_reg;
        DECLARE_BITMAP(pending, GIC_MAX_INTRS);
        DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

        /* Get per-cpu bitmaps */
        pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

        pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
        intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

        for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
                pending[i] = gic_read(pending_reg);
                intrmask[i] = gic_read(intrmask_reg);
                pending_reg += 0x4;
                intrmask_reg += 0x4;
        }

        bitmap_and(pending, pending, intrmask, gic_shared_intrs);
        bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

        intr = find_first_bit(pending, gic_shared_intrs);
        while (intr != gic_shared_intrs) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_SHARED_TO_HWIRQ(intr));
                if (chained)
                        generic_handle_irq(virq);
                else
                        do_IRQ(virq);

                /* go to next pending bit */
                bitmap_clear(pending, intr, 1);
                intr = find_first_bit(pending, gic_shared_intrs);
        }
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
        unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

        gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
        unsigned long flags;
        bool is_edge;

        spin_lock_irqsave(&gic_lock, flags);
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_FALLING:
                gic_set_polarity(irq, GIC_POL_NEG);
                gic_set_trigger(irq, GIC_TRIG_EDGE);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_EDGE_RISING:
                gic_set_polarity(irq, GIC_POL_POS);
                gic_set_trigger(irq, GIC_TRIG_EDGE);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_EDGE_BOTH:
                /* polarity is irrelevant in this case */
                gic_set_trigger(irq, GIC_TRIG_EDGE);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                gic_set_polarity(irq, GIC_POL_NEG);
                gic_set_trigger(irq, GIC_TRIG_LEVEL);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = false;
                break;
        case IRQ_TYPE_LEVEL_HIGH:
        default:
                gic_set_polarity(irq, GIC_POL_POS);
                gic_set_trigger(irq, GIC_TRIG_LEVEL);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = false;
                break;
        }

        if (is_edge) {
                __irq_set_chip_handler_name_locked(d->irq,
                                                   &gic_edge_irq_controller,
                                                   handle_edge_irq, NULL);
        } else {
                __irq_set_chip_handler_name_locked(d->irq,
                                                   &gic_level_irq_controller,
                                                   handle_level_irq, NULL);
        }
        spin_unlock_irqrestore(&gic_lock, flags);

        return 0;
}

#ifdef CONFIG_SMP
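/*
 * A shared interrupt is routed to a single VPE, so only the first online
 * CPU of the requested mask is used; the per-CPU dispatch masks are
 * updated to match the new routing.
 */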
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
                            bool force)
{
        unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
        cpumask_t       tmp = CPU_MASK_NONE;
        unsigned long   flags;
        int             i;

        cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpumask_empty(&tmp))
                return -EINVAL;

        /* Assumption : cpumask refers to a single CPU */
        spin_lock_irqsave(&gic_lock, flags);

        /* Re-route this IRQ */
        gic_map_to_vpe(irq, cpumask_first(&tmp));

        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(irq, pcpu_masks[i].pcpu_mask);
        set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

        cpumask_copy(d->affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);

        return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
        .name                   =       "MIPS GIC",
        .irq_mask               =       gic_mask_irq,
        .irq_unmask             =       gic_unmask_irq,
        .irq_set_type           =       gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       =       gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
        .name                   =       "MIPS GIC",
        .irq_ack                =       gic_ack_irq,
        .irq_mask               =       gic_mask_irq,
        .irq_unmask             =       gic_unmask_irq,
        .irq_set_type           =       gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       =       gic_set_affinity,
#endif
};

static void gic_handle_local_int(bool chained)
{
        unsigned long pending, masked;
        unsigned int intr, virq;

        pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
        masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

        bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

        intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
        while (intr != GIC_NUM_LOCAL_INTRS) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_LOCAL_TO_HWIRQ(intr));
                if (chained)
                        generic_handle_irq(virq);
                else
                        do_IRQ(virq);

                /* go to next pending bit */
                bitmap_clear(&pending, intr, 1);
                intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
        }
}

static void gic_mask_local_irq(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
        .name                   =       "MIPS GIC Local",
        .irq_mask               =       gic_mask_local_irq,
        .irq_unmask             =       gic_unmask_local_irq,
};

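/*
 * Mask/unmask a local interrupt on every VPE, addressing each VPE's local
 * registers in turn through the GIC_VPE_OTHER_ADDR alias. Used for the
 * local interrupts (timer, perf counter, FDC) that are not requested via
 * the percpu IRQ API; see the HACK note in gic_local_irq_domain_map().
 */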
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
        int i;
        unsigned long flags;

        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
                gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
        }
        spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
        int i;
        unsigned long flags;

        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
                gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
        }
        spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
        .name                   =       "MIPS GIC Local",
        .irq_mask               =       gic_mask_local_irq_all_vpes,
        .irq_unmask             =       gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
        gic_handle_local_int(false);
        gic_handle_shared_int(false);
}

static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
        gic_handle_local_int(true);
        gic_handle_shared_int(true);
}

#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
        return gic_resched_int_base + cpu;
}

unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
        return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
        .handler        = ipi_resched_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI resched"
};

static struct irqaction irq_call = {
        .handler        = ipi_call_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI call"
};

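/*
 * Route one shared interrupt to a single CPU and register it as an
 * edge-triggered per-CPU IPI.
 */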
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
                                    struct irqaction *action)
{
        int virq = irq_create_mapping(gic_irq_domain,
                                      GIC_SHARED_TO_HWIRQ(intr));
        int i;

        gic_map_to_vpe(intr, cpu);
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[cpu].pcpu_mask);

        irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

        irq_set_handler(virq, handle_percpu_irq);
        setup_irq(virq, action);
}

static __init void gic_ipi_init(void)
{
        int i;

        /* Use last 2 * NR_CPUS interrupts as IPIs */
        gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
        gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

        for (i = 0; i < nr_cpu_ids; i++) {
                gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
                gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
        }
}
#else
static inline void gic_ipi_init(void)
{
}
#endif

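/*
 * Reset to a known state: every shared interrupt active-high, level
 * triggered and masked, and every routable local interrupt masked on
 * each VPE.
 */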
static void __init gic_basic_init(void)
{
        unsigned int i;

        board_bind_eic_interrupt = &gic_bind_eic_interrupt;

        /* Setup defaults */
        for (i = 0; i < gic_shared_intrs; i++) {
                gic_set_polarity(i, GIC_POL_POS);
                gic_set_trigger(i, GIC_TRIG_LEVEL);
                gic_reset_mask(i);
        }

        for (i = 0; i < gic_vpes; i++) {
                unsigned int j;

                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
                for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
                        if (!gic_local_irq_is_routable(j))
                                continue;
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
                }
        }
}

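/*
 * Map a local (per-VPE) interrupt: choose the irq_chip and flow handler,
 * then program the interrupt's map register on every VPE so it is routed
 * to the chosen CPU pin.
 */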
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                    irq_hw_number_t hw)
{
        int intr = GIC_HWIRQ_TO_LOCAL(hw);
        int ret = 0;
        int i;
        unsigned long flags;

        if (!gic_local_irq_is_routable(intr))
                return -EPERM;

        /*
         * HACK: These are all really percpu interrupts, but the rest
         * of the MIPS kernel code does not use the percpu IRQ API for
         * the CP0 timer and performance counter interrupts.
         */
        switch (intr) {
        case GIC_LOCAL_INT_TIMER:
        case GIC_LOCAL_INT_PERFCTR:
        case GIC_LOCAL_INT_FDC:
                irq_set_chip_and_handler(virq,
                                         &gic_all_vpes_local_irq_controller,
                                         handle_percpu_irq);
                break;
        default:
                irq_set_chip_and_handler(virq,
                                         &gic_local_irq_controller,
                                         handle_percpu_devid_irq);
                irq_set_percpu_devid(virq);
                break;
        }

        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

                switch (intr) {
                case GIC_LOCAL_INT_WD:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
                        break;
                case GIC_LOCAL_INT_COMPARE:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
                        break;
                case GIC_LOCAL_INT_TIMER:
                        /* CONFIG_MIPS_CMP workaround (see __gic_init) */
                        val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
                        break;
                case GIC_LOCAL_INT_PERFCTR:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
                        break;
                case GIC_LOCAL_INT_SWINT0:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
                        break;
                case GIC_LOCAL_INT_SWINT1:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
                        break;
                case GIC_LOCAL_INT_FDC:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
                        break;
                default:
                        pr_err("Invalid local IRQ %d\n", intr);
                        ret = -EINVAL;
                        break;
                }
        }
        spin_unlock_irqrestore(&gic_lock, flags);

        return ret;
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                     irq_hw_number_t hw)
{
        int intr = GIC_HWIRQ_TO_SHARED(hw);
        unsigned long flags;

        irq_set_chip_and_handler(virq, &gic_level_irq_controller,
                                 handle_level_irq);

        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
        /* Map to VPE 0 by default */
        gic_map_to_vpe(intr, 0);
        set_bit(intr, pcpu_masks[0].pcpu_mask);
        spin_unlock_irqrestore(&gic_lock, flags);

        return 0;
}

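/*
 * Local interrupts occupy the low end of the hwirq space; hwirqs at or
 * above GIC_NUM_LOCAL_INTRS are shared interrupts.
 */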
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
                              irq_hw_number_t hw)
{
        if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
                return gic_local_irq_domain_map(d, virq, hw);
        return gic_shared_irq_domain_map(d, virq, hw);
}

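/*
 * Device tree interrupt specifiers are three cells: GIC_SHARED or
 * GIC_LOCAL, the interrupt number within that space, and the IRQ
 * type flags.
 */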
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
                                const u32 *intspec, unsigned int intsize,
                                irq_hw_number_t *out_hwirq,
                                unsigned int *out_type)
{
        if (intsize != 3)
                return -EINVAL;

        if (intspec[0] == GIC_SHARED)
                *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
        else if (intspec[0] == GIC_LOCAL)
                *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
        else
                return -EINVAL;
        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

        return 0;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .xlate = gic_irq_domain_xlate,
};

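/*
 * Probe GIC_SH_CONFIG for the number of shared interrupts, encoded as
 * (NUMINTRS + 1) * 8, and the number of VPEs (NUMVPES + 1), hook up the
 * CPU vector (or EIC vector) used for dispatch, then create an IRQ
 * domain covering the local and shared interrupts.
 */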
static void __init __gic_init(unsigned long gic_base_addr,
                              unsigned long gic_addrspace_size,
                              unsigned int cpu_vec, unsigned int irqbase,
                              struct device_node *node)
{
        unsigned int gicconfig;

        gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

        gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
        gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
                   GIC_SH_CONFIG_NUMINTRS_SHF;
        gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

        gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
                  GIC_SH_CONFIG_NUMVPES_SHF;
        gic_vpes = gic_vpes + 1;

        if (cpu_has_veic) {
                /* Always use vector 1 in EIC mode */
                gic_cpu_pin = 0;
                timer_cpu_pin = gic_cpu_pin;
                set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
                               __gic_irq_dispatch);
        } else {
                gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
                irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
                                        gic_irq_dispatch);
                /*
                 * With the CMP implementation of SMP (deprecated), other CPUs
                 * are started by the bootloader and put into a timer based
                 * waiting poll loop. We must not re-route those CPU's local
                 * timer interrupts as the wait instruction will never finish,
                 * so just handle whatever CPU interrupt it is routed to by
                 * default.
                 *
                 * This workaround should be removed when CMP support is
                 * dropped.
                 */
                if (IS_ENABLED(CONFIG_MIPS_CMP) &&
                    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
                        timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
                                                         GIC_VPE_TIMER_MAP)) &
                                        GIC_MAP_MSK;
                        irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
                                                GIC_CPU_PIN_OFFSET +
                                                timer_cpu_pin,
                                                gic_irq_dispatch);
                } else {
                        timer_cpu_pin = gic_cpu_pin;
                }
        }

        gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
                                               gic_shared_intrs, irqbase,
                                               &gic_irq_domain_ops, NULL);
        if (!gic_irq_domain)
                panic("Failed to add GIC IRQ domain");

        gic_basic_init();

        gic_ipi_init();
}

void __init gic_init(unsigned long gic_base_addr,
                     unsigned long gic_addrspace_size,
                     unsigned int cpu_vec, unsigned int irqbase)
{
        __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

static int __init gic_of_init(struct device_node *node,
                              struct device_node *parent)
{
        struct resource res;
        unsigned int cpu_vec, i = 0, reserved = 0;
        phys_addr_t gic_base;
        size_t gic_len;

        /* Find the first available CPU vector. */
        while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
                                           i++, &cpu_vec))
                reserved |= BIT(cpu_vec);
        for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
                if (!(reserved & BIT(cpu_vec)))
                        break;
        }
        if (cpu_vec == 8) {
                pr_err("No CPU vectors available for GIC\n");
                return -ENODEV;
        }

        if (of_address_to_resource(node, 0, &res)) {
                /*
                 * Probe the CM for the GIC base address if not specified
                 * in the device-tree.
                 */
                if (mips_cm_present()) {
                        gic_base = read_gcr_gic_base() &
                                ~CM_GCR_GIC_BASE_GICEN_MSK;
                        gic_len = 0x20000;
                } else {
                        pr_err("Failed to get GIC memory range\n");
                        return -ENODEV;
                }
        } else {
                gic_base = res.start;
                gic_len = resource_size(&res);
        }

        if (mips_cm_present())
                write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
        gic_present = true;

        __gic_init(gic_base, gic_len, cpu_vec, 0, node);

        return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);