/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
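
/* One bucket per possible interrupt number.  The table is also
 * referenced by physical address (ivector_table_pa) from trap-time
 * code, which is why init_IRQ() flushes it from the D-cache right
 * after allocating it.
 */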

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);

unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		irq_table[ent].dev_handle = dev_handle;
		irq_table[ent].dev_ino = dev_ino;
		irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&irq_alloc_lock, flags);

	return ent;
}
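
/* A return value of 0 means allocation failure; virtual IRQ 0 is never
 * handed out because the search above starts at entry 1.
 */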

#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_alloc_lock, flags);

	irq_table[irq].in_use = 0;

	spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
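
/* Example: on a Safari-based (Cheetah) system, cpuid 0x47 gives agent
 * id a = 0x47 & 0x1f = 0x07 and node id n = (0x47 >> 5) & 0x1f = 0x02,
 * which sun4u_compute_tid() packs into the IMAP_AID_SAFARI and
 * IMAP_NID_SAFARI fields of the target id.
 */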

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity)	\
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};
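
/* Three flavors of irq_chip: sun4u programs the IMAP/ICLR registers
 * directly, sun4v goes through sysino-based hypervisor calls, and
 * "vsun4v" uses the cookie-based (dev_handle, dev_ino) hypervisor
 * calls for virtual interrupts such as LDC channels.
 */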

static void pre_flow_handler(struct irq_data *d)
{
	struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
	unsigned int ino = irq_table[d->irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = irq_get_handler_data(irq);

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	__irq_set_preflow_handler(irq, pre_flow_handler);
}
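
/* The pre-handler, once installed, runs from the fasteoi flow before
 * the device handlers are invoked; bus controller drivers use it to do
 * per-INO work up front.  The (ino, arg1, arg2) arguments are whatever
 * the installer passed in above.
 */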

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap = imap;
	handler_data->iclr = iclr;

out:
	return irq;
}
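
/* Sketch of typical (illustrative) usage by a sun4u bus driver, where
 * iclr/imap are the physical addresses of the interrupt's ICLR and
 * IMAP registers and my_handler/my_dev are the driver's own:
 *
 *	unsigned int irq = build_irq(inofixup, iclr, imap);
 *	if (request_irq(irq, my_handler, 0, "mydev", my_dev))
 *		...
 */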

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, sysino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *handler_data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	irq = irq_alloc(devhandle, devino);
	bucket_set_irq(__pa(bucket), irq);

	irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
				      "IVEC");

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return irq;
}

void ack_bad_irq(unsigned int irq)
{
	unsigned int ino = irq_table[irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
	       ino, irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
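
/* The IVEC trap vector chains pending ino_buckets, by physical address,
 * onto the per-cpu irq_worklist_pa.  handler_irq() atomically snapshots
 * and empties that list with interrupts disabled around the ldx/stx
 * pair, then walks it using the bypass accessors defined above.
 */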

void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		/* Point %sp at the top of the softirq stack, leaving room
		 * for a minimal stack frame and applying the stack bias.
		 */
		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		unsigned long flags;

		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
							     data->affinity,
							     false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume that if the node is not present, the PROM uses a different
	 * tick mechanism which we need not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must have mapped it already. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}
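
/* Layout of the cpu mondo page: the first 64 bytes hold the mondo data
 * block and the remainder holds the u16 cpu list, hence the
 * BUILD_BUG_ON() check that NR_CPUS 16-bit ids fit in PAGE_SIZE - 64
 * bytes.
 */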

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}