Merge ../linux-2.6-watchdog-mm
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index b56e79632f241aaa28fb91935ef3dace2c69924c..944128ce97066ff46bc67cae6a5ce98a3717e9d6 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -1,5 +1,4 @@
-/* $Id: irq.c,v 1.20 2004/01/13 05:52:11 kkojima Exp $
- *
+/*
  * linux/arch/sh/kernel/irq.c
  *
  *     Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
@@ -7,15 +6,20 @@
  *
  * SuperH version:  Copyright (C) 1999  Niibe Yutaka
  */
-
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/io.h>
 #include <asm/irq.h>
 #include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/thread_info.h>
 #include <asm/cpu/mmu_context.h>
 
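+/* Spurious interrupt count; reported as the "Err:" line of /proc/interrupts. */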
+atomic_t irq_err_count;
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves, it doesn't deserve
@@ -23,6 +27,7 @@
  */
 void ack_bad_irq(unsigned int irq)
 {
+       atomic_inc(&irq_err_count);
        printk("unexpected IRQ trap at vector %02x\n", irq);
 }
 
@@ -46,8 +51,10 @@ int show_interrupts(struct seq_file *p, void *v)
                if (!action)
                        goto unlock;
                seq_printf(p, "%3d: ",i);
-               seq_printf(p, "%10u ", kstat_irqs(i));
-               seq_printf(p, " %14s", irq_desc[i].handler->typename);
+               for_each_online_cpu(j)
+                       seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+               seq_printf(p, " %14s", irq_desc[i].chip->name);
+               seq_printf(p, "-%-8s", irq_desc[i].name);
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
@@ -55,39 +62,190 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_putc(p, '\n');
 unlock:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-       }
+       } else if (i == NR_IRQS)
+               seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
+
        return 0;
 }
 #endif
 
+#ifdef CONFIG_4KSTACKS
+/*
+ * per-CPU IRQ handling contexts (thread information and stack)
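+ *
+ * Each context is a THREAD_SIZE-aligned block with the thread_info at the
+ * bottom and the IRQ stack growing down towards it, mirroring the layout
+ * of an ordinary task stack.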
+ */
+union irq_ctx {
+       struct thread_info      tinfo;
+       u32                     stack[THREAD_SIZE/sizeof(u32)];
+};
+
+static union irq_ctx *hardirq_ctx[NR_CPUS];
+static union irq_ctx *softirq_ctx[NR_CPUS];
+#endif
 
 asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
                      unsigned long r6, unsigned long r7,
                      struct pt_regs regs)
 {
-       int irq = r4;
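+       /*
+        * Stash the register frame where handlers can reach it through
+        * get_irq_regs(); the previous value is restored on the way out.
+        */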
+       struct pt_regs *old_regs = set_irq_regs(&regs);
+       int irq;
+#ifdef CONFIG_4KSTACKS
+       union irq_ctx *curctx, *irqctx;
+#endif
 
        irq_enter();
 
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+       /* Debugging check for stack overflow: is there less than STACK_WARN free? */
+       {
+               long sp;
+
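+               /*
+                * r15 is the stack pointer; masking it with THREAD_SIZE - 1
+                * gives its offset from the base of the THREAD_SIZE-aligned
+                * stack, i.e. how close we are to the thread_info at the
+                * bottom.
+                */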
+               __asm__ __volatile__ ("and r15, %0" :
+                                       "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+               if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+                       printk("do_IRQ: stack overflow: %ld\n",
+                              sp - sizeof(struct thread_info));
+                       dump_stack();
+               }
+       }
+#endif
+
 #ifdef CONFIG_CPU_HAS_INTEVT
-       __asm__ __volatile__ (
-#ifdef CONFIG_CPU_HAS_SR_RB
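+       /*
+        * Read the pending event code from INTEVT and convert it into the
+        * kernel's IRQ numbering; event codes are spaced 0x20 apart, hence
+        * the shift by 5.
+        */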
-               "stc    r2_bank, %0\n\t"
+       irq = (ctrl_inl(INTEVT) >> 5) - 16;
 #else
-               "mov.l  @%1, %0\n\t"
-#endif
-               "shlr2  %0\n\t"
-               "shlr2  %0\n\t"
-               "shlr   %0\n\t"
-               "add    #-16, %0\n\t"
-               : "=z" (irq), "=r" (r4)
-               : "1" (INTEVT)
-               : "memory"
-       );
+       irq = r4;
 #endif
 
        irq = irq_demux(irq);
-       __do_IRQ(irq, &regs);
+
+#ifdef CONFIG_4KSTACKS
+       curctx = (union irq_ctx *)current_thread_info();
+       irqctx = hardirq_ctx[smp_processor_id()];
+
+       /*
+        * this is where we switch to the IRQ stack. However, if we are
+        * already using the IRQ stack (because we interrupted a hardirq
+        * handler) we can't do that and just have to keep using the
+        * current stack (which is the irq stack already after all)
+        */
+       if (curctx != irqctx) {
+               u32 *isp;
+
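+               /*
+                * isp is the top of the IRQ stack (it grows down from the
+                * end of the context); the context's thread_info is filled
+                * in so it describes the task being interrupted.
+                */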
+               isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+               irqctx->tinfo.task = curctx->tinfo.task;
+               irqctx->tinfo.previous_sp = current_stack_pointer;
+
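+               /*
+                * Pass the IRQ number in r4, flip r15 onto the IRQ stack in
+                * the jsr delay slot, run generic_handle_irq() there, then
+                * restore the original stack pointer saved in r9.
+                */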
+               __asm__ __volatile__ (
+                       "mov    %0, r4          \n"
+                       "mov    r15, r9         \n"
+                       "jsr    @%1             \n"
+                       /* switch to the irq stack */
+                       " mov   %2, r15         \n"
+                       /* restore the original stack */
+                       "mov    r9, r15         \n"
+                       : /* no outputs */
+                       : "r" (irq), "r" (generic_handle_irq), "r" (isp)
+                       /* XXX: A somewhat excessive clobber list? -PFM */
+                       : "memory", "r0", "r1", "r2", "r3", "r4",
+                         "r5", "r6", "r7", "r8", "r9", "t", "pr"
+               );
+       } else
+#endif
+               generic_handle_irq(irq);
+
        irq_exit();
+
+       set_irq_regs(old_regs);
        return 1;
 }
+
+#ifdef CONFIG_4KSTACKS
+/*
+ * These should really be __section__(".bss.page_aligned") as well, but
+ * gcc 3.0 and earlier don't handle that correctly.
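+ *
+ * Each per-CPU THREAD_SIZE slice of these arrays is itself THREAD_SIZE
+ * aligned, so it can serve directly as an IRQ stack.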
+ */
+static char softirq_stack[NR_CPUS * THREAD_SIZE]
+               __attribute__((__aligned__(THREAD_SIZE)));
+
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
+               __attribute__((__aligned__(THREAD_SIZE)));
+
+/*
+ * allocate per-cpu stacks for hardirq and for softirq processing
+ */
+void irq_ctx_init(int cpu)
+{
+       union irq_ctx *irqctx;
+
+       if (hardirq_ctx[cpu])
+               return;
+
+       irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+
+       hardirq_ctx[cpu] = irqctx;
+
+       irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = SOFTIRQ_OFFSET;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+
+       softirq_ctx[cpu] = irqctx;
+
+       printk("CPU %u irqstacks, hard=%p soft=%p\n",
+               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
+}
+
+void irq_ctx_exit(int cpu)
+{
+       hardirq_ctx[cpu] = NULL;
+}
+
+extern asmlinkage void __do_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+       unsigned long flags;
+       struct thread_info *curctx;
+       union irq_ctx *irqctx;
+       u32 *isp;
+
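+       /*
+        * If we are already in interrupt context, any pending softirqs are
+        * dealt with when the outermost handler returns.
+        */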
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+
+       if (local_softirq_pending()) {
+               curctx = current_thread_info();
+               irqctx = softirq_ctx[smp_processor_id()];
+               irqctx->tinfo.task = curctx->task;
+               irqctx->tinfo.previous_sp = current_stack_pointer;
+
+               /* build the stack frame on the softirq stack */
+               isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
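+               /*
+                * Same trick as in do_IRQ(): the jsr delay slot switches r15
+                * to the softirq stack, __do_softirq() runs there, and the
+                * copy saved in r9 restores the original stack afterwards.
+                */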
+               __asm__ __volatile__ (
+                       "mov    r15, r9         \n"
+                       "jsr    @%0             \n"
+                       /* switch to the softirq stack */
+                       " mov   %1, r15         \n"
+                       /* restore the thread stack */
+                       "mov    r9, r15         \n"
+                       : /* no outputs */
+                       : "r" (__do_softirq), "r" (isp)
+                       /* XXX: A somewhat excessive clobber list? -PFM */
+                       : "memory", "r0", "r1", "r2", "r3", "r4",
+                         "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+               );
+       }
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(do_softirq);
+#endif