/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu-type.h>
#include <asm/fpu_emulator.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);
static void show_raw_backtrace(unsigned long reg29)
	unsigned long *sp = (unsigned long *)(reg29 & ~3);

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
		if (__kernel_text_address(addr))

#ifdef CONFIG_KALLSYMS
static int __init set_raw_show_trace(char *str)
__setup("raw_show_trace", set_raw_show_trace);
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);

	printk("Call Trace:\n");
		pc = unwind_stack(task, &sp, pc, &ra);
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
		printk(" %0*lx", field, stackdata);
	show_backtrace(task, regs);
void show_stack(struct task_struct *task, unsigned long *sp)
	regs.regs[29] = (unsigned long)sp;

	if (task && task != current) {
		regs.regs[29] = task->thread.reg29;
		regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
	} else if (atomic_read(&kgdb_active) != -1 &&
		memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		prepare_frametrace(&regs);

	show_stacktrace(task, &regs);
static void show_code(unsigned int __user *pc)
	unsigned short __user *pc16 = NULL;

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for (i = -3; i < 6; i++) {
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
		printk("%c%0*x%c", (i ? ' ' : '<'), pc16 ? 4 : 8, insn, (i ? ' ' : '>'));
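/*
 * Illustrative only (instruction words made up for the example): the loop
 * above dumps the three words before the faulting pc and five after it,
 * bracketing the faulting word, e.g.:
 *
 *   27bdffe0 afbf0014 0c012345 <8c820000> 00021080 ...
 *
 * In ISA16 (microMIPS/MIPS16) mode pc is odd, so pc16 is used instead and
 * each cell is a 4-digit halfword rather than an 8-digit word.
 */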
static void __show_regs(const struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
			printk(" %0*lx", field, regs->regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("    %s\n", print_tainted());
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x    ", (uint32_t) regs->cp0_status);

		if (regs->cp0_status & ST0_KUO)
		if (regs->cp0_status & ST0_IEO)
		if (regs->cp0_status & ST0_KUP)
		if (regs->cp0_status & ST0_IEP)
		if (regs->cp0_status & ST0_KUC)
		if (regs->cp0_status & ST0_IEC)
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
		if (regs->cp0_status & ST0_SX)
		if (regs->cp0_status & ST0_UX)
	switch (regs->cp0_status & ST0_KSU) {
		printk("SUPERVISOR ");
	if (regs->cp0_status & ST0_ERL)
	if (regs->cp0_status & ST0_EXL)
	if (regs->cp0_status & ST0_IE)

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
	__show_regs((struct pt_regs *)regs);
void show_registers(struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
static int regs_to_trapnr(struct pt_regs *regs)
	return (regs->cp0_cause >> 2) & 0x1f;
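/*
 * Worked example (Cause value assumed for illustration): ExcCode occupies
 * Cause bits 6:2, so a Cause of 0x00000024 yields (0x24 >> 2) & 0x1f == 9,
 * the Bp (breakpoint) exception handled by do_bp() below.
 */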
static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
	static int die_counter;

	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
		       SIGSEGV) == NOTIFY_STOP)

	raw_spin_lock_irq(&die_lock);
	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

		panic("Fatal exception in interrupt");

		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");

	panic("Fatal exception");

	if (regs && kexec_should_crash(current))
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

	"	.section	__dbe_table, \"a\"\n"

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
		e = search_module_dbetables(addr);
asmlinkage void do_be(struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.  Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	case MIPS_BE_DISCARD:
			regs->cp0_epc = fixup->nextinsn;

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
		       SIGBUS) == NOTIFY_STOP)

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

	exception_exit(prev_state);
/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000
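/*
 * Worked example (instruction word chosen purely for illustration): the
 * masks above slice a 32-bit instruction into its fields.  For 0xc0620010,
 * which encodes "ll $2, 16($3)":
 *
 *   0xc0620010 & OPCODE        == 0xc0000000 == LL
 *   (0xc0620010 & BASE) >> 21  == 3          (base register $3)
 *   (0xc0620010 & RT) >> 16    == 2          (target register $2)
 *   0xc0620010 & OFFSET        == 0x0010     (displacement of 16 bytes)
 */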
/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
	unsigned long value, __user *vaddr;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and compute the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
	if (get_user(value, vaddr))

	if (ll_task == NULL || ll_task == current) {

	regs->regs[(opcode & RT) >> 16] = value;
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
	unsigned long __user *vaddr;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and compute the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)

	if (ll_bit == 0 || ll_task != current) {

	if (put_user(regs->regs[reg], vaddr))
/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors. That's the theory. In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
		return simulate_ll(regs, opcode);
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
		return simulate_sc(regs, opcode);

	return -1;			/* Must be something else ... */
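/*
 * Sketch of the user-space sequence being emulated (illustrative assembly,
 * not code from this file):
 *
 *   1:  ll   t0, (a0)      # simulate_ll: load value, set ll_bit, ll_task
 *       addu t0, t0, 1
 *       sc   t0, (a0)      # simulate_sc: store only if ll_bit is still set
 *       beqz t0, 1b        # retry if the "link" was broken
 *
 * The atomicity illusion holds because ll_bit is cleared on every context
 * switch (by r*_switch.S, as noted above), so an intervening task forces
 * the emulated sc to fail and the loop to retry.
 */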
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
	case 0:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
	case 1:		/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
	case 2:		/* Read count register */
		regs->regs[rt] = read_c0_count();
	case 3:		/* Count register resolution */
		switch (current_cpu_type()) {
		regs->regs[rt] = ti->tp_value;
static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;

		simulate_rdhwr(regs, rd, rt);
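/*
 * Worked example for the classic encoding above (word chosen for
 * illustration): 0x7c03e83b is "rdhwr $3, $29".  It satisfies
 * (opcode & OPCODE) == SPEC3 and (opcode & FUNC) == RDHWR, then
 *
 *   rd = (0x7c03e83b & RD) >> 11 == 29   (hardware register: ULR / TLS)
 *   rt = (0x7c03e83b & RT) >> 16 == 3    (destination GPR)
 *
 * so simulate_rdhwr() above writes ti->tp_value into regs->regs[3].
 */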
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,

	return -1;			/* Must be something else ... */
asmlinkage void do_ov(struct pt_regs *regs)
	enum ctx_state prev_state;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
int process_fpemu_return(int sig, void __user *fault_addr)
	/*
	 * We can't allow the emulated instruction to leave any of the cause
	 * bits set in FCSR. If they were then the kernel would take an FP
	 * exception when restoring FP context.
	 */
	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

	if (sig == SIGSEGV || sig == SIGBUS) {
		struct siginfo si = {0};
		si.si_addr = fault_addr;

		if (sig == SIGSEGV) {
			down_read(&current->mm->mmap_sem);
			if (find_vma(current->mm, (unsigned long)fault_addr))
				si.si_code = SEGV_ACCERR;
				si.si_code = SEGV_MAPERR;
			up_read(&current->mm->mmap_sem);
			si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);

		force_sig(sig, current);
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr = NULL;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,

	/* If something went wrong, signal */
	process_fpemu_return(sig, fault_addr);

	/* Restore the hardware register state */
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
	enum ctx_state prev_state;
	siginfo_t info = {0};

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
		       SIGFPE) == NOTIFY_STOP)

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		void __user *fault_addr = NULL;

		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' does not overwrite the saved FP context again. */

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,

		/* If something went wrong, signal */
		process_fpemu_return(sig, fault_addr);

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.  */

	} else if (fcr31 & FPU_CSR_INV_X)
		info.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		info.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		info.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		info.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = __SI_FAULT;
	info.si_signo = SIGFPE;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);

	exception_exit(prev_state);
void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
		       SIGTRAP) == NOTIFY_STOP)

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);

		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);

		/*
		 * Address errors may be deliberately induced by the FPU
		 * emulator to retake control of the CPU after executing the
		 * instruction in the delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);

		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
asmlinkage void do_bp(struct pt_regs *regs)
	unsigned int opcode, bcode;
	enum ctx_state prev_state;

	if (!user_mode(regs))

	prev_state = exception_enter();
	if (get_isa16_mode(regs->cp0_epc)) {
		epc = exception_epc(regs);

		if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
		     (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
			opcode = (instr[0] << 16) | instr[1];

			if (__get_user(instr[0],
				       (u16 __user *)msk_isa16_mode(epc)))
			bcode = (instr[0] >> 6) & 0x3f;
			do_trap_or_bp(regs, bcode, "Break");

		if (__get_user(opcode,
			       (unsigned int __user *) exception_epc(regs)))

	/*
	 * There is an ancient bug in the MIPS assemblers: the break
	 * code starts at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode >= (1 << 10))
		bcode >>= 10;
955 if (notify_die(DIE_BREAK, "debug", regs, bcode,
956 regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
960 case BRK_KPROBE_SSTEPBP:
961 if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
962 regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
970 do_trap_or_bp(regs, bcode, "Break");
974 exception_exit(prev_state);
978 force_sig(SIGSEGV, current);
asmlinkage void do_tr(struct pt_regs *regs)
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	if (!user_mode(regs))

	prev_state = exception_enter();
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);

		if (__get_user(opcode, (u32 __user *)epc))
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);

	do_trap_or_bp(regs, tcode, "Trap");

	exception_exit(prev_state);

	force_sig(SIGSEGV, current);
asmlinkage void do_ri(struct pt_regs *regs)
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;

	/*
	 * Avoid any kernel code. Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs))) {
		if (likely(get_user(opcode, epc) >= 0)) {
			status = mipsr2_decoder(regs, opcode);
				task_thread_info(current)->r2_emul_return = 1;
				process_fpemu_return(status,
						     &current->thread.cp0_baduaddr);
				task_thread_info(current)->r2_emul_return = 1;

	prev_state = exception_enter();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
		       SIGILL) == NOTIFY_STOP)

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))

	if (get_isa16_mode(regs->cp0_epc)) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], epc) < 0))
		if (unlikely(get_user(mmop[1], epc) < 0))
		opcode = (mmop[0] << 16) | mmop[1];

			status = simulate_rdhwr_mm(regs, opcode);

		if (unlikely(get_user(opcode, epc) < 0))

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

			status = simulate_rdhwr_normal(regs, opcode);

			status = simulate_sync(regs, opcode);

			status = simulate_fp(regs, opcode, old_epc, old31);

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);

	exception_exit(prev_state);
/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpus_and(tmask, current->cpus_allowed,
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
	return raw_notifier_chain_register(&cu2_chain, nb);

int cu2_notifier_call_chain(unsigned long val, void *v)
	return raw_notifier_call_chain(&cu2_chain, val, v);

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
		      "instruction", regs);
	force_sig(SIGILL, current);
static int wait_on_fp_mode_switch(atomic_t *p)
	/*
	 * The FP mode for this task is currently being switched. That may
	 * involve modifications to the format of this task's FP context which
	 * make it unsafe to proceed with execution for the moment. Instead,
	 * schedule some other task.
	 */

static int enable_restore_fp_context(int msa)
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

		/* First time FP context user. */
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);

	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {

		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */

		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
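/*
 * Summary of the cases handled above (restated for clarity, not new
 * behaviour):
 *
 *   !msa && no live MSA context  -> restore scalar FP context only
 *   first MSA use, was FP owner  -> keep FP registers, zero vector upper 64b
 *   first MSA use, not FP owner  -> restore FP into the lower 64b, zero upper
 *   prior MSA context            -> restore_msa() restores the full vectors
 *
 * Every MSA case requires Status.FR == 1, hence own_fpu_inatomic(0).
 */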
asmlinkage void do_cpu(struct pt_regs *regs)
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	unsigned int opcode;
	unsigned long __maybe_unused flags;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

		die_if_kernel("do_cpu invoked from kernel context!", regs);

		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];

		if (unlikely(compute_return_epc(regs) < 0))

		if (get_isa16_mode(regs->cp0_epc)) {
			unsigned short mmop[2] = { 0 };

			if (unlikely(get_user(mmop[0], epc) < 0))
			if (unlikely(get_user(mmop[1], epc) < 0))
			opcode = (mmop[0] << 16) | mmop[1];

				status = simulate_rdhwr_mm(regs, opcode);

			if (unlikely(get_user(opcode, epc) < 0))

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);

				status = simulate_rdhwr_normal(regs, opcode);

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);

		/*
		 * Old (MIPS I and MIPS II) processors will set this code
		 * for COP1X opcode instructions that replaced the original
		 * COP3 space.  We don't limit COP1 space instructions in
		 * the emulator according to the CPU ISA, so we want to
		 * treat COP1X instructions consistently regardless of which
		 * code the CPU chose.  Therefore we redirect this trap to
		 * the FP emulator too.
		 *
		 * Then some newer FPU-less processors use this code
		 * erroneously too, so they are covered by this choice
		 */
		if (raw_cpu_has_fpu)

		err = enable_restore_fp_context(0);

		if (!raw_cpu_has_fpu || err) {
			void __user *fault_addr = NULL;
			sig = fpu_emulator_cop1Handler(regs,
						       &current->thread.fpu,
			if (!process_fpemu_return(sig, fault_addr) && !err)
				mt_ase_fp_affinity();

		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);

		force_sig(SIGILL, current);

	exception_exit(prev_state);
asmlinkage void do_msa_fpe(struct pt_regs *regs)
	enum ctx_state prev_state;

	prev_state = exception_enter();
	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
	exception_exit(prev_state);

asmlinkage void do_msa(struct pt_regs *regs)
	enum ctx_state prev_state;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
		force_sig(SIGILL, current);
	exception_exit(prev_state);

asmlinkage void do_mdmx(struct pt_regs *regs)
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP. Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		force_sig(SIGTRAP, current);
		mips_clear_watch_registers();

	exception_exit(prev_state);
asmlinkage void do_mcheck(struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;

	prev_state = exception_enter();

		pr_err("Index   : %0x\n", read_c0_index());
		pr_err("Pagemask: %0x\n", read_c0_pagemask());
		pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
		pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		pr_err("Wired   : %0x\n", read_c0_wired());
		pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
			pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
			pr_err("PWSize  : %0*lx\n", field, read_c0_pwsize());
			pr_err("PWCtl   : %0x\n", read_c0_pwctl());

	show_code((unsigned int __user *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timeout)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
asmlinkage void do_mt(struct pt_regs *regs)
	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
		printk(KERN_DEBUG "Thread Underflow\n");
		printk(KERN_DEBUG "Thread Overflow\n");
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		printk(KERN_DEBUG "Gating Storage Exception\n");
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);

asmlinkage void do_dsp(struct pt_regs *regs)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);

asmlinkage void do_reserved(struct pt_regs *regs)
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
__setup("nol2par", nol2parity);
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
	switch (current_cpu_type()) {
	case CPU_INTERAPTIV:
	case CPU_QEMU_GENERIC:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
		unsigned long errctl;
		unsigned int l1parity_present, l2parity_present;

		errctl = read_c0_ecc();
		errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

		/* probe L1 parity support */
		write_c0_ecc(errctl | ERRCTL_PE);
		back_to_back_c0_hazard();
		l1parity_present = (read_c0_ecc() & ERRCTL_PE);

		/* probe L2 parity support */
		write_c0_ecc(errctl|ERRCTL_L2P);
		back_to_back_c0_hazard();
		l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

		if (l1parity_present && l2parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
			if (l1parity ^ l2parity)
				errctl |= ERRCTL_L2P;
		} else if (l1parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
		} else if (l2parity_present) {
			if (l2parity)
				errctl |= ERRCTL_L2P;
		} else {
			/* No parity available */
		}

		printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

		write_c0_ecc(errctl);
		back_to_back_c0_hazard();
		errctl = read_c0_ecc();
		printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

		if (l1parity_present)
			printk(KERN_INFO "Cache parity protection %sabled\n",
			       (errctl & ERRCTL_PE) ? "en" : "dis");

		if (l2parity_present) {
			if (l1parity_present && l1parity)
				errctl ^= ERRCTL_L2P;
			printk(KERN_INFO "L2 cache parity protection %sabled\n",
			       (errctl & ERRCTL_L2P) ? "en" : "dis");
		}

		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");

		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
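/*
 * The probe idiom used above, restated: write a candidate ErrCtl bit, let
 * the write settle behind a back_to_back_c0_hazard() barrier, then read it
 * back.  The bit sticks only if the CPU actually implements that parity
 * control:
 *
 *   write_c0_ecc(errctl | ERRCTL_PE);
 *   back_to_back_c0_hazard();
 *   l1parity_present = (read_c0_ecc() & ERRCTL_PE);
 */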
asmlinkage void cache_parity_error(void)
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
asmlinkage void do_ftlb(void)
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
	return raw_notifier_chain_register(&nmi_chain, nb);

void __noreturn nmi_exception_handler(struct pt_regs *regs)
	raw_notifier_call_chain(&nmi_chain, 0, regs);

	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
void __init *set_except_vector(int n, void *addr)
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
static void do_default_vi(void)
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

		handler = (unsigned long) do_default_vi;

		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

		panic("Shadow register set %d not supported", srs);

		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
			change_c0_srsmap(0xf << n*4, srs << n*4);

		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	} else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
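/*
 * Worked example (handler address assumed for illustration): with a
 * handler at 0x80401230 the non-microMIPS branch above synthesizes a
 * MIPS "j" instruction:
 *
 *   insn = 0x08000000 | ((0x80401230 & 0x0fffffff) >> 2) == 0x0810048c
 *
 * written into the vector as two halfwords.  The lui/ori path instead
 * patches the address into a prebuilt stub: 0x8040 into the lui immediate
 * and 0x1230 into the ori immediate.  Both paths finish with
 * local_flush_icache_range() so the rewritten vector is fetched coherently.
 */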
void *set_vi_handler(int n, vi_handler_t addr)
	return set_vi_srs_handler(n, addr, 0);

extern void tlb_init(void);

int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

static int __init ulri_disable(char *s)
	pr_info("Disabling ulri\n");
__setup("noulri", ulri_disable);
/* configure STATUS register */
static void configure_status(void)
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
		status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
/* configure HWRENA register */
static void configure_hwrena(void)
	unsigned int hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= 0x0000000f;

	if (!noulri && cpu_has_userlocal)
		hwrena |= (1 << 29);

		write_c0_hwrena(hwrena);
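/*
 * Tie-in with the rdhwr emulation earlier in this file (restated): HWREna
 * bits 0-3 let user mode read hardware registers 0-3 (CPUNum, SYNCI_Step,
 * CC, CCRes) directly, and bit 29 exposes ULR for fast TLS access.
 * Whatever stays disabled here keeps trapping as a Reserved Instruction
 * and is handled by simulate_rdhwr() instead.
 */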
static void configure_exception_vector(void)
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
void per_cpu_trap_init(bool is_boot_cpu)
	unsigned int cpu = smp_processor_id();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		if (cp0_perfcount_irq == cp0_compare_irq)
			cp0_perfcount_irq = -1;
	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
	}
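	/*
	 * Worked example (IntCtl value assumed): IPTI lives in IntCtl bits
	 * 31:29 and IPPCI in bits 28:26, so IntCtl == 0xec000000 gives
	 * cp0_compare_irq == 7 and cp0_perfcount_irq == 3.  Had both fields
	 * read 7, the performance counter would share the timer line and
	 * cp0_perfcount_irq would be forced to -1 as above.
	 */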
	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch().  */

	TLBMISS_HANDLER_SETUP();
/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
static char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

		panic(panic_null_cerr);
	memcpy((void *)(uncached_ebase + offset), addr, size);

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;

#if defined(CONFIG_KGDB)
	if (kgdb_early_setup)
		return;	/* Already done */
#endif

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
	} else {
#ifdef CONFIG_KVM_GUEST
#define KVM_GUEST_KSEG0     0x40000000
		ebase = KVM_GUEST_KSEG0;
#else
		ebase = CKSEG0;
#endif
		if (cpu_has_mips_r2_r6)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * machine.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	set_except_vector(0, using_rollback_handler() ? rollback_handle_int
						      : handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);
	set_except_vector(14, handle_msa_fpe);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(16, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(19, tlb_do_page_fault_0);
		set_except_vector(20, tlb_do_page_fault_0);
	}

	set_except_vector(21, handle_msa);
	set_except_vector(22, handle_mdmx);

		set_except_vector(24, handle_mcheck);

		set_except_vector(25, handle_mt);

		set_except_vector(26, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
	case CPU_PM_ENTER_FAILED:
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
	return cpu_pm_register_notifier(&trap_pm_notifier_block);

arch_initcall(trap_pm_init);