2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1992 Ross Biro
7 * Copyright (C) Linus Torvalds
8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
9 * Copyright (C) 1996 David S. Miller
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 1999 MIPS Technologies, Inc.
12 * Copyright (C) 2000 Ulf Carlsson
14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/elf.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
23 #include <linux/errno.h>
24 #include <linux/ptrace.h>
25 #include <linux/regset.h>
26 #include <linux/smp.h>
27 #include <linux/security.h>
28 #include <linux/tracehook.h>
29 #include <linux/audit.h>
30 #include <linux/seccomp.h>
31 #include <linux/ftrace.h>
33 #include <asm/byteorder.h>
37 #include <asm/mipsregs.h>
38 #include <asm/mipsmtregs.h>
39 #include <asm/pgtable.h>
41 #include <asm/syscall.h>
42 #include <asm/uaccess.h>
43 #include <asm/bootinfo.h>
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/syscalls.h>
50 * Called by kernel/ptrace.c when detaching..
52 * Make sure single step bits etc are not set.
/*
 * ptrace_disable - detach-time cleanup for a traced task.
 *
 * Called by kernel/ptrace.c when the tracer detaches (see the comment
 * block above).  Clears TIF_LOAD_WATCH so the context-switch path stops
 * loading hardware watchpoint registers for the ex-tracee.
 *
 * NOTE(review): this excerpt elides the function's braces relative to
 * the upstream file; the visible code lines are kept byte-identical.
 */
54 void ptrace_disable(struct task_struct *child)
56 /* Don't load the watchpoint registers for the ex-child. */
57 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
61 * Read a general register set. We always use the 64-bit format, even
62 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
63 * Registers are sign extended to fill the available space.
/*
 * ptrace_getregs - copy the child's general registers to userspace.
 *
 * Always uses the 64-bit layout (see comment above): 38 slots of 8 bytes
 * = 32 GPRs + lo + hi + epc + badvaddr + status + cause, hence the
 * access_ok(..., 38 * 8) size check.  Each value is cast through (long)
 * and stored as __s64 so 32-bit quantities are sign-extended to fill
 * the 64-bit slot.
 *
 * NOTE(review): declarations of regs/i, the -EIO early return and the
 * final return are elided in this excerpt — confirm against upstream.
 */
65 int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
70 if (!access_ok(VERIFY_WRITE, data, 38 * 8))
73 regs = task_pt_regs(child);
75 for (i = 0; i < 32; i++)
76 __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
77 __put_user((long)regs->lo, (__s64 __user *)&data->lo);
78 __put_user((long)regs->hi, (__s64 __user *)&data->hi);
79 __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
80 __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
81 __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
82 __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
88 * Write a general register set. As for PTRACE_GETREGS, we always use
89 * the 64-bit format. On a 32-bit kernel only the lower order half
90 * (according to endianness) will be used.
/*
 * ptrace_setregs - overwrite the child's general registers from userspace.
 *
 * Mirror of ptrace_getregs: reads the same 38 * 8 byte 64-bit layout.
 * Only the GPRs, lo, hi and epc are written back; the CP0 badvaddr,
 * status and cause slots are deliberately ignored (comment on the last
 * line) so a tracer cannot forge exception state.
 *
 * NOTE(review): local declarations and the return path are elided in
 * this excerpt; code lines kept byte-identical.
 */
92 int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
97 if (!access_ok(VERIFY_READ, data, 38 * 8))
100 regs = task_pt_regs(child);
102 for (i = 0; i < 32; i++)
103 __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
104 __get_user(regs->lo, (__s64 __user *)&data->lo);
105 __get_user(regs->hi, (__s64 __user *)&data->hi);
106 __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
108 /* badvaddr, status, and cause may not be written. */
/*
 * ptrace_getfpregs - copy the child's FPU state to userspace.
 *
 * Layout: 32 x 64-bit FP registers followed by fcr31 (FP control/status)
 * and the read-only FPU implementation register, 33 * 8 bytes total.
 * If the child has never used the FPU, all 32 register slots are filled
 * with the all-ones pattern instead of stale data.
 *
 * 'data' is a __u32 pointer, so data + 64 / data + 65 land just past the
 * 32 eight-byte register slots (64 * 4 == 32 * 8 bytes).
 */
113 int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
117 if (!access_ok(VERIFY_WRITE, data, 33 * 8))
120 if (tsk_used_math(child)) {
121 union fpureg *fregs = get_fpu_regs(child);
122 for (i = 0; i < 32; i++)
123 __put_user(get_fpr64(&fregs[i], 0),
124 i + (__u64 __user *)data);
126 for (i = 0; i < 32; i++)
127 __put_user((__u64) -1, i + (__u64 __user *) data);
130 __put_user(child->thread.fpu.fcr31, data + 64);
131 __put_user(boot_cpu_data.fpu_id, data + 65);
/*
 * ptrace_setfpregs - overwrite the child's FPU state from userspace.
 *
 * Reads the same 33 * 8 byte layout as ptrace_getfpregs: 32 FP registers
 * via set_fpr64(), then fcr31.  The FPU implementation register (FIR) at
 * slot 65 is read-only and intentionally skipped.
 *
 * NOTE(review): fcr31 is copied in unmasked here.  Upstream later masked
 * out the non-writable/cause bits (FPU_CSR_ALL_X etc.) because letting a
 * tracer set FCSR cause bits can raise an FP exception when the kernel
 * restores the FPU context — confirm this copy against a current tree.
 */
136 int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
142 if (!access_ok(VERIFY_READ, data, 33 * 8))
145 fregs = get_fpu_regs(child);
147 for (i = 0; i < 32; i++) {
148 __get_user(fpr_val, i + (__u64 __user *)data);
149 set_fpr64(&fregs[i], 0, fpr_val);
152 __get_user(child->thread.fpu.fcr31, data + 64);
154 /* FIR may not be written. */
/*
 * ptrace_get_watch_regs - report hardware watchpoint registers to the
 * tracer (PTRACE_GET_WATCH_REGS).
 *
 * Fails early when the CPU has no watch registers.  The reported style
 * (pt_watch_style_mips32 vs mips64) selects which union member of
 * struct pt_watch_regs is filled; the WATCH_STYLE macro is defined per
 * configuration so the same copy-out code serves both layouts.
 *
 * watchhi is masked with 0xfff: only the low condition/mask bits are
 * architecturally meaningful to report.  The trailing loop (elided head
 * visible at the three final __put_user calls) zeroes the unused slots
 * beyond watch_reg_use_cnt.
 *
 * NOTE(review): the #ifdef lines selecting the style and several braces
 * are elided in this excerpt; code lines kept byte-identical.
 */
159 int ptrace_get_watch_regs(struct task_struct *child,
160 struct pt_watch_regs __user *addr)
162 enum pt_watch_style style;
165 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
167 if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
171 style = pt_watch_style_mips32;
172 #define WATCH_STYLE mips32
174 style = pt_watch_style_mips64;
175 #define WATCH_STYLE mips64
178 __put_user(style, &addr->style);
179 __put_user(boot_cpu_data.watch_reg_use_cnt,
180 &addr->WATCH_STYLE.num_valid);
181 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
182 __put_user(child->thread.watch.mips3264.watchlo[i],
183 &addr->WATCH_STYLE.watchlo[i]);
184 __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
185 &addr->WATCH_STYLE.watchhi[i]);
186 __put_user(boot_cpu_data.watch_reg_masks[i],
187 &addr->WATCH_STYLE.watch_masks[i]);
190 __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
191 __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
192 __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
/*
 * ptrace_set_watch_regs - install hardware watchpoints for the child
 * (PTRACE_SET_WATCH_REGS).
 *
 * Two-pass design: first VALIDATE every requested watchlo address into
 * the local lt[]/ht[] arrays — a kernel-space address (bits in
 * __UA_LIMIT set, or above 2 GiB for a 32-bit-address task) must be
 * rejected before anything is committed — then copy the checked values
 * into child->thread.watch in a second loop.  This keeps the child's
 * state consistent if validation fails partway through.
 *
 * TIF_LOAD_WATCH is set iff at least one watchpoint is active so the
 * resume path knows whether to program the hardware registers.
 *
 * NOTE(review): -EIO/-EINVAL returns, the watch_active update and some
 * braces are elided in this excerpt; code lines kept byte-identical.
 */
198 int ptrace_set_watch_regs(struct task_struct *child,
199 struct pt_watch_regs __user *addr)
202 int watch_active = 0;
203 unsigned long lt[NUM_WATCH_REGS];
204 u16 ht[NUM_WATCH_REGS];
206 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
208 if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
210 /* Check the values. */
211 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
212 __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
214 if (lt[i] & __UA_LIMIT)
217 if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
218 if (lt[i] & 0xffffffff80000000UL)
221 if (lt[i] & __UA_LIMIT)
225 __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
230 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
233 child->thread.watch.mips3264.watchlo[i] = lt[i];
235 child->thread.watch.mips3264.watchhi[i] = ht[i];
239 set_tsk_thread_flag(child, TIF_LOAD_WATCH);
241 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
246 /* regset get/set implementations */
248 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
/*
 * gpr32_get - regset .get handler: export GPRs in the 32-bit (o32) ELF
 * core-dump layout.
 *
 * Builds a zero-initialized u32 uregs[ELF_NGREG] image indexed by the
 * MIPS32_EF_* offsets, then hands it to user_regset_copyout() which
 * honors the caller's pos/count window and kernel/user destination.
 * k0/k1 ($26/$27) are kernel scratch registers and are exported as
 * zero rather than leaking kernel values.
 */
250 static int gpr32_get(struct task_struct *target,
251 const struct user_regset *regset,
252 unsigned int pos, unsigned int count,
253 void *kbuf, void __user *ubuf)
255 struct pt_regs *regs = task_pt_regs(target);
256 u32 uregs[ELF_NGREG] = {};
259 for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
260 /* k0/k1 are copied as zero. */
261 if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
264 uregs[i] = regs->regs[i - MIPS32_EF_R0];
267 uregs[MIPS32_EF_LO] = regs->lo;
268 uregs[MIPS32_EF_HI] = regs->hi;
269 uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
270 uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
271 uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
272 uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
274 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
/*
 * gpr32_set - regset .set handler: import GPRs from the 32-bit (o32)
 * ELF layout into the child's pt_regs.
 *
 * The pos/count window is converted to a [start, start+num_regs) range
 * of u32 slots, bounds-checked against ELF_NGREG, and copied in via
 * user_regset_copyin().  Each accepted value is cast to (s32) so that
 * on a 64-bit kernel the 32-bit user value is sign-extended (comment
 * inside the loop).  k0/k1 writes are silently ignored; badvaddr,
 * status and cause are not settable (cases elided from this excerpt).
 *
 * NOTE(review): the switch header, break statements, the err check and
 * the final return are elided here; code lines kept byte-identical.
 */
278 static int gpr32_set(struct task_struct *target,
279 const struct user_regset *regset,
280 unsigned int pos, unsigned int count,
281 const void *kbuf, const void __user *ubuf)
283 struct pt_regs *regs = task_pt_regs(target);
284 u32 uregs[ELF_NGREG];
285 unsigned start, num_regs, i;
288 start = pos / sizeof(u32);
289 num_regs = count / sizeof(u32);
291 if (start + num_regs > ELF_NGREG)
294 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
299 for (i = start; i < num_regs; i++) {
301 * Cast all values to signed here so that if this is a 64-bit
302 * kernel, the supplied 32-bit values will be sign extended.
305 case MIPS32_EF_R1 ... MIPS32_EF_R25:
306 /* k0/k1 are ignored. */
307 case MIPS32_EF_R28 ... MIPS32_EF_R31:
308 regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
311 regs->lo = (s32)uregs[i];
314 regs->hi = (s32)uregs[i];
316 case MIPS32_EF_CP0_EPC:
317 regs->cp0_epc = (s32)uregs[i];
325 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
/*
 * gpr64_get - regset .get handler: export GPRs in the native 64-bit
 * ELF core-dump layout (u64 slots, MIPS64_EF_* indices).
 *
 * Structure parallels gpr32_get: zero-filled image, k0/k1 exported as
 * zero, special registers appended, then user_regset_copyout() applies
 * the caller's pos/count window.
 */
329 static int gpr64_get(struct task_struct *target,
330 const struct user_regset *regset,
331 unsigned int pos, unsigned int count,
332 void *kbuf, void __user *ubuf)
334 struct pt_regs *regs = task_pt_regs(target);
335 u64 uregs[ELF_NGREG] = {};
338 for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
339 /* k0/k1 are copied as zero. */
340 if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
343 uregs[i] = regs->regs[i - MIPS64_EF_R0];
346 uregs[MIPS64_EF_LO] = regs->lo;
347 uregs[MIPS64_EF_HI] = regs->hi;
348 uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
349 uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
350 uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
351 uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
353 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
/*
 * gpr64_set - regset .set handler: import GPRs from the native 64-bit
 * ELF layout into the child's pt_regs.
 *
 * Same windowing scheme as gpr32_set but with u64 slots and no sign
 * extension (values are already full width).  k0/k1 writes are ignored;
 * lo/hi/badvaddr/status/cause cases are elided from this excerpt.
 *
 * NOTE(review): the switch header, breaks, err handling and return are
 * elided here; code lines kept byte-identical.
 */
357 static int gpr64_set(struct task_struct *target,
358 const struct user_regset *regset,
359 unsigned int pos, unsigned int count,
360 const void *kbuf, const void __user *ubuf)
362 struct pt_regs *regs = task_pt_regs(target);
363 u64 uregs[ELF_NGREG];
364 unsigned start, num_regs, i;
367 start = pos / sizeof(u64);
368 num_regs = count / sizeof(u64);
370 if (start + num_regs > ELF_NGREG)
373 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
378 for (i = start; i < num_regs; i++) {
380 case MIPS64_EF_R1 ... MIPS64_EF_R25:
381 /* k0/k1 are ignored. */
382 case MIPS64_EF_R28 ... MIPS64_EF_R31:
383 regs->regs[i - MIPS64_EF_R0] = uregs[i];
391 case MIPS64_EF_CP0_EPC:
392 regs->cp0_epc = uregs[i];
400 #endif /* CONFIG_64BIT */
/*
 * fpr_get - regset .get handler for the FP register set.
 *
 * Fast path: when the in-kernel FPR storage width equals elf_fpreg_t,
 * the whole thread FPU area can be copied out in one call.  Otherwise
 * (e.g. 128-bit MSA-widened fpureg storage) each register's low 64 bits
 * are extracted with get_fpr64() and copied out one slot at a time.
 *
 * NOTE(review): local declarations, the err check inside the loop and
 * the final return are elided in this excerpt.
 */
402 static int fpr_get(struct task_struct *target,
403 const struct user_regset *regset,
404 unsigned int pos, unsigned int count,
405 void *kbuf, void __user *ubuf)
413 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
414 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
416 0, sizeof(elf_fpregset_t));
418 for (i = 0; i < NUM_FPU_REGS; i++) {
419 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
420 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
421 &fpr_val, i * sizeof(elf_fpreg_t),
422 (i + 1) * sizeof(elf_fpreg_t));
/*
 * fpr_set - regset .set handler for the FP register set; mirror of
 * fpr_get.
 *
 * Fast path copies the whole elf_fpregset_t straight into the thread's
 * FPU area when widths match; the slow path pulls one 64-bit value per
 * register and installs it with set_fpr64() into the (possibly wider)
 * in-kernel fpureg slot.
 *
 * NOTE(review): declarations, per-iteration err checks and the return
 * are elided in this excerpt; code lines kept byte-identical.
 */
430 static int fpr_set(struct task_struct *target,
431 const struct user_regset *regset,
432 unsigned int pos, unsigned int count,
433 const void *kbuf, const void __user *ubuf)
441 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
442 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
444 0, sizeof(elf_fpregset_t));
446 for (i = 0; i < NUM_FPU_REGS; i++) {
447 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
448 &fpr_val, i * sizeof(elf_fpreg_t),
449 (i + 1) * sizeof(elf_fpreg_t));
452 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
463 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
/*
 * 32-bit (o32) regset table and view: NT_PRSTATUS backed by the
 * gpr32_get/gpr32_set handlers above (unsigned int slots) and
 * NT_PRFPREG backed by fpr_get/fpr_set.  user_mips_view wires the
 * table to ELF identity fields for core dumps and PTRACE_GETREGSET.
 *
 * NOTE(review): the .n/.get/.set initializer lines are elided in this
 * excerpt; visible lines kept byte-identical.
 */
465 static const struct user_regset mips_regsets[] = {
467 .core_note_type = NT_PRSTATUS,
469 .size = sizeof(unsigned int),
470 .align = sizeof(unsigned int),
475 .core_note_type = NT_PRFPREG,
477 .size = sizeof(elf_fpreg_t),
478 .align = sizeof(elf_fpreg_t),
484 static const struct user_regset_view user_mips_view = {
486 .e_machine = ELF_ARCH,
487 .ei_osabi = ELF_OSABI,
488 .regsets = mips_regsets,
489 .n = ARRAY_SIZE(mips_regsets),
492 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
/*
 * 64-bit regset table and view: same shape as the o32 pair above but
 * with unsigned long (64-bit) GPR slots, backed by gpr64_get/gpr64_set
 * and fpr_get/fpr_set.
 *
 * NOTE(review): the .n/.get/.set initializer lines are elided in this
 * excerpt; visible lines kept byte-identical.
 */
496 static const struct user_regset mips64_regsets[] = {
498 .core_note_type = NT_PRSTATUS,
500 .size = sizeof(unsigned long),
501 .align = sizeof(unsigned long),
506 .core_note_type = NT_PRFPREG,
508 .size = sizeof(elf_fpreg_t),
509 .align = sizeof(elf_fpreg_t),
515 static const struct user_regset_view user_mips64_view = {
517 .e_machine = ELF_ARCH,
518 .ei_osabi = ELF_OSABI,
519 .regsets = mips64_regsets,
520 .n = ARRAY_SIZE(mips64_regsets),
523 #endif /* CONFIG_64BIT */
/*
 * task_user_regset_view - choose the regset view matching the task's
 * ABI: the 32-bit view on 32-bit kernels and for o32 tasks
 * (TIF_32BIT_REGS) on 64-bit kernels, otherwise the 64-bit view.
 *
 * NOTE(review): the surrounding #ifdef CONFIG_32BIT/CONFIG_64BIT lines
 * are elided in this excerpt; visible lines kept byte-identical.
 */
525 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
528 return &user_mips_view;
530 #ifdef CONFIG_MIPS32_O32
531 if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
532 return &user_mips_view;
534 return &user_mips64_view;
/*
 * arch_ptrace - MIPS-specific ptrace request dispatcher.
 *
 * Handles PEEK/POKE of text, data and the USER pseudo-area (GPRs, FPRs,
 * CP0 registers, DSP registers), the bulk GET/SETREGS and GET/SETFPREGS
 * requests, the thread pointer, and the watch registers, falling back
 * to the generic ptrace_request() for everything else.
 *
 * NOTE(review): this excerpt elides the switch headers, breaks, several
 * case labels and error paths; visible code lines are kept byte-identical.
 */
538 long arch_ptrace(struct task_struct *child, long request,
539 unsigned long addr, unsigned long data)
/* Three typed views of the same user buffer argument. */
542 void __user *addrp = (void __user *) addr;
543 void __user *datavp = (void __user *) data;
544 unsigned long __user *datalp = (void __user *) data;
547 /* when I and D space are separate, these will need to be fixed. */
548 case PTRACE_PEEKTEXT: /* read word at location addr. */
549 case PTRACE_PEEKDATA:
550 ret = generic_ptrace_peekdata(child, addr, data);
553 /* Read the word at location addr in the USER area. */
554 case PTRACE_PEEKUSR: {
555 struct pt_regs *regs;
557 unsigned long tmp = 0;
559 regs = task_pt_regs(child);
560 ret = 0; /* Default return value. */
/* addr selects a pseudo-register; GPR case (elided label) first. */
564 tmp = regs->regs[addr];
566 case FPR_BASE ... FPR_BASE + 31:
567 if (!tsk_used_math(child)) {
568 /* FP not yet used */
572 fregs = get_fpu_regs(child);
575 if (test_thread_flag(TIF_32BIT_FPREGS)) {
577 * The odd registers are actually the high
578 * order bits of the values stored in the even
579 * registers - unless we're using r2k_switch.S.
/* Odd reg in FR=0 mode: read high half of the paired even register. */
581 tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
586 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
592 tmp = regs->cp0_cause;
595 tmp = regs->cp0_badvaddr;
603 #ifdef CONFIG_CPU_HAS_SMARTMIPS
609 tmp = child->thread.fpu.fcr31;
612 /* implementation / version register */
613 tmp = boot_cpu_data.fpu_id;
615 case DSP_BASE ... DSP_BASE + 5: {
623 dregs = __get_dsp_regs(child);
624 tmp = (unsigned long) (dregs[addr - DSP_BASE]);
633 tmp = child->thread.dsp.dspcontrol;
640 ret = put_user(tmp, datalp);
644 /* when I and D space are separate, this will have to be fixed. */
645 case PTRACE_POKETEXT: /* write the word at location addr. */
646 case PTRACE_POKEDATA:
647 ret = generic_ptrace_pokedata(child, addr, data);
650 case PTRACE_POKEUSR: {
651 struct pt_regs *regs;
653 regs = task_pt_regs(child);
657 regs->regs[addr] = data;
659 case FPR_BASE ... FPR_BASE + 31: {
660 union fpureg *fregs = get_fpu_regs(child);
662 if (!tsk_used_math(child)) {
663 /* FP not yet used */
/* First FP touch via ptrace: poison regs, give fcr31 a sane zero. */
664 memset(&child->thread.fpu, ~0,
665 sizeof(child->thread.fpu));
666 child->thread.fpu.fcr31 = 0;
669 if (test_thread_flag(TIF_32BIT_FPREGS)) {
671 * The odd registers are actually the high
672 * order bits of the values stored in the even
673 * registers - unless we're using r2k_switch.S.
675 set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
680 set_fpr64(&fregs[addr - FPR_BASE], 0, data);
684 regs->cp0_epc = data;
692 #ifdef CONFIG_CPU_HAS_SMARTMIPS
/* NOTE(review): unmasked fcr31 write — see ptrace_setfpregs note. */
698 child->thread.fpu.fcr31 = data;
700 case DSP_BASE ... DSP_BASE + 5: {
708 dregs = __get_dsp_regs(child);
709 dregs[addr - DSP_BASE] = data;
717 child->thread.dsp.dspcontrol = data;
720 /* The rest are not allowed. */
728 ret = ptrace_getregs(child, datavp);
732 ret = ptrace_setregs(child, datavp);
735 case PTRACE_GETFPREGS:
736 ret = ptrace_getfpregs(child, datavp);
739 case PTRACE_SETFPREGS:
740 ret = ptrace_setfpregs(child, datavp);
743 case PTRACE_GET_THREAD_AREA:
744 ret = put_user(task_thread_info(child)->tp_value, datalp);
747 case PTRACE_GET_WATCH_REGS:
748 ret = ptrace_get_watch_regs(child, addrp);
751 case PTRACE_SET_WATCH_REGS:
752 ret = ptrace_set_watch_regs(child, addrp);
/* Default: everything else goes to the generic ptrace core. */
756 ret = ptrace_request(child, request, addr, data);
764 * Notification of system call entry/exit
765 * - triggered by current->work.syscall_trace
/*
 * syscall_trace_enter - slow-path hook run on syscall entry when any of
 * seccomp / TIF_SYSCALL_TRACE / tracepoints / audit are active.
 *
 * Order matters: seccomp filtering first (a -1 result aborts the
 * syscall — the early-return path is elided in this excerpt), then the
 * ptrace stop, then the raw tracepoint, then audit.  regs->regs[2] (v0)
 * holds the syscall number; regs[4..7] (a0-a3) the first arguments.
 */
767 asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
772 if (secure_computing(syscall) == -1)
775 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
776 tracehook_report_syscall_entry(regs))
779 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
780 trace_sys_enter(regs, regs->regs[2]);
782 audit_syscall_entry(syscall_get_arch(),
784 regs->regs[4], regs->regs[5],
785 regs->regs[6], regs->regs[7]);
790 * Notification of system call entry/exit
791 * - triggered by current->work.syscall_trace
/*
 * syscall_trace_leave - slow-path hook run on syscall exit; mirror of
 * syscall_trace_enter: audit first, then the exit tracepoint, then the
 * ptrace stop.  The elided comment/lines at the top concern re-entering
 * RCU-watching state after schedule_user()/do_notify_resume() (see the
 * partial comment above the function body).
 */
793 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
796 * We may come here right after calling schedule_user()
797 * or do_notify_resume(), in which case we can be in RCU
802 audit_syscall_exit(regs);
804 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
805 trace_sys_exit(regs, regs->regs[2]);
807 if (test_thread_flag(TIF_SYSCALL_TRACE))
808 tracehook_report_syscall_exit(regs, 0);