[PATCH] close_files(): add scheduling point
[linux-drm-fsl-dcu.git] kernel/exit.c
1 /*
2  *  linux/kernel/exit.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 #include <linux/mm.h>
8 #include <linux/slab.h>
9 #include <linux/interrupt.h>
10 #include <linux/smp_lock.h>
11 #include <linux/module.h>
12 #include <linux/capability.h>
13 #include <linux/completion.h>
14 #include <linux/personality.h>
15 #include <linux/tty.h>
16 #include <linux/mnt_namespace.h>
17 #include <linux/key.h>
18 #include <linux/security.h>
19 #include <linux/cpu.h>
20 #include <linux/acct.h>
21 #include <linux/tsacct_kern.h>
22 #include <linux/file.h>
23 #include <linux/binfmts.h>
24 #include <linux/nsproxy.h>
25 #include <linux/pid_namespace.h>
26 #include <linux/ptrace.h>
27 #include <linux/profile.h>
28 #include <linux/mount.h>
29 #include <linux/proc_fs.h>
30 #include <linux/mempolicy.h>
31 #include <linux/taskstats_kern.h>
32 #include <linux/delayacct.h>
33 #include <linux/cpuset.h>
34 #include <linux/syscalls.h>
35 #include <linux/signal.h>
36 #include <linux/posix-timers.h>
37 #include <linux/cn_proc.h>
38 #include <linux/mutex.h>
39 #include <linux/futex.h>
40 #include <linux/compat.h>
41 #include <linux/pipe_fs_i.h>
42 #include <linux/audit.h> /* for audit_free() */
43 #include <linux/resource.h>
44 #include <linux/blkdev.h>
45
46 #include <asm/uaccess.h>
47 #include <asm/unistd.h>
48 #include <asm/pgtable.h>
49 #include <asm/mmu_context.h>
50
51 extern void sem_exit(void);
52
53 static void exit_mm(struct task_struct * tsk);
54
55 static void __unhash_process(struct task_struct *p)
56 {
57         nr_threads--;
58         detach_pid(p, PIDTYPE_PID);
59         if (thread_group_leader(p)) {
60                 detach_pid(p, PIDTYPE_PGID);
61                 detach_pid(p, PIDTYPE_SID);
62
63                 list_del_rcu(&p->tasks);
64                 __get_cpu_var(process_counts)--;
65         }
66         list_del_rcu(&p->thread_group);
67         remove_parent(p);
68 }
69
70 /*
71  * This function expects the tasklist_lock write-locked.
72  */
73 static void __exit_signal(struct task_struct *tsk)
74 {
75         struct signal_struct *sig = tsk->signal;
76         struct sighand_struct *sighand;
77
78         BUG_ON(!sig);
79         BUG_ON(!atomic_read(&sig->count));
80
81         rcu_read_lock();
82         sighand = rcu_dereference(tsk->sighand);
83         spin_lock(&sighand->siglock);
84
85         posix_cpu_timers_exit(tsk);
86         if (atomic_dec_and_test(&sig->count))
87                 posix_cpu_timers_exit_group(tsk);
88         else {
89                 /*
90                  * If there is any task waiting for the group exit
91                  * then notify it:
92                  */
93                 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
94                         wake_up_process(sig->group_exit_task);
95                         sig->group_exit_task = NULL;
96                 }
97                 if (tsk == sig->curr_target)
98                         sig->curr_target = next_thread(tsk);
99                 /*
100                  * Accumulate here the counters for all threads but the
101                  * group leader as they die, so they can be added into
102                  * the process-wide totals when those are taken.
103                  * The group leader stays around as a zombie as long
104                  * as there are other threads.  When it gets reaped,
105                  * the exit.c code will add its counts into these totals.
106                  * We won't ever get here for the group leader, since it
107                  * will have been the last reference on the signal_struct.
108                  */
109                 sig->utime = cputime_add(sig->utime, tsk->utime);
110                 sig->stime = cputime_add(sig->stime, tsk->stime);
111                 sig->min_flt += tsk->min_flt;
112                 sig->maj_flt += tsk->maj_flt;
113                 sig->nvcsw += tsk->nvcsw;
114                 sig->nivcsw += tsk->nivcsw;
115                 sig->sched_time += tsk->sched_time;
116                 sig = NULL; /* Marker for below. */
117         }
118
119         __unhash_process(tsk);
120
121         tsk->signal = NULL;
122         tsk->sighand = NULL;
123         spin_unlock(&sighand->siglock);
124         rcu_read_unlock();
125
126         __cleanup_sighand(sighand);
127         clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
128         flush_sigqueue(&tsk->pending);
129         if (sig) {
130                 flush_sigqueue(&sig->shared_pending);
131                 taskstats_tgid_free(sig);
132                 __cleanup_signal(sig);
133         }
134 }
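/*
 * Note on the "sig" marker above: when the dec-and-test path ran,
 * i.e. this was the last thread, sig is left non-NULL, so the shared
 * pending queue and the signal_struct are torn down here without
 * sighand->siglock held; once the task is unhashed nothing else can
 * reach them.
 */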
135
136 static void delayed_put_task_struct(struct rcu_head *rhp)
137 {
138         put_task_struct(container_of(rhp, struct task_struct, rcu));
139 }
140
141 void release_task(struct task_struct * p)
142 {
143         struct task_struct *leader;
144         int zap_leader;
145 repeat:
146         atomic_dec(&p->user->processes);
147         write_lock_irq(&tasklist_lock);
148         ptrace_unlink(p);
149         BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
150         __exit_signal(p);
151
152         /*
153          * If we are the last non-leader member of the thread
154          * group, and the leader is a zombie, then notify the
155          * group leader's parent process (if it wants notification).
156          */
157         zap_leader = 0;
158         leader = p->group_leader;
159         if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
160                 BUG_ON(leader->exit_signal == -1);
161                 do_notify_parent(leader, leader->exit_signal);
162                 /*
163                  * If we were the last child thread and the leader has
164                  * exited already, and the leader's parent ignores SIGCHLD,
165                  * then we are the one who should release the leader.
166                  *
167                  * do_notify_parent() will have marked it self-reaping in
168                  * that case.
169                  */
170                 zap_leader = (leader->exit_signal == -1);
171         }
172
173         sched_exit(p);
174         write_unlock_irq(&tasklist_lock);
175         proc_flush_task(p);
176         release_thread(p);
177         call_rcu(&p->rcu, delayed_put_task_struct);
178
179         p = leader;
180         if (unlikely(zap_leader))
181                 goto repeat;
182 }
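/*
 * Note: the "goto repeat" with p = leader reruns the whole teardown
 * for a self-reaping group leader, so a single call here can end up
 * releasing two tasks.
 */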
183
184 /*
185  * This checks not only the pgrp, but falls back on the pid if no
186  * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
187  * without this...
188  */
189 int session_of_pgrp(int pgrp)
190 {
191         struct task_struct *p;
192         int sid = 0;
193
194         read_lock(&tasklist_lock);
195
196         p = find_task_by_pid_type(PIDTYPE_PGID, pgrp);
197         if (p == NULL)
198                 p = find_task_by_pid(pgrp);
199         if (p != NULL)
200                 sid = process_session(p);
201
202         read_unlock(&tasklist_lock);
203
204         return sid;
205 }
206
207 /*
208  * Determine if a process group is "orphaned", according to the POSIX
209  * definition in 2.2.2.52.  Orphaned process groups are not to be affected
210  * by terminal-generated stop signals.  Newly orphaned process groups are
211  * to receive a SIGHUP and a SIGCONT.
212  *
213  * "I ask you, have you ever known what it is to be an orphan?"
214  */
215 static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
216 {
217         struct task_struct *p;
218         int ret = 1;
219
220         do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
221                 if (p == ignored_task
222                                 || p->exit_state
223                                 || is_init(p->real_parent))
224                         continue;
225                 if (process_group(p->real_parent) != pgrp &&
226                     process_session(p->real_parent) == process_session(p)) {
227                         ret = 0;
228                         break;
229                 }
230         } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
231         return ret;     /* (sighing) "Often!" */
232 }
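/*
 * Worked example (illustrative): shell S spawns child C in its own
 * pgrp but the same session.  While S lives, C's pgrp has a parent
 * outside the pgrp yet inside the session, so the loop above finds it
 * and returns 0.  When S exits, no member of C's pgrp has such a
 * parent any more and the pgrp counts as orphaned.
 */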
233
234 int is_orphaned_pgrp(int pgrp)
235 {
236         int retval;
237
238         read_lock(&tasklist_lock);
239         retval = will_become_orphaned_pgrp(pgrp, NULL);
240         read_unlock(&tasklist_lock);
241
242         return retval;
243 }
244
245 static int has_stopped_jobs(int pgrp)
246 {
247         int retval = 0;
248         struct task_struct *p;
249
250         do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
251                 if (p->state != TASK_STOPPED)
252                         continue;
253                 retval = 1;
254                 break;
255         } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
256         return retval;
257 }
258
259 /**
260  * reparent_to_init - Reparent the calling kernel thread to the init task of the pid space that the thread belongs to.
261  *
262  * If a kernel thread is launched as a result of a system call, or if
263  * it ever exits, it should generally reparent itself to init so that
264  * it is correctly cleaned up on exit.
265  *
266  * Various pieces of task state, such as scheduling policy and priority, may
267  * have been inherited from a user process, so we reset them to sane values here.
268  *
269  * NOTE that reparent_to_init() gives the caller full capabilities.
270  */
271 static void reparent_to_init(void)
272 {
273         write_lock_irq(&tasklist_lock);
274
275         ptrace_unlink(current);
276         /* Reparent to init */
277         remove_parent(current);
278         current->parent = child_reaper(current);
279         current->real_parent = child_reaper(current);
280         add_parent(current);
281
282         /* Set the exit signal to SIGCHLD so we signal init on exit */
283         current->exit_signal = SIGCHLD;
284
285         if (!has_rt_policy(current) && (task_nice(current) < 0))
286                 set_user_nice(current, 0);
287         /* cpus_allowed? */
288         /* rt_priority? */
289         /* signals? */
290         security_task_reparent_to_init(current);
291         memcpy(current->signal->rlim, init_task.signal->rlim,
292                sizeof(current->signal->rlim));
293         atomic_inc(&(INIT_USER->__count));
294         write_unlock_irq(&tasklist_lock);
295         switch_uid(INIT_USER);
296 }
297
298 void __set_special_pids(pid_t session, pid_t pgrp)
299 {
300         struct task_struct *curr = current->group_leader;
301
302         if (process_session(curr) != session) {
303                 detach_pid(curr, PIDTYPE_SID);
304                 set_signal_session(curr->signal, session);
305                 attach_pid(curr, PIDTYPE_SID, session);
306         }
307         if (process_group(curr) != pgrp) {
308                 detach_pid(curr, PIDTYPE_PGID);
309                 curr->signal->pgrp = pgrp;
310                 attach_pid(curr, PIDTYPE_PGID, pgrp);
311         }
312 }
313
314 static void set_special_pids(pid_t session, pid_t pgrp)
315 {
316         write_lock_irq(&tasklist_lock);
317         __set_special_pids(session, pgrp);
318         write_unlock_irq(&tasklist_lock);
319 }
320
321 /*
322  * Let kernel threads use this to say that they
323  * allow a certain signal (since daemonize() will
324  * have disabled all of them by default).
325  */
326 int allow_signal(int sig)
327 {
328         if (!valid_signal(sig) || sig < 1)
329                 return -EINVAL;
330
331         spin_lock_irq(&current->sighand->siglock);
332         sigdelset(&current->blocked, sig);
333         if (!current->mm) {
334                 /* Kernel threads handle their own signals.
335                    Let the signal code know the signal will be
336                    handled, so that it doesn't get converted to
337                    SIGKILL or just silently dropped. */
338                 current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
339         }
340         recalc_sigpending();
341         spin_unlock_irq(&current->sighand->siglock);
342         return 0;
343 }
344
345 EXPORT_SYMBOL(allow_signal);
346
347 int disallow_signal(int sig)
348 {
349         if (!valid_signal(sig) || sig < 1)
350                 return -EINVAL;
351
352         spin_lock_irq(&current->sighand->siglock);
353         sigaddset(&current->blocked, sig);
354         recalc_sigpending();
355         spin_unlock_irq(&current->sighand->siglock);
356         return 0;
357 }
358
359 EXPORT_SYMBOL(disallow_signal);
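/*
 * Illustrative pairing (not from this file): a kernel thread that
 * must stay killable can open up exactly one signal around its work
 * loop, where do_work() stands in for the thread's real job:
 *
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current))
 *		do_work();
 *	disallow_signal(SIGKILL);
 */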
360
361 /*
362  *      Put all the gunge required to become a kernel thread without
363  *      attached user resources in one place where it belongs.
364  */
365
366 void daemonize(const char *name, ...)
367 {
368         va_list args;
369         struct fs_struct *fs;
370         sigset_t blocked;
371
372         va_start(args, name);
373         vsnprintf(current->comm, sizeof(current->comm), name, args);
374         va_end(args);
375
376         /*
377          * If we were started as result of loading a module, close all of the
378          * user space pages.  We don't need them, and if we didn't close them
379          * they would be locked into memory.
380          */
381         exit_mm(current);
382
383         set_special_pids(1, 1);
384         proc_clear_tty(current);
385
386         /* Block and flush all signals */
387         sigfillset(&blocked);
388         sigprocmask(SIG_BLOCK, &blocked, NULL);
389         flush_signals(current);
390
391         /* Become as one with the init task */
392
393         exit_fs(current);       /* current->fs->count--; */
394         fs = init_task.fs;
395         current->fs = fs;
396         atomic_inc(&fs->count);
397
398         exit_task_namespaces(current);
399         current->nsproxy = init_task.nsproxy;
400         get_task_namespaces(current);
401
402         exit_files(current);
403         current->files = init_task.files;
404         atomic_inc(&current->files->count);
405
406         reparent_to_init();
407 }
408
409 EXPORT_SYMBOL(daemonize);
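/*
 * Sketch of the expected calling pattern (illustrative only; the
 * thread name and worker body are made up):
 *
 *	static int worker(void *unused)
 *	{
 *		daemonize("kexampled");
 *		allow_signal(SIGTERM);
 *		while (!signal_pending(current))
 *			do_work();
 *		return 0;
 *	}
 *
 * typically started via kernel_thread(worker, NULL, CLONE_FS).
 */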
410
411 static void close_files(struct files_struct * files)
412 {
413         int i, j;
414         struct fdtable *fdt;
415
416         j = 0;
417
418         /*
419          * It is safe to dereference the fd table without RCU or
420          * ->file_lock because this is the last reference to the
421          * files structure.
422          */
423         fdt = files_fdtable(files);
424         for (;;) {
425                 unsigned long set;
426                 i = j * __NFDBITS;
427                 if (i >= fdt->max_fds)
428                         break;
429                 set = fdt->open_fds->fds_bits[j++];
430                 while (set) {
431                         if (set & 1) {
432                                 struct file * file = xchg(&fdt->fd[i], NULL);
433                                 if (file) {
434                                         filp_close(file, files);
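					/*
					 * Closing every file in a huge
					 * fd table can take a while;
					 * this cond_resched() is the
					 * scheduling point the patch
					 * adds, so other tasks can run
					 * between closes.
					 */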
435                                         cond_resched();
436                                 }
437                         }
438                         i++;
439                         set >>= 1;
440                 }
441         }
442 }
443
444 struct files_struct *get_files_struct(struct task_struct *task)
445 {
446         struct files_struct *files;
447
448         task_lock(task);
449         files = task->files;
450         if (files)
451                 atomic_inc(&files->count);
452         task_unlock(task);
453
454         return files;
455 }
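/*
 * Illustrative pairing (not from this file): a caller wanting a
 * stable reference to another task's files does
 *
 *	files = get_files_struct(task);
 *	if (files) {
 *		... inspect files ...
 *		put_files_struct(files);
 *	}
 */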
456
457 void fastcall put_files_struct(struct files_struct *files)
458 {
459         struct fdtable *fdt;
460
461         if (atomic_dec_and_test(&files->count)) {
462                 close_files(files);
463                 /*
464                  * Free the fd and fdset arrays if we expanded them.
465                  * If the fdtable was embedded, pass files for freeing
466                  * at the end of the RCU grace period. Otherwise,
467                  * you can free files immediately.
468                  */
469                 fdt = files_fdtable(files);
470                 if (fdt != &files->fdtab)
471                         kmem_cache_free(files_cachep, files);
472                 free_fdtable(fdt);
473         }
474 }
475
476 EXPORT_SYMBOL(put_files_struct);
477
478 void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
479 {
480         struct files_struct *old;
481
482         old = tsk->files;
483         task_lock(tsk);
484         tsk->files = files;
485         task_unlock(tsk);
486         put_files_struct(old);
487 }
488 EXPORT_SYMBOL(reset_files_struct);
489
490 static inline void __exit_files(struct task_struct *tsk)
491 {
492         struct files_struct * files = tsk->files;
493
494         if (files) {
495                 task_lock(tsk);
496                 tsk->files = NULL;
497                 task_unlock(tsk);
498                 put_files_struct(files);
499         }
500 }
501
502 void exit_files(struct task_struct *tsk)
503 {
504         __exit_files(tsk);
505 }
506
507 static inline void __put_fs_struct(struct fs_struct *fs)
508 {
509         /* No need to hold fs->lock if we are killing it */
510         if (atomic_dec_and_test(&fs->count)) {
511                 dput(fs->root);
512                 mntput(fs->rootmnt);
513                 dput(fs->pwd);
514                 mntput(fs->pwdmnt);
515                 if (fs->altroot) {
516                         dput(fs->altroot);
517                         mntput(fs->altrootmnt);
518                 }
519                 kmem_cache_free(fs_cachep, fs);
520         }
521 }
522
523 void put_fs_struct(struct fs_struct *fs)
524 {
525         __put_fs_struct(fs);
526 }
527
528 static inline void __exit_fs(struct task_struct *tsk)
529 {
530         struct fs_struct * fs = tsk->fs;
531
532         if (fs) {
533                 task_lock(tsk);
534                 tsk->fs = NULL;
535                 task_unlock(tsk);
536                 __put_fs_struct(fs);
537         }
538 }
539
540 void exit_fs(struct task_struct *tsk)
541 {
542         __exit_fs(tsk);
543 }
544
545 EXPORT_SYMBOL_GPL(exit_fs);
546
547 /*
548  * Turn us into a lazy TLB process if we
549  * aren't already..
550  */
551 static void exit_mm(struct task_struct * tsk)
552 {
553         struct mm_struct *mm = tsk->mm;
554
555         mm_release(tsk, mm);
556         if (!mm)
557                 return;
558         /*
559          * Serialize with any possible pending coredump.
560          * We must hold mmap_sem around checking core_waiters
561          * and clearing tsk->mm.  The core-inducing thread
562          * will increment core_waiters for each thread in the
563          * group with ->mm != NULL.
564          */
565         down_read(&mm->mmap_sem);
566         if (mm->core_waiters) {
567                 up_read(&mm->mmap_sem);
568                 down_write(&mm->mmap_sem);
569                 if (!--mm->core_waiters)
570                         complete(mm->core_startup_done);
571                 up_write(&mm->mmap_sem);
572
573                 wait_for_completion(&mm->core_done);
574                 down_read(&mm->mmap_sem);
575         }
576         atomic_inc(&mm->mm_count);
577         BUG_ON(mm != tsk->active_mm);
578         /* more a memory barrier than a real lock */
579         task_lock(tsk);
580         tsk->mm = NULL;
581         up_read(&mm->mmap_sem);
582         enter_lazy_tlb(mm, current);
583         task_unlock(tsk);
584         mmput(mm);
585 }
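/*
 * Note: after this the task keeps running on the old mm only as
 * active_mm (lazy TLB); the mm_count reference taken above keeps the
 * mm_struct itself alive until the final context switch drops it.
 */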
586
587 static inline void
588 choose_new_parent(struct task_struct *p, struct task_struct *reaper)
589 {
590         /*
591          * Make sure we're not reparenting to ourselves and that
592          * the parent is not a zombie.
593          */
594         BUG_ON(p == reaper || reaper->exit_state);
595         p->real_parent = reaper;
596 }
597
598 static void
599 reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
600 {
601         if (p->pdeath_signal)
602                 /* We already hold the tasklist_lock here.  */
603                 group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
604
605         /* Move the child from its dying parent to the new one.  */
606         if (unlikely(traced)) {
607                 /* Preserve ptrace links if someone else is tracing this child.  */
608                 list_del_init(&p->ptrace_list);
609                 if (p->parent != p->real_parent)
610                         list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
611         } else {
612                 /* If this child is being traced, then we're the one tracing it
613                  * anyway, so let go of it.
614                  */
615                 p->ptrace = 0;
616                 remove_parent(p);
617                 p->parent = p->real_parent;
618                 add_parent(p);
619
620                 if (p->state == TASK_TRACED) {
621                         /*
622                          * If it was at a trace stop, turn it into
623                          * a normal stop since it's no longer being
624                          * traced.
625                          */
626                         ptrace_untrace(p);
627                 }
628         }
629
630         /* If this is a threaded reparent, there is no need to
631          * notify anyone that anything has happened.
632          */
633         if (p->real_parent->group_leader == father->group_leader)
634                 return;
635
636         /* We don't want people slaying init.  */
637         if (p->exit_signal != -1)
638                 p->exit_signal = SIGCHLD;
639
640         /* If we'd notified the old parent about this child's death,
641          * also notify the new parent.
642          */
643         if (!traced && p->exit_state == EXIT_ZOMBIE &&
644             p->exit_signal != -1 && thread_group_empty(p))
645                 do_notify_parent(p, p->exit_signal);
646
647         /*
648          * process group orphan check
649          * Case ii: Our child is in a different pgrp
650          * than we are, and it was the only connection
651          * outside, so the child pgrp is now orphaned.
652          */
653         if ((process_group(p) != process_group(father)) &&
654             (process_session(p) == process_session(father))) {
655                 int pgrp = process_group(p);
656
657                 if (will_become_orphaned_pgrp(pgrp, NULL) &&
658                     has_stopped_jobs(pgrp)) {
659                         __kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
660                         __kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
661                 }
662         }
663 }
664
665 /*
666  * When we die, we re-parent all our children.
667  * Try to give them to another thread in our thread
668  * group, and if no such member exists, give it to
669  * the child reaper process (ie "init") in our pid
670  * space.
671  */
672 static void
673 forget_original_parent(struct task_struct *father, struct list_head *to_release)
674 {
675         struct task_struct *p, *reaper = father;
676         struct list_head *_p, *_n;
677
678         do {
679                 reaper = next_thread(reaper);
680                 if (reaper == father) {
681                         reaper = child_reaper(father);
682                         break;
683                 }
684         } while (reaper->exit_state);
685
686         /*
687          * There are only two places where our children can be:
688          *
689          * - in our child list
690          * - in our ptraced child list
691          *
692          * Search them and reparent children.
693          */
694         list_for_each_safe(_p, _n, &father->children) {
695                 int ptrace;
696                 p = list_entry(_p, struct task_struct, sibling);
697
698                 ptrace = p->ptrace;
699
700                 /* if father isn't the real parent, then ptrace must be enabled */
701                 BUG_ON(father != p->real_parent && !ptrace);
702
703                 if (father == p->real_parent) {
704                         /* reparent to the reaper; we are the real parent */
705                         choose_new_parent(p, reaper);
706                         reparent_thread(p, father, 0);
707                 } else {
708                         /* reparent ptraced task to its real parent */
709                         __ptrace_unlink (p);
710                         if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
711                             thread_group_empty(p))
712                                 do_notify_parent(p, p->exit_signal);
713                 }
714
715                 /*
716                  * If the ptraced child is a zombie with exit_signal == -1,
717                  * we must collect it before we exit, or it will remain a
718                  * zombie forever, since we prevented it from reaping itself
719                  * while we were tracing it, so that we could see it in wait4.
720                  */
721                 if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
722                         list_add(&p->ptrace_list, to_release);
723         }
724         list_for_each_safe(_p, _n, &father->ptrace_children) {
725                 p = list_entry(_p, struct task_struct, ptrace_list);
726                 choose_new_parent(p, reaper);
727                 reparent_thread(p, father, 1);
728         }
729 }
730
731 /*
732  * Send signals to all our closest relatives so that they know
733  * to properly mourn us..
734  */
735 static void exit_notify(struct task_struct *tsk)
736 {
737         int state;
738         struct task_struct *t;
739         struct list_head ptrace_dead, *_p, *_n;
740
741         if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
742             && !thread_group_empty(tsk)) {
743                 /*
744                  * This occurs when there was a race between our exit
745                  * syscall and a group signal choosing us as the one to
746                  * wake up.  It could be that we are the only thread
747                  * alerted to check for pending signals, but another thread
748                  * should be woken now to take the signal since we will not.
749                  * Now we'll wake all the threads in the group just to make
750                  * sure someone gets all the pending signals.
751                  */
752                 read_lock(&tasklist_lock);
753                 spin_lock_irq(&tsk->sighand->siglock);
754                 for (t = next_thread(tsk); t != tsk; t = next_thread(t))
755                         if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
756                                 recalc_sigpending_tsk(t);
757                                 if (signal_pending(t))
758                                         signal_wake_up(t, 0);
759                         }
760                 spin_unlock_irq(&tsk->sighand->siglock);
761                 read_unlock(&tasklist_lock);
762         }
763
764         write_lock_irq(&tasklist_lock);
765
766         /*
767          * This does two things:
768          *
769          * A.  Make init inherit all the child processes
770          * B.  Check to see if any process groups have become orphaned
771          *      as a result of our exiting, and if they have any stopped
772          *      jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
773          */
774
775         INIT_LIST_HEAD(&ptrace_dead);
776         forget_original_parent(tsk, &ptrace_dead);
777         BUG_ON(!list_empty(&tsk->children));
778         BUG_ON(!list_empty(&tsk->ptrace_children));
779
780         /*
781          * Check to see if any process groups have become orphaned
782          * as a result of our exiting, and if they have any stopped
783          * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
784          *
785          * Case i: Our father is in a different pgrp than we are
786          * and we were the only connection outside, so our pgrp
787          * is about to become orphaned.
788          */
789
790         t = tsk->real_parent;
791
792         if ((process_group(t) != process_group(tsk)) &&
793             (process_session(t) == process_session(tsk)) &&
794             will_become_orphaned_pgrp(process_group(tsk), tsk) &&
795             has_stopped_jobs(process_group(tsk))) {
796                 __kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
797                 __kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
798         }
799
800         /* Let father know we died.
801          *
802          * Thread signals are configurable, but you aren't going to use
803          * that to send signals to arbitrary processes.
804          * That stops right now.
805          *
806          * If the parent's exec id doesn't match the exec id we saved
807          * when we started, then we know the parent has changed security
808          * domain.
809          *
810          * If our self_exec_id doesn't match our parent_exec_id, then
811          * we have changed execution domain, as these two values
812          * started out the same after a fork.
813          *
814          */
815
816         if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
817             (tsk->parent_exec_id != t->self_exec_id ||
818              tsk->self_exec_id != tsk->parent_exec_id) &&
819             !capable(CAP_KILL))
820                 tsk->exit_signal = SIGCHLD;
821
822
823         /* If something other than our normal parent is ptracing us, then
824          * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
825          * only has special meaning to our real parent.
826          */
827         if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
828                 int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
829                 do_notify_parent(tsk, signal);
830         } else if (tsk->ptrace) {
831                 do_notify_parent(tsk, SIGCHLD);
832         }
833
834         state = EXIT_ZOMBIE;
835         if (tsk->exit_signal == -1 &&
836             (likely(tsk->ptrace == 0) ||
837              unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
838                 state = EXIT_DEAD;
839         tsk->exit_state = state;
840
841         write_unlock_irq(&tasklist_lock);
842
843         list_for_each_safe(_p, _n, &ptrace_dead) {
844                 list_del_init(_p);
845                 t = list_entry(_p, struct task_struct, ptrace_list);
846                 release_task(t);
847         }
848
849         /* If the process is dead, release it - nobody will wait for it */
850         if (state == EXIT_DEAD)
851                 release_task(tsk);
852 }
853
854 fastcall NORET_TYPE void do_exit(long code)
855 {
856         struct task_struct *tsk = current;
857         int group_dead;
858
859         profile_task_exit(tsk);
860
861         WARN_ON(atomic_read(&tsk->fs_excl));
862
863         if (unlikely(in_interrupt()))
864                 panic("Aiee, killing interrupt handler!");
865         if (unlikely(!tsk->pid))
866                 panic("Attempted to kill the idle task!");
867         if (unlikely(tsk == child_reaper(tsk))) {
868                 if (tsk->nsproxy->pid_ns != &init_pid_ns)
869                         tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
870                 else
871                         panic("Attempted to kill init!");
872         }
873
874
875         if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
876                 current->ptrace_message = code;
877                 ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
878         }
879
880         /*
881          * We're taking recursive faults here in do_exit. Safest is to just
882          * leave this task alone and wait for reboot.
883          */
884         if (unlikely(tsk->flags & PF_EXITING)) {
885                 printk(KERN_ALERT
886                         "Fixing recursive fault but reboot is needed!\n");
887                 if (tsk->io_context)
888                         exit_io_context();
889                 set_current_state(TASK_UNINTERRUPTIBLE);
890                 schedule();
891         }
892
893         tsk->flags |= PF_EXITING;
894
895         if (unlikely(in_atomic()))
896                 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
897                                 current->comm, current->pid,
898                                 preempt_count());
899
900         acct_update_integrals(tsk);
901         if (tsk->mm) {
902                 update_hiwater_rss(tsk->mm);
903                 update_hiwater_vm(tsk->mm);
904         }
905         group_dead = atomic_dec_and_test(&tsk->signal->live);
906         if (group_dead) {
907                 hrtimer_cancel(&tsk->signal->real_timer);
908                 exit_itimers(tsk->signal);
909         }
910         acct_collect(code, group_dead);
911         if (unlikely(tsk->robust_list))
912                 exit_robust_list(tsk);
913 #if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
914         if (unlikely(tsk->compat_robust_list))
915                 compat_exit_robust_list(tsk);
916 #endif
917         if (unlikely(tsk->audit_context))
918                 audit_free(tsk);
919
920         taskstats_exit(tsk, group_dead);
921
922         exit_mm(tsk);
923
924         if (group_dead)
925                 acct_process();
926         exit_sem(tsk);
927         __exit_files(tsk);
928         __exit_fs(tsk);
929         exit_thread();
930         cpuset_exit(tsk);
931         exit_keys(tsk);
932
933         if (group_dead && tsk->signal->leader)
934                 disassociate_ctty(1);
935
936         module_put(task_thread_info(tsk)->exec_domain->module);
937         if (tsk->binfmt)
938                 module_put(tsk->binfmt->module);
939
940         tsk->exit_code = code;
941         proc_exit_connector(tsk);
942         exit_task_namespaces(tsk);
943         exit_notify(tsk);
944 #ifdef CONFIG_NUMA
945         mpol_free(tsk->mempolicy);
946         tsk->mempolicy = NULL;
947 #endif
948         /*
949          * This must happen late, after the PID is not
950          * hashed anymore:
951          */
952         if (unlikely(!list_empty(&tsk->pi_state_list)))
953                 exit_pi_state_list(tsk);
954         if (unlikely(current->pi_state_cache))
955                 kfree(current->pi_state_cache);
956         /*
957          * Make sure we are holding no locks:
958          */
959         debug_check_no_locks_held(tsk);
960
961         if (tsk->io_context)
962                 exit_io_context();
963
964         if (tsk->splice_pipe)
965                 __free_pipe_info(tsk->splice_pipe);
966
967         preempt_disable();
968         /* causes final put_task_struct in finish_task_switch(). */
969         tsk->state = TASK_DEAD;
970
971         schedule();
972         BUG();
973         /* Avoid "noreturn function does return".  */
974         for (;;)
975                 cpu_relax();    /* For when BUG is null */
976 }
977
978 EXPORT_SYMBOL_GPL(do_exit);
979
980 NORET_TYPE void complete_and_exit(struct completion *comp, long code)
981 {
982         if (comp)
983                 complete(comp);
984
985         do_exit(code);
986 }
987
988 EXPORT_SYMBOL(complete_and_exit);
989
990 asmlinkage long sys_exit(int error_code)
991 {
992         do_exit((error_code&0xff)<<8);
993 }
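/*
 * Note (for reference): the (code & 0xff) << 8 encoding matches the
 * wait(2) status layout: exit(1) becomes 0x0100, so userspace's
 * WEXITSTATUS() recovers 1, while the low bits stay free to report a
 * terminating signal.
 */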
994
995 /*
996  * Take down every thread in the group.  This is called by fatal signals
997  * as well as by sys_exit_group (below).
998  */
999 NORET_TYPE void
1000 do_group_exit(int exit_code)
1001 {
1002         BUG_ON(exit_code & 0x80); /* core dumps don't get here */
1003
1004         if (current->signal->flags & SIGNAL_GROUP_EXIT)
1005                 exit_code = current->signal->group_exit_code;
1006         else if (!thread_group_empty(current)) {
1007                 struct signal_struct *const sig = current->signal;
1008                 struct sighand_struct *const sighand = current->sighand;
1009                 spin_lock_irq(&sighand->siglock);
1010                 if (sig->flags & SIGNAL_GROUP_EXIT)
1011                         /* Another thread got here before we took the lock.  */
1012                         exit_code = sig->group_exit_code;
1013                 else {
1014                         sig->group_exit_code = exit_code;
1015                         zap_other_threads(current);
1016                 }
1017                 spin_unlock_irq(&sighand->siglock);
1018         }
1019
1020         do_exit(exit_code);
1021         /* NOTREACHED */
1022 }
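/*
 * Note: stashing group_exit_code under siglock means every thread in
 * the group, and any wait4() caller, observes one agreed-upon exit
 * status no matter which thread got here first.
 */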
1023
1024 /*
1025  * this kills every thread in the thread group. Note that any externally
1026  * wait4()-ing process will get the correct exit code - even if this
1027  * thread is not the thread group leader.
1028  */
1029 asmlinkage void sys_exit_group(int error_code)
1030 {
1031         do_group_exit((error_code & 0xff) << 8);
1032 }
1033
1034 static int eligible_child(pid_t pid, int options, struct task_struct *p)
1035 {
1036         if (pid > 0) {
1037                 if (p->pid != pid)
1038                         return 0;
1039         } else if (!pid) {
1040                 if (process_group(p) != process_group(current))
1041                         return 0;
1042         } else if (pid != -1) {
1043                 if (process_group(p) != -pid)
1044                         return 0;
1045         }
1046
1047         /*
1048          * Do not consider detached threads that are
1049          * not ptraced:
1050          */
1051         if (p->exit_signal == -1 && !p->ptrace)
1052                 return 0;
1053
1054         /* Wait for all children (clone and not) if __WALL is set;
1055          * otherwise, wait for clone children *only* if __WCLONE is
1056          * set; otherwise, wait for non-clone children *only*.  (Note:
1057          * A "clone" child here is one that reports to its parent
1058          * using a signal other than SIGCHLD.) */
1059         if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
1060             && !(options & __WALL))
1061                 return 0;
1062         /*
1063          * Do not consider thread group leaders that are
1064          * in a non-empty thread group:
1065          */
1066         if (delay_group_leader(p))
1067                 return 2;
1068
1069         if (security_task_wait(p))
1070                 return 0;
1071
1072         return 1;
1073 }
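/*
 * For reference, the pid argument mirrors waitpid(2): pid > 0 waits
 * for exactly that pid, pid == 0 for our own process group, pid == -1
 * for any child, and pid < -1 for process group -pid.
 */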
1074
1075 static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
1076                                int why, int status,
1077                                struct siginfo __user *infop,
1078                                struct rusage __user *rusagep)
1079 {
1080         int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
1081
1082         put_task_struct(p);
1083         if (!retval)
1084                 retval = put_user(SIGCHLD, &infop->si_signo);
1085         if (!retval)
1086                 retval = put_user(0, &infop->si_errno);
1087         if (!retval)
1088                 retval = put_user((short)why, &infop->si_code);
1089         if (!retval)
1090                 retval = put_user(pid, &infop->si_pid);
1091         if (!retval)
1092                 retval = put_user(uid, &infop->si_uid);
1093         if (!retval)
1094                 retval = put_user(status, &infop->si_status);
1095         if (!retval)
1096                 retval = pid;
1097         return retval;
1098 }
1099
1100 /*
1101  * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
1102  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1103  * the lock and this task is uninteresting.  If we return nonzero, we have
1104  * released the lock and the system call should return.
1105  */
1106 static int wait_task_zombie(struct task_struct *p, int noreap,
1107                             struct siginfo __user *infop,
1108                             int __user *stat_addr, struct rusage __user *ru)
1109 {
1110         unsigned long state;
1111         int retval;
1112         int status;
1113
1114         if (unlikely(noreap)) {
1115                 pid_t pid = p->pid;
1116                 uid_t uid = p->uid;
1117                 int exit_code = p->exit_code;
1118                 int why, status;
1119
1120                 if (unlikely(p->exit_state != EXIT_ZOMBIE))
1121                         return 0;
1122                 if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
1123                         return 0;
1124                 get_task_struct(p);
1125                 read_unlock(&tasklist_lock);
1126                 if ((exit_code & 0x7f) == 0) {
1127                         why = CLD_EXITED;
1128                         status = exit_code >> 8;
1129                 } else {
1130                         why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
1131                         status = exit_code & 0x7f;
1132                 }
1133                 return wait_noreap_copyout(p, pid, uid, why,
1134                                            status, infop, ru);
1135         }
1136
1137         /*
1138          * Try to move the task's state to EXIT_DEAD;
1139          * only one thread is allowed to do this:
1140          */
1141         state = xchg(&p->exit_state, EXIT_DEAD);
1142         if (state != EXIT_ZOMBIE) {
1143                 BUG_ON(state != EXIT_DEAD);
1144                 return 0;
1145         }
1146         if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
1147                 /*
1148                  * This can only happen in a race with a ptraced thread
1149                  * dying on another processor.
1150                  */
1151                 return 0;
1152         }
1153
1154         if (likely(p->real_parent == p->parent) && likely(p->signal)) {
1155                 struct signal_struct *psig;
1156                 struct signal_struct *sig;
1157
1158                 /*
1159                  * The resource counters for the group leader are in its
1160                  * own task_struct.  Those for dead threads in the group
1161                  * are in its signal_struct, as are those for the child
1162                  * processes it has previously reaped.  All these
1163                  * accumulate in the parent's signal_struct c* fields.
1164                  *
1165                  * We don't bother to take a lock here to protect these
1166                  * p->signal fields, because they are only touched by
1167                  * __exit_signal, which runs with tasklist_lock
1168                  * write-locked anyway, and so is excluded here.  We do
1169                  * need to protect the access to p->parent->signal fields,
1170                  * as other threads in the parent group can be right
1171                  * here reaping other children at the same time.
1172                  */
1173                 spin_lock_irq(&p->parent->sighand->siglock);
1174                 psig = p->parent->signal;
1175                 sig = p->signal;
1176                 psig->cutime =
1177                         cputime_add(psig->cutime,
1178                         cputime_add(p->utime,
1179                         cputime_add(sig->utime,
1180                                     sig->cutime)));
1181                 psig->cstime =
1182                         cputime_add(psig->cstime,
1183                         cputime_add(p->stime,
1184                         cputime_add(sig->stime,
1185                                     sig->cstime)));
1186                 psig->cmin_flt +=
1187                         p->min_flt + sig->min_flt + sig->cmin_flt;
1188                 psig->cmaj_flt +=
1189                         p->maj_flt + sig->maj_flt + sig->cmaj_flt;
1190                 psig->cnvcsw +=
1191                         p->nvcsw + sig->nvcsw + sig->cnvcsw;
1192                 psig->cnivcsw +=
1193                         p->nivcsw + sig->nivcsw + sig->cnivcsw;
1194                 spin_unlock_irq(&p->parent->sighand->siglock);
1195         }
1196
1197         /*
1198          * Now we are sure this task is interesting, and no other
1199          * thread can reap it because we set its state to EXIT_DEAD.
1200          */
1201         read_unlock(&tasklist_lock);
1202
1203         retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1204         status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1205                 ? p->signal->group_exit_code : p->exit_code;
1206         if (!retval && stat_addr)
1207                 retval = put_user(status, stat_addr);
1208         if (!retval && infop)
1209                 retval = put_user(SIGCHLD, &infop->si_signo);
1210         if (!retval && infop)
1211                 retval = put_user(0, &infop->si_errno);
1212         if (!retval && infop) {
1213                 int why;
1214
1215                 if ((status & 0x7f) == 0) {
1216                         why = CLD_EXITED;
1217                         status >>= 8;
1218                 } else {
1219                         why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
1220                         status &= 0x7f;
1221                 }
1222                 retval = put_user((short)why, &infop->si_code);
1223                 if (!retval)
1224                         retval = put_user(status, &infop->si_status);
1225         }
1226         if (!retval && infop)
1227                 retval = put_user(p->pid, &infop->si_pid);
1228         if (!retval && infop)
1229                 retval = put_user(p->uid, &infop->si_uid);
1230         if (retval) {
1231                 // TODO: is this safe?
1232                 p->exit_state = EXIT_ZOMBIE;
1233                 return retval;
1234         }
1235         retval = p->pid;
1236         if (p->real_parent != p->parent) {
1237                 write_lock_irq(&tasklist_lock);
1238                 /* Double-check with lock held.  */
1239                 if (p->real_parent != p->parent) {
1240                         __ptrace_unlink(p);
1241                         // TODO: is this safe?
1242                         p->exit_state = EXIT_ZOMBIE;
1243                         /*
1244                          * If this is not a detached task, notify the parent.
1245                          * If it's still not detached after that, don't release
1246                          * it now.
1247                          */
1248                         if (p->exit_signal != -1) {
1249                                 do_notify_parent(p, p->exit_signal);
1250                                 if (p->exit_signal != -1)
1251                                         p = NULL;
1252                         }
1253                 }
1254                 write_unlock_irq(&tasklist_lock);
1255         }
1256         if (p != NULL)
1257                 release_task(p);
1258         BUG_ON(!retval);
1259         return retval;
1260 }
1261
1262 /*
1263  * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
1264  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1265  * the lock and this task is uninteresting.  If we return nonzero, we have
1266  * released the lock and the system call should return.
1267  */
1268 static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
1269                              int noreap, struct siginfo __user *infop,
1270                              int __user *stat_addr, struct rusage __user *ru)
1271 {
1272         int retval, exit_code;
1273
1274         if (!p->exit_code)
1275                 return 0;
1276         if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
1277             p->signal && p->signal->group_stop_count > 0)
1278                 /*
1279                  * A group stop is in progress and this is the group leader.
1280                  * We won't report until all threads have stopped.
1281                  */
1282                 return 0;
1283
1284         /*
1285          * Now we are pretty sure this task is interesting.
1286          * Make sure it doesn't get reaped out from under us while we
1287          * give up the lock and then examine it below.  We don't want to
1288          * keep holding onto the tasklist_lock while we call getrusage and
1289          * possibly take page faults for user memory.
1290          */
1291         get_task_struct(p);
1292         read_unlock(&tasklist_lock);
1293
1294         if (unlikely(noreap)) {
1295                 pid_t pid = p->pid;
1296                 uid_t uid = p->uid;
1297                 int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
1298
1299                 exit_code = p->exit_code;
1300                 if (unlikely(!exit_code) ||
1301                     unlikely(p->state & TASK_TRACED))
1302                         goto bail_ref;
1303                 return wait_noreap_copyout(p, pid, uid,
1304                                            why, (exit_code << 8) | 0x7f,
1305                                            infop, ru);
1306         }
1307
1308         write_lock_irq(&tasklist_lock);
1309
1310         /*
1311          * This uses xchg to be atomic with the thread resuming and setting
1312          * it.  It must also be done with the write lock held to prevent a
1313          * race with the EXIT_ZOMBIE case.
1314          */
1315         exit_code = xchg(&p->exit_code, 0);
1316         if (unlikely(p->exit_state)) {
1317                 /*
1318                  * The task resumed and then died.  Let the next iteration
1319                  * catch it in EXIT_ZOMBIE.  Note that exit_code might
1320                  * already be zero here if it resumed and did _exit(0).
1321                  * The task itself is dead and won't touch exit_code again;
1322                  * other processors in this function are locked out.
1323                  */
1324                 p->exit_code = exit_code;
1325                 exit_code = 0;
1326         }
1327         if (unlikely(exit_code == 0)) {
1328                 /*
1329                  * Another thread in this function got to it first, or it
1330                  * resumed, or it resumed and then died.
1331                  */
1332                 write_unlock_irq(&tasklist_lock);
1333 bail_ref:
1334                 put_task_struct(p);
1335                 /*
1336                  * We are returning to the wait loop without having successfully
1337                  * removed the process and having released the lock. We cannot
1338                  * continue, since the "p" task pointer is potentially stale.
1339                  *
1340                  * Return -EAGAIN, and do_wait() will restart the loop from the
1341                  * beginning. Do _not_ re-acquire the lock.
1342                  */
1343                 return -EAGAIN;
1344         }
1345
1346         /* move to end of parent's list to avoid starvation */
1347         remove_parent(p);
1348         add_parent(p);
1349
1350         write_unlock_irq(&tasklist_lock);
1351
1352         retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1353         if (!retval && stat_addr)
1354                 retval = put_user((exit_code << 8) | 0x7f, stat_addr);
1355         if (!retval && infop)
1356                 retval = put_user(SIGCHLD, &infop->si_signo);
1357         if (!retval && infop)
1358                 retval = put_user(0, &infop->si_errno);
1359         if (!retval && infop)
1360                 retval = put_user((short)((p->ptrace & PT_PTRACED)
1361                                           ? CLD_TRAPPED : CLD_STOPPED),
1362                                   &infop->si_code);
1363         if (!retval && infop)
1364                 retval = put_user(exit_code, &infop->si_status);
1365         if (!retval && infop)
1366                 retval = put_user(p->pid, &infop->si_pid);
1367         if (!retval && infop)
1368                 retval = put_user(p->uid, &infop->si_uid);
1369         if (!retval)
1370                 retval = p->pid;
1371         put_task_struct(p);
1372
1373         BUG_ON(!retval);
1374         return retval;
1375 }
1376
1377 /*
1378  * Handle do_wait work for one task in a live, non-stopped state.
1379  * We hold read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1380  * the lock and this task is uninteresting.  If we return nonzero, we have
1381  * released the lock and the system call should return.
1382  */
1383 static int wait_task_continued(struct task_struct *p, int noreap,
1384                                struct siginfo __user *infop,
1385                                int __user *stat_addr, struct rusage __user *ru)
1386 {
1387         int retval;
1388         pid_t pid;
1389         uid_t uid;
1390
1391         if (unlikely(!p->signal))
1392                 return 0;
1393
1394         if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1395                 return 0;
1396
1397         spin_lock_irq(&p->sighand->siglock);
1398         /* Re-check with the lock held.  */
1399         if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
1400                 spin_unlock_irq(&p->sighand->siglock);
1401                 return 0;
1402         }
1403         if (!noreap)
1404                 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1405         spin_unlock_irq(&p->sighand->siglock);
1406
1407         pid = p->pid;
1408         uid = p->uid;
1409         get_task_struct(p);
1410         read_unlock(&tasklist_lock);
1411
1412         if (!infop) {
1413                 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1414                 put_task_struct(p);
1415                 if (!retval && stat_addr)
1416                         retval = put_user(0xffff, stat_addr);
1417                 if (!retval)
1418                         retval = p->pid;
1419         } else {
1420                 retval = wait_noreap_copyout(p, pid, uid,
1421                                              CLD_CONTINUED, SIGCONT,
1422                                              infop, ru);
1423                 BUG_ON(retval == 0);
1424         }
1425
1426         return retval;
1427 }
1428
1429
1430 static inline int my_ptrace_child(struct task_struct *p)
1431 {
1432         if (!(p->ptrace & PT_PTRACED))
1433                 return 0;
1434         if (!(p->ptrace & PT_ATTACHED))
1435                 return 1;
1436         /*
1437          * This child was PTRACE_ATTACH'd.  We should be seeing it only if
1438          * we are the attacher.  If we are the real parent, this is a race
1439          * inside ptrace_attach.  It is waiting for the tasklist_lock,
1440          * under which we have to switch the parent links, but it has
1441          * already set the flags in p->ptrace.
1442          */
1443         return (p->parent != p->real_parent);
1444 }
1445
static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
                    int __user *stat_addr, struct rusage __user *ru)
{
        DECLARE_WAITQUEUE(wait, current);
        struct task_struct *tsk;
        int flag, retval;

        add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
        /*
         * We will set this flag if we see any child that might later
         * match our criteria, even if we are not able to reap it yet.
         */
        flag = 0;
        current->state = TASK_INTERRUPTIBLE;
        read_lock(&tasklist_lock);
        tsk = current;
        do {
                struct task_struct *p;
                struct list_head *_p;
                int ret;

                list_for_each(_p, &tsk->children) {
                        p = list_entry(_p, struct task_struct, sibling);

                        ret = eligible_child(pid, options, p);
                        if (!ret)
                                continue;

                        switch (p->state) {
                        case TASK_TRACED:
                                /*
                                 * When we hit the race with PTRACE_ATTACH,
                                 * we will not report this child.  But the
                                 * race means it has not yet been moved to
                                 * our ptrace_children list, so we need to
                                 * set the flag here to avoid a spurious ECHILD
                                 * when the race happens with the only child.
                                 */
                                flag = 1;
                                if (!my_ptrace_child(p))
                                        continue;
                                /*FALLTHROUGH*/
                        case TASK_STOPPED:
                                /*
                                 * It's stopped now, so it might later
                                 * continue, exit, or stop again.
                                 */
                                flag = 1;
                                if (!(options & WUNTRACED) &&
                                    !my_ptrace_child(p))
                                        continue;
                                retval = wait_task_stopped(p, ret == 2,
                                                           (options & WNOWAIT),
                                                           infop,
                                                           stat_addr, ru);
                                if (retval == -EAGAIN)
                                        goto repeat;
                                if (retval != 0) /* wait_task_stopped() released the lock */
                                        goto end;
                                break;
                        default:
                        /* case EXIT_DEAD: */
                                if (p->exit_state == EXIT_DEAD)
                                        continue;
                        /* case EXIT_ZOMBIE: */
                                if (p->exit_state == EXIT_ZOMBIE) {
                                        /*
                                         * Eligible but we cannot release
                                         * it yet:
                                         */
                                        if (ret == 2)
                                                goto check_continued;
                                        if (!likely(options & WEXITED))
                                                continue;
                                        retval = wait_task_zombie(
                                                p, (options & WNOWAIT),
                                                infop, stat_addr, ru);
                                        /* wait_task_zombie() released the lock.  */
                                        if (retval != 0)
                                                goto end;
                                        break;
                                }
check_continued:
                                /*
                                 * It's running now, so it might later
                                 * exit, stop, or stop and then continue.
                                 */
                                flag = 1;
                                if (!unlikely(options & WCONTINUED))
                                        continue;
                                retval = wait_task_continued(
                                        p, (options & WNOWAIT),
                                        infop, stat_addr, ru);
                                if (retval != 0) /* wait_task_continued() released the lock */
                                        goto end;
                                break;
                        }
                }
                if (!flag) {
                        list_for_each(_p, &tsk->ptrace_children) {
                                p = list_entry(_p, struct task_struct,
                                                ptrace_list);
                                if (!eligible_child(pid, options, p))
                                        continue;
                                flag = 1;
                                break;
                        }
                }
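                /*
                 * Unless __WNOTHREAD was given, any thread in the group
                 * may reap children belonging to any other thread, so
                 * walk the whole thread group before giving up.
                 */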
                if (options & __WNOTHREAD)
                        break;
                tsk = next_thread(tsk);
                BUG_ON(tsk->signal != current->signal);
        } while (tsk != current);

        read_unlock(&tasklist_lock);
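        /*
         * Nothing was ready to reap.  If @flag says some child might
         * satisfy us later, either return (WNOHANG), bail out for a
         * pending signal, or sleep until a state change wakes
         * ->wait_chldexit; with no such child at all, fail with -ECHILD.
         */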
        if (flag) {
                retval = 0;
                if (options & WNOHANG)
                        goto end;
                retval = -ERESTARTSYS;
                if (signal_pending(current))
                        goto end;
                schedule();
                goto repeat;
        }
        retval = -ECHILD;
end:
        current->state = TASK_RUNNING;
        remove_wait_queue(&current->signal->wait_chldexit, &wait);
        if (infop) {
                if (retval > 0)
                        retval = 0;
                else {
                        /*
                         * For a WNOHANG return, clear out all the fields
                         * we would set so the user can easily tell the
                         * difference.
                         */
                        if (!retval)
                                retval = put_user(0, &infop->si_signo);
                        if (!retval)
                                retval = put_user(0, &infop->si_errno);
                        if (!retval)
                                retval = put_user(0, &infop->si_code);
                        if (!retval)
                                retval = put_user(0, &infop->si_pid);
                        if (!retval)
                                retval = put_user(0, &infop->si_uid);
                        if (!retval)
                                retval = put_user(0, &infop->si_status);
                }
        }
        return retval;
}

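/*
 * waitid(2).  The @which/@pid pair is folded into do_wait()'s single-pid
 * convention: -1 for any child (P_ALL), a positive pid (P_PID), or a
 * negated process-group id (P_PGID).  Results always go through @infop.
 *
 * Typical userspace usage, for illustration only (child is some pid
 * being waited for):
 *
 *      siginfo_t info;
 *      waitid(P_PID, child, &info, WEXITED | WNOWAIT);
 */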
asmlinkage long sys_waitid(int which, pid_t pid,
                           struct siginfo __user *infop, int options,
                           struct rusage __user *ru)
{
        long ret;

        if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
                return -EINVAL;
        if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
                return -EINVAL;

        switch (which) {
        case P_ALL:
                pid = -1;
                break;
        case P_PID:
                if (pid <= 0)
                        return -EINVAL;
                break;
        case P_PGID:
                if (pid <= 0)
                        return -EINVAL;
                pid = -pid;
                break;
        default:
                return -EINVAL;
        }

        ret = do_wait(pid, options, infop, NULL, ru);

        /* avoid REGPARM breakage on x86: */
        prevent_tail_call(ret);
        return ret;
}

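/*
 * wait4(2): the traditional BSD-style wait.  WEXITED is always implied,
 * so zombies are eligible by default; WUNTRACED and WCONTINUED opt in to
 * stop/continue reports.  Status is packed into the word at @stat_addr.
 */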
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
                          int options, struct rusage __user *ru)
{
        long ret;

        if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
                        __WNOTHREAD|__WCLONE|__WALL))
                return -EINVAL;
        ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

        /* avoid REGPARM breakage on x86: */
        prevent_tail_call(ret);
        return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
        return sys_wait4(pid, stat_addr, options, NULL);
}

#endif