1 /* $OpenBSD: kern_exit.c,v 1.71 2007/04/12 22:14:15 tedu Exp $ */
2 /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
3
4 /*
5 * Copyright (c) 1982, 1986, 1989, 1991, 1993
6 * The Regents of the University of California. All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
38 */
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/ioctl.h>
43 #include <sys/proc.h>
44 #include <sys/tty.h>
45 #include <sys/time.h>
46 #include <sys/resource.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/wait.h>
50 #include <sys/file.h>
51 #include <sys/vnode.h>
52 #include <sys/syslog.h>
53 #include <sys/malloc.h>
54 #include <sys/resourcevar.h>
55 #include <sys/ptrace.h>
56 #include <sys/acct.h>
57 #include <sys/filedesc.h>
58 #include <sys/signalvar.h>
59 #include <sys/sched.h>
60 #include <sys/ktrace.h>
61 #include <sys/pool.h>
62 #include <sys/mutex.h>
63 #ifdef SYSVSHM
64 #include <sys/shm.h>
65 #endif
66 #ifdef SYSVSEM
67 #include <sys/sem.h>
68 #endif
69
70 #include "systrace.h"
71 #include <dev/systrace.h>
72
73 #include <sys/mount.h>
74 #include <sys/syscallargs.h>
75
76 #include <machine/cpu.h>
77
78 #include <uvm/uvm_extern.h>
79
80 /*
81 * exit --
82 * Death of process.
83 */
84 int
85 sys_exit(struct proc *p, void *v, register_t *retval)
86 {
87 struct sys_exit_args /* {
88 syscallarg(int) rval;
89 } */ *uap = v;
90
91 exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
92 /* NOTREACHED */
93 return (0);
94 }
95
#ifdef RTHREADS
/*
 * threxit(2): terminate only the calling rthread.
 *
 * EXIT_THREAD tells exit1() not to take the whole process down
 * (exit1() only kills sibling threads for EXIT_NORMAL).
 */
int
sys_threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys_threxit_args *args = v;

	exit1(p, W_EXITCODE(SCARG(args, rval), 0), EXIT_THREAD);

	return (0);
}
#endif
107
108 /*
109 * Exit: deallocate address space and other resources, change proc state
110 * to zombie, and unlink proc from allproc and parent's lists. Save exit
111 * status and rusage for wait(). Check for child processes and orphan them.
112 */
113 void
114 exit1(struct proc *p, int rv, int flags)
115 {
116 struct proc *q, *nq;
117
118 if (p->p_pid == 1)
119 panic("init died (signal %d, exit %d)",
120 WTERMSIG(rv), WEXITSTATUS(rv));
121
122 /* unlink ourselves from the active threads */
123 TAILQ_REMOVE(&p->p_p->ps_threads, p, p_thr_link);
124 #ifdef RTHREADS
125 if (TAILQ_EMPTY(&p->p_p->ps_threads))
126 wakeup(&p->p_p->ps_threads);
127 /*
128 * if one thread calls exit, we take down everybody.
129 * we have to be careful not to get recursively caught.
130 * this is kinda sick.
131 */
132 if (flags == EXIT_NORMAL && p->p_p->ps_mainproc != p &&
133 (p->p_p->ps_mainproc->p_flag & P_WEXIT) == 0) {
134 /*
135 * we are one of the threads. we SIGKILL the parent,
136 * it will wake us up again, then we proceed.
137 */
138 atomic_setbits_int(&p->p_p->ps_mainproc->p_flag, P_IGNEXITRV);
139 p->p_p->ps_mainproc->p_xstat = rv;
140 psignal(p->p_p->ps_mainproc, SIGKILL);
141 tsleep(p->p_p, PUSER, "thrdying", 0);
142 } else if (p == p->p_p->ps_mainproc) {
143 atomic_setbits_int(&p->p_flag, P_WEXIT);
144 if (flags == EXIT_NORMAL) {
145 q = TAILQ_FIRST(&p->p_p->ps_threads);
146 for (; q != NULL; q = nq) {
147 nq = TAILQ_NEXT(q, p_thr_link);
148 atomic_setbits_int(&q->p_flag, P_IGNEXITRV);
149 q->p_xstat = rv;
150 psignal(q, SIGKILL);
151 }
152 }
153 wakeup(p->p_p);
154 while (!TAILQ_EMPTY(&p->p_p->ps_threads))
155 tsleep(&p->p_p->ps_threads, PUSER, "thrdeath", 0);
156 }
157 #endif
158
159 if (p->p_flag & P_PROFIL)
160 stopprofclock(p);
161 p->p_ru = pool_get(&rusage_pool, PR_WAITOK);
162 /*
163 * If parent is waiting for us to exit or exec, P_PPWAIT is set; we
164 * wake up the parent early to avoid deadlock.
165 */
166 atomic_setbits_int(&p->p_flag, P_WEXIT);
167 atomic_clearbits_int(&p->p_flag, P_TRACED);
168 if (p->p_flag & P_PPWAIT) {
169 atomic_clearbits_int(&p->p_flag, P_PPWAIT);
170 wakeup(p->p_pptr);
171 }
172 p->p_sigignore = ~0;
173 p->p_siglist = 0;
174 timeout_del(&p->p_realit_to);
175 timeout_del(&p->p_stats->p_virt_to);
176 timeout_del(&p->p_stats->p_prof_to);
177
178 /*
179 * Close open files and release open-file table.
180 * This may block!
181 */
182 fdfree(p);
183
184 #ifdef SYSVSEM
185 semexit(p);
186 #endif
187 if (SESS_LEADER(p)) {
188 struct session *sp = p->p_session;
189
190 if (sp->s_ttyvp) {
191 /*
192 * Controlling process.
193 * Signal foreground pgrp,
194 * drain controlling terminal
195 * and revoke access to controlling terminal.
196 */
197 if (sp->s_ttyp->t_session == sp) {
198 if (sp->s_ttyp->t_pgrp)
199 pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
200 (void) ttywait(sp->s_ttyp);
201 /*
202 * The tty could have been revoked
203 * if we blocked.
204 */
205 if (sp->s_ttyvp)
206 VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
207 }
208 if (sp->s_ttyvp)
209 vrele(sp->s_ttyvp);
210 sp->s_ttyvp = NULL;
211 /*
212 * s_ttyp is not zero'd; we use this to indicate
213 * that the session once had a controlling terminal.
214 * (for logging and informational purposes)
215 */
216 }
217 sp->s_leader = NULL;
218 }
219 fixjobc(p, p->p_pgrp, 0);
220 #ifdef ACCOUNTING
221 (void)acct_process(p);
222 #endif
223 #ifdef KTRACE
224 /*
225 * release trace file
226 */
227 p->p_traceflag = 0; /* don't trace the vrele() */
228 if (p->p_tracep)
229 ktrsettracevnode(p, NULL);
230 #endif
231 #if NSYSTRACE > 0
232 if (ISSET(p->p_flag, P_SYSTRACE))
233 systrace_exit(p);
234 #endif
235 /*
236 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
237 */
238 p->p_stat = SDEAD;
239
240 /*
241 * Remove proc from pidhash chain so looking it up won't
242 * work. Move it from allproc to zombproc, but do not yet
243 * wake up the reaper. We will put the proc on the
244 * deadproc list later (using the p_hash member), and
245 * wake up the reaper when we do.
246 */
247 LIST_REMOVE(p, p_hash);
248 LIST_REMOVE(p, p_list);
249 LIST_INSERT_HEAD(&zombproc, p, p_list);
250
251 /*
252 * Give orphaned children to init(8).
253 */
254 q = LIST_FIRST(&p->p_children);
255 if (q) /* only need this if any child is S_ZOMB */
256 wakeup(initproc);
257 for (; q != 0; q = nq) {
258 nq = LIST_NEXT(q, p_sibling);
259 proc_reparent(q, initproc);
260 /*
261 * Traced processes are killed
262 * since their existence means someone is screwing up.
263 */
264 if (q->p_flag & P_TRACED) {
265 atomic_clearbits_int(&q->p_flag, P_TRACED);
266 psignal(q, SIGKILL);
267 }
268 }
269
270
271 /*
272 * Save exit status and final rusage info, adding in child rusage
273 * info and self times.
274 */
275 if (!(p->p_flag & P_IGNEXITRV))
276 p->p_xstat = rv;
277 *p->p_ru = p->p_stats->p_ru;
278 calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
279 ruadd(p->p_ru, &p->p_stats->p_cru);
280
281 /*
282 * clear %cpu usage during swap
283 */
284 p->p_pctcpu = 0;
285
286 /*
287 * notify interested parties of our demise.
288 */
289 KNOTE(&p->p_klist, NOTE_EXIT);
290
291 /*
292 * Notify parent that we're gone. If we have P_NOZOMBIE or parent has
293 * the P_NOCLDWAIT flag set, notify process 1 instead (and hope it
294 * will handle this situation).
295 */
296 if ((p->p_flag & P_NOZOMBIE) || (p->p_pptr->p_flag & P_NOCLDWAIT)) {
297 struct proc *pp = p->p_pptr;
298 proc_reparent(p, initproc);
299 /*
300 * If this was the last child of our parent, notify
301 * parent, so in case he was wait(2)ing, he will
302 * continue.
303 */
304 if (LIST_EMPTY(&pp->p_children))
305 wakeup(pp);
306 }
307
308 if (p->p_exitsig != 0)
309 psignal(p->p_pptr, P_EXITSIG(p));
310 wakeup(p->p_pptr);
311
312 /*
313 * Release the process's signal state.
314 */
315 sigactsfree(p);
316
317 /*
318 * Clear curproc after we've done all operations
319 * that could block, and before tearing down the rest
320 * of the process state that might be used from clock, etc.
321 * Also, can't clear curproc while we're still runnable,
322 * as we're not on a run queue (we are current, just not
323 * a proper proc any longer!).
324 *
325 * Other substructures are freed from wait().
326 */
327 curproc = NULL;
328
329 /*
330 * If emulation has process exit hook, call it now.
331 */
332 if (p->p_emul->e_proc_exit)
333 (*p->p_emul->e_proc_exit)(p);
334
335 /* This process no longer needs to hold the kernel lock. */
336 KERNEL_PROC_UNLOCK(p);
337
338 /*
339 * Finally, call machine-dependent code to switch to a new
340 * context (possibly the idle context). Once we are no longer
341 * using the dead process's vmspace and stack, exit2() will be
342 * called to schedule those resources to be released by the
343 * reaper thread.
344 *
345 * Note that cpu_exit() will end with a call equivalent to
346 * cpu_switch(), finishing our execution (pun intended).
347 */
348 uvmexp.swtch++;
349 cpu_exit(p);
350 }
351
352 /*
353 * Locking of this proclist is special; it's accessed in a
354 * critical section of process exit, and thus locking it can't
355 * modify interrupt state. We use a simple spin lock for this
356 * proclist. Processes on this proclist are also on zombproc;
357 * we use the p_hash member to linkup to deadproc.
358 */
359 struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
360 struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);
361
362 /*
363 * We are called from cpu_exit() once it is safe to schedule the
364 * dead process's resources to be freed.
365 *
366 * NOTE: One must be careful with locking in this routine. It's
367 * called from a critical section in machine-dependent code, so
368 * we should refrain from changing any interrupt state.
369 *
370 * We lock the deadproc list, place the proc on that list (using
371 * the p_hash member), and wake up the reaper.
372 */
373 void
374 exit2(struct proc *p)
375 {
376 int s;
377
378 mtx_enter(&deadproc_mutex);
379 LIST_INSERT_HEAD(&deadproc, p, p_hash);
380 mtx_leave(&deadproc_mutex);
381
382 wakeup(&deadproc);
383
384 SCHED_LOCK(s);
385 }
386
387 /*
388 * Process reaper. This is run by a kernel thread to free the resources
389 * of a dead process. Once the resources are free, the process becomes
390 * a zombie, and the parent is allowed to read the undead's status.
391 */
392 void
393 reaper(void)
394 {
395 struct proc *p;
396
397 KERNEL_PROC_UNLOCK(curproc);
398
399 for (;;) {
400 mtx_enter(&deadproc_mutex);
401 p = LIST_FIRST(&deadproc);
402 if (p == NULL) {
403 /* No work for us; go to sleep until someone exits. */
404 mtx_leave(&deadproc_mutex);
405 (void) tsleep(&deadproc, PVM, "reaper", 0);
406 continue;
407 }
408
409 /* Remove us from the deadproc list. */
410 LIST_REMOVE(p, p_hash);
411 mtx_leave(&deadproc_mutex);
412 KERNEL_PROC_LOCK(curproc);
413
414 /*
415 * Give machine-dependent code a chance to free any
416 * resources it couldn't free while still running on
417 * that process's context. This must be done before
418 * uvm_exit(), in case these resources are in the PCB.
419 */
420 cpu_wait(p);
421
422 /*
423 * Free the VM resources we're still holding on to.
424 * We must do this from a valid thread because doing
425 * so may block.
426 */
427 uvm_exit(p);
428
429 /* Process is now a true zombie. */
430 if ((p->p_flag & P_NOZOMBIE) == 0) {
431 p->p_stat = SZOMB;
432
433 /* Wake up the parent so it can get exit status. */
434 psignal(p->p_pptr, SIGCHLD);
435 wakeup(p->p_pptr);
436 } else {
437 /* Noone will wait for us. Just zap the process now */
438 proc_zap(p);
439 }
440
441 KERNEL_PROC_UNLOCK(curproc);
442 }
443 }
444
/*
 * wait4(2) system call: collect status of a stopped, continued, or
 * zombie child.  q is the waiting (parent) process.  On success the
 * child's pid is returned via retval[0] and, if requested, its status
 * word and rusage are copied out to userland.
 */
pid_t
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	int nfound;
	struct proc *p, *t;
	int status, error;

	/* pid 0 means "any child in our process group" (negated pgid). */
	if (SCARG(uap, pid) == 0)
		SCARG(uap, pid) = -q->p_pgid;
	/* Reject any option bits we do not implement. */
	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WALTSIG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		/* Skip no-zombie children and those not matching pid. */
		if ((p->p_flag & P_NOZOMBIE) ||
		    (SCARG(uap, pid) != WAIT_ANY &&
		    p->p_pid != SCARG(uap, pid) &&
		    p->p_pgid != -SCARG(uap, pid)))
			continue;

		/*
		 * Wait for processes with p_exitsig != SIGCHLD only
		 * if WALTSIG is set; wait for processes with p_exitsig ==
		 * SIGCHLD only if WALTSIG is clear.
		 */
		if ((SCARG(uap, options) & WALTSIG) ?
		    (p->p_exitsig == SIGCHLD) : (P_EXITSIG(p) != SIGCHLD))
			continue;

		nfound++;
		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = p->p_xstat;	/* convert to int */
				error = copyout(&status,
				    SCARG(uap, status), sizeof(status));
				if (error)
					return (error);
			}
			if (SCARG(uap, rusage) &&
			    (error = copyout(p->p_ru,
			    SCARG(uap, rusage), sizeof(struct rusage))))
				return (error);

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 * Note the child stays a zombie for the old
			 * parent to reap; we return without zapping it.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				if (p->p_exitsig != 0)
					psignal(t, P_EXITSIG(p));
				wakeup(t);
				return (0);
			}

			/* Fold the child's rusage into ours and free it. */
			scheduler_wait_hook(q, p);
			p->p_xstat = 0;
			ruadd(&q->p_stats->p_cru, p->p_ru);

			proc_zap(p);

			return (0);
		}
		/* Report a stopped child once (P_WAITED marks it done). */
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || SCARG(uap, options) & WUNTRACED)) {
			atomic_setbits_int(&p->p_flag, P_WAITED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
		/* Report a SIGCONT-ed child when WCONTINUED was requested. */
		if ((SCARG(uap, options) & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = _WCONTINUED;
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (SCARG(uap, options) & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	/* Block until a child changes state, then rescan. */
	if ((error = tsleep(q, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}
554
555 /*
556 * make process 'parent' the new parent of process 'child'.
557 */
558 void
559 proc_reparent(struct proc *child, struct proc *parent)
560 {
561
562 if (child->p_pptr == parent)
563 return;
564
565 if (parent == initproc)
566 child->p_exitsig = SIGCHLD;
567
568 LIST_REMOVE(child, p_sibling);
569 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
570 child->p_pptr = parent;
571 }
572
/*
 * Final disposal of a zombie (or P_NOZOMBIE) proc: free its rusage and
 * ptrace state, unlink it from zombproc, its pgrp, and its sibling
 * list, drop per-uid accounting, and return its memory to the pools.
 * Called from sys_wait4() and, for no-zombie procs, from reaper().
 */
void
proc_zap(struct proc *p)
{
	pool_put(&rusage_pool, p->p_ru);
	if (p->p_ptstat)
		free(p->p_ptstat, M_SUBPROC);

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(p);
	LIST_REMOVE(p, p_list);	/* off zombproc */
	LIST_REMOVE(p, p_sibling);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(p->p_cred->p_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	if (p->p_textvp)
		vrele(p->p_textvp);

	/*
	 * Remove us from our process list, possibly killing the process
	 * in the process (pun intended).
	 */
#if 0
	/* NOTE(review): disabled — exit1() already removed p from
	 * ps_threads; confirm before ever re-enabling. */
	TAILQ_REMOVE(&p->p_p->ps_threads, p, p_thr_link);
#endif
	/* Last proc gone: free the shared process-level structures. */
	if (TAILQ_EMPTY(&p->p_p->ps_threads)) {
		limfree(p->p_p->ps_limit);
		if (--p->p_p->ps_cred->p_refcnt == 0) {
			crfree(p->p_p->ps_cred->pc_ucred);
			pool_put(&pcred_pool, p->p_p->ps_cred);
		}
		pool_put(&process_pool, p->p_p);
	}

	pool_put(&proc_pool, p);
	nprocs--;
}
618