This source file includes the following definitions:
- check_exec
- sys_execve
- copyargs
- exec_sigcode_map

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/conf.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/reg.h>

#include <dev/rndvar.h>

#include "systrace.h"

#if NSYSTRACE > 0
#include <dev/systrace.h>
#endif

int exec_sigcode_map(struct proc *, struct emul *);

int stackgap_random = STACKGAP_RANDOM;

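/*
 * check_exec(): locate and validate an executable image.
 *
 * Given an exec package whose ep_ndp names the file to run, look the
 * file up, verify that it is a regular file on a mount that allows
 * execution and that it is executable by the current credentials,
 * open it, read the first ep_hdrlen bytes into ep_hdr, and let each
 * registered execsw handler try to recognize the image and build the
 * vmcmds for it.  On success 0 is returned with ep_vp open (and
 * unlocked) and the namei pathname buffer still held; on failure the
 * resources acquired here are released and an errno value is returned.
 */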
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;

	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

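	/* check file type: only regular files can be executed */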
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto bad1;
	}
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

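	/*
	 * check mount point: refuse to exec from noexec file systems,
	 * and ignore the set-id bits on nosuid ones.
	 */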
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	VOP_UNLOCK(vp, 0, p);

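	/* now the file is open: read in the exec header */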
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

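	/*
	 * Set up the vmcmds for creation of the process address space:
	 * ask each registered exec handler in turn to recognize the
	 * header.  Keep the first "interesting" error (anything other
	 * than ENOEXEC); handlers whose emulation is not enabled are
	 * rejected with EPERM, and a package flagged EXEC_DESTR is
	 * returned immediately on error.
	 */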
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		if (!newerror && !(epp->ep_emul->e_flags & EMUL_ENABLED))
			newerror = EPERM;
		/* make sure the first "interesting" error code is saved */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > p->p_rlimit[RLIMIT_DATA].rlim_cur))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * failure: free any vmspace-creation commands gathered by the
	 * exec handlers and release their references.
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/* close and release the vnode and free the pathname buffer */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/* free the pathname buffer and put the (still locked) vnode */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}

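/*
 * exec system call
 */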
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
	long argc, envc;
	size_t len, sgap;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = p->p_vmspace;
	char **tmpfap;
	extern struct emul emul_native;
#if NSYSTRACE > 0
	int wassugid =
	    ISSET(p->p_flag, P_SUGID) || ISSET(p->p_flag, P_SUGIDEXEC);
	size_t pathbuflen;
#endif
	char *pathbuf = NULL;

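	/*
	 * Mark this process as "in exec" for the duration of the call,
	 * so other parts of the kernel can tell its image is being
	 * replaced.
	 */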
	atomic_setbits_int(&p->p_flag, P_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		systrace_execve0(p);
		pathbuf = pool_get(&namei_pool, PR_WAITOK);
		error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN,
		    &pathbuflen);
		if (error != 0)
			goto clrflag;
	}
#endif
	if (pathbuf != NULL) {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
	} else {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE,
		    SCARG(uap, path), p);
	}

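	/*
	 * initialize the fields of the exec package.
	 */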
	if (pathbuf != NULL)
		pack.ep_name = pathbuf;
	else
		pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

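	/* allocate an argument buffer */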
	argp = (char *)uvm_km_valloc_wait(exec_map, NCARGS);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

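	/* copy the fake args list, if there's one, freeing it as we go */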
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (*tmpfap != NULL) {
			char *cp;

			cp = *tmpfap;
			while (*cp)
				*dp++ = *cp++;
			dp++;

			free(*tmpfap, M_EXEC);
			tmpfap++; argc++;
		}
		FREE(pack.ep_fa, M_EXEC);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

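	/* copy the argument strings in from user space, counting them */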
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	envc = 0;
	/* environment need not be there */
	if ((cpp = SCARG(uap, envp)) != NULL) {
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}
	}

	dp = (char *)ALIGN(dp);

	/* reserve a stack gap, randomizing its length if enabled */
	sgap = STACKGAPLEN;
	if (stackgap_random != 0)
		sgap += (arc4random() * ALIGNBYTES) & (stackgap_random - 1);

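	/* now check if args & environ fit into the new stack */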
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = ALIGN(len);	/* make the stack "safely" aligned */

	if (len > pack.ep_ssize) {
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;

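	/*
	 * Destroy the old address space and create a new, empty one,
	 * then record the new image's text, data and stack dimensions
	 * from the exec package.
	 */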
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = p->p_vmspace;

	vm->vm_taddr = (char *)pack.ep_taddr;
	vm->vm_tsize = btoc(pack.ep_tsize);
	vm->vm_daddr = (char *)pack.ep_daddr;
	vm->vm_dsize = btoc(pack.ep_dsize);
	vm->vm_dused = 0;
	vm->vm_ssize = btoc(pack.ep_ssize);
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)USRSTACK + sizeof(arginfo);
	slen = len - sizeof(arginfo);
#else
	stack = (char *)(USRSTACK - len);
#endif

	/* copy the arguments and environment onto the new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)PS_STRINGS, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(p);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */

	/* set command name & other accounting info */
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	bcopy(nid.ni_cnd.cn_nameptr, p->p_comm, len);
	p->p_comm[len] = 0;
	p->p_acflag &= ~AFORK;

	/* record the new text vnode, dropping any old reference */
	if (p->p_textvp)
		vrele(p->p_textvp);
	VREF(pack.ep_vp);
	p->p_textvp = pack.ep_vp;

	atomic_setbits_int(&p->p_flag, P_EXEC);
	if (p->p_flag & P_PPWAIT) {
		atomic_clearbits_int(&p->p_flag, P_PPWAIT);
		wakeup((caddr_t)p->p_pptr);
	}

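	/*
	 * If the effective ids do not match the real and saved ids,
	 * the process keeps P_SUGIDEXEC across this exec; otherwise
	 * the flag is cleared.
	 */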
	if (p->p_ucred->cr_uid != p->p_cred->p_ruid ||
	    p->p_ucred->cr_uid != p->p_cred->p_svuid ||
	    p->p_ucred->cr_gid != p->p_cred->p_rgid ||
	    p->p_ucred->cr_gid != p->p_cred->p_svgid)
		atomic_setbits_int(&p->p_flag, P_SUGIDEXEC);
	else
		atomic_clearbits_int(&p->p_flag, P_SUGIDEXEC);

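	/*
	 * deal with set[ug]id: if the executable has the set-id bits
	 * and the process is allowed to gain privileges, switch the
	 * effective ids and take the extra precautions below.
	 */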
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&p->p_flag, P_SUGID|P_SUGIDEXEC);

#ifdef KTRACE
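		/*
		 * If the process is being ktraced and the trace was not
		 * set up by root, stop tracing before the credentials
		 * change.
		 */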
		if (p->p_tracep && !(p->p_traceflag & KTRFAC_ROOT)) {
			p->p_traceflag = 0;
			ktrsettracevnode(p, NULL);
		}
#endif
		p->p_ucred = crcopy(cred);
		if (attr.va_mode & VSUID)
			p->p_ucred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			p->p_ucred->cr_gid = attr.va_gid;

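		/*
		 * Make sure file descriptors 0..2 are in a sane state
		 * for the set-id image: close writable procfs
		 * descriptors and open /dev/null on any descriptor
		 * that is not open, so the new image does not inherit
		 * surprising descriptors in that range.
		 */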
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			fp = fd_getfile(p->p_fd, i);
#ifdef PROCFS
			/*
			 * Close descriptors that are writing to procfs.
			 */
			if (fp && fp->f_type == DTYPE_VNODE &&
			    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS &&
			    (fp->f_flag & FWRITE)) {
				fdrelease(p, i);
				fp = NULL;
			}
#endif

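			/*
			 * if one of the std descriptors is not open,
			 * open /dev/null on it in its place.
			 */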
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					goto exec_abort;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					goto exec_abort;
				}
				if ((error = VOP_OPEN(vp, flags, p->p_ucred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					goto exec_abort;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp);
			}
		}
	} else
		atomic_clearbits_int(&p->p_flag, P_SUGID);
	p->p_cred->p_svuid = p->p_ucred->cr_uid;
	p->p_cred->p_svgid = p->p_ucred->cr_gid;

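	/*
	 * For set-id execs, clear the real and virtual interval timers
	 * inherited from the old image (so a timer armed before the
	 * exec cannot deliver signals into the new, privileged image).
	 */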
	if (p->p_flag & P_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&p->p_realit_to);
		timerclear(&p->p_realtimer.it_interval);
		timerclear(&p->p_realtimer.it_value);
		for (i = 0; i < sizeof(p->p_stats->p_timer) /
		    sizeof(p->p_stats->p_timer[0]); i++) {
			timerclear(&p->p_stats->p_timer[i].it_interval);
			timerclear(&p->p_stats->p_timer[i].it_value);
		}
		splx(s);
	}

	/* release the argument buffer */
	uvm_km_free_wakeup(exec_map, (vaddr_t)argp, NCARGS);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/* notify others that we exec'd */
	KNOTE(&p->p_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(p, pack.ep_emul))
		goto free_pack_abort;

	if (p->p_flag & P_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC);

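	/*
	 * If this exec switches emulations, give the old emulation a
	 * chance to release its per-process state before the new one
	 * takes over.
	 */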
	if (p->p_emul && p->p_emul->e_proc_exit &&
	    p->p_emul != pack.ep_emul)
		(*p->p_emul->e_proc_exit)(p);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_emul->e_proc_exec)
		(*pack.ep_emul->e_proc_exec)(p, &pack);

	/* update p_emul, the old value is no longer needed */
	p->p_emul = pack.ep_emul;

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EMUL))
		ktremul(p, p->p_emul->e_name);
#endif

	atomic_clearbits_int(&p->p_flag, P_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE) &&
	    wassugid && !ISSET(p->p_flag, P_SUGID) &&
	    !ISSET(p->p_flag, P_SUGIDEXEC))
		systrace_execve1(pathbuf, p);
#endif

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);

bad:
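	/*
	 * Error during argument or image processing: undo what has been
	 * set up in the exec package (vmcmds, any descriptor passed back
	 * by a handler, interpreter name and emulation argument), then
	 * release the vnode, pathname buffer and argument buffer.
	 */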
	kill_vmcmds(&pack.ep_vmcmds);

	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		(void) fdrelease(p, pack.ep_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		FREE(pack.ep_emul_arg, M_TEMP);

	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	uvm_km_free_wakeup(exec_map, (vaddr_t)argp, NCARGS);

freehdr:
	free(pack.ep_hdr, M_EXEC);
#if NSYSTRACE > 0
clrflag:
#endif
	atomic_clearbits_int(&p->p_flag, P_INEXEC);

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (error);

exec_abort:
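	/*
	 * The old process image has already been torn down, so the error
	 * cannot be returned to the (no longer existing) program; release
	 * what was built for the new image and terminate the process.
	 */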
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		FREE(pack.ep_emul_arg, M_TEMP);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	uvm_km_free_wakeup(exec_map, (vaddr_t)argp, NCARGS);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&p->p_flag, P_INEXEC);
	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);
}

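/*
 * copyargs(): lay out the new process's argument area on its stack:
 * argc, the argv pointer vector and its terminating NULL, the envp
 * pointer vector and its terminating NULL, followed by the argument
 * and environment strings copied from the kernel buffer argp.
 * Returns a pointer just past the envp terminator (where any
 * emulation-specific entries may be placed), or NULL if a copyout
 * fault occurs.
 */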
void *
copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
    void *argp)
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	long argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return (NULL);

	dp = (char *)(cpp + argc + envc + 2 + pack->ep_emul->e_arglen);
	sp = argp;

	/* record the user-space location of the argv vector */
	arginfo->ps_argvstr = cpp;

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	/* and of the envp vector */
	arginfo->ps_envstr = cpp;

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	return (cpp);
}

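/*
 * exec_sigcode_map(): map the emulation's signal trampoline code into
 * the process's address space and record its location in p_sigcode.
 */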
int
exec_sigcode_map(struct proc *p, struct emul *e)
{
	vsize_t sz;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

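	/*
	 * If this emulation has no sigobject yet, create one: an
	 * anonymous memory object that holds a copy of the sigcode and
	 * that every process using this emulation maps.  It is built
	 * once by mapping it into the kernel, copying the sigcode in,
	 * and unmapping it again, while keeping a reference so the
	 * object persists.
	 */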
	if (e->e_sigobject == NULL) {
		vaddr_t va;
		int r;

		e->e_sigobject = uao_create(sz, 0);
		uao_reference(e->e_sigobject);

		va = vm_map_min(kernel_map);
		if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject,
		    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
		    UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
			uao_detach(e->e_sigobject);
			return (ENOMEM);
		}
		memcpy((void *)va, e->e_sigcode, sz);
		uvm_unmap(kernel_map, va, va + round_page(sz));
	}

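	/*
	 * Map the shared sigcode object read/execute into the process,
	 * using uvm_map_hint() only as a hint for where to place it.
	 */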
	p->p_sigcode = uvm_map_hint(p, VM_PROT_READ|VM_PROT_EXECUTE);
	uao_reference(e->e_sigobject);
	if (uvm_map(&p->p_vmspace->vm_map, &p->p_sigcode, round_page(sz),
	    e->e_sigobject, 0, 0, UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0))) {
		uao_detach(e->e_sigobject);
		return (ENOMEM);
	}

	return (0);
}