root/kern/exec_subr.c


DEFINITIONS

This source file includes the following definitions:
  1. new_vmcmd
  2. vmcmdset_extend
  3. kill_vmcmds
  4. exec_process_vmcmds
  5. vmcmd_map_pagedvn
  6. vmcmd_map_readvn
  7. vmcmd_map_zero
  8. exec_setup_stack

/*      $OpenBSD: exec_subr.c,v 1.28 2006/11/14 18:00:27 jmc Exp $      */
/*      $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $    */

/*
 * Copyright (c) 1993, 1994 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>

#include <uvm/uvm.h>

#ifdef DEBUG
/*
 * new_vmcmd():
 *      create a new vmcmd structure and fill in its fields based
 *      on function call arguments.  make sure objects ref'd by
 *      the vmcmd are 'held'.
 *
 * If not debugging, this is a macro, so it's expanded inline.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct proc *p, struct exec_vmcmd *), u_long len, u_long addr,
    struct vnode *vp, u_long offset, u_int prot, int flags)
{
        struct exec_vmcmd *vcp;

        if (evsp->evs_used >= evsp->evs_cnt)
                vmcmdset_extend(evsp);
        vcp = &evsp->evs_cmds[evsp->evs_used++];
        vcp->ev_proc = proc;
        vcp->ev_len = len;
        vcp->ev_addr = addr;
        if ((vcp->ev_vp = vp) != NULL)
                vref(vp);
        vcp->ev_offset = offset;
        vcp->ev_prot = prot;
        vcp->ev_flags = flags;
}
#endif /* DEBUG */

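/*
 * When DEBUG is not defined, the header provides the same operation as a
 * macro so it expands inline at each call site.  The sketch below shows
 * roughly what such a macro form looks like; it is illustrative only
 * (the real definition, and its exact name, live in <sys/exec.h>) and
 * is excluded from compilation here.
 */
#if 0
#define NEW_VMCMD2(evsp, proc, len, addr, vp, offset, prot, flags)      \
do {                                                                    \
        struct exec_vmcmd *__vcp;                                       \
        if ((evsp)->evs_used >= (evsp)->evs_cnt)                        \
                vmcmdset_extend(evsp);                                  \
        __vcp = &(evsp)->evs_cmds[(evsp)->evs_used++];                  \
        __vcp->ev_proc = (proc);                                        \
        __vcp->ev_len = (len);                                          \
        __vcp->ev_addr = (addr);                                        \
        if ((__vcp->ev_vp = (vp)) != NULL)                              \
                vref(vp);                                               \
        __vcp->ev_offset = (offset);                                    \
        __vcp->ev_prot = (prot);                                        \
        __vcp->ev_flags = (flags);                                      \
} while (0)
#endif
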
void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
        struct exec_vmcmd *nvcp;
        u_int ocnt;

#ifdef DIAGNOSTIC
        if (evsp->evs_used < evsp->evs_cnt)
                panic("vmcmdset_extend: not necessary");
#endif

        ocnt = evsp->evs_cnt;
        KASSERT(ocnt > 0);
        /* figure out number of entries in new set */
        evsp->evs_cnt += ocnt;

        /* reallocate the command set */
        nvcp = malloc(evsp->evs_cnt * sizeof(struct exec_vmcmd), M_EXEC,
            M_WAITOK);
        bcopy(evsp->evs_cmds, nvcp, (ocnt * sizeof(struct exec_vmcmd)));
        if (evsp->evs_cmds != evsp->evs_start)
                free(evsp->evs_cmds, M_EXEC);
        evsp->evs_cmds = nvcp;
}

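/*
 * Note on the growth policy above: evs_cnt doubles on every extension,
 * so the set grows geometrically (N, 2N, 4N, ...) from its initial
 * EXEC_DEFAULT_VMCMD_SETSIZE entries, giving amortized-constant cost
 * per new_vmcmd().  Only heap-allocated arrays are freed; the original
 * evs_start array is never passed to free().
 */
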
void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
        struct exec_vmcmd *vcp;
        int i;

        for (i = 0; i < evsp->evs_used; i++) {
                vcp = &evsp->evs_cmds[i];
                if (vcp->ev_vp != NULLVP)
                        vrele(vcp->ev_vp);
        }

        /*
         * Free old vmcmds and reset the array.
         */
        evsp->evs_used = 0;
        if (evsp->evs_cmds != evsp->evs_start)
                free(evsp->evs_cmds, M_EXEC);
        evsp->evs_cmds = evsp->evs_start;
        evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;
}

int
exec_process_vmcmds(struct proc *p, struct exec_package *epp)
{
        struct exec_vmcmd *base_vc = NULL;
        int error = 0;
        int i;

        for (i = 0; i < epp->ep_vmcmds.evs_used && !error; i++) {
                struct exec_vmcmd *vcp;

                vcp = &epp->ep_vmcmds.evs_cmds[i];

                if (vcp->ev_flags & VMCMD_RELATIVE) {
#ifdef DIAGNOSTIC
                        if (base_vc == NULL)
                                panic("exec_process_vmcmds: RELATIVE no base");
#endif
                        vcp->ev_addr += base_vc->ev_addr;
                }
                error = (*vcp->ev_proc)(p, vcp);
                if (vcp->ev_flags & VMCMD_BASE) {
                        base_vc = vcp;
                }
        }

        kill_vmcmds(&epp->ep_vmcmds);

        return (error);
}

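/*
 * A hypothetical loader fragment (not code from this file) showing how
 * VMCMD_BASE and VMCMD_RELATIVE cooperate with the loop above: the text
 * mapping is tagged VMCMD_BASE, so the data vmcmd's ev_addr is later
 * adjusted by wherever the text mapping actually landed.  All the
 * identifiers (text_len, data_reladdr, and so on) are made up for
 * illustration, and the fragment is excluded from compilation.
 */
#if 0
        NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_pagedvn, text_len, text_addr,
            vp, text_off, VM_PROT_READ|VM_PROT_EXECUTE, VMCMD_BASE);
        NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_readvn, data_len, data_reladdr,
            vp, data_off, VM_PROT_READ|VM_PROT_WRITE, VMCMD_RELATIVE);
#endif
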
/*
 * vmcmd_map_pagedvn():
 *      handle vmcmd which specifies that a vnode should be mmap'd.
 *      appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
{
        /*
         * note that if you're going to map part of a process as being
         * paged from a vnode, that vnode had damn well better be marked as
         * VTEXT.  that's handled in the routine which sets up the vmcmd to
         * call this routine.
         */
        struct uvm_object *uobj;
        int error;

        /*
         * map the vnode in using uvm_map.
         */

        if (cmd->ev_len == 0)
                return (0);
        if (cmd->ev_offset & PAGE_MASK)
                return (EINVAL);
        if (cmd->ev_addr & PAGE_MASK)
                return (EINVAL);
        if (cmd->ev_len & PAGE_MASK)
                return (EINVAL);

        /*
         * first, attach to the object
         */

        uobj = uvn_attach((void *)cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
        if (uobj == NULL)
                return (ENOMEM);

        /*
         * do the map
         */

        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
            uobj, cmd->ev_offset, 0,
            UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
            UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));

        if (error) {
                /*
                 * error: detach from object.  uvn_attach() above gained a
                 * reference to the vnode's uvm_object; on success uvm_map()
                 * consumes it, but on failure we must drop it ourselves or
                 * the object would be leaked.
                 */
                uobj->pgops->pgo_detach(uobj);
        }

        return (error);
}

/*
 * vmcmd_map_readvn():
 *      handle vmcmd which specifies that a vnode should be read from.
 *      appropriate for non-demand-paged text/data segments, i.e. impure
 *      objects (a la OMAGIC and NMAGIC).
 */

int
vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
{
        int error;
        vm_prot_t prot;

        if (cmd->ev_len == 0)
                return (0);

        prot = cmd->ev_prot;

        cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
            round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(prot | UVM_PROT_WRITE, UVM_PROT_ALL, UVM_INH_COPY,
            UVM_ADV_NORMAL,
            UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

        if (error)
                return (error);

        error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
            cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
            p->p_ucred, NULL, p);
        if (error)
                return (error);

        if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {
                /*
                 * we had to add write permission to the mapping so that
                 * vn_rdwr() could fill it in.  however, the caller asked
                 * for less than what we mapped, so now we have to call
                 * uvm_map_protect() to fix up the protection.  ICK.
                 */
                return (uvm_map_protect(&p->p_vmspace->vm_map,
                    trunc_page(cmd->ev_addr),
                    round_page(cmd->ev_addr + cmd->ev_len),
                    prot, FALSE));
        }
        return (0);
}

/*
 * vmcmd_map_zero():
 *      handle vmcmd which specifies a zero-filled address space region.
 *      The address range must first be allocated, then protected
 *      appropriately.
 */

int
vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
{
        int error;

        if (cmd->ev_len == 0)
                return (0);

        cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
            round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
            UVM_ADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW));

        return (error);
}

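/*
 * No explicit zeroing is needed above: mapping with a NULL backing
 * object gives anonymous memory, and anonymous pages are zero-filled
 * when first touched.  Callers distinguish the uses of this routine
 * purely by protection, e.g. VM_PROT_NONE for the inaccessible part of
 * the stack reservation set up below.
 */
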
/*
 * exec_setup_stack(): Set up the stack segment for an a.out
 * executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct proc *p, struct exec_package *epp)
{

#ifdef MACHINE_STACK_GROWS_UP
        epp->ep_maxsaddr = USRSTACK;
        epp->ep_minsaddr = USRSTACK + MAXSSIZ;
#else
        epp->ep_maxsaddr = USRSTACK - MAXSSIZ;
        epp->ep_minsaddr = USRSTACK;
#endif
        epp->ep_ssize = round_page(p->p_rlimit[RLIMIT_STACK].rlim_cur);

        /*
         * set up commands for stack.  note that this takes *two*, one to
         * map the part of the stack which we can access, and one to map
         * the part which we can't.
         *
         * arguably, it could be made into one, but that would require the
         * addition of another mapping proc, which is unnecessary.
         *
         * note that in memory, the layout is assumed to be:
         *      0 ....... ep_maxsaddr <stack> ep_minsaddr
         */
#ifdef MACHINE_STACK_GROWS_UP
        NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
            ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
            epp->ep_maxsaddr + epp->ep_ssize, NULLVP, 0, VM_PROT_NONE);
        NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
            epp->ep_maxsaddr, NULLVP, 0,
            VM_PROT_READ|VM_PROT_WRITE);
#else
        NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
            ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
            epp->ep_maxsaddr, NULLVP, 0, VM_PROT_NONE);
        NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
            (epp->ep_minsaddr - epp->ep_ssize), NULLVP, 0,
            VM_PROT_READ|VM_PROT_WRITE);
#endif

        return (0);
}
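
/*
 * Worked example for the downward-growing (non-MACHINE_STACK_GROWS_UP)
 * branch above, with made-up values: take USRSTACK = 0xc0000000,
 * MAXSSIZ = 32MB, and an 8MB soft stack limit.  Then:
 *
 *      ep_maxsaddr = 0xc0000000 - 32MB = 0xbe000000
 *      ep_minsaddr = 0xc0000000
 *      ep_ssize    = 8MB
 *
 * The first vmcmd maps the inaccessible 24MB gap
 * [0xbe000000, 0xbf800000) with VM_PROT_NONE; the second maps the
 * usable 8MB [0xbf800000, 0xc0000000) read/write.  Together they
 * reserve the full MAXSSIZ range below USRSTACK.
 */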
