root/compat/ibcs2/ibcs2_exec.c

DEFINITIONS

This source file includes the following definitions.
  1. exec_ibcs2_coff_makecmds
  2. exec_ibcs2_coff_setup_stack
  3. exec_ibcs2_coff_prep_omagic
  4. exec_ibcs2_coff_prep_nmagic
  5. coff_find_section
  6. exec_ibcs2_coff_prep_zmagic
  7. coff_load_shlib
  8. exec_ibcs2_xout_makecmds
  9. exec_ibcs2_xout_prep_nmagic
  10. exec_ibcs2_xout_setup_stack

    1 /*      $OpenBSD: ibcs2_exec.c,v 1.18 2006/12/29 13:04:37 pedro Exp $   */
    2 /*      $NetBSD: ibcs2_exec.c,v 1.12 1996/10/12 02:13:52 thorpej Exp $  */
    3 
    4 /*
    5  * Copyright (c) 1994, 1995 Scott Bartram
    6  * Copyright (c) 1994 Adam Glass
    7  * Copyright (c) 1993, 1994 Christopher G. Demetriou
    8  * All rights reserved.
    9  *
   10  * originally from kern/exec_ecoff.c
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by Scott Bartram.
   23  * 4. The name of the author may not be used to endorse or promote products
   24  *    derived from this software without specific prior written permission
   25  *
   26  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   27  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   28  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   29  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   30  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   31  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   35  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   36  */
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/proc.h>
   41 #include <sys/exec.h>
   42 #include <sys/malloc.h>
   43 #include <sys/vnode.h>
   44 #include <sys/resourcevar.h>
   45 #include <sys/namei.h>
   46 #include <uvm/uvm_extern.h>
   47 
   48 #include <compat/ibcs2/ibcs2_types.h>
   49 #include <compat/ibcs2/ibcs2_exec.h>
   50 #include <compat/ibcs2/ibcs2_util.h>
   51 #include <compat/ibcs2/ibcs2_syscall.h>
   52 
   53 int exec_ibcs2_coff_prep_omagic(struct proc *, struct exec_package *,
   54                                      struct coff_filehdr *, 
   55                                      struct coff_aouthdr *);
   56 int exec_ibcs2_coff_prep_nmagic(struct proc *, struct exec_package *,
   57                                      struct coff_filehdr *, 
   58                                      struct coff_aouthdr *);
   59 int exec_ibcs2_coff_prep_zmagic(struct proc *, struct exec_package *,
   60                                      struct coff_filehdr *, 
   61                                      struct coff_aouthdr *);
   62 int exec_ibcs2_coff_setup_stack(struct proc *, struct exec_package *);
   63 void cpu_exec_ibcs2_coff_setup(int, struct proc *, struct exec_package *,
   64                                     void *);
   65 
   66 int exec_ibcs2_xout_prep_nmagic(struct proc *, struct exec_package *,
   67                                      struct xexec *, struct xext *);
   68 int exec_ibcs2_xout_prep_zmagic(struct proc *, struct exec_package *,
   69                                      struct xexec *, struct xext *);
   70 int exec_ibcs2_xout_setup_stack(struct proc *, struct exec_package *);
   71 int coff_load_shlib(struct proc *, char *, struct exec_package *);
   72 static int coff_find_section(struct proc *, struct vnode *, 
   73                                   struct coff_filehdr *, struct coff_scnhdr *,
   74                                   int);
   75         
   76 
   77 extern int bsd2ibcs_errno[];
   78 extern struct sysent ibcs2_sysent[];
   79 #ifdef SYSCALL_DEBUG
   80 extern char *ibcs2_syscallnames[];
   81 #endif
   82 extern void ibcs2_sendsig(sig_t, int, int, u_long, int, union sigval);
   83 extern char sigcode[], esigcode[];
   84 
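       /*
        * Alternate root prefix for iBCS2 processes: pathnames coming from
        * emulated binaries are looked up under this directory first
        * (see IBCS2_CHECK_ALT_EXIST() in coff_load_shlib() below).
        */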
   85 const char ibcs2_emul_path[] = "/emul/ibcs2";
   86 
   87 struct emul emul_ibcs2 = {
   88         "ibcs2",
   89         bsd2ibcs_errno,
   90         ibcs2_sendsig,
   91         0,
   92         IBCS2_SYS_MAXSYSCALL,
   93         ibcs2_sysent,
   94 #ifdef SYSCALL_DEBUG
   95         ibcs2_syscallnames,
   96 #else
   97         NULL,
   98 #endif
   99         0,
  100         copyargs,
  101         setregs,
  102         NULL,
  103         sigcode,
  104         esigcode,
  105 };
  106 
  107 /*
   108  * exec_ibcs2_coff_makecmds(): Check if it's a coff-format executable.
  109  *
  110  * Given a proc pointer and an exec package pointer, see if the referent
  111  * of the epp is in coff format.  Check 'standard' magic numbers for
  112  * this architecture.  If that fails, return failure.
  113  *
   114  * This function is responsible for creating a set of vmcmds which can be
  115  * used to build the process's vm space and inserting them into the exec
  116  * package.
  117  */
  118 
  119 int
  120 exec_ibcs2_coff_makecmds(p, epp)
  121         struct proc *p;
  122         struct exec_package *epp;
  123 {
  124         int error;
  125         struct coff_filehdr *fp = epp->ep_hdr;
  126         struct coff_aouthdr *ap;
  127 
  128         if (epp->ep_hdrvalid < COFF_HDR_SIZE)
  129                 return ENOEXEC;
  130 
  131         if (COFF_BADMAG(fp))
  132                 return ENOEXEC;
  133         
  134         ap = (struct coff_aouthdr *)((char *)epp->ep_hdr +
  135             sizeof(struct coff_filehdr));
  136         switch (ap->a_magic) {
  137         case COFF_OMAGIC:
  138                 error = exec_ibcs2_coff_prep_omagic(p, epp, fp, ap);
  139                 break;
  140         case COFF_NMAGIC:
  141                 error = exec_ibcs2_coff_prep_nmagic(p, epp, fp, ap);
  142                 break;
  143         case COFF_ZMAGIC:
  144                 error = exec_ibcs2_coff_prep_zmagic(p, epp, fp, ap);
  145                 break;
  146         default:
  147                 return ENOEXEC;
  148         }
  149 
  150         if (error == 0)
  151                 epp->ep_emul = &emul_ibcs2;
  152 
  153         if (error)
  154                 kill_vmcmds(&epp->ep_vmcmds);
  155 
  156         return error;
  157 }
  158 
  159 /*
  160  * exec_ibcs2_coff_setup_stack(): Set up the stack segment for a coff
  161  * executable.
  162  *
  163  * Note that the ep_ssize parameter must be set to be the current stack
  164  * limit; this is adjusted in the body of execve() to yield the
  165  * appropriate stack segment usage once the argument length is
  166  * calculated.
  167  *
  168  * This function returns an int for uniformity with other (future) formats'
  169  * stack setup functions.  They might have errors to return.
  170  */
  171 
  172 int
  173 exec_ibcs2_coff_setup_stack(p, epp)
  174         struct proc *p;
  175         struct exec_package *epp;
  176 {
  177         /* DPRINTF(("enter exec_ibcs2_coff_setup_stack\n")); */
  178 
  179         epp->ep_maxsaddr = USRSTACK - MAXSSIZ;
  180         epp->ep_minsaddr = USRSTACK;
  181         epp->ep_ssize = p->p_rlimit[RLIMIT_STACK].rlim_cur;
  182 
  183         /*
  184          * set up commands for stack.  note that this takes *two*, one to
  185          * map the part of the stack which we can access, and one to map
  186          * the part which we can't.
  187          *
  188          * arguably, it could be made into one, but that would require the
  189          * addition of another mapping proc, which is unnecessary
  190          *
   191          * note that in memory, things are assumed to be laid out as:
   192          * 0 ....... ep_maxsaddr <stack> ep_minsaddr
  193          */
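               /*
                * The first vmcmd below covers [ep_maxsaddr, ep_minsaddr -
                * ep_ssize) with VM_PROT_NONE (the part of the stack that is
                * not yet accessible); the second maps the accessible
                * ep_ssize bytes up to ep_minsaddr read/write/execute.
                */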
  194         /* DPRINTF(("VMCMD: addr %x size %d\n", epp->ep_maxsaddr,
  195                  (epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr)); */
  196         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
  197                   ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
  198                   epp->ep_maxsaddr, NULLVP, 0, VM_PROT_NONE);
  199         /* DPRINTF(("VMCMD: addr %x size %d\n",
  200                  epp->ep_minsaddr - epp->ep_ssize,
  201                  epp->ep_ssize)); */
  202         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
  203                   (epp->ep_minsaddr - epp->ep_ssize), NULLVP, 0,
  204                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  205 
  206         return 0;
  207 }
  208 
  209 
  210 /*
  211  * exec_ibcs2_coff_prep_omagic(): Prepare a COFF OMAGIC binary's exec package
  212  */
  213 
  214 int
  215 exec_ibcs2_coff_prep_omagic(p, epp, fp, ap)
  216         struct proc *p;
  217         struct exec_package *epp;
  218         struct coff_filehdr *fp;
  219         struct coff_aouthdr *ap;
  220 {
  221         epp->ep_taddr = COFF_SEGMENT_ALIGN(ap, ap->a_tstart);
  222         epp->ep_tsize = ap->a_tsize;
  223         epp->ep_daddr = COFF_SEGMENT_ALIGN(ap, ap->a_dstart);
  224         epp->ep_dsize = ap->a_dsize;
  225         epp->ep_entry = ap->a_entry;
  226 
  227         /* set up command for text and data segments */
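               /*
                * OMAGIC images keep text and data contiguous in the file, so
                * a single readvn starting at COFF_TXTOFF() loads both.
                */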
  228         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
  229                   ap->a_tsize + ap->a_dsize, epp->ep_taddr, epp->ep_vp,
  230                   COFF_TXTOFF(fp, ap),
  231                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  232 
  233         /* set up command for bss segment */
  234         if (ap->a_bsize > 0)
  235                 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, ap->a_bsize,
  236                           COFF_SEGMENT_ALIGN(ap, ap->a_dstart + ap->a_dsize),
  237                           NULLVP, 0,
  238                           VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  239         
  240         return exec_ibcs2_coff_setup_stack(p, epp);
  241 }
  242 
  243 /*
  244  * exec_ibcs2_coff_prep_nmagic(): Prepare a 'native' NMAGIC COFF binary's exec
  245  *                          package.
  246  */
  247 
  248 int
  249 exec_ibcs2_coff_prep_nmagic(p, epp, fp, ap)
  250         struct proc *p;
  251         struct exec_package *epp;
  252         struct coff_filehdr *fp;
  253         struct coff_aouthdr *ap;
  254 {
  255         epp->ep_taddr = COFF_SEGMENT_ALIGN(ap, ap->a_tstart);
  256         epp->ep_tsize = ap->a_tsize;
  257         epp->ep_daddr = COFF_ROUND(ap->a_dstart, COFF_LDPGSZ);
  258         epp->ep_dsize = ap->a_dsize;
  259         epp->ep_entry = ap->a_entry;
  260 
  261         /* set up command for text segment */
  262         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, epp->ep_tsize,
  263                   epp->ep_taddr, epp->ep_vp, COFF_TXTOFF(fp, ap),
  264                   VM_PROT_READ|VM_PROT_EXECUTE);
  265 
  266         /* set up command for data segment */
  267         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, epp->ep_dsize,
  268                   epp->ep_daddr, epp->ep_vp, COFF_DATOFF(fp, ap),
  269                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  270 
  271         /* set up command for bss segment */
  272         if (ap->a_bsize > 0)
  273                 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, ap->a_bsize,
  274                           COFF_SEGMENT_ALIGN(ap, ap->a_dstart + ap->a_dsize),
  275                           NULLVP, 0,
  276                           VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  277 
  278         return exec_ibcs2_coff_setup_stack(p, epp);
  279 }
  280 
  281 /*
  282  * coff_find_section - load specified section header
  283  *
  284  * TODO - optimize by reading all section headers in at once
  285  */
  286 
  287 static int
  288 coff_find_section(p, vp, fp, sh, s_type)
  289         struct proc *p;
  290         struct vnode *vp;
  291         struct coff_filehdr *fp;
  292         struct coff_scnhdr *sh;
  293         int s_type;
  294 {
  295         int i, pos, error;
  296         size_t siz, resid;
  297         
  298         pos = COFF_HDR_SIZE;
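               /*
                * Section headers are stored back to back starting at file
                * offset COFF_HDR_SIZE; read them into *sh one at a time and
                * stop at the first whose s_flags matches s_type.
                */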
  299         for (i = 0; i < fp->f_nscns; i++, pos += sizeof(struct coff_scnhdr)) {
  300                 siz = sizeof(struct coff_scnhdr);
  301                 error = vn_rdwr(UIO_READ, vp, (caddr_t) sh,
  302                     siz, pos, UIO_SYSSPACE, 0, p->p_ucred,
  303                     &resid, p);
  304                 if (error) {
  305                         DPRINTF(("section hdr %d read error %d\n", i, error));
  306                         return error;
  307                 }
  308                 siz -= resid;
  309                 if (siz != sizeof(struct coff_scnhdr)) {
  310                         DPRINTF(("incomplete read: hdr %d ask=%d, rem=%u got %u\n",
  311                                  s_type, sizeof(struct coff_scnhdr),
  312                                  resid, siz));
  313                         return ENOEXEC;
  314                 }
  315                 /* DPRINTF(("found section: %x\n", sh->s_flags)); */
  316                 if (sh->s_flags == s_type)
  317                         return 0;
  318         }
  319         return ENOEXEC;
  320 }
  321 
  322 /*
  323  * exec_ibcs2_coff_prep_zmagic(): Prepare a COFF ZMAGIC binary's exec package
  324  *
  325  * First, set the various offsets/lengths in the exec package.
  326  *
  327  * Then, mark the text image busy (so it can be demand paged) or error
  328  * out if this is not possible.  Finally, set up vmcmds for the
  329  * text, data, bss, and stack segments.
  330  */
  331 
  332 int
  333 exec_ibcs2_coff_prep_zmagic(p, epp, fp, ap)
  334         struct proc *p;
  335         struct exec_package *epp;
  336         struct coff_filehdr *fp;
  337         struct coff_aouthdr *ap;
  338 {
  339         int error;
  340         u_long offset;
  341         long dsize, baddr, bsize;
  342         struct coff_scnhdr sh;
  343         
  344         /* DPRINTF(("enter exec_ibcs2_coff_prep_zmagic\n")); */
  345 
  346         /* set up command for text segment */
  347         error = coff_find_section(p, epp->ep_vp, fp, &sh, COFF_STYP_TEXT);
  348         if (error) {            
  349                 DPRINTF(("can't find text section: %d\n", error));
  350                 return error;
  351         }
  352         /* DPRINTF(("COFF text addr %x size %d offset %d\n", sh.s_vaddr,
  353                  sh.s_size, sh.s_scnptr)); */
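               /*
                * COFF_ALIGN() truncates the section's virtual address to a
                * page boundary; the file offset and mapping size are adjusted
                * by the same slack so the section bytes still land at s_vaddr.
                */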
  354         epp->ep_taddr = COFF_ALIGN(sh.s_vaddr);
  355         offset = sh.s_scnptr - (sh.s_vaddr - epp->ep_taddr);
  356         epp->ep_tsize = sh.s_size + (sh.s_vaddr - epp->ep_taddr);
  357 
  358 #ifdef notyet
  359         /*
   360          * check if vnode is open for writing, because we want to
   361          * demand-page out of it.  if it is, don't do it, for various
   362          * reasons
   363          */
  364         if ((ap->a_tsize != 0 || ap->a_dsize != 0) &&
  365             epp->ep_vp->v_writecount != 0) {
  366 #ifdef DIAGNOSTIC
  367                 if (epp->ep_vp->v_flag & VTEXT)
  368                         panic("exec: a VTEXT vnode has writecount != 0");
  369 #endif
  370                 return ETXTBSY;
  371         }
  372         vn_marktext(epp->ep_vp);
  373 #endif
  374         
  375         /* DPRINTF(("VMCMD: addr %x size %d offset %d\n", epp->ep_taddr,
  376                  epp->ep_tsize, offset)); */
  377 #ifdef notyet
  378         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, epp->ep_tsize,
  379                   epp->ep_taddr, epp->ep_vp, offset,
  380                   VM_PROT_READ|VM_PROT_EXECUTE);
  381 #else
  382         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, epp->ep_tsize,
  383                   epp->ep_taddr, epp->ep_vp, offset,
  384                   VM_PROT_READ|VM_PROT_EXECUTE);
  385 #endif
  386 
  387         /* set up command for data segment */
  388         error = coff_find_section(p, epp->ep_vp, fp, &sh, COFF_STYP_DATA);
  389         if (error) {
  390                 DPRINTF(("can't find data section: %d\n", error));
  391                 return error;
  392         }
  393         /* DPRINTF(("COFF data addr %x size %d offset %d\n", sh.s_vaddr,
  394                  sh.s_size, sh.s_scnptr)); */
  395         epp->ep_daddr = COFF_ALIGN(sh.s_vaddr);
  396         offset = sh.s_scnptr - (sh.s_vaddr - epp->ep_daddr);
  397         dsize = sh.s_size + (sh.s_vaddr - epp->ep_daddr);
  398         epp->ep_dsize = dsize + ap->a_bsize;
  399 
  400         /* DPRINTF(("VMCMD: addr %x size %d offset %d\n", epp->ep_daddr,
  401                  dsize, offset)); */
  402 #ifdef notyet
  403         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, dsize,
  404                   epp->ep_daddr, epp->ep_vp, offset,
  405                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  406 #else
  407         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
  408                   dsize, epp->ep_daddr, epp->ep_vp, offset,
  409                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  410 #endif
  411 
  412         /* set up command for bss segment */
  413         baddr = round_page(epp->ep_daddr + dsize);
  414         bsize = epp->ep_daddr + epp->ep_dsize - baddr;
  415         if (bsize > 0) {
  416                 /* DPRINTF(("VMCMD: addr %x size %d offset %d\n",
  417                          baddr, bsize, 0)); */
  418                 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
  419                           bsize, baddr, NULLVP, 0,
  420                           VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  421         }
  422 
  423         /* load any shared libraries */
  424         error = coff_find_section(p, epp->ep_vp, fp, &sh, COFF_STYP_SHLIB);
  425         if (!error) {
  426                 size_t resid;
  427                 struct coff_slhdr *slhdr;
  428                 char buf[128], *bufp;   /* FIXME */
  429                 unsigned int len = sh.s_size, entry_len;
  430                 
  431                 /* DPRINTF(("COFF shlib size %d offset %d\n",
  432                          sh.s_size, sh.s_scnptr)); */
  433 
  434                 if (len > sizeof(buf))
  435                         return (ENOEXEC);
  436 
  437                 error = vn_rdwr(UIO_READ, epp->ep_vp, (caddr_t) buf,
  438                                 len, sh.s_scnptr,
  439                                 UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred,
  440                                 &resid, p);
  441                 if (error) {
  442                         DPRINTF(("shlib section read error %d\n", error));
  443                         return ENOEXEC;
  444                 }
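                       /*
                        * The COFF_STYP_SHLIB section is a packed list of
                        * coff_slhdr records: entry_len is expressed in longs
                        * and sl_name names a shared library that must be
                        * mapped into the new address space.
                        */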
  445                 bufp = buf;
  446                 while (len) {
  447                         slhdr = (struct coff_slhdr *)bufp;
  448 #ifdef notyet
  449                         path_index = slhdr->path_index * sizeof(long);
  450 #endif
  451                         entry_len = slhdr->entry_len * sizeof(long);
  452 
  453                         /* DPRINTF(("path_index: %d entry_len: %d name: %s\n",
  454                                  path_index, entry_len, slhdr->sl_name)); */
  455 
  456                         if (entry_len > len)
  457                                 return (ENOEXEC);
  458 
  459                         error = coff_load_shlib(p, slhdr->sl_name, epp);
  460                         if (error)
  461                                 return ENOEXEC;
  462                         bufp += entry_len;
  463                         len -= entry_len;
  464                 }
  465         }
  466                 
  467         /* set up entry point */
  468         epp->ep_entry = ap->a_entry;
  469 
  470 #if 0
  471         DPRINTF(("text addr: %x size: %d data addr: %x size: %d entry: %x\n",
  472                  epp->ep_taddr, epp->ep_tsize,
  473                  epp->ep_daddr, epp->ep_dsize,
  474                  epp->ep_entry));
  475 #endif
  476         
  477         return exec_ibcs2_coff_setup_stack(p, epp);
  478 }
  479 
  480 int
  481 coff_load_shlib(p, path, epp)
  482         struct proc *p;
  483         char *path;
  484         struct exec_package *epp;
  485 {
  486         int error, taddr, tsize, daddr, dsize, offset;
  487         size_t siz, resid;
  488         struct nameidata nd;
  489         struct coff_filehdr fh, *fhp = &fh;
  490         struct coff_scnhdr sh, *shp = &sh;
  491         caddr_t sg = stackgap_init(p->p_emul);
  492 
  493         /*
  494          * 1. open shlib file
  495          * 2. read filehdr
  496          * 3. map text, data, and bss out of it using VM_*
  497          */
  498         IBCS2_CHECK_ALT_EXIST(p, &sg, path);
  499         NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
  500         /* first get the vnode */
  501         if ((error = namei(&nd)) != 0) {
  502                 DPRINTF(("coff_load_shlib: can't find library %s\n", path));
  503                 return error;
  504         }
  505 
  506         siz = sizeof(struct coff_filehdr);
  507         error = vn_rdwr(UIO_READ, nd.ni_vp, (caddr_t) fhp, siz, 0,
  508             UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
  509         if (error) {
  510             DPRINTF(("filehdr read error %d\n", error));
  511             vrele(nd.ni_vp);
  512             return error;
  513         }
  514         siz -= resid;
  515         if (siz != sizeof(struct coff_filehdr)) {
  516             DPRINTF(("coff_load_shlib: incomplete read: ask=%d, rem=%u got %u\n",
  517                      sizeof(struct coff_filehdr), resid, siz));
  518             vrele(nd.ni_vp);
  519             return ENOEXEC;
  520         }
  521 
  522         /* load text */
  523         error = coff_find_section(p, nd.ni_vp, fhp, shp, COFF_STYP_TEXT);
  524         if (error) {
  525             DPRINTF(("can't find shlib text section\n"));
  526             vrele(nd.ni_vp);
  527             return error;
  528         }
  529         /* DPRINTF(("COFF text addr %x size %d offset %d\n", sh.s_vaddr,
  530                  sh.s_size, sh.s_scnptr)); */
  531         taddr = COFF_ALIGN(shp->s_vaddr);
  532         offset = shp->s_scnptr - (shp->s_vaddr - taddr);
  533         tsize = shp->s_size + (shp->s_vaddr - taddr);
  534         /* DPRINTF(("VMCMD: addr %x size %d offset %d\n", taddr, tsize, offset)); */
  535         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, tsize, taddr,
  536                   nd.ni_vp, offset,
  537                   VM_PROT_READ|VM_PROT_EXECUTE);
  538 
  539         /* load data */
  540         error = coff_find_section(p, nd.ni_vp, fhp, shp, COFF_STYP_DATA);
  541         if (error) {
  542             DPRINTF(("can't find shlib data section\n"));
  543             vrele(nd.ni_vp);
  544             return error;
  545         }
  546         /* DPRINTF(("COFF data addr %x size %d offset %d\n", shp->s_vaddr,
  547                  shp->s_size, shp->s_scnptr)); */
  548         daddr = COFF_ALIGN(shp->s_vaddr);
  549         offset = shp->s_scnptr - (shp->s_vaddr - daddr);
  550         dsize = shp->s_size + (shp->s_vaddr - daddr);
  551         /* epp->ep_dsize = dsize + ap->a_bsize; */
  552 
  553         /* DPRINTF(("VMCMD: addr %x size %d offset %d\n", daddr, dsize, offset)); */
  554         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
  555                   dsize, daddr, nd.ni_vp, offset,
  556                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  557 
  558         /* load bss */
  559         error = coff_find_section(p, nd.ni_vp, fhp, shp, COFF_STYP_BSS);
  560         if (!error) {
  561                 int baddr = round_page(daddr + dsize);
  562                 int bsize = daddr + dsize + shp->s_size - baddr;
  563                 if (bsize > 0) {
  564                         /* DPRINTF(("VMCMD: addr %x size %d offset %d\n",
  565                            baddr, bsize, 0)); */
  566                         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
  567                                   bsize, baddr, NULLVP, 0,
  568                                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
   569                 }
  570         }
  571         vrele(nd.ni_vp);
  572 
  573         return 0;
  574 }
  575 
  576 int
  577 exec_ibcs2_xout_makecmds(p, epp)
  578         struct proc *p;
  579         struct exec_package *epp;
  580 {
  581         int error;
  582         struct xexec *xp = epp->ep_hdr;
  583         struct xext *xep;
  584 
  585         if (epp->ep_hdrvalid < XOUT_HDR_SIZE)
  586                 return ENOEXEC;
  587 
  588         if ((xp->x_magic != XOUT_MAGIC) || (xp->x_cpu != XC_386))
  589                 return ENOEXEC;
  590         if ((xp->x_renv & (XE_ABS | XE_VMOD)) || !(xp->x_renv & XE_EXEC))
  591                 return ENOEXEC;
  592 
  593         xep = (struct xext *)((char *)epp->ep_hdr + sizeof(struct xexec));
  594 #ifdef notyet
  595         if (xp->x_renv & XE_PURE)
  596                 error = exec_ibcs2_xout_prep_zmagic(p, epp, xp, xep);
  597         else
  598 #endif
  599                 error = exec_ibcs2_xout_prep_nmagic(p, epp, xp, xep);
  600 
  601         if (error == 0)
  602                 epp->ep_emul = &emul_ibcs2;
  603 
  604         if (error)
  605                 kill_vmcmds(&epp->ep_vmcmds);
  606 
  607         return error;
  608 }
  609 
  610 /*
  611  * exec_ibcs2_xout_prep_nmagic(): Prepare a pure x.out binary's exec package
  612  *
  613  */
  614 
  615 int
  616 exec_ibcs2_xout_prep_nmagic(p, epp, xp, xep)
  617         struct proc *p;
  618         struct exec_package *epp;
  619         struct xexec *xp;
  620         struct xext *xep;
  621 {
  622         int error, nseg, i;
  623         size_t resid;
  624         long baddr, bsize;
  625         struct xseg *xs;
  626 
  627         /* read in segment table */
  628         if (xep->xe_segsize > 16 * sizeof(*xs))
  629                 return (ENOEXEC);
  630         xs = (struct xseg *)malloc(xep->xe_segsize, M_TEMP, M_WAITOK);
  631         error = vn_rdwr(UIO_READ, epp->ep_vp, (caddr_t)xs,
  632                         xep->xe_segsize, xep->xe_segpos,
  633                         UIO_SYSSPACE, 0, p->p_ucred,
  634                         &resid, p);
  635         if (error) {
  636                 DPRINTF(("segment table read error %d\n", error));
  637                 free(xs, M_TEMP);
  638                 return ENOEXEC;
  639         }
  640 
  641         for (nseg = xep->xe_segsize / sizeof(*xs), i = 0; i < nseg; i++) {
  642                 switch (xs[i].xs_type) {
  643                 case XS_TTEXT:  /* text segment */
  644 
  645                         DPRINTF(("text addr %x psize %d vsize %d off %d\n",
  646                                  xs[i].xs_rbase, xs[i].xs_psize,
  647                                  xs[i].xs_vsize, xs[i].xs_filpos));
  648 
  649                         epp->ep_taddr = xs[i].xs_rbase; /* XXX - align ??? */
  650                         epp->ep_tsize = xs[i].xs_vsize;
  651 
  652                         DPRINTF(("VMCMD: addr %x size %d offset %d\n",
  653                                  epp->ep_taddr, epp->ep_tsize,
  654                                  xs[i].xs_filpos));
  655                         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
  656                                   epp->ep_tsize, epp->ep_taddr,
  657                                   epp->ep_vp, xs[i].xs_filpos,
  658                                   VM_PROT_READ|VM_PROT_EXECUTE);
  659                         break;
  660 
  661                 case XS_TDATA:  /* data segment */
  662 
  663                         DPRINTF(("data addr %x psize %d vsize %d off %d\n",
  664                                  xs[i].xs_rbase, xs[i].xs_psize,
  665                                  xs[i].xs_vsize, xs[i].xs_filpos));
  666 
  667                         epp->ep_daddr = xs[i].xs_rbase; /* XXX - align ??? */
  668                         epp->ep_dsize = xs[i].xs_vsize;
  669 
  670                         DPRINTF(("VMCMD: addr %x size %d offset %d\n",
  671                                  epp->ep_daddr, xs[i].xs_psize,
  672                                  xs[i].xs_filpos));
  673                         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
  674                                   xs[i].xs_psize, epp->ep_daddr,
  675                                   epp->ep_vp, xs[i].xs_filpos,
  676                                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  677 
  678                         /* set up command for bss segment */
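                               /*
                                * bss covers the part of xs_vsize not backed
                                * by the file (beyond xs_psize), starting at
                                * the next page boundary.
                                */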
  679                         baddr = round_page(epp->ep_daddr + xs[i].xs_psize);
  680                         bsize = epp->ep_daddr + epp->ep_dsize - baddr;
  681                         if (bsize > 0) {
  682                                 DPRINTF(("VMCMD: bss addr %x size %d off %d\n",
  683                                          baddr, bsize, 0));
  684                                 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
  685                                           bsize, baddr, NULLVP, 0,
  686                                           VM_PROT_READ|VM_PROT_WRITE|
  687                                           VM_PROT_EXECUTE);
  688                         }
  689                         break;
  690 
  691                 default:
  692                         break;
  693                 }
  694         }
  695 
  696         /* set up entry point */
  697         epp->ep_entry = xp->x_entry;
  698 
  699         DPRINTF(("text addr: %x size: %d data addr: %x size: %d entry: %x\n",
  700                  epp->ep_taddr, epp->ep_tsize,
  701                  epp->ep_daddr, epp->ep_dsize,
  702                  epp->ep_entry));
  703         
  704         free(xs, M_TEMP);
  705         return exec_ibcs2_xout_setup_stack(p, epp);
  706 }
  707 
  708 /*
  709  * exec_ibcs2_xout_setup_stack(): Set up the stack segment for a x.out
  710  * executable.
  711  *
  712  * Note that the ep_ssize parameter must be set to be the current stack
  713  * limit; this is adjusted in the body of execve() to yield the
  714  * appropriate stack segment usage once the argument length is
  715  * calculated.
  716  *
  717  * This function returns an int for uniformity with other (future) formats'
  718  * stack setup functions.  They might have errors to return.
  719  */
  720 
  721 int
  722 exec_ibcs2_xout_setup_stack(p, epp)
  723         struct proc *p;
  724         struct exec_package *epp;
  725 {
  726         epp->ep_maxsaddr = USRSTACK - MAXSSIZ;
  727         epp->ep_minsaddr = USRSTACK;
  728         epp->ep_ssize = p->p_rlimit[RLIMIT_STACK].rlim_cur;
  729 
  730         /*
  731          * set up commands for stack.  note that this takes *two*, one to
  732          * map the part of the stack which we can access, and one to map
  733          * the part which we can't.
  734          *
  735          * arguably, it could be made into one, but that would require the
  736          * addition of another mapping proc, which is unnecessary
  737          *
   738          * note that in memory, things are assumed to be laid out as:
   739          * 0 ....... ep_maxsaddr <stack> ep_minsaddr
  740          */
  741         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
  742                   ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
  743                   epp->ep_maxsaddr, NULLVP, 0, VM_PROT_NONE);
  744         NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
  745                   (epp->ep_minsaddr - epp->ep_ssize), NULLVP, 0,
  746                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
  747 
  748         return 0;
  749 }
