root/miscfs/specfs/spec_vnops.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. spec_vnoperate
  2. spec_lookup
  3. spec_open
  4. spec_read
  5. spec_inactive
  6. spec_write
  7. spec_ioctl
  8. spec_poll
  9. spec_kqfilter
  10. spec_fsync
  11. spec_strategy
  12. spec_bmap
  13. spec_close
  14. spec_print
  15. spec_pathconf
  16. spec_advlock
  17. spec_ebadf
  18. spec_badop

    1 /*      $OpenBSD: spec_vnops.c,v 1.43 2007/06/18 08:30:07 jasper Exp $  */
    2 /*      $NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $ */
    3 
    4 /*
    5  * Copyright (c) 1989, 1993
    6  *      The Regents of the University of California.  All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)spec_vnops.c        8.8 (Berkeley) 11/21/94
   33  */
   34 
   35 #include <sys/param.h>
   36 #include <sys/proc.h>
   37 #include <sys/systm.h>
   38 #include <sys/kernel.h>
   39 #include <sys/conf.h>
   40 #include <sys/buf.h>
   41 #include <sys/mount.h>
   42 #include <sys/namei.h>
   43 #include <sys/vnode.h>
   44 #include <sys/stat.h>
   45 #include <sys/errno.h>
   46 #include <sys/ioctl.h>
   47 #include <sys/file.h>
   48 #include <sys/disklabel.h>
   49 #include <sys/lockf.h>
   50 #include <sys/poll.h>
   51 
   52 #include <miscfs/specfs/specdev.h>
   53 
/* Shorthand for the read-ahead hint stored in this vnode's specinfo. */
#define v_lastr v_specinfo->si_lastr

/* Hash chains of special-file vnodes (SPECHSZ buckets). */
struct vnode *speclisth[SPECHSZ];

/* Operation vector pointer for special-file vnodes. */
int (**spec_vnodeop_p)(void *);
/*
 * Vnode-operation table for special files: maps each generic vnode
 * operation descriptor to the specfs implementation for it.
 */
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ NULL, NULL }
};
/* Registration record tying the vector pointer to the table above. */
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
  100 
  101 int
  102 spec_vnoperate(void *v)
  103 {
  104         struct vop_generic_args *ap = v;
  105 
  106         return (VOCALL(spec_vnodeop_p, ap->a_desc->vdesc_offset, ap));
  107 }
  108 
  109 /*
  110  * Trivial lookup routine that always fails.
  111  */
  112 int
  113 spec_lookup(void *v)
  114 {
  115         struct vop_lookup_args *ap = v;
  116 
  117         *ap->a_vpp = NULL;
  118         return (ENOTDIR);
  119 }
  120 
/*
 * Open a special file: validate the device major number, enforce the
 * securelevel restrictions, then call the driver's open entry point.
 * Called with the vnode locked; the lock is dropped around the
 * character-driver open since it may sleep.
 */
/* ARGSUSED */
int
spec_open(void *v)
{
	struct vop_open_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	struct vnode *bvp;
	dev_t bdev;
	dev_t dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		/* Reject majors with no registered character driver. */
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		/* Cloning devices take their own open path. */
		if (cdevsw[maj].d_flags & D_CLONE)
			return (spec_open_clone(ap));
		/* The driver open may sleep; drop the vnode lock around it. */
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		/* Reject majors with no registered block driver. */
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		/* Not a device: opening is a successful no-op here. */
		break;
	}
	return (0);
}
  208 
/*
 * Vnode op for read.  Character devices go straight to the driver
 * with the vnode unlocked; block devices are read through the buffer
 * cache in block-sized chunks, with one-block read-ahead when the
 * access pattern looks sequential.
 */
/* ARGSUSED */
int
spec_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr64_t bn, nextbn, bscale;
	int bsize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/* The driver read may sleep; drop the vnode lock around it. */
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		/*
		 * If the partition carries an FFS filesystem with a valid
		 * fragment size, transfer in fragment-sized blocks instead
		 * of the default.
		 */
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			/* Round down to a bsize-aligned device block. */
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((bsize - on), uio->uio_resid);
			/*
			 * Sequential access (this block directly follows
			 * the last one read): also start an asynchronous
			 * read-ahead of the next block.
			 */
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, bsize, &nextbn, &bsize,
				    1, NOCRED, &bp);
			} else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			/* Copy out the requested slice of the buffer. */
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
  288 
  289 int
  290 spec_inactive(void *v)
  291 {
  292         struct vop_inactive_args *ap = v;
  293 
  294         VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
  295         return (0);
  296 }
  297 
/*
 * Vnode op for write.  Character devices go straight to the driver
 * with the vnode unlocked; block devices are written through the
 * buffer cache, using read-modify-write for partial blocks.
 */
/* ARGSUSED */
int
spec_write(void *v)
{
	struct vop_write_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr64_t bn, bscale;
	int bsize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/* The driver write may sleep; drop the vnode lock around it. */
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		/*
		 * If the partition carries an FFS filesystem with a valid
		 * fragment size, transfer in fragment-sized blocks instead
		 * of the default.
		 */
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			/* Round down to a bsize-aligned device block. */
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((bsize - on), uio->uio_resid);
			/*
			 * Read the block first so that a partial overwrite
			 * preserves the rest of its contents.
			 */
			error = bread(vp, bn, bsize, NOCRED, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			/*
			 * A completely filled block can be queued for disk
			 * at once; partial blocks stay delayed in the cache.
			 */
			if (n + on == bsize)
				bawrite(bp);
			else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
  374 
  375 /*
  376  * Device ioctl operation.
  377  */
  378 /* ARGSUSED */
  379 int
  380 spec_ioctl(void *v)
  381 {
  382         struct vop_ioctl_args *ap = v;
  383         dev_t dev = ap->a_vp->v_rdev;
  384         int maj = major(dev);
  385 
  386         switch (ap->a_vp->v_type) {
  387 
  388         case VCHR:
  389                 return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
  390                     ap->a_fflag, ap->a_p));
  391 
  392         case VBLK:
  393                 return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
  394                     ap->a_fflag, ap->a_p));
  395 
  396         default:
  397                 panic("spec_ioctl");
  398                 /* NOTREACHED */
  399         }
  400 }
  401 
  402 /* ARGSUSED */
  403 int
  404 spec_poll(void *v)
  405 {
  406         struct vop_poll_args *ap = v;
  407         dev_t dev;
  408 
  409         switch (ap->a_vp->v_type) {
  410 
  411         default:
  412                 return (ap->a_events &
  413                     (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
  414 
  415         case VCHR:
  416                 dev = ap->a_vp->v_rdev;
  417                 return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p);
  418         }
  419 }
  420 /* ARGSUSED */
  421 int
  422 spec_kqfilter(void *v)
  423 {
  424         struct vop_kqfilter_args *ap = v;
  425 
  426         dev_t dev;
  427 
  428         dev = ap->a_vp->v_rdev;
  429         if (cdevsw[major(dev)].d_flags & D_KQFILTER)
  430                 return (*cdevsw[major(dev)].d_kqfilter)(dev, ap->a_kn);
  431         return (1);
  432 }
  433 
/*
 * Synch buffers associated with a block device.  Character devices
 * keep no buffer-cache state, so there is nothing to flush for them.
 */
/* ARGSUSED */
int
spec_fsync(void *v)
{
	struct vop_fsync_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		/* Skip buffers someone else is already writing. */
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Start the write asynchronously, then rescan from the
		 * top: the dirty list may have changed while we slept.
		 */
		bawrite(bp);
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		/* Block until all I/O on this vnode has drained. */
		vwaitforio (vp, 0, "spec_fsync", 0);

#ifdef DIAGNOSTIC
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			vprint("spec_fsync: dirty", vp);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}
  481 
  482 int
  483 spec_strategy(void *v)
  484 {
  485         struct vop_strategy_args *ap = v;
  486         struct buf *bp = ap->a_bp;
  487         int maj = major(bp->b_dev);
  488         
  489         if (LIST_FIRST(&bp->b_dep) != NULL)
  490                 buf_start(bp);
  491 
  492         (*bdevsw[maj].d_strategy)(bp);
  493         return (0);
  494 }
  495 
  496 /*
  497  * This is a noop, simply returning what one has been given.
  498  */
  499 int
  500 spec_bmap(void *v)
  501 {
  502         struct vop_bmap_args *ap = v;
  503 
  504         if (ap->a_vpp != NULL)
  505                 *ap->a_vpp = ap->a_vp;
  506         if (ap->a_bnp != NULL)
  507                 *ap->a_bnp = ap->a_bn;
  508         if (ap->a_runp != NULL)
  509                 *ap->a_runp = 0;
  510         
  511         return (0);
  512 }
  513 
/*
 * Device close routine.  Determines whether this is really the last
 * close of the device (counting all aliased vnodes) and, if so,
 * calls the driver's close entry point.
 */
/* ARGSUSED */
int
spec_close(void *v)
{
	struct vop_close_args *ap = v;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		/* Cloning devices take their own close path. */
		if (cdevsw[major(dev)].d_flags & D_CLONE)
			return (spec_close_clone(ap));
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks. In order to do
		 * that, we must lock the vnode. If we are coming from
		 * vclean(), the vnode is already locked.
		 */
		if (!(vp->v_flag & VXLOCK))
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (!(vp->v_flag & VXLOCK))
			VOP_UNLOCK(vp, 0, ap->a_p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	/* Last close (or forced close): notify the driver. */
	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}
  593 
  594 /*
  595  * Print out the contents of a special device vnode.
  596  */
  597 int
  598 spec_print(void *v)
  599 {
  600         struct vop_print_args *ap = v;
  601 
  602         printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
  603                 minor(ap->a_vp->v_rdev));
  604         return 0;
  605 }
  606 
  607 /*
  608  * Return POSIX pathconf information applicable to special devices.
  609  */
  610 int
  611 spec_pathconf(void *v)
  612 {
  613         struct vop_pathconf_args *ap = v;
  614 
  615         switch (ap->a_name) {
  616         case _PC_LINK_MAX:
  617                 *ap->a_retval = LINK_MAX;
  618                 return (0);
  619         case _PC_MAX_CANON:
  620                 *ap->a_retval = MAX_CANON;
  621                 return (0);
  622         case _PC_MAX_INPUT:
  623                 *ap->a_retval = MAX_INPUT;
  624                 return (0);
  625         case _PC_PIPE_BUF:
  626                 *ap->a_retval = PIPE_BUF;
  627                 return (0);
  628         case _PC_CHOWN_RESTRICTED:
  629                 *ap->a_retval = 1;
  630                 return (0);
  631         case _PC_VDISABLE:
  632                 *ap->a_retval = _POSIX_VDISABLE;
  633                 return (0);
  634         default:
  635                 return (EINVAL);
  636         }
  637         /* NOTREACHED */
  638 }
  639 
  640 /*
  641  * Special device advisory byte-level locks.
  642  */
  643 /* ARGSUSED */
  644 int
  645 spec_advlock(void *v)
  646 {
  647         struct vop_advlock_args *ap = v;
  648         struct vnode *vp = ap->a_vp;
  649 
  650         return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id,
  651                 ap->a_op, ap->a_fl, ap->a_flags));
  652 }
  653 
  654 /*
  655  * Special device failed operation
  656  */
  657 /*ARGSUSED*/
  658 int
  659 spec_ebadf(void *v)
  660 {
  661 
  662         return (EBADF);
  663 }
  664 
/*
 * Special device bad operation: placeholder for vnode operations
 * that must never be invoked on a special file.  Panics if reached.
 */
/*ARGSUSED*/
int
spec_badop(void *v)
{

	panic("spec_badop called");
	/* NOTREACHED */
}

/* [<][>][^][v][top][bottom][index][help] */