root/kern/vfs_lockf.c


DEFINITIONS

This source file includes the following definitions:
  1. lf_init
  2. lf_alloc
  3. lf_free
  4. lf_advlock
  5. lf_setlock
  6. lf_clearlock
  7. lf_getlock
  8. lf_getblock
  9. lf_findoverlap
  10. lf_split
  11. lf_wakelock
  12. lf_print
  13. lf_printlist

/*      $OpenBSD: vfs_lockf.c,v 1.12 2005/11/20 21:55:15 pedro Exp $    */
/*      $NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $   */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

struct pool lockfpool;

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#define SELF    0x1
#define OTHERS  0x2

#ifdef LOCKF_DEBUG

#define DEBUG_SETLOCK           0x01
#define DEBUG_CLEARLOCK         0x02
#define DEBUG_GETLOCK           0x04
#define DEBUG_FINDOVR           0x08
#define DEBUG_SPLIT             0x10
#define DEBUG_WAKELOCK          0x20

int     lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

#define DPRINTF(args, level)    if (lockf_debug & (level)) printf args
#else
#define DPRINTF(args, level)
#endif

void
lf_init(void)
{
        pool_init(&lockfpool, sizeof(struct lockf), 0, 0, 0,
            "lockfpl", &pool_allocator_nointr);
}

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cutoff at limit.  2 - cutoff at double limit.
 */
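/*
 * Worked example with the defaults above (illustrative, derived from
 * the code below): a non-root uid holding more than maxlocksperuid
 * (1024) locks is refused a new lock (allowfail == 1), may still
 * allocate for an unlock and its splits until it exceeds 2048
 * (allowfail == 2), and lf_split() itself always succeeds
 * (allowfail == 0).  Root is never refused.
 */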
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
        struct uidinfo *uip;
        struct lockf *lock;

        uip = uid_find(uid);
        if (uid && allowfail && uip->ui_lockcnt >
            (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2)))
                return (NULL);
        uip->ui_lockcnt++;
        lock = pool_get(&lockfpool, PR_WAITOK);
        lock->lf_uid = uid;
        return (lock);
}

void
lf_free(struct lockf *lock)
{
        struct uidinfo *uip;

        uip = uid_find(lock->lf_uid);
        uip->ui_lockcnt--;
        pool_put(&lockfpool, lock);
}


/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct lockf **head, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
        struct proc *p = curproc;
        struct lockf *lock;
        off_t start, end;
        int error;

        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {

        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:
                start = size + fl->l_start;
                break;

        default:
                return (EINVAL);
        }
        if (start < 0)
                return (EINVAL);
        if (fl->l_len == 0)
                end = -1;
        else {
                end = start + fl->l_len - 1;
                if (end < start)
                        return (EINVAL);
        }
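
        /*
         * Worked example of the conversion above (illustrative values):
         * l_whence == SEEK_SET, l_start == 100 and l_len == 10 describe
         * the byte range [100, 109]; l_len == 0 means "to end of file",
         * represented here by end == -1.
         */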

        /*
         * Avoid the common case of unlocking when inode has no locks.
         */
        if (*head == NULL) {
                if (op != F_SETLK) {
                        fl->l_type = F_UNLCK;
                        return (0);
                }
        }

        /*
         * Create the lockf structure.
         */
        lock = lf_alloc(p->p_ucred->cr_uid, op != F_UNLCK ? 1 : 2);
        if (!lock)
                return (ENOMEM);
        lock->lf_start = start;
        lock->lf_end = end;
        lock->lf_id = id;
        lock->lf_head = head;
        lock->lf_type = fl->l_type;
        lock->lf_next = NULL;
        TAILQ_INIT(&lock->lf_blkhd);
        lock->lf_flags = flags;
        /*
         * Do the requested operation.
         */
        switch (op) {

        case F_SETLK:
                return (lf_setlock(lock));

        case F_UNLCK:
                error = lf_clearlock(lock);
                lf_free(lock);
                return (error);

        case F_GETLK:
                error = lf_getlock(lock, fl);
                lf_free(lock);
                return (error);

        default:
                lf_free(lock);
                return (EINVAL);
        }
        /* NOTREACHED */
}
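
/*
 * A sketch of how a file system hands advisory-lock requests to
 * lf_advlock(); this is modeled on ufs_advlock(), and the UFS names
 * (vop_advlock_args, VTOI, i_lockf, i_size) are assumptions that may
 * differ in other file systems:
 *
 *      int
 *      ufs_advlock(void *v)
 *      {
 *              struct vop_advlock_args *ap = v;
 *              struct inode *ip = VTOI(ap->a_vp);
 *
 *              return (lf_advlock(&ip->i_lockf, ip->i_size, ap->a_id,
 *                  ap->a_op, ap->a_fl, ap->a_flags));
 *      }
 */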

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
        struct lockf *block;
        struct lockf **head = lock->lf_head;
        struct lockf **prev, *overlap, *ltmp;
        static char lockstr[] = "lockf";
        int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_SETLOCK)
                lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

        /*
         * Set the priority
         */
        priority = PLOCK;
        if (lock->lf_type == F_WRLCK)
                priority += 4;
        priority |= PCATCH;
        /*
         * Scan lock list for this file looking for locks that would block us.
         */
        while ((block = lf_getblock(lock)) != NULL) {
                /*
                 * Free the structure and return if nonblocking.
                 */
                if ((lock->lf_flags & F_WAIT) == 0) {
                        lf_free(lock);
                        return (EAGAIN);
                }
                /*
                 * We are blocked. Since flock style locks cover
                 * the whole file, there is no chance for deadlock.
                 * For byte-range locks we must check for deadlock.
                 *
                 * Deadlock detection is done by looking through the
                 * wait channels to see if there are any cycles that
                 * involve us. MAXDEPTH is set just to make sure we
                 * do not go off into neverland.
                 */
                if ((lock->lf_flags & F_POSIX) &&
                    (block->lf_flags & F_POSIX)) {
                        struct proc *wproc;
                        struct lockf *waitblock;
                        int i = 0;

                        /* The block is waiting on something */
                        wproc = (struct proc *)block->lf_id;
                        while (wproc->p_wchan &&
                            (wproc->p_wmesg == lockstr) &&
                            (i++ < maxlockdepth)) {
                                waitblock = (struct lockf *)wproc->p_wchan;
                                /* Get the owner of the blocking lock */
                                waitblock = waitblock->lf_next;
                                if ((waitblock->lf_flags & F_POSIX) == 0)
                                        break;
                                wproc = (struct proc *)waitblock->lf_id;
                                if (wproc == (struct proc *)lock->lf_id) {
                                        lf_free(lock);
                                        return (EDEADLK);
                                }
                        }
                }
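                /*
                 * Example of the cycle detected above (illustrative
                 * ranges): process A holds [0,9] and sleeps waiting for
                 * B's lock on [10,19].  If B then requests [0,9], the
                 * scan starts at A (owner of the blocking lock), finds
                 * A asleep on "lockf", follows A's wait channel to the
                 * lock B owns, and returns EDEADLK to B.
                 */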
                /*
                 * For flock type locks, we must first remove
                 * any shared locks that we hold before we sleep
                 * waiting for an exclusive lock.
                 */
                if ((lock->lf_flags & F_FLOCK) &&
                    lock->lf_type == F_WRLCK) {
                        lock->lf_type = F_UNLCK;
                        (void) lf_clearlock(lock);
                        lock->lf_type = F_WRLCK;
                }
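                /*
                 * For example, a process upgrading flock(fd, LOCK_SH)
                 * to flock(fd, LOCK_EX) drops its shared lock here
                 * first; otherwise two processes upgrading at once
                 * would sleep forever, each waiting for the other's
                 * shared lock to go away.
                 */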
                /*
                 * Add our lock to the blocked list and sleep until we're free.
                 * Remember who blocked us (for deadlock detection).
                 */
                lock->lf_next = block;
#ifdef LOCKF_DEBUG
                if (lockf_debug & DEBUG_SETLOCK) {
                        lf_print("lf_setlock", lock);
                        lf_print("lf_setlock: blocking on", block);
                }
#endif /* LOCKF_DEBUG */
                TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
                error = tsleep(lock, priority, lockstr, 0);
#if 0
                if (error) {
                        /*
                         * Delete ourselves from the waiting to lock list.
                         */
                        TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
                        lf_free(lock);
                        return (error);
                }
#else
                if (lock->lf_next != NULL) {
                        TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
                        lock->lf_next = NULL;
                }
                if (error) {
                        lf_free(lock);
                        return (error);
                }
#endif
        }
        /*
         * No blocks!!  Add the lock.  Note that we will
         * downgrade or upgrade any overlapping locks this
         * process already owns.
         *
         * Skip over locks owned by other processes.
         * Handle any locks that overlap and are owned by ourselves.
         */
        prev = head;
        block = *head;
        needtolink = 1;
        for (;;) {
                ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
                if (ovcase)
                        block = overlap->lf_next;
                /*
                 * Six cases:
                 *      0) no overlap
                 *      1) overlap == lock
                 *      2) overlap contains lock
                 *      3) lock contains overlap
                 *      4) overlap starts before lock
                 *      5) overlap ends after lock
                 */
                switch (ovcase) {
                case 0: /* no overlap */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        break;

                case 1: /* overlap == lock */
                        /*
                         * If downgrading lock, others may be
                         * able to acquire it.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK)
                                lf_wakelock(overlap);
                        overlap->lf_type = lock->lf_type;
                        lf_free(lock);
                        lock = overlap; /* for debug output below */
                        break;

                case 2: /* overlap contains lock */
                        /*
                         * Check for common starting point and different types.
                         */
                        if (overlap->lf_type == lock->lf_type) {
                                lf_free(lock);
                                lock = overlap; /* for debug output below */
                                break;
                        }
                        if (overlap->lf_start == lock->lf_start) {
                                *prev = lock;
                                lock->lf_next = overlap;
                                overlap->lf_start = lock->lf_end + 1;
                        } else
                                lf_split(overlap, lock);
                        lf_wakelock(overlap);
                        break;

                case 3: /* lock contains overlap */
                        /*
                         * If downgrading lock, others may be able to
                         * acquire it, otherwise take the list.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK) {
                                lf_wakelock(overlap);
                        } else {
                                while ((ltmp =
                                    TAILQ_FIRST(&overlap->lf_blkhd))) {
                                        TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
                                            lf_block);
                                        ltmp->lf_next = lock;
                                        TAILQ_INSERT_TAIL(&lock->lf_blkhd,
                                            ltmp, lf_block);
                                }
                        }
                        /*
                         * Add the new lock if necessary and delete the overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap->lf_next;
                                prev = &lock->lf_next;
                                needtolink = 0;
                        } else
                                *prev = overlap->lf_next;
                        lf_free(overlap);
                        continue;

                case 4: /* overlap starts before lock */
                        /*
                         * Add lock after overlap on the list.
                         */
                        lock->lf_next = overlap->lf_next;
                        overlap->lf_next = lock;
                        overlap->lf_end = lock->lf_start - 1;
                        prev = &lock->lf_next;
                        lf_wakelock(overlap);
                        needtolink = 0;
                        continue;

                case 5: /* overlap ends after lock */
                        /*
                         * Add the new lock before overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        overlap->lf_start = lock->lf_end + 1;
                        lf_wakelock(overlap);
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_SETLOCK) {
                lf_print("lf_setlock: got the lock", lock);
        }
#endif /* LOCKF_DEBUG */
        return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
        struct lockf **head = lock->lf_head;
        struct lockf *lf = *head;
        struct lockf *overlap, **prev;
        int ovcase;

        if (lf == NULL)
                return (0);
#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_CLEARLOCK)
                lf_print("lf_clearlock", lock);
#endif /* LOCKF_DEBUG */
        prev = head;
        while ((ovcase = lf_findoverlap(lf, lock, SELF,
                                        &prev, &overlap)) != 0) {
                /*
                 * Wakeup the list of locks to be retried.
                 */
                lf_wakelock(overlap);

                switch (ovcase) {

                case 1: /* overlap == lock */
                        *prev = overlap->lf_next;
                        lf_free(overlap);
                        break;

                case 2: /* overlap contains lock: split it */
                        if (overlap->lf_start == lock->lf_start) {
                                overlap->lf_start = lock->lf_end + 1;
                                break;
                        }
                        lf_split(overlap, lock);
                        overlap->lf_next = lock->lf_next;
                        break;

                case 3: /* lock contains overlap */
                        *prev = overlap->lf_next;
                        lf = overlap->lf_next;
                        lf_free(overlap);
                        continue;

                case 4: /* overlap starts before lock */
                        overlap->lf_end = lock->lf_start - 1;
                        prev = &overlap->lf_next;
                        lf = overlap->lf_next;
                        continue;

                case 5: /* overlap ends after lock */
                        overlap->lf_start = lock->lf_end + 1;
                        break;
                }
                break;
        }
        return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
        struct lockf *block;

#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_GETLOCK)
                lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

        if ((block = lf_getblock(lock)) != NULL) {
                fl->l_type = block->lf_type;
                fl->l_whence = SEEK_SET;
                fl->l_start = block->lf_start;
                if (block->lf_end == -1)
                        fl->l_len = 0;
                else
                        fl->l_len = block->lf_end - block->lf_start + 1;
                if (block->lf_flags & F_POSIX)
                        fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
                else
                        fl->l_pid = -1;
        } else {
                fl->l_type = F_UNLCK;
        }
        return (0);
}
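
/*
 * Userland view of lf_getlock() via fcntl(2) (a minimal sketch, error
 * handling omitted): F_GETLK overwrites the passed flock with the
 * first conflicting lock, or sets l_type to F_UNLCK if the requested
 * range could be locked.
 *
 *      struct flock fl = { .l_start = 0, .l_len = 0,
 *          .l_whence = SEEK_SET, .l_type = F_WRLCK };
 *      if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *              printf("pid %d holds a conflicting lock\n",
 *                  (int)fl.l_pid);
 */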

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lock)
{
        struct lockf **prev, *overlap, *lf;

        prev = lock->lf_head;
        lf = *prev;
        while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
                /*
                 * We've found an overlap, see if it blocks us
                 */
                if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
                        return (overlap);
                /*
                 * Nope, point to the next one on the list and
                 * see if it blocks us
                 */
                lf = overlap->lf_next;
        }
        return (NULL);
}
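
/*
 * Conflict matrix implemented by the test above: two F_RDLCK locks
 * never block each other; an F_WRLCK on either side always conflicts.
 *
 *                      held F_RDLCK    held F_WRLCK
 *      want F_RDLCK        no              yes
 *      want F_WRLCK        yes             yes
 */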

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *       may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
        off_t start, end;

#ifdef LOCKF_DEBUG
        if (lf && lockf_debug & DEBUG_FINDOVR)
                lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */

        *overlap = lf;
        start = lock->lf_start;
        end = lock->lf_end;
        while (lf != NULL) {
                if (((type & SELF) && lf->lf_id != lock->lf_id) ||
                    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
#ifdef LOCKF_DEBUG
                if (lockf_debug & DEBUG_FINDOVR)
                        lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
                /*
                 * OK, check for overlap
                 *
                 * Six cases:
                 *      0) no overlap
                 *      1) overlap == lock
                 *      2) overlap contains lock
                 *      3) lock contains overlap
                 *      4) overlap starts before lock
                 *      5) overlap ends after lock
                 */
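                /*
                 * Pictorially (lock is the request, lf the entry being
                 * checked; illustrative ranges only):
                 *
                 *      case 1:  lock |-------|  case 2:  lock   |---|
                 *               lf   |-------|           lf   |-------|
                 *
                 *      case 3:  lock |-------|  case 4:  lock    |-----|
                 *               lf     |---|             lf   |-----|
                 *
                 *      case 5:  lock |-----|
                 *               lf      |-----|
                 */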

                /* Case 0 */
                if ((lf->lf_end != -1 && start > lf->lf_end) ||
                    (end != -1 && lf->lf_start > end)) {
                        DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
                        if ((type & SELF) && end != -1 && lf->lf_start > end)
                                return (0);
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
                /* Case 1 */
                if ((lf->lf_start == start) && (lf->lf_end == end)) {
                        DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
                        return (1);
                }
                /* Case 2 */
                if ((lf->lf_start <= start) &&
                    (lf->lf_end == -1 ||
                    (end != -1 && lf->lf_end >= end))) {
                        DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
                        return (2);
                }
                /* Case 3 */
                if (start <= lf->lf_start &&
                    (end == -1 ||
                    (lf->lf_end != -1 && end >= lf->lf_end))) {
                        DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
                        return (3);
                }
                /* Case 4 */
                if ((lf->lf_start < start) &&
                    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
                        DPRINTF(("overlap starts before lock\n"),
                            DEBUG_FINDOVR);
                        return (4);
                }
                /* Case 5 */
                if ((lf->lf_start > start) &&
                    (end != -1) &&
                    ((lf->lf_end > end) || (lf->lf_end == -1))) {
                        DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
                        return (5);
                }
                panic("lf_findoverlap: default");
        }
        return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
        struct lockf *splitlock;

#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_SPLIT) {
                lf_print("lf_split", lock1);
                lf_print("splitting from", lock2);
        }
#endif /* LOCKF_DEBUG */
        /*
         * Check to see if splitting into only two pieces.
         */
        if (lock1->lf_start == lock2->lf_start) {
                lock1->lf_start = lock2->lf_end + 1;
                lock2->lf_next = lock1;
                return;
        }
        if (lock1->lf_end == lock2->lf_end) {
                lock1->lf_end = lock2->lf_start - 1;
                lock2->lf_next = lock1->lf_next;
                lock1->lf_next = lock2;
                return;
        }
        /*
         * Make a new lock consisting of the last part of
         * the encompassing lock
         */
        splitlock = lf_alloc(lock1->lf_uid, 0);
        memcpy(splitlock, lock1, sizeof(*splitlock));
        splitlock->lf_start = lock2->lf_end + 1;
        splitlock->lf_block.tqe_next = NULL;
        TAILQ_INIT(&splitlock->lf_blkhd);
        lock1->lf_end = lock2->lf_start - 1;
        /*
         * OK, now link it in
         */
        lock2->lf_next = splitlock;
        lock1->lf_next = lock2;
}
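
/*
 * Worked example (illustrative ranges): splitting a held lock on
 * [0,99] around lock2 == [40,59] takes the three-piece path above:
 * lock1 shrinks to [0,39], the newly allocated splitlock covers
 * [60,99], and lock2 is linked in between, giving the list
 * [0,39] -> [40,59] -> [60,99].
 */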

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(struct lockf *lock)
{
        struct lockf *wakelock;

        while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
                TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
                wakelock->lf_next = NULL;
                wakeup_one(wakelock);
        }
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(char *tag, struct lockf *lock)
{
        struct lockf    *block;

        printf("%s: lock %p for ", tag, lock);
        if (lock->lf_flags & F_POSIX)
                printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
        else
                printf("id %p", lock->lf_id);
        printf(" %s, start %llx, end %llx",
                lock->lf_type == F_RDLCK ? "shared" :
                lock->lf_type == F_WRLCK ? "exclusive" :
                lock->lf_type == F_UNLCK ? "unlock" :
                "unknown", lock->lf_start, lock->lf_end);
        block = TAILQ_FIRST(&lock->lf_blkhd);
        if (block)
                printf(" block");
        TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
                printf(" %p,", block);
        printf("\n");
}

void
lf_printlist(char *tag, struct lockf *lock)
{
        struct lockf *lf;

        printf("%s: Lock list:\n", tag);
        for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
                printf("\tlock %p for ", lf);
                if (lf->lf_flags & F_POSIX)
                        printf("proc %d", ((struct proc*)(lf->lf_id))->p_pid);
                else
                        printf("id %p", lf->lf_id);
                printf(" %s, start %llx, end %llx",
                        lf->lf_type == F_RDLCK ? "shared" :
                        lf->lf_type == F_WRLCK ? "exclusive" :
                        lf->lf_type == F_UNLCK ? "unlock" :
                        "unknown", lf->lf_start, lf->lf_end);
                printf("\n");
        }
}
#endif /* LOCKF_DEBUG */
