root/kern/subr_extent.c


DEFINITIONS

This source file includes the following definitions.
  1. extent_align
  2. LIST_HEAD
  3. extent_pool_init
  4. extent_print_all
  5. extent_create
  6. extent_destroy
  7. extent_insert_and_optimize
  8. extent_alloc_region
  9. extent_alloc_subregion
  10. extent_free
  11. extent_alloc_region_descriptor
  12. extent_free_region_descriptor
  13. extent_print
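
USAGE SKETCH

The listing below defines the public entry points extent_create(),
extent_alloc_region(), extent_alloc_subregion(), extent_free() and
extent_destroy().  The fragment that follows is a minimal, illustrative
sketch of how they fit together; the M_DEVBUF malloc type and the EX_NOWAIT
flag come from <sys/malloc.h> and <sys/extent.h> rather than from this file,
and error handling is omitted for brevity.

	struct extent *ex;
	u_long result;
	int error;

	/* Manage the resource range [0x1000, 0xffff]. */
	ex = extent_create("demo", 0x1000, 0xffff, M_DEVBUF, NULL, 0,
	    EX_NOWAIT);

	/* Reserve the specific region [0x2000, 0x20ff]. */
	error = extent_alloc_region(ex, 0x2000, 0x100, EX_NOWAIT);

	/* Allocate any 0x80-byte region aligned to 0x40 within the extent. */
	error = extent_alloc_subregion(ex, 0x1000, 0xffff, 0x80, 0x40, 0, 0,
	    EX_NOWAIT, &result);

	/* Release the fixed reservation and tear down the map. */
	extent_free(ex, 0x2000, 0x100, EX_NOWAIT);
	extent_destroy(ex);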

    1 /*      $OpenBSD: subr_extent.c,v 1.33 2006/06/04 22:47:16 miod Exp $   */
    2 /*      $NetBSD: subr_extent.c,v 1.7 1996/11/21 18:46:34 cgd Exp $      */
    3 
    4 /*-
    5  * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
    6  * All rights reserved.
    7  *
    8  * This code is derived from software contributed to The NetBSD Foundation
    9  * by Jason R. Thorpe and Matthias Drochner.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  * 3. All advertising materials mentioning features or use of this software
   20  *    must display the following acknowledgement:
   21  *      This product includes software developed by the NetBSD
   22  *      Foundation, Inc. and its contributors.
   23  * 4. Neither the name of The NetBSD Foundation nor the names of its
   24  *    contributors may be used to endorse or promote products derived
   25  *    from this software without specific prior written permission.
   26  *
   27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   37  * POSSIBILITY OF SUCH DAMAGE.
   38  */
   39 
   40 /*
   41  * General purpose extent manager.
   42  */
   43 
   44 #ifdef _KERNEL
   45 #include <sys/param.h>
   46 #include <sys/extent.h>
   47 #include <sys/malloc.h>
   48 #include <sys/time.h>
   49 #include <sys/systm.h>
   50 #include <sys/proc.h>
   51 #include <sys/queue.h>
   52 #include <sys/pool.h>
   53 #include <ddb/db_output.h>
   54 #else
   55 /*
   56  * User-land definitions, so this file can also be built in a testing harness.
   57  */
   58 #include <sys/param.h>
   59 #include <sys/pool.h>
   60 #include <sys/extent.h>
   61 #include <sys/queue.h>
   62 #include <errno.h>
   63 #include <stdlib.h>
   64 #include <stdio.h>
   65 
   66 #define malloc(s, t, flags)             malloc(s)
   67 #define free(p, t)                      free(p)
   68 #define tsleep(chan, pri, str, timo)    (EWOULDBLOCK)
   69 #define wakeup(chan)                    ((void)0)
   70 #define pool_get(pool, flags)           malloc((pool)->pr_size, 0, 0)
   71 #define pool_init(a, b, c, d, e, f, g)  (a)->pr_size = (b)
   72 #define pool_put(pool, rp)              free((rp), 0)
   73 #define panic                           printf
   74 #define splvm()                         (1)
   75 #define splx(s)                         ((void)(s))
   76 #endif
   77 
   78 static  void extent_insert_and_optimize(struct extent *, u_long, u_long,
   79             struct extent_region *, struct extent_region *);
   80 static  struct extent_region *extent_alloc_region_descriptor(struct extent *, int);
   81 static  void extent_free_region_descriptor(struct extent *,
   82             struct extent_region *);
   83 
   84 /*
   85  * Shortcut to align to an arbitrary power-of-two boundary.
   86  */
   87 static __inline__ u_long
   88 extent_align(u_long start, u_long align, u_long skew)
   89 {
   90         return ((((start - skew) + (align - 1)) & (-align)) + skew);
   91 }
   92 
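/*
 * Illustrative note (not part of the original source): with a power-of-two
 * "align", the expression above rounds "start" up to the next boundary
 * measured from "skew".  For example:
 *
 *	extent_align(0x1001, 0x100, 0)    == 0x1100
 *	extent_align(0x1000, 0x100, 0)    == 0x1000	(already aligned)
 *	extent_align(0x1001, 0x100, 0x10) == 0x1010	(boundary offset by skew)
 */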
   93 
   94 #if defined(DIAGNOSTIC) || defined(DDB)
   95 /*
   96  * Register the extent on a doubly linked list.
   97  * Should work, no?
   98  */
   99 static LIST_HEAD(listhead, extent) ext_list;
  100 static  void extent_register(struct extent *);
  101 
  102 static void
  103 extent_register(struct extent *ex)
  104 {
  105 #ifdef DIAGNOSTIC
  106         struct extent *ep;
  107 #endif
  108         static int initialized;
  109 
  110         if (!initialized){
  111                 LIST_INIT(&ext_list);
  112                 initialized = 1;
  113         }
  114 
  115 #ifdef DIAGNOSTIC
  116         LIST_FOREACH(ep, &ext_list, ex_link) {
  117                 if (ep == ex)
  118                         panic("extent_register: already registered");
  119         }
  120 #endif
  121 
  122         /* Insert into list */
  123         LIST_INSERT_HEAD(&ext_list, ex, ex_link);
  124 }
  125 #endif  /* DIAGNOSTIC || DDB */
  126 
  127 struct pool ex_region_pl;
  128 
  129 static void
  130 extent_pool_init(void)
  131 {
  132         static int inited;
  133 
  134         if (!inited) {
  135                 pool_init(&ex_region_pl, sizeof(struct extent_region), 0, 0, 0,
  136                     "extentpl", NULL);
  137                 inited = 1;
  138         }
  139 }
  140 
  141 #ifdef DDB
  142 /*
  143  * Print out all registered extents.  This is used by the
  144  * "show extents" command in DDB.
  145  */
  146 void
  147 extent_print_all(void)
  148 {
  149         struct extent *ep;
  150 
  151         LIST_FOREACH(ep, &ext_list, ex_link) {
  152                 extent_print(ep);
  153         }
  154 }
  155 #endif
  156 
  157 /*
  158  * Allocate and initialize an extent map.
  159  */
  160 struct extent *
  161 extent_create(char *name, u_long start, u_long end, int mtype, caddr_t storage,
  162     size_t storagesize, int flags)
  163 {
  164         struct extent *ex;
  165         caddr_t cp = storage;
  166         size_t sz = storagesize;
  167         struct extent_region *rp;
  168         int fixed_extent = (storage != NULL);
  169 
  170 #ifdef DIAGNOSTIC
  171         /* Check arguments. */
  172         if (name == NULL)
  173                 panic("extent_create: name == NULL");
  174         if (end < start) {
  175                 printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
  176                     name, start, end);
  177                 panic("extent_create: end < start");
  178         }
  179         if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
  180                 panic("extent_create: fixed extent, bad storagesize 0x%lx",
  181                     (u_long)storagesize);
  182         if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
  183                 panic("extent_create: storage provided for non-fixed");
  184 #endif
  185 
  186         extent_pool_init();
  187 
  188         /* Allocate extent descriptor. */
  189         if (fixed_extent) {
  190                 struct extent_fixed *fex;
  191 
  192                 bzero(storage, storagesize);
  193 
  194                 /*
  195                  * Align all descriptors on "long" boundaries.
  196                  */
  197                 fex = (struct extent_fixed *)cp;
  198                 ex = (struct extent *)fex;
  199                 cp += ALIGN(sizeof(struct extent_fixed));
  200                 sz -= ALIGN(sizeof(struct extent_fixed));
  201                 fex->fex_storage = storage;
  202                 fex->fex_storagesize = storagesize;
  203 
  204                 /*
  205                  * In a fixed extent, we have to pre-allocate region
  206                  * descriptors and place them in the extent's freelist.
  207                  */
  208                 LIST_INIT(&fex->fex_freelist);
  209                 while (sz >= ALIGN(sizeof(struct extent_region))) {
  210                         rp = (struct extent_region *)cp;
  211                         cp += ALIGN(sizeof(struct extent_region));
  212                         sz -= ALIGN(sizeof(struct extent_region));
  213                         LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
  214                 }
  215         } else {
  216                 ex = (struct extent *)malloc(sizeof(struct extent),
  217                     mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
  218                 if (ex == NULL)
  219                         return (NULL);
  220         }
  221 
  222         /* Fill in the extent descriptor and return it to the caller. */
  223         LIST_INIT(&ex->ex_regions);
  224         ex->ex_name = name;
  225         ex->ex_start = start;
  226         ex->ex_end = end;
  227         ex->ex_mtype = mtype;
  228         ex->ex_flags = 0;
  229         if (fixed_extent)
  230                 ex->ex_flags |= EXF_FIXED;
  231         if (flags & EX_NOCOALESCE)
  232                 ex->ex_flags |= EXF_NOCOALESCE;
  233 
  234 #if defined(DIAGNOSTIC) || defined(DDB)
  235         extent_register(ex);
  236 #endif
  237         return (ex);
  238 }
  239 
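/*
 * Illustrative sketch (not part of the original file): a "fixed" extent
 * draws its descriptor and region storage from caller-provided memory and
 * never calls malloc(), e.g. for resource maps created before malloc() is
 * available.  EXTENT_FIXED_STORAGE_SIZE() is the sizing helper from
 * <sys/extent.h>; the "iomem" names below are made up for the example.
 */
#if 0
static long iomem_storage[EXTENT_FIXED_STORAGE_SIZE(16) / sizeof(long)];
static struct extent *iomem_ex;

void
iomem_extent_init(void)
{
	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)iomem_storage, sizeof(iomem_storage),
	    EX_NOWAIT | EX_NOCOALESCE);
}
#endif
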
  240 /*
  241  * Destroy an extent map.
  242  */
  243 void
  244 extent_destroy(struct extent *ex)
  245 {
  246         struct extent_region *rp, *orp;
  247 
  248 #ifdef DIAGNOSTIC
  249         /* Check arguments. */
  250         if (ex == NULL)
  251                 panic("extent_destroy: NULL extent");
  252 #endif
  253 
  254         /* Free all region descriptors in extent. */
  255         for (rp = LIST_FIRST(&ex->ex_regions);
  256             rp != LIST_END(&ex->ex_regions); ) {
  257                 orp = rp;
  258                 rp = LIST_NEXT(rp, er_link);
  259                 LIST_REMOVE(orp, er_link);
  260                 extent_free_region_descriptor(ex, orp);
  261         }
  262 
  263 #if defined(DIAGNOSTIC) || defined(DDB)
  264         /* Remove from the list of all extents. */
  265         LIST_REMOVE(ex, ex_link);
  266 #endif
  267 
  268         /* If we're not a fixed extent, free the extent descriptor itself. */
  269         if ((ex->ex_flags & EXF_FIXED) == 0)
  270                 free(ex, ex->ex_mtype);
  271 }
  272 
  273 /*
  274  * Insert a region descriptor into the sorted region list after the
  275  * entry "after" or at the head of the list (if "after" is NULL).
  276  * The region descriptor we insert is passed in "rp".  We must
  277  * allocate the region descriptor before calling this function!
  278  * If we don't need the region descriptor, it will be freed here.
  279  */
  280 static void
  281 extent_insert_and_optimize(struct extent *ex, u_long start, u_long size,
  282     struct extent_region *after, struct extent_region *rp)
  283 {
  284         struct extent_region *nextr;
  285         int appended = 0;
  286 
  287         if (after == NULL) {
  288                 /*
  289                  * We're the first in the region list.  If there's
  290                  * a region after us, attempt to coalesce to save
  291                  * descriptor overhead.
  292                  */
  293                 if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
  294                     !LIST_EMPTY(&ex->ex_regions) &&
  295                     ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
  296                         /*
  297                          * We can coalesce.  Prepend us to the first region.
  298                          */
  299                         LIST_FIRST(&ex->ex_regions)->er_start = start;
  300                         extent_free_region_descriptor(ex, rp);
  301                         return;
  302                 }
  303 
  304                 /*
  305                  * Can't coalesce.  Fill in the region descriptor
  306                  * and insert us at the head of the region list.
  307                  */
  308                 rp->er_start = start;
  309                 rp->er_end = start + (size - 1);
  310                 LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
  311                 return;
  312         }
  313 
  314         /*
  315          * If EXF_NOCOALESCE is set, coalescing is disallowed.
  316          */
  317         if (ex->ex_flags & EXF_NOCOALESCE)
  318                 goto cant_coalesce;
  319 
  320         /*
  321          * Attempt to coalesce with the region before us.
  322          */
  323         if ((after->er_end + 1) == start) {
  324                 /*
  325                  * We can coalesce.  Append ourselves and make
  326                  * note of it.
  327                  */
  328                 after->er_end = start + (size - 1);
  329                 appended = 1;
  330         }
  331 
  332         /*
  333          * Attempt to coalesce with the region after us.
  334          */
  335         if (LIST_NEXT(after, er_link) != NULL &&
  336             ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
  337                 /*
  338                  * We can coalesce.  Note that if we appended ourselves
  339                  * to the previous region, we exactly fit the gap, and
  340                  * can free the "next" region descriptor.
  341                  */
  342                 if (appended) {
  343                         /*
  344                          * Yup, we can free it up.
  345                          */
  346                         after->er_end = LIST_NEXT(after, er_link)->er_end;
  347                         nextr = LIST_NEXT(after, er_link);
  348                         LIST_REMOVE(nextr, er_link);
  349                         extent_free_region_descriptor(ex, nextr);
  350                 } else {
  351                         /*
  352                          * Nope, just prepend us to the next region.
  353                          */
  354                         LIST_NEXT(after, er_link)->er_start = start;
  355                 }
  356 
  357                 extent_free_region_descriptor(ex, rp);
  358                 return;
  359         }
  360 
  361         /*
  362          * We weren't able to coalesce with the next region, but
  363          * we don't need to allocate a region descriptor if we
  364          * appended ourselves to the previous region.
  365          */
  366         if (appended) {
  367                 extent_free_region_descriptor(ex, rp);
  368                 return;
  369         }
  370 
  371  cant_coalesce:
  372 
  373         /*
  374          * Fill in the region descriptor and insert ourselves
  375          * into the region list.
  376          */
  377         rp->er_start = start;
  378         rp->er_end = start + (size - 1);
  379         LIST_INSERT_AFTER(after, rp, er_link);
  380 }
  381 
  382 /*
  383  * Allocate a specific region in an extent map.
  384  */
  385 int
  386 extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags)
  387 {
  388         struct extent_region *rp, *last, *myrp;
  389         u_long end = start + (size - 1);
  390         int error;
  391 
  392 #ifdef DIAGNOSTIC
  393         /* Check arguments. */
  394         if (ex == NULL)
  395                 panic("extent_alloc_region: NULL extent");
  396         if (size < 1) {
  397                 printf("extent_alloc_region: extent `%s', size 0x%lx\n",
  398                     ex->ex_name, size);
  399                 panic("extent_alloc_region: bad size");
  400         }
  401         if (end < start) {
  402                 printf(
  403                  "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
  404                  ex->ex_name, start, size);
  405                 panic("extent_alloc_region: overflow");
  406         }
  407 #endif
  408 
  409         /*
  410          * Make sure the requested region lies within the
  411          * extent.
  412          */
  413         if ((start < ex->ex_start) || (end > ex->ex_end)) {
  414 #ifdef DIAGNOSTIC
  415                 printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
  416                     ex->ex_name, ex->ex_start, ex->ex_end);
  417                 printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
  418                     start, end);
  419                 panic("extent_alloc_region: region lies outside extent");
  420 #else
  421                 return (EINVAL);
  422 #endif
  423         }
  424 
  425         /*
  426          * Allocate the region descriptor.  It will be freed later
  427          * if we can coalesce with another region.
  428          */
  429         myrp = extent_alloc_region_descriptor(ex, flags);
  430         if (myrp == NULL) {
  431 #ifdef DIAGNOSTIC
  432                 printf(
  433                     "extent_alloc_region: can't allocate region descriptor\n");
  434 #endif
  435                 return (ENOMEM);
  436         }
  437 
  438  alloc_start:
  439         /*
  440          * Attempt to place ourselves in the desired area of the
  441          * extent.  We save ourselves some work by keeping the list sorted.
  442          * In other words, if the start of the current region is greater
  443          * than the end of our region, we don't have to search any further.
  444          */
  445 
  446         /*
  447          * Keep a pointer to the last region we looked at so
  448          * that we don't have to traverse the list again when
  449          * we insert ourselves.  If "last" is NULL when we
  450          * finally insert ourselves, we go at the head of the
  451          * list.  See extent_insert_and_optimize() for details.
  452          */
  453         last = NULL;
  454 
  455         LIST_FOREACH(rp, &ex->ex_regions, er_link) {
  456                 if (rp->er_start > end) {
  457                         /*
  458                          * We lie before this region and don't
  459                          * conflict.
  460                          */
  461                         break;
  462                 }
  463 
  464                 /*
  465                  * The current region begins before we end.
  466                  * Check for a conflict.
  467                  */
  468                 if (rp->er_end >= start) {
  469                         /*
  470                          * We conflict.  If we can (and want to) wait,
  471                          * do so.
  472                          */
  473                         if (flags & EX_WAITSPACE) {
  474                                 ex->ex_flags |= EXF_WANTED;
  475                                 error = tsleep(ex,
  476                                     PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
  477                                     "extnt", 0);
  478                                 if (error)
  479                                         return (error);
  480                                 goto alloc_start;
  481                         }
  482                         extent_free_region_descriptor(ex, myrp);
  483                         return (EAGAIN);
  484                 }
  485                 /*
  486                  * We don't conflict, but this region lies before
  487                  * us.  Keep a pointer to this region, and keep
  488                  * trying.
  489                  */
  490                 last = rp;
  491         }
  492 
  493         /*
  494          * We don't conflict with any regions.  "last" points
  495          * to the region we fall after, or is NULL if we belong
  496          * at the beginning of the region list.  Insert ourselves.
  497          */
  498         extent_insert_and_optimize(ex, start, size, last, myrp);
  499         return (0);
  500 }
  501 
  502 /*
  503  * Macro to check (x + y) <= z.  This check is designed to fail
  504  * if an overflow occurs.
  505  */
  506 #define LE_OV(x, y, z)  ((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
  507 
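/*
 * Worked example (illustrative, not from the original source): with 32-bit
 * u_longs, LE_OV(0xfffffff0, 0x100, 0xffffffff) evaluates to 0 because
 * x + y wraps around to 0xf0 and the (x + y) >= x test fails, whereas a
 * naive (x + y) <= z comparison would wrongly succeed on the wrapped value.
 */
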
  508 /*
  509  * Allocate a region in an extent map subregion.
  510  *
  511  * If EX_FAST is specified, we return the first fit in the map.
  512  * Otherwise, we try to minimize fragmentation by finding the
  513  * smallest gap that will hold the request.
  514  *
  515  * The allocated region is aligned to "alignment", which must be
  516  * a power of 2.
  517  */
  518 int
  519 extent_alloc_subregion(struct extent *ex, u_long substart, u_long subend,
  520     u_long size, u_long alignment, u_long skew, u_long boundary, int flags,
  521     u_long *result)
  522 {
  523         struct extent_region *rp, *myrp, *last, *bestlast;
  524         u_long newstart, newend, exend, beststart, bestovh, ovh;
  525         u_long dontcross;
  526         int error;
  527 
  528 #ifdef DIAGNOSTIC
  529         /* Check arguments. */
  530         if (ex == NULL)
  531                 panic("extent_alloc_subregion: NULL extent");
  532         if (result == NULL)
  533                 panic("extent_alloc_subregion: NULL result pointer");
  534         if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
  535             (subend > ex->ex_end) || (subend < ex->ex_start)) {
  536   printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
  537                     ex->ex_name, ex->ex_start, ex->ex_end);
  538                 printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
  539                     substart, subend);
  540                 panic("extent_alloc_subregion: bad subregion");
  541         }
  542         if ((size < 1) || ((size - 1) > (subend - substart))) {
  543                 printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
  544                     ex->ex_name, size);
  545                 panic("extent_alloc_subregion: bad size");
  546         }
  547         if (alignment == 0)
  548                 panic("extent_alloc_subregion: bad alignment");
  549         if (boundary && (boundary < size)) {
  550                 printf(
  551                     "extent_alloc_subregion: extent `%s', size 0x%lx, "
  552                     "boundary 0x%lx\n", ex->ex_name, size, boundary);
  553                 panic("extent_alloc_subregion: bad boundary");
  554         }
  555 #endif
  556 
  557         /*
  558          * Allocate the region descriptor.  It will be freed later
  559          * if we can coalesce with another region.
  560          */
  561         myrp = extent_alloc_region_descriptor(ex, flags);
  562         if (myrp == NULL) {
  563 #ifdef DIAGNOSTIC
  564                 printf(
  565                  "extent_alloc_subregion: can't allocate region descriptor\n");
  566 #endif
  567                 return (ENOMEM);
  568         }
  569 
  570  alloc_start:
  571         /*
  572          * Keep a pointer to the last region we looked at so
  573          * that we don't have to traverse the list again when
  574          * we insert ourselves.  If "last" is NULL when we
  575          * finally insert ourselves, we go at the head of the
  576          * list.  See extent_insert_and_optimize() for details.
  577          */
  578         last = NULL;
  579 
  580         /*
  581          * Keep track of size and location of the smallest
  582          * chunk we fit in.
  583          *
  584          * Since the extent can be as large as the numeric range
  585          * of the CPU (0 - 0xffffffff for 32-bit systems), the
  586          * best overhead value can be the maximum unsigned integer.
  587          * Thus, we initialize "bestovh" to 0, since we insert ourselves
  588          * into the region list immediately on an exact match (which
  589          * is the only case where "bestovh" would be set to 0).
  590          */
  591         bestovh = 0;
  592         beststart = 0;
  593         bestlast = NULL;
  594 
  595         /*
  596          * Keep track of end of free region.  This is either the end of extent
  597          * or the start of a region past the subend.
  598          */
  599         exend = ex->ex_end;
  600 
  601         /*
  602          * For N allocated regions, we must make (N + 1)
  603          * checks for unallocated space.  The first chunk we
  604          * check is the area from the beginning of the subregion
  605          * to the first allocated region after that point.
  606          */
  607         newstart = extent_align(substart, alignment, skew);
  608         if (newstart < ex->ex_start) {
  609 #ifdef DIAGNOSTIC
  610                 printf(
  611       "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
  612                  ex->ex_name, ex->ex_start, ex->ex_end, alignment);
  613                 panic("extent_alloc_subregion: overflow after alignment");
  614 #else
  615                 extent_free_region_descriptor(ex, myrp);
  616                 return (EINVAL);
  617 #endif
  618         }
  619 
  620         /*
  621          * Find the first allocated region that begins on or after
  622          * the subregion start, advancing the "last" pointer along
  623          * the way.
  624          */
  625         LIST_FOREACH(rp, &ex->ex_regions, er_link) {
  626                 if (rp->er_start >= newstart)
  627                         break;
  628                 last = rp;
  629         }
  630 
  631         /*
  632          * Relocate the start of our candidate region to the end of
  633          * the last allocated region (if there was one overlapping
  634          * our subrange).
  635          */
  636         if (last != NULL && last->er_end >= newstart)
  637                 newstart = extent_align((last->er_end + 1), alignment, skew);
  638 
  639         for (; rp != LIST_END(&ex->ex_regions); rp = LIST_NEXT(rp, er_link)) {
  640                 /*
  641                  * If the region starts past the subend, bail out and see
  642                  * if we fit against the subend.
  643                  */
  644                 if (rp->er_start > subend) {
  645                         exend = rp->er_start;
  646                         break;
  647                 }
  648 
  649                 /*
  650                  * Check the chunk before "rp".  Note that our
  651                  * comparison is safe from overflow conditions.
  652                  */
  653                 if (LE_OV(newstart, size, rp->er_start)) {
  654                         /*
  655                          * Do a boundary check, if necessary.  Note
  656                          * that a region may *begin* on the boundary,
  657                          * but it must end before the boundary.
  658                          */
  659                         if (boundary) {
  660                                 newend = newstart + (size - 1);
  661 
  662                                 /*
  663                                  * Calculate the next boundary after the start
  664                                  * of this region.
  665                                  */
  666                                 dontcross = extent_align(newstart+1, boundary, 
  667                                     (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
  668                                     - 1;
  669 
  670 #if 0
  671                                 printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
  672                                     newstart, newend, ex->ex_start, ex->ex_end,
  673                                     boundary, dontcross);
  674 #endif
  675 
  676                                 /* Check for overflow */
  677                                 if (dontcross < ex->ex_start)
  678                                         dontcross = ex->ex_end;
  679                                 else if (newend > dontcross) {
  680                                         /*
  681                                          * Candidate region crosses boundary.
  682                                          * Throw away the leading part and see
  683                                          * if we still fit.
  684                                          */
  685                                         newstart = dontcross + 1;
  686                                         newend = newstart + (size - 1);
  687                                         dontcross += boundary;
  688                                         if (!LE_OV(newstart, size, rp->er_start))
  689                                                 goto skip;
  690                                 }
  691 
  692                                 /*
  693                                  * If we run past the end of
  694                                  * the extent or the boundary
  695                                  * overflows, then the request
  696                                  * can't fit.
  697                                  */
  698                                 if (newstart + size - 1 > ex->ex_end ||
  699                                     dontcross < newstart)
  700                                         goto fail;
  701                         }
  702 
  703                         /*
  704                          * We would fit into this space.  Calculate
  705                          * the overhead (wasted space).  If we exactly
  706                          * fit, or we're taking the first fit, insert
  707                          * ourselves into the region list.
  708                          */
  709                         ovh = rp->er_start - newstart - size;
  710                         if ((flags & EX_FAST) || (ovh == 0))
  711                                 goto found;
  712 
  713                         /*
  714                          * Don't exactly fit, but check to see
  715                          * if we're better than any current choice.
  716                          */
  717                         if ((bestovh == 0) || (ovh < bestovh)) {
  718                                 bestovh = ovh;
  719                                 beststart = newstart;
  720                                 bestlast = last;
  721                         }
  722                 }
  723 
  724 skip:
  725                 /*
  726                  * Skip past the current region and check again.
  727                  */
  728                 newstart = extent_align((rp->er_end + 1), alignment, skew);
  729                 if (newstart < rp->er_end) {
  730                         /*
  731                          * Overflow condition.  Don't error out, since
  732                          * we might have a chunk of space that we can
  733                          * use.
  734                          */
  735                         goto fail;
  736                 }
  737                 
  738                 last = rp;
  739         }
  740 
  741         /*
  742          * The final check is from the current starting point to the
  743          * end of the subregion.  If there were no allocated regions,
  744          * "newstart" is set to the beginning of the subregion, or
  745          * just past the end of the last allocated region, adjusted
  746          * for alignment in either case.
  747          */
  748         if (LE_OV(newstart, (size - 1), subend)) {
  749                 /*
  750                  * Do a boundary check, if necessary.  Note
  751                  * that a region may *begin* on the boundary,
  752                  * but it must end before the boundary.
  753                  */
  754                 if (boundary) {
  755                         newend = newstart + (size - 1);
  756 
  757                         /*
  758                          * Calculate the next boundary after the start
  759                          * of this region.
  760                          */
  761                         dontcross = extent_align(newstart+1, boundary, 
  762                             (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
  763                             - 1;
  764 
  765 #if 0
  766                         printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
  767                             newstart, newend, ex->ex_start, ex->ex_end,
  768                             boundary, dontcross);
  769 #endif
  770 
  771                         /* Check for overflow */
  772                         if (dontcross < ex->ex_start)
  773                                 dontcross = ex->ex_end;
  774                         else if (newend > dontcross) {
  775                                 /*
  776                                  * Candidate region crosses boundary.
  777                                  * Throw away the leading part and see
  778                                  * if we still fit.
  779                                  */
  780                                 newstart = dontcross + 1;
  781                                 newend = newstart + (size - 1);
  782                                 dontcross += boundary;
  783                                 if (!LE_OV(newstart, (size - 1), subend))
  784                                         goto fail;
  785                         }
  786 
  787                         /*
  788                          * If we run past the end of
  789                          * the extent or the boundary
  790                          * overflows, then the request
  791                          * can't fit.
  792                          */
  793                         if (newstart + size - 1 > ex->ex_end ||
  794                             dontcross < newstart)
  795                                 goto fail;
  796                 }
  797 
  798                 /*
  799                  * We would fit into this space.  Calculate
  800                  * the overhead (wasted space).  If we exactly
  801                  * fit, or we're taking the first fit, insert
  802                  * ourselves into the region list.
  803                  */
  804                 ovh = exend - newstart - (size - 1);
  805                 if ((flags & EX_FAST) || (ovh == 0))
  806                         goto found;
  807 
  808                 /*
  809                  * Don't exactly fit, but check to see
  810                  * if we're better than any current choice.
  811                  */
  812                 if ((bestovh == 0) || (ovh < bestovh)) {
  813                         bestovh = ovh;
  814                         beststart = newstart;
  815                         bestlast = last;
  816                 }
  817         }
  818 
  819  fail:
  820         /*
  821          * One of the following two conditions has
  822          * occurred:
  823          *
  824          *      There is no chunk large enough to hold the request.
  825          *
  826          *      If EX_FAST was not specified, there is not an
  827          *      exact match for the request.
  828          *
  829          * Note that if we reach this point and EX_FAST is
  830          * set, then we know there is no space in the extent for
  831          * the request.
  832          */
  833         if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
  834                 /*
  835                  * We have a match that's "good enough".
  836                  */
  837                 newstart = beststart;
  838                 last = bestlast;
  839                 goto found;
  840         }
  841 
  842         /*
  843          * No space currently available.  Wait for it to free up,
  844          * if possible.
  845          */
  846         if (flags & EX_WAITSPACE) {
  847                 ex->ex_flags |= EXF_WANTED;
  848                 error = tsleep(ex,
  849                     PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
  850                 if (error)
  851                         return (error);
  852                 goto alloc_start;
  853         }
  854 
  855         extent_free_region_descriptor(ex, myrp);
  856         return (EAGAIN);
  857 
  858  found:
  859         /*
  860          * Insert ourselves into the region list.
  861          */
  862         extent_insert_and_optimize(ex, newstart, size, last, myrp);
  863         *result = newstart;
  864         return (0);
  865 }
  866 
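/*
 * Illustrative call (not part of the original file): allocate a 0x1000-byte
 * window out of [0x100000, 0x1fffff], 0x1000-aligned and not crossing a
 * 0x10000 boundary.  "isa_mem_ex" is a made-up extent pointer; EX_NOWAIT
 * comes from <sys/extent.h>.
 */
#if 0
	u_long dma_addr;
	int error;

	error = extent_alloc_subregion(isa_mem_ex, 0x100000, 0x1fffff,
	    0x1000, 0x1000, 0, 0x10000, EX_NOWAIT, &dma_addr);
	if (error)
		printf("no space for DMA window: error %d\n", error);
#endif
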
  867 int
  868 extent_free(struct extent *ex, u_long start, u_long size, int flags)
  869 {
  870         struct extent_region *rp, *nrp = NULL;
  871         u_long end = start + (size - 1);
  872         int exflags;
  873 
  874 #ifdef DIAGNOSTIC
  875         /* Check arguments. */
  876         if (ex == NULL)
  877                 panic("extent_free: NULL extent");
  878         if ((start < ex->ex_start) || (end > ex->ex_end)) {
  879                 extent_print(ex);
  880                 printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
  881                     ex->ex_name, start, size);
  882                 panic("extent_free: extent `%s', region not within extent",
  883                     ex->ex_name);
  884         }
  885         /* Check for an overflow. */
  886         if (end < start) {
  887                 extent_print(ex);
  888                 printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
  889                     ex->ex_name, start, size);
  890                 panic("extent_free: overflow");
  891         }
  892 #endif
  893 
  894         /*
  895          * If we're allowing coalescing, we must allocate a region
  896          * descriptor now, since it might block.
  897          *
  898          * XXX Make a static, create-time flags word, so we don't
  899          * XXX have to lock to read it!
  900          */
  901         exflags = ex->ex_flags;
  902 
  903         if ((exflags & EXF_NOCOALESCE) == 0) {
  904                 /* Allocate a region descriptor. */
  905                 nrp = extent_alloc_region_descriptor(ex, flags);
  906                 if (nrp == NULL)
  907                         return (ENOMEM);
  908         }
  909 
  910         /*
  911          * Find region and deallocate.  Several possibilities:
  912          *
  913          *      1. (start == er_start) && (end == er_end):
  914          *         Free descriptor.
  915          *
  916          *      2. (start == er_start) && (end < er_end):
  917          *         Adjust er_start.
  918          *
  919          *      3. (start > er_start) && (end == er_end):
  920          *         Adjust er_end.
  921          *
  922          *      4. (start > er_start) && (end < er_end):
  923          *         Fragment region.  Requires descriptor alloc.
  924          *
  925          * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
  926          * is not set.
  927          */
  928         LIST_FOREACH(rp, &ex->ex_regions, er_link) {
  929                 /*
  930                  * Save ourselves some comparisons; does the current
  931                  * region end before chunk to be freed begins?  If so,
  932                  * then we haven't found the appropriate region descriptor.
  933                  */
  934                 if (rp->er_end < start)
  935                         continue;
  936 
  937                 /*
  938                  * Save ourselves some traversal; does the current
  939                  * region begin after the chunk to be freed ends?  If so,
  940                  * then we've already passed any possible region descriptors
  941                  * that might have contained the chunk to be freed.
  942                  */
  943                 if (rp->er_start > end)
  944                         break;
  945 
  946                 /* Case 1. */
  947                 if ((start == rp->er_start) && (end == rp->er_end)) {
  948                         LIST_REMOVE(rp, er_link);
  949                         extent_free_region_descriptor(ex, rp);
  950                         goto done;
  951                 }
  952 
  953                 /*
  954                  * The following cases all require that EXF_NOCOALESCE
  955                  * is not set.
  956                  */
  957                 if (ex->ex_flags & EXF_NOCOALESCE)
  958                         continue;
  959 
  960                 /* Case 2. */
  961                 if ((start == rp->er_start) && (end < rp->er_end)) {
  962                         rp->er_start = (end + 1);
  963                         goto done;
  964                 }
  965 
  966                 /* Case 3. */
  967                 if ((start > rp->er_start) && (end == rp->er_end)) {
  968                         rp->er_end = (start - 1);
  969                         goto done;
  970                 }
  971 
  972                 /* Case 4. */
  973                 if ((start > rp->er_start) && (end < rp->er_end)) {
  974                         /* Fill in new descriptor. */
  975                         nrp->er_start = end + 1;
  976                         nrp->er_end = rp->er_end;
  977 
  978                         /* Adjust current descriptor. */
  979                         rp->er_end = start - 1;
  980 
  981                         /* Insert new descriptor after current. */
  982                         LIST_INSERT_AFTER(rp, nrp, er_link);
  983 
  984                         /* We used the new descriptor, so don't free it below */
  985                         nrp = NULL;
  986                         goto done;
  987                 }
  988         }
  989 
  990         /* Region not found, or request otherwise invalid. */
  991 #if defined(DIAGNOSTIC) || defined(DDB)
  992         extent_print(ex);
  993 #endif
  994         printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
  995         panic("extent_free: region not found");
  996 
  997  done:
  998         if (nrp != NULL)
  999                 extent_free_region_descriptor(ex, nrp);
 1000         if (ex->ex_flags & EXF_WANTED) {
 1001                 ex->ex_flags &= ~EXF_WANTED;
 1002                 wakeup(ex);
 1003         }
 1004         return (0);
 1005 }
 1006 
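/*
 * Illustrative sequence (not part of the original file) for case 4 in the
 * comment above: freeing the middle of an allocated region splits its
 * descriptor in two, which is why extent_free() may have to allocate a
 * fresh descriptor up front.  "ex" is a made-up extent covering at least
 * [0x1000, 0x1fff].
 */
#if 0
	/* The extent now contains one region, [0x1000, 0x1fff]. */
	extent_alloc_region(ex, 0x1000, 0x1000, EX_NOWAIT);

	/* Free the middle; the region splits into [0x1000, 0x17ff]
	   and [0x1900, 0x1fff]. */
	extent_free(ex, 0x1800, 0x100, EX_NOWAIT);
#endif
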
 1007 static struct extent_region *
 1008 extent_alloc_region_descriptor(struct extent *ex, int flags)
 1009 {
 1010         struct extent_region *rp;
 1011         int s;
 1012 
 1013         if (ex->ex_flags & EXF_FIXED) {
 1014                 struct extent_fixed *fex = (struct extent_fixed *)ex;
 1015 
 1016                 while (LIST_EMPTY(&fex->fex_freelist)) {
 1017                         if (flags & EX_MALLOCOK)
 1018                                 goto alloc;
 1019 
 1020                         if ((flags & EX_WAITOK) == 0)
 1021                                 return (NULL);
 1022                         ex->ex_flags |= EXF_FLWANTED;
 1023                         if (tsleep(&fex->fex_freelist,
 1024                             PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
 1025                             "extnt", 0))
 1026                                 return (NULL);
 1027                 }
 1028                 rp = LIST_FIRST(&fex->fex_freelist);
 1029                 LIST_REMOVE(rp, er_link);
 1030 
 1031                 /*
 1032                  * Don't muck with flags after pulling it off the
 1033                  * freelist; it may be a dynamically allocated
 1034                  * region pointer that was kindly given to us,
 1035                  * and we need to preserve that information.
 1036                  */
 1037 
 1038                 return (rp);
 1039         }
 1040 
 1041  alloc:
 1042         s = splvm();
 1043         rp = pool_get(&ex_region_pl, (flags & EX_WAITOK) ? PR_WAITOK : 0);
 1044         splx(s);
 1045         if (rp != NULL)
 1046                 rp->er_flags = ER_ALLOC;
 1047 
 1048         return (rp);
 1049 }
 1050 
 1051 static void
 1052 extent_free_region_descriptor(struct extent *ex, struct extent_region *rp)
 1053 {
 1054         int s;
 1055 
 1056         if (ex->ex_flags & EXF_FIXED) {
 1057                 struct extent_fixed *fex = (struct extent_fixed *)ex;
 1058 
 1059                 /*
 1060                  * If someone's waiting for a region descriptor,
 1061                  * be nice and give them this one, rather than
 1062                  * just free'ing it back to the system.
 1063                  * just freeing it back to the system.
 1064                 if (rp->er_flags & ER_ALLOC) {
 1065                         if (ex->ex_flags & EXF_FLWANTED) {
 1066                                 /* Clear all but ER_ALLOC flag. */
 1067                                 rp->er_flags = ER_ALLOC;
 1068                                 LIST_INSERT_HEAD(&fex->fex_freelist, rp,
 1069                                     er_link);
 1070                                 goto wake_em_up;
 1071                         } else {
 1072                                 s = splvm();
 1073                                 pool_put(&ex_region_pl, rp);
 1074                                 splx(s);
 1075                         }
 1076                 } else {
 1077                         /* Clear all flags. */
 1078                         rp->er_flags = 0;
 1079                         LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
 1080                 }
 1081 
 1082                 if (ex->ex_flags & EXF_FLWANTED) {
 1083  wake_em_up:
 1084                         ex->ex_flags &= ~EXF_FLWANTED;
 1085                         wakeup(&fex->fex_freelist);
 1086                 }
 1087                 return;
 1088         }
 1089 
 1090         /*
 1091          * We know it's dynamically allocated if we get here.
 1092          */
 1093         s = splvm();
 1094         pool_put(&ex_region_pl, rp);
 1095         splx(s);
 1096 }
 1097 
 1098 #ifndef DDB
 1099 #define db_printf printf
 1100 #endif
 1101 
 1102 #if defined(DIAGNOSTIC) || defined(DDB) || !defined(_KERNEL)
 1103 void
 1104 extent_print(struct extent *ex)
 1105 {
 1106         struct extent_region *rp;
 1107 
 1108         if (ex == NULL)
 1109                 panic("extent_print: NULL extent");
 1110 
 1111 #ifdef _KERNEL
 1112         db_printf("extent `%s' (0x%lx - 0x%lx), flags=%b\n", ex->ex_name,
 1113             ex->ex_start, ex->ex_end, ex->ex_flags, EXF_BITS);
 1114 #else
 1115         db_printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
 1116             ex->ex_start, ex->ex_end, ex->ex_flags);
 1117 #endif
 1118 
 1119         LIST_FOREACH(rp, &ex->ex_regions, er_link)
 1120                 db_printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);
 1121 }
 1122 #endif
