root/kern/kern_malloc_debug.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. TAILQ_HEAD
  2. debug_free
  3. debug_malloc_init
  4. debug_malloc_allocate_free
  5. debug_malloc_print
  6. debug_malloc_assert_allocated
  7. debug_malloc_printit

    1 /*      $OpenBSD: kern_malloc_debug.c,v 1.26 2007/04/13 18:57:49 art Exp $      */
    2 
    3 /*
    4  * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
    5  * All rights reserved. 
    6  *
    7  * Redistribution and use in source and binary forms, with or without 
    8  * modification, are permitted provided that the following conditions 
    9  * are met: 
   10  *
   11  * 1. Redistributions of source code must retain the above copyright 
   12  *    notice, this list of conditions and the following disclaimer. 
   13  * 2. The name of the author may not be used to endorse or promote products
   14  *    derived from this software without specific prior written permission. 
   15  *
   16  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
   17  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
   18  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
   19  * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   20  * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   21  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
   22  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   23  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
   24  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   25  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
   26  */
   27 
   28 /*
   29  * This really belongs in kern/kern_malloc.c, but it was too much pollution.
   30  */
   31 
   32 /*
   33  * It's only possible to debug one type/size at a time. The question is
   34  * if this is a limitation or a feature. We never want to run this as the
   35  * default malloc because we'll run out of memory really fast. Adding
   36  * more types will also add to the complexity of the code.
   37  *
   38  * This is really simple. Every malloc() allocates two virtual pages,
   39  * the second page is left unmapped, and the value returned is aligned
   40  * so that it ends at (or very close to) the page boundary to catch overflows.
   41  * Every free() changes the protection of the first page to VM_PROT_NONE so
   42  * that we can catch any dangling writes to it.
   43  * To minimize the risk of writes to recycled chunks we keep an LRU of latest
   44  * freed chunks. The length of it is controlled by MALLOC_DEBUG_CHUNKS.
   45  *
   46  * Don't expect any performance.
   47  *
   48  * TODO:
   49  *  - support for size >= PAGE_SIZE
   50  *  - add support to the fault handler to give better diagnostics if we fail.
   51  */
   52 
   53 #include <sys/param.h>
   54 #include <sys/proc.h>
   55 #include <sys/kernel.h>
   56 #include <sys/malloc.h>
   57 #include <sys/systm.h>
   58 #include <sys/pool.h>
   59 
   60 #include <uvm/uvm.h>
   61 
   62 /*
   63  * debug_malloc_type and debug_malloc_size define the type and size of
   64  * memory to be debugged. Use 0 for a wildcard. debug_malloc_size_lo
   65  * is the lower limit and debug_malloc_size_hi the upper limit of sizes
   66  * being debugged; 0 will not work as a wildcard for the upper limit.
   67  * For any debugging to take place, type must be != -1, size must be >= 0,
   68  * and if the limits are being used, size must be set to 0.
   69  * See /usr/src/sys/sys/malloc.h and malloc(9) for a list of types.
   70  *
   71  * Although those are variables, it's a really bad idea to change the type
   72  * if any memory chunks of this type are used. It's ok to change the size
   73  * in runtime.
   74  */
   75 int debug_malloc_type = -1;
   76 int debug_malloc_size = -1;
   77 int debug_malloc_size_lo = -1;
   78 int debug_malloc_size_hi = -1;
   79 
   80 /*
   81  * MALLOC_DEBUG_CHUNKS is the number of memory chunks we require on the
   82  * freelist before we reuse them.
   83  */
   84 #define MALLOC_DEBUG_CHUNKS 16
   85 
   86 void debug_malloc_allocate_free(int);
   87 
   88 struct debug_malloc_entry {
   89         TAILQ_ENTRY(debug_malloc_entry) md_list;
   90         vaddr_t md_va;
   91         paddr_t md_pa;
   92         size_t md_size;
   93         int md_type;
   94 };
   95 
   96 TAILQ_HEAD(,debug_malloc_entry) debug_malloc_freelist;
   97 TAILQ_HEAD(,debug_malloc_entry) debug_malloc_usedlist;
   98 
   99 int debug_malloc_allocs;
  100 int debug_malloc_frees;
  101 int debug_malloc_pages;
  102 int debug_malloc_chunks_on_freelist;
  103 
  104 int debug_malloc_initialized;
  105 
  106 struct pool debug_malloc_pool;
  107 
  108 int
  109 debug_malloc(unsigned long size, int type, int flags, void **addr)
  110 {
  111         struct debug_malloc_entry *md = NULL;
  112         int s, wait = (flags & M_NOWAIT) == 0;
  113 
  114         /* Careful not to compare unsigned long to int -1 */
  115         if (((type != debug_malloc_type && debug_malloc_type != 0) ||
  116             (size != debug_malloc_size && debug_malloc_size != 0) ||
  117             (debug_malloc_size_lo != -1 && size < debug_malloc_size_lo) ||
  118             (debug_malloc_size_hi != -1 && size > debug_malloc_size_hi) ||
  119             !debug_malloc_initialized) && type != M_DEBUG)
  120                 return (0);
  121 
  122         /* XXX - fix later */
  123         if (size > PAGE_SIZE)
  124                 return (0);
  125 
  126         s = splvm();
  127         if (debug_malloc_chunks_on_freelist < MALLOC_DEBUG_CHUNKS)
  128                 debug_malloc_allocate_free(wait);
  129 
  130         md = TAILQ_FIRST(&debug_malloc_freelist);
  131         if (md == NULL) {
  132                 splx(s);
  133                 return (0);
  134         }
  135         TAILQ_REMOVE(&debug_malloc_freelist, md, md_list);
  136         debug_malloc_chunks_on_freelist--;
  137 
  138         TAILQ_INSERT_HEAD(&debug_malloc_usedlist, md, md_list);
  139         debug_malloc_allocs++;
  140         splx(s);
  141 
  142         pmap_kenter_pa(md->md_va, md->md_pa, VM_PROT_READ|VM_PROT_WRITE);
  143         pmap_update(pmap_kernel());
  144 
  145         md->md_size = size;
  146         md->md_type = type;
  147 
  148         /*
  149          * Align the returned addr so that it ends where the first page
  150          * ends. roundup to get decent alignment.
  151          */
  152         *addr = (void *)(md->md_va + PAGE_SIZE - roundup(size, sizeof(long)));
  153         return (1);
  154 }
  155 
  156 int
  157 debug_free(void *addr, int type)
  158 {
  159         struct debug_malloc_entry *md;
  160         vaddr_t va;
  161         int s;
  162 
  163         if (type != debug_malloc_type && debug_malloc_type != 0 &&
  164             type != M_DEBUG)
  165                 return (0);
  166 
  167         /*
  168          * trunc_page to get the address of the page.
  169          */
  170         va = trunc_page((vaddr_t)addr);
  171 
  172         s = splvm();
  173         TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
  174                 if (md->md_va == va)
  175                         break;
  176 
  177         /*
  178          * If we are not responsible for this entry, let the normal free
  179          * handle it
  180          */
  181         if (md == NULL) {
  182                 /*
  183                  * sanity check. Check for multiple frees.
  184                  */
  185                 TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
  186                         if (md->md_va == va)
  187                                 panic("debug_free: already free");
  188                 splx(s);
  189                 return (0);
  190         }
  191 
  192         debug_malloc_frees++;
  193         TAILQ_REMOVE(&debug_malloc_usedlist, md, md_list);
  194 
  195         TAILQ_INSERT_TAIL(&debug_malloc_freelist, md, md_list);
  196         debug_malloc_chunks_on_freelist++;
  197         /*
  198          * unmap the page.
  199          */
  200         pmap_kremove(md->md_va, PAGE_SIZE);
  201         pmap_update(pmap_kernel());
  202         splx(s);
  203 
  204         return (1);
  205 }
  206 
  207 void
  208 debug_malloc_init(void)
  209 {
  210 
  211         TAILQ_INIT(&debug_malloc_freelist);
  212         TAILQ_INIT(&debug_malloc_usedlist);
  213 
  214         debug_malloc_allocs = 0;
  215         debug_malloc_frees = 0;
  216         debug_malloc_pages = 0;
  217         debug_malloc_chunks_on_freelist = 0;
  218 
  219         pool_init(&debug_malloc_pool, sizeof(struct debug_malloc_entry),
  220             0, 0, 0, "mdbepl", NULL);
  221 
  222         debug_malloc_initialized = 1;
  223 }
  224 
  225 /*
  226  * Add one chunk to the freelist.
  227  *
  228  * called at splvm.
  229  */
  230 void
  231 debug_malloc_allocate_free(int wait)
  232 {
  233         vaddr_t va, offset;
  234         struct vm_page *pg;
  235         struct debug_malloc_entry *md;
  236 
  237         splassert(IPL_VM);
  238 
  239         md = pool_get(&debug_malloc_pool, wait ? PR_WAITOK : PR_NOWAIT);
  240         if (md == NULL)
  241                 return;
  242 
  243         va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE * 2,
  244             UVM_KMF_VALLOC | (wait ? 0: UVM_KMF_NOWAIT));
  245         if (va == 0) {
  246                 pool_put(&debug_malloc_pool, md);
  247                 return;
  248         }
  249 
  250         offset = va - vm_map_min(kernel_map);
  251         for (;;) {
  252                 pg = uvm_pagealloc(NULL, 0, NULL, 0);
  253                 if (pg) {
  254                         atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
  255                         UVM_PAGE_OWN(pg, NULL);
  256                 }
  257 
  258                 if (pg)
  259                         break;
  260 
  261                 if (wait == 0) {
  262                         uvm_unmap(kmem_map, va, va + PAGE_SIZE * 2);
  263                         pool_put(&debug_malloc_pool, md);
  264                         return;
  265                 }
  266                 uvm_wait("debug_malloc");
  267         }
  268 
  269         md->md_va = va;
  270         md->md_pa = VM_PAGE_TO_PHYS(pg);
  271 
  272         debug_malloc_pages++;
  273         TAILQ_INSERT_HEAD(&debug_malloc_freelist, md, md_list);
  274         debug_malloc_chunks_on_freelist++;
  275 }
  276 
  277 void
  278 debug_malloc_print(void)
  279 {
  280 
  281         debug_malloc_printit(printf, NULL);
  282 }
  283 
  284 void
  285 debug_malloc_assert_allocated(void *addr, const char *func)
  286 {
  287         struct debug_malloc_entry *md;
  288         vaddr_t va = (vaddr_t)addr;
  289 
  290         TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
  291                 if (va >= md->md_va &&
  292                     va < md->md_va + 2 * PAGE_SIZE)
  293                         panic("debug_malloc: (%s): %p - freed", func, addr);
  294         }
  295         TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
  296                 if (va >= md->md_va + PAGE_SIZE &&
  297                     va < md->md_va + 2 * PAGE_SIZE)
  298                         panic("debug_malloc: (%s): %p - overflow", func, addr);
  299         }
  300 }
  301 
  302 void
  303 debug_malloc_printit(int (*pr)(const char *, ...), vaddr_t addr)
  304 {
  305         struct debug_malloc_entry *md;
  306 
  307         if (addr) {
  308                 TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
  309                         if (addr >= md->md_va &&
  310                             addr < md->md_va + 2 * PAGE_SIZE) {
  311                                 (*pr)("Memory at address 0x%x is in a freed "
  312                                       "area. type %d, size: %d\n ",
  313                                       addr, md->md_type, md->md_size);
  314                                 return;
  315                         }
  316                 }
  317                 TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
  318                         if (addr >= md->md_va + PAGE_SIZE &&
  319                             addr < md->md_va + 2 * PAGE_SIZE) {
  320                                 (*pr)("Memory at address 0x%x is just outside "
  321                                       "an allocated area. type %d, size: %d\n",
  322                                       addr, md->md_type, md->md_size);
  323                                 return;
  324                         }
  325                 }
  326                 (*pr)("Memory at address 0x%x is outside debugged malloc.\n");
  327                 return;
  328         }
  329 
  330         (*pr)("allocs: %d\n", debug_malloc_allocs);
  331         (*pr)("frees: %d\n", debug_malloc_frees);
  332         (*pr)("pages used: %d\n", debug_malloc_pages);
  333         (*pr)("chunks on freelist: %d\n", debug_malloc_chunks_on_freelist);
  334 
  335         (*pr)("\taddr:\tsize:\n");
  336         (*pr)("free chunks:\n");
  337         TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
  338                 (*pr)("\t0x%x\t0x%x\t%d\n", md->md_va, md->md_size,
  339                       md->md_type);
  340         (*pr)("used chunks:\n");
  341         TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
  342                 (*pr)("\t0x%x\t0x%x\t%d\n", md->md_va, md->md_size,
  343                       md->md_type);
  344 }

/* [<][>][^][v][top][bottom][index][help] */