root/uvm/uvm_page_i.h

DEFINITIONS

This source file includes the following definitions.
  1. uvm_lock_fpageq
  2. uvm_unlock_fpageq
  3. uvm_pagelookup
  4. uvm_pagewire
  5. uvm_pageunwire
  6. uvm_pagedeactivate
  7. uvm_pageactivate
  8. uvm_pagezero
  9. uvm_pagecopy
  10. uvm_page_lookup_freelist

/*	$OpenBSD: uvm_page_i.h,v 1.17 2007/04/13 18:57:49 art Exp $	*/
/*	$NetBSD: uvm_page_i.h,v 1.14 2000/11/27 07:47:42 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page_i.h,v 1.1.2.7 1998/01/05 00:26:02 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_PAGE_I_H_
#define _UVM_UVM_PAGE_I_H_

/*
 * uvm_page_i.h
 */

/*
 * inline functions [maybe]
 */

#if defined(UVM_PAGE_INLINE) || defined(UVM_PAGE)

/*
 * uvm_lock_fpageq: lock the free page queue
 *
 * => free page queue can be accessed in interrupt context, so this
 *      blocks all interrupts that can cause memory allocation, and
 *      returns the previous interrupt level.
 */

PAGE_INLINE int
uvm_lock_fpageq(void)
{
        int s;

        s = splvm();
        simple_lock(&uvm.fpageqlock);
        return (s);
}

/*
 * uvm_unlock_fpageq: unlock the free page queue
 *
 * => caller must supply interrupt level returned by uvm_lock_fpageq()
 *      so that it may be restored.
 */

PAGE_INLINE void
uvm_unlock_fpageq(int s)
{

        simple_unlock(&uvm.fpageqlock);
        splx(s);
}
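
/*
 * Usage sketch (illustrative, not part of the original source): callers
 * bracket any access to the free page queues with this pair, threading
 * the saved interrupt level through so that uvm_unlock_fpageq() can
 * restore what uvm_lock_fpageq() blocked:
 *
 *      int s;
 *
 *      s = uvm_lock_fpageq();
 *      ... inspect or modify the free page queues here ...
 *      uvm_unlock_fpageq(s);
 */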

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *      out from under it
 */

struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
        struct vm_page *pg;
        struct pglist *buck;
        int s;

        buck = &uvm.page_hash[uvm_pagehash(obj, off)];

        s = splvm();
        simple_lock(&uvm.hashlock);
        TAILQ_FOREACH(pg, buck, hashq) {
                if (pg->uobject == obj && pg->offset == off) {
                        break;
                }
        }
        simple_unlock(&uvm.hashlock);
        splx(s);
        return (pg);
}
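
/*
 * Usage sketch (illustrative, not part of the original source; the
 * vmobjlock field name is assumed from the UVM of this era): with the
 * object locked, callers typically probe for an existing page before
 * allocating a fresh one, since a NULL return means no page is hashed
 * at that offset:
 *
 *      struct vm_page *pg;
 *
 *      simple_lock(&uobj->vmobjlock);
 *      pg = uvm_pagelookup(uobj, off);
 *      if (pg == NULL)
 *              pg = uvm_pagealloc(uobj, off, NULL, 0);
 *      simple_unlock(&uobj->vmobjlock);
 */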

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */

PAGE_INLINE void
uvm_pagewire(struct vm_page *pg)
{
        if (pg->wire_count == 0) {
                if (pg->pg_flags & PQ_ACTIVE) {
                        TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                        atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
                        uvmexp.active--;
                }
                if (pg->pg_flags & PQ_INACTIVE) {
                        if (pg->pg_flags & PQ_SWAPBACKED)
                                TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
                        else
                                TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
                        atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
                        uvmexp.inactive--;
                }
                uvmexp.wired++;
        }
        pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */

PAGE_INLINE void
uvm_pageunwire(struct vm_page *pg)
{
        pg->wire_count--;
        if (pg->wire_count == 0) {
                TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
                uvmexp.active++;
                atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
                uvmexp.wired--;
        }
}
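
/*
 * Usage sketch (illustrative, not part of the original source): wiring
 * is a counted operation, so each uvm_pagewire() must eventually be
 * balanced by a uvm_pageunwire(), both under the page queue lock:
 *
 *      uvm_lock_pageq();
 *      uvm_pagewire(pg);       ... pg is now immune to pageout ...
 *      uvm_unlock_pageq();
 *      ...
 *      uvm_lock_pageq();
 *      uvm_pageunwire(pg);     ... reactivated once the count hits 0 ...
 *      uvm_unlock_pageq();
 */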

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */

PAGE_INLINE void
uvm_pagedeactivate(struct vm_page *pg)
{
        if (pg->pg_flags & PQ_ACTIVE) {
                TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
                uvmexp.active--;
        }
        if ((pg->pg_flags & PQ_INACTIVE) == 0) {
                KASSERT(pg->wire_count == 0);
                if (pg->pg_flags & PQ_SWAPBACKED)
                        TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
                else
                        TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
                atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
                uvmexp.inactive++;
                pmap_clear_reference(pg);
                /*
                 * update the "clean" bit.  this isn't 100%
                 * accurate, and doesn't have to be.  we'll
                 * re-sync it after we zap all mappings when
                 * scanning the inactive list.
                 */
                if ((pg->pg_flags & PG_CLEAN) != 0 &&
                    pmap_is_modified(pg))
                        atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
        }
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */

PAGE_INLINE void
uvm_pageactivate(struct vm_page *pg)
{
        if (pg->pg_flags & PQ_INACTIVE) {
                if (pg->pg_flags & PQ_SWAPBACKED)
                        TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
                else
                        TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
                atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
                uvmexp.inactive--;
        }
        if (pg->wire_count == 0) {

                /*
                 * if page is already active, remove it from list so we
                 * can put it at tail.  if it wasn't active, then mark
                 * it active and bump active count
                 */
                if (pg->pg_flags & PQ_ACTIVE)
                        TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                else {
                        atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
                        uvmexp.active++;
                }

                TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
        }
}
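
/*
 * Usage sketch (illustrative, not part of the original source): the
 * pagedaemon moves pages between queues with this pair.  A page that
 * was referenced while inactive is pulled back to the tail of the
 * active list; an unreferenced, unwired page is pushed to an inactive
 * list as a pageout candidate:
 *
 *      uvm_lock_pageq();
 *      if (pmap_is_referenced(pg))
 *              uvm_pageactivate(pg);
 *      else if (pg->wire_count == 0)
 *              uvm_pagedeactivate(pg);
 *      uvm_unlock_pageq();
 */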

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *      to protect pg->flags.
 */

PAGE_INLINE void
uvm_pagezero(struct vm_page *pg)
{
        atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
        pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *      to protect pg->flags.
 */

PAGE_INLINE void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
        atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
        pmap_copy_page(src, dst);
}
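
/*
 * Note (illustrative, not part of the original source): both routines
 * clear PG_CLEAN *before* modifying the page, so the destination
 * already reads as dirty if its flags are examined mid-operation.
 * A typical copy-on-write style use:
 *
 *      uvm_pagecopy(oldpg, newpg);
 *      ... newpg is now a dirty copy of oldpg ...
 */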

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */

PAGE_INLINE int
uvm_page_lookup_freelist(struct vm_page *pg)
{
        int lcv;

        lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
        KASSERT(lcv != -1);
        return (vm_physmem[lcv].free_list);
}
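
/*
 * Sketch of the lookup (illustrative, not part of the original
 * source): the page's physical address is converted to a frame number
 * with atop(), vm_physseg_find() maps that frame to its physical
 * memory segment, and the segment records which free list its pages
 * belong to:
 *
 *      int fl = uvm_page_lookup_freelist(pg);
 *      ... pages from pg's segment sit on uvm.page_free[fl] ...
 */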

#endif /* defined(UVM_PAGE_INLINE) || defined(UVM_PAGE) */

#endif /* _UVM_UVM_PAGE_I_H_ */