pg 127 arch/i386/i386/gdt.c struct vm_page *pg;
pg 140 arch/i386/i386/gdt.c pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
pg 141 arch/i386/i386/gdt.c if (pg == NULL)
pg 143 arch/i386/i386/gdt.c pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
pg 198 arch/i386/i386/gdt.c struct vm_page *pg;
pg 210 arch/i386/i386/gdt.c (pg =
pg 215 arch/i386/i386/gdt.c pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
pg 159 arch/i386/i386/mpbios.c paddr_t pg;
pg 235 arch/i386/i386/mpbios.c handle->pg = pgpa;
pg 416 arch/i386/i386/pmap.c pmap_sync_flags_pte(struct vm_page *pg, u_long pte)
pg 419 arch/i386/i386/pmap.c atomic_setbits_int(&pg->pg_flags, pmap_pte2flags(pte));
pg 1050 arch/i386/i386/pmap.c struct vm_page *pg;
pg 1093 arch/i386/i386/pmap.c pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
pg 1094 arch/i386/i386/pmap.c if (pg == NULL)
pg 1097 arch/i386/i386/pmap.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
pg 1106 arch/i386/i386/pmap.c pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
pg 1302 arch/i386/i386/pmap.c pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
pg 1308 arch/i386/i386/pmap.c pve->pv_next = pg->mdpage.pv_list; /* add to ... */
pg 1309 arch/i386/i386/pmap.c pg->mdpage.pv_list = pve; /* ... locked list */
pg 1322 arch/i386/i386/pmap.c pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
pg 1326 arch/i386/i386/pmap.c prevptr = &pg->mdpage.pv_list; /* previous pv_entry pointer */
pg 1527 arch/i386/i386/pmap.c struct vm_page *pg;
pg 1547 arch/i386/i386/pmap.c pg = TAILQ_FIRST(&pmap->pm_obj.memq);
pg 1549 arch/i386/i386/pmap.c if (pg->pg_flags & PG_BUSY)
pg 1554 arch/i386/i386/pmap.c pg->wire_count = 0;
pg 1555 arch/i386/i386/pmap.c uvm_pagefree(pg);
pg 1771 arch/i386/i386/pmap.c pmap_zero_page(struct vm_page *pg)
pg 1773 arch/i386/i386/pmap.c pmap_zero_phys(VM_PAGE_TO_PHYS(pg));
pg 1878 arch/i386/i386/pmap.c struct vm_page *pg;
pg 1911 arch/i386/i386/pmap.c pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
pg 1918 arch/i386/i386/pmap.c if (pg != NULL)
pg 1926 arch/i386/i386/pmap.c if (pg == NULL)
pg 1933 arch/i386/i386/pmap.c pmap_sync_flags_pte(pg, opte);
pg 1934 arch/i386/i386/pmap.c pve = pmap_remove_pv(pg, pmap, startva);
pg 1962 arch/i386/i386/pmap.c struct vm_page *pg;
pg 1983 arch/i386/i386/pmap.c pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
pg 1990 arch/i386/i386/pmap.c if (pg != NULL)
pg 1998 arch/i386/i386/pmap.c if (pg == NULL)
pg 2004 arch/i386/i386/pmap.c pmap_sync_flags_pte(pg, opte);
pg 2005 arch/i386/i386/pmap.c pve = pmap_remove_pv(pg, pmap, va);
pg 2240 arch/i386/i386/pmap.c pmap_page_remove(struct vm_page *pg)
pg 2247 arch/i386/i386/pmap.c if (pg->mdpage.pv_list == NULL)
pg 2254 arch/i386/i386/pmap.c for (pve = pg->mdpage.pv_list ; pve != NULL ; pve = pve->pv_next) {
pg 2264 arch/i386/i386/pmap.c pg, pve->pv_va, pve->pv_ptp);
pg 2281 arch/i386/i386/pmap.c pmap_sync_flags_pte(pg, opte);
pg 2317 arch/i386/i386/pmap.c pmap_free_pvs(NULL, pg->mdpage.pv_list);
pg 2318 arch/i386/i386/pmap.c pg->mdpage.pv_list = NULL;
pg 2340 arch/i386/i386/pmap.c pmap_test_attrs(struct vm_page *pg, int testbits)
pg 2348 arch/i386/i386/pmap.c if (pg->pg_flags & testflags)
pg 2353 arch/i386/i386/pmap.c for (pve = pg->mdpage.pv_list; pve != NULL && mybits == 0;
pg 2365 arch/i386/i386/pmap.c atomic_setbits_int(&pg->pg_flags, pmap_pte2flags(mybits));
pg 2377 arch/i386/i386/pmap.c pmap_clear_attrs(struct vm_page *pg, int clearbits)
pg 2388 arch/i386/i386/pmap.c result = pg->pg_flags & clearflags;
pg 2390 arch/i386/i386/pmap.c atomic_clearbits_int(&pg->pg_flags, clearflags);
pg 2392 arch/i386/i386/pmap.c for (pve = pg->mdpage.pv_list; pve != NULL; pve = pve->pv_next) {
pg 2600 arch/i386/i386/pmap.c struct vm_page *pg = NULL;
pg 2665 arch/i386/i386/pmap.c pg = PHYS_TO_VM_PAGE(pa);
pg 2667 arch/i386/i386/pmap.c if (pg == NULL)
pg 2673 arch/i386/i386/pmap.c pmap_sync_flags_pte(pg, opte);
pg 2688 arch/i386/i386/pmap.c pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
pg 2690 arch/i386/i386/pmap.c if (pg == NULL)
pg 2695 arch/i386/i386/pmap.c pmap_sync_flags_pte(pg, opte);
pg 2696 arch/i386/i386/pmap.c pve = pmap_remove_pv(pg, pmap, va);
pg 2697 arch/i386/i386/pmap.c pg = NULL; /* This is not page we are looking for */
pg 2715 arch/i386/i386/pmap.c if (pmap_initialized && pg == NULL)
pg 2716 arch/i386/i386/pmap.c pg = PHYS_TO_VM_PAGE(pa);
pg 2718 arch/i386/i386/pmap.c if (pg != NULL) {
pg 2733 arch/i386/i386/pmap.c pmap_enter_pv(pg, pve, pmap, va, ptp);
pg 2760 arch/i386/i386/pmap.c if (pg) {
pg 2762 arch/i386/i386/pmap.c pmap_sync_flags_pte(pg, npte);
pg 1570 arch/i386/i386/pmapae.c pmap_page_remove_pae(struct vm_page *pg)
pg 1581 arch/i386/i386/pmapae.c bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off);
pg 1610 arch/i386/i386/pmapae.c pg, pve->pv_va, pve->pv_ptp);
pg 1700 arch/i386/i386/pmapae.c pmap_test_attrs_pae(struct vm_page *pg, int testbits)
pg 1709 arch/i386/i386/pmapae.c bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off);
pg 1761 arch/i386/i386/pmapae.c pmap_change_attrs_pae(struct vm_page *pg, int setbits, int clearbits)
pg 1772 arch/i386/i386/pmapae.c bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off);
pg 353 arch/i386/include/pmap.h #define pmap_clear_modify(pg) pmap_clear_attrs(pg, PG_M)
pg 354 arch/i386/include/pmap.h #define pmap_clear_reference(pg) pmap_clear_attrs(pg, PG_U)
pg 356 arch/i386/include/pmap.h #define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M)
pg 357 arch/i386/include/pmap.h #define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U)
pg 399 arch/i386/include/pmap.h #define PMAP_PAGEIDLEZERO(pg) pmap_zero_page_uncached(VM_PAGE_TO_PHYS(pg))
pg 428 arch/i386/include/pmap.h pmap_page_protect(pg, prot)
pg 429 arch/i386/include/pmap.h struct vm_page *pg;
pg 434 arch/i386/include/pmap.h (void) pmap_clear_attrs(pg, PG_RW);
pg 436 arch/i386/include/pmap.h pmap_page_remove(pg);
pg 123 arch/i386/include/vmparam.h #define VM_MDPAGE_INIT(pg) do { \
pg 124 arch/i386/include/vmparam.h (pg)->mdpage.pv_list = NULL; \
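The arch/i386/include/pmap.h entries above show that the machine-independent page-attribute queries are thin macros over pmap_test_attrs() and pmap_clear_attrs(), keyed on the i386 PTE bits PG_M (modified) and PG_U (referenced). A minimal illustrative sketch of a caller, under a hypothetical helper name; the PG_CLEAN handling mirrors the uvm_page_i.h entries later in this index:

void
example_sync_mod(struct vm_page *pg)	/* hypothetical helper */
{
	/* pmap_is_modified(pg) expands to pmap_test_attrs(pg, PG_M) */
	if (pmap_is_modified(pg))
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	/* pmap_clear_reference(pg) expands to pmap_clear_attrs(pg, PG_U) */
	(void) pmap_clear_reference(pg);
}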
pg 249 dev/ic/lemac.c int pg, conf;
pg 263 dev/ic/lemac.c for (pg = 1; pg <= sc->sc_lastpage; pg++)
pg 264 dev/ic/lemac.c LEMAC_OUTB(sc, LEMAC_REG_FMQ, pg);
pg 1046 dev/isa/aha.c struct vm_page *pg;
pg 1132 dev/isa/aha.c for (pg = TAILQ_FIRST(&pglist); pg != NULL;
pg 1133 dev/isa/aha.c pg = TAILQ_NEXT(pg, pageq)) {
pg 1134 dev/isa/aha.c pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
pg 581 dev/isa/if_ie.c u_short pg, adjust, decode, edecode;
pg 694 dev/isa/if_ie.c pg = (kvtop(sc->sc_maddr) & 0x3C000) >> 14;
pg 695 dev/isa/if_ie.c adjust = IEE16_MCTRL_FMCS16 | (pg & 0x3) << 2;
pg 696 dev/isa/if_ie.c decode = ((1 << (sc->sc_msize / 16384)) - 1) << pg;
pg 293 kern/kern_ktrace.c struct pgrp *pg;
pg 350 kern/kern_ktrace.c pg = pgfind(-SCARG(uap, pid));
pg 351 kern/kern_ktrace.c if (pg == NULL) {
pg 355 kern/kern_ktrace.c LIST_FOREACH(p, &pg->pg_members, p_pglist)
pg 234 kern/kern_malloc_debug.c struct vm_page *pg;
pg 252 kern/kern_malloc_debug.c pg = uvm_pagealloc(NULL, 0, NULL, 0);
pg 253 kern/kern_malloc_debug.c if (pg) {
pg 254 kern/kern_malloc_debug.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
pg 255 kern/kern_malloc_debug.c UVM_PAGE_OWN(pg, NULL);
pg 258 kern/kern_malloc_debug.c if (pg)
pg 270 kern/kern_malloc_debug.c md->md_pa = VM_PAGE_TO_PHYS(pg);
pg 348 kern/kern_proc.c orphanpg(struct pgrp *pg)
pg 352 kern/kern_proc.c LIST_FOREACH(p, &pg->pg_members, p_pglist) {
pg 354 kern/kern_proc.c LIST_FOREACH(p, &pg->pg_members, p_pglist) {
pg 87 kern/kern_resource.c struct pgrp *pg;
pg 90 kern/kern_resource.c pg = curp->p_pgrp;
pg 91 kern/kern_resource.c else if ((pg = pgfind(SCARG(uap, who))) == NULL)
pg 93 kern/kern_resource.c LIST_FOREACH(p, &pg->pg_members, p_pglist) {
pg 144 kern/kern_resource.c struct pgrp *pg;
pg 147 kern/kern_resource.c pg = curp->p_pgrp;
pg 148 kern/kern_resource.c else if ((pg = pgfind(SCARG(uap, who))) == NULL)
pg 150 kern/kern_resource.c LIST_FOREACH(p, &pg->pg_members, p_pglist) {
pg 282 scsi/scsi_disk.h #define DISK_PGCODE(pg, n) ((pg) != NULL) && (((pg)->pg_code & 0x3f) == n)
pg 755 uvm/uvm_amap.c struct vm_page *pg, *npg;
pg 772 uvm/uvm_amap.c pg = anon->an_page;
pg 778 uvm/uvm_amap.c if (pg == NULL)
pg 792 uvm/uvm_amap.c if (anon->an_ref > 1 && pg->loan_count == 0) {
pg 798 uvm/uvm_amap.c if (pg->pg_flags & PG_BUSY) {
pg 799 uvm/uvm_amap.c atomic_setbits_int(&pg->pg_flags, PG_WANTED);
pg 800 uvm/uvm_amap.c UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, FALSE,
pg 833 uvm/uvm_amap.c uvm_pagecopy(pg, npg); /* old -> new */
pg 94 uvm/uvm_anon.c struct vm_page *pg;
pg 102 uvm/uvm_anon.c pg = anon->an_page;
pg 110 uvm/uvm_anon.c if (pg && pg->loan_count)
pg 111 uvm/uvm_anon.c pg = uvm_anon_lockloanpg(anon);
pg 118 uvm/uvm_anon.c if (pg) {
pg 125 uvm/uvm_anon.c if (pg->uobject) {
pg 127 uvm/uvm_anon.c KASSERT(pg->loan_count > 0);
pg 128 uvm/uvm_anon.c pg->loan_count--;
pg 129 uvm/uvm_anon.c pg->uanon = NULL;
pg 131 uvm/uvm_anon.c simple_unlock(&pg->uobject->vmobjlock);
pg 143 uvm/uvm_anon.c if ((pg->pg_flags & PG_BUSY) != 0) {
pg 145 uvm/uvm_anon.c atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
pg 148 uvm/uvm_anon.c anon, pg, 0, 0);
pg 151 uvm/uvm_anon.c pmap_page_protect(pg, VM_PROT_NONE);
pg 153 uvm/uvm_anon.c uvm_pagefree(pg); /* bye bye */
pg 156 uvm/uvm_anon.c anon, pg, 0, 0);
pg 224 uvm/uvm_anon.c struct vm_page *pg;
pg 236 uvm/uvm_anon.c while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {
pg 247 uvm/uvm_anon.c if (pg->uobject) {
pg 250 uvm/uvm_anon.c if (pg->uobject) { /* the "real" check */
pg 252 uvm/uvm_anon.c simple_lock_try(&pg->uobject->vmobjlock);
pg 282 uvm/uvm_anon.c if (pg->uobject == NULL && (pg->pg_flags & PQ_ANON) == 0) {
pg 284 uvm/uvm_anon.c atomic_setbits_int(&pg->pg_flags, PQ_ANON);
pg 285 uvm/uvm_anon.c pg->loan_count--; /* ... and drop our loan */
pg 295 uvm/uvm_anon.c return(pg);
pg 309 uvm/uvm_anon.c struct vm_page *pg;
pg 348 uvm/uvm_anon.c pg = anon->an_page;
pg 349 uvm/uvm_anon.c uobj = pg->uobject;
pg 352 uvm/uvm_anon.c atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
pg 358 uvm/uvm_anon.c pmap_clear_reference(pg);
pg 359 uvm/uvm_anon.c pmap_page_protect(pg, VM_PROT_NONE);
pg 361 uvm/uvm_anon.c uvm_pagedeactivate(pg);
pg 661 uvm/uvm_aobj.c struct vm_page *pg, *next;
pg 693 uvm/uvm_aobj.c for (pg = TAILQ_FIRST(&uobj->memq); pg != NULL; pg = next) {
pg 694 uvm/uvm_aobj.c next = TAILQ_NEXT(pg, listq);
pg 695 uvm/uvm_aobj.c if (pg->pg_flags & PG_BUSY) {
pg 696 uvm/uvm_aobj.c atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
pg 702 uvm/uvm_aobj.c pmap_page_protect(pg, VM_PROT_NONE);
pg 703 uvm/uvm_aobj.c uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
pg 705 uvm/uvm_aobj.c uvm_pagefree(pg);
pg 1260 uvm/uvm_aobj.c uao_releasepg(pg, nextpgp)
pg 1261 uvm/uvm_aobj.c struct vm_page *pg;
pg 1264 uvm/uvm_aobj.c struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
pg 1266 uvm/uvm_aobj.c KASSERT(pg->pg_flags & PG_RELEASED);
pg 1271 uvm/uvm_aobj.c pmap_page_protect(pg, VM_PROT_NONE);
pg 1272 uvm/uvm_aobj.c uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
pg 1275 uvm/uvm_aobj.c *nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
pg 1276 uvm/uvm_aobj.c uvm_pagefree(pg);
pg 1484 uvm/uvm_aobj.c struct vm_page *pg;
pg 1487 uvm/uvm_aobj.c pg = NULL;
pg 1491 uvm/uvm_aobj.c &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
pg 1513 uvm/uvm_aobj.c KASSERT((pg->pg_flags & PG_RELEASED) == 0);
pg 1521 uvm/uvm_aobj.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
pg 1522 uvm/uvm_aobj.c UVM_PAGE_OWN(pg, NULL);
pg 1527 uvm/uvm_aobj.c pmap_clear_reference(pg);
pg 1529 uvm/uvm_aobj.c pmap_page_protect(pg, VM_PROT_NONE);
pg 1532 uvm/uvm_aobj.c uvm_pagedeactivate(pg);
pg 197 uvm/uvm_fault.c struct vm_page *pg;
pg 203 uvm/uvm_fault.c pg = anons[lcv]->an_page;
pg 204 uvm/uvm_fault.c if (pg && (pg->pg_flags & PG_BUSY) == 0 && pg->loan_count == 0) {
pg 206 uvm/uvm_fault.c if (pg->wire_count == 0) {
pg 208 uvm/uvm_fault.c pmap_clear_reference(pg);
pg 210 uvm/uvm_fault.c pmap_page_protect(pg, VM_PROT_NONE);
pg 212 uvm/uvm_fault.c uvm_pagedeactivate(pg);
pg 302 uvm/uvm_fault.c struct vm_page *pg;
pg 321 uvm/uvm_fault.c pg = anon->an_page;
pg 329 uvm/uvm_fault.c if (pg && pg->loan_count)
pg 330 uvm/uvm_fault.c pg = uvm_anon_lockloanpg(anon);
pg 336 uvm/uvm_fault.c if (pg) {
pg 345 uvm/uvm_fault.c if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) {
pg 349 uvm/uvm_fault.c atomic_setbits_int(&pg->pg_flags, PG_WANTED);
pg 356 uvm/uvm_fault.c if (pg->uobject) { /* owner is uobject ? */
pg 360 uvm/uvm_fault.c UVM_UNLOCK_AND_WAIT(pg,
pg 361 uvm/uvm_fault.c &pg->uobject->vmobjlock,
pg 368 uvm/uvm_fault.c UVM_UNLOCK_AND_WAIT(pg,&anon->an_lock,0,
pg 378 uvm/uvm_fault.c pg = uvm_pagealloc(NULL, 0, anon, 0);
pg 380 uvm/uvm_fault.c if (pg == NULL) { /* out of RAM. */
pg 403 uvm/uvm_fault.c result = uvm_swap_get(pg, anon->an_swslot,
pg 435 uvm/uvm_fault.c if (pg->pg_flags & PG_WANTED) {
pg 437 uvm/uvm_fault.c wakeup(pg);
pg 440 uvm/uvm_fault.c atomic_clearbits_int(&pg->pg_flags,
pg 442 uvm/uvm_fault.c UVM_PAGE_OWN(pg, NULL);
pg 449 uvm/uvm_fault.c if (pg->pg_flags & PG_RELEASED) {
pg 450 uvm/uvm_fault.c pmap_page_protect(pg, VM_PROT_NONE);
pg 482 uvm/uvm_fault.c uvm_pagefree(pg);
pg 498 uvm/uvm_fault.c pmap_clear_modify(pg);
pg 500 uvm/uvm_fault.c uvm_pageactivate(pg);
pg 575 uvm/uvm_fault.c struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;
pg 582 uvm/uvm_fault.c pg = NULL;
pg 1109 uvm/uvm_fault.c pg = uvm_pagealloc(NULL, 0, NULL, 0);
pg 1110 uvm/uvm_fault.c if (pg == NULL) {
pg 1122 uvm/uvm_fault.c uvm_pagecopy(anon->an_page, pg);
pg 1135 uvm/uvm_fault.c uvm_pageactivate(pg);
pg 1143 uvm/uvm_fault.c anon->an_page = pg;
pg 1144 uvm/uvm_fault.c pg->uanon = anon;
pg 1145 uvm/uvm_fault.c atomic_setbits_int(&pg->pg_flags, PQ_ANON);
pg 1146 uvm/uvm_fault.c atomic_clearbits_int(&pg->pg_flags,
pg 1148 uvm/uvm_fault.c UVM_PAGE_OWN(pg, NULL);
pg 1175 uvm/uvm_fault.c pg = uvm_pagealloc(NULL, 0, anon, 0);
pg 1179 uvm/uvm_fault.c if (anon == NULL || pg == NULL) {
pg 1198 uvm/uvm_fault.c uvm_pagecopy(oanon->an_page, pg); /* pg now !PG_CLEAN */
pg 1200 uvm/uvm_fault.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
pg 1201 uvm/uvm_fault.c UVM_PAGE_OWN(pg, NULL);
pg 1218 uvm/uvm_fault.c pg = anon->an_page;
pg 1234 uvm/uvm_fault.c ufi.orig_map->pmap, ufi.orig_rvaddr, pg, 0);
pg 1235 uvm/uvm_fault.c if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
pg 1265 uvm/uvm_fault.c uvm_pagewire(pg);
pg 1273 uvm/uvm_fault.c atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
pg 1277 uvm/uvm_fault.c uvm_pageactivate(pg);
pg 1468 uvm/uvm_fault.c pg = uobjpage; /* map in the actual object */
pg 1486 uvm/uvm_fault.c pg = uvm_pagealloc(NULL, 0, NULL, 0);
pg 1488 uvm/uvm_fault.c if (pg == NULL) {
pg 1523 uvm/uvm_fault.c uvm_pagecopy(uobjpage, pg); /* old -> new */
pg 1524 uvm/uvm_fault.c atomic_clearbits_int(&pg->pg_flags,
pg 1544 uvm/uvm_fault.c uvm_pagerealloc(pg, uobj, uoff);
pg 1552 uvm/uvm_fault.c uobjpage = pg;
pg 1576 uvm/uvm_fault.c pg = uvm_pagealloc(NULL, 0, anon,
pg 1583 uvm/uvm_fault.c if (anon == NULL || pg == NULL) {
pg 1626 uvm/uvm_fault.c uvm_pagecopy(uobjpage, pg);
pg 1656 uvm/uvm_fault.c uobjpage, anon, pg, 0);
pg 1665 uvm/uvm_fault.c anon, pg, 0, 0);
pg 1686 uvm/uvm_fault.c ufi.orig_map->pmap, ufi.orig_rvaddr, pg, promote);
pg 1687 uvm/uvm_fault.c if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
pg 1699 uvm/uvm_fault.c if (pg->pg_flags & PG_WANTED)
pg 1700 uvm/uvm_fault.c wakeup(pg); /* lock still held */
pg 1707 uvm/uvm_fault.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
pg 1708 uvm/uvm_fault.c UVM_PAGE_OWN(pg, NULL);
pg 1725 uvm/uvm_fault.c uvm_pagewire(pg);
pg 1726 uvm/uvm_fault.c if (pg->pg_flags & PQ_AOBJ) {
pg 1734 uvm/uvm_fault.c atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
pg 1735 uvm/uvm_fault.c uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
pg 1739 uvm/uvm_fault.c uvm_pageactivate(pg);
pg 1743 uvm/uvm_fault.c if (pg->pg_flags & PG_WANTED)
pg 1744 uvm/uvm_fault.c wakeup(pg); /* lock still held */
pg 1751 uvm/uvm_fault.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
pg 1752 uvm/uvm_fault.c UVM_PAGE_OWN(pg, NULL);
pg 1831 uvm/uvm_fault.c struct vm_page *pg;
pg 1871 uvm/uvm_fault.c pg = PHYS_TO_VM_PAGE(pa);
pg 1872 uvm/uvm_fault.c if (pg)
pg 1873 uvm/uvm_fault.c uvm_pageunwire(pg);
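The uvm_fault.c entries above repeat one allocate-copy-unbusy sequence whenever the fault handler promotes a page for copy-on-write. A condensed, illustrative sketch of that sequence under a hypothetical name, eliding the locking, retry, and error paths the real handler performs:

struct vm_page *
example_promote(struct vm_anon *anon, struct vm_anon *oanon)
{
	struct vm_page *pg;

	pg = uvm_pagealloc(NULL, 0, anon, 0);	/* fresh page owned by anon */
	if (pg == NULL)
		return (NULL);			/* out of RAM; caller must recover */
	uvm_pagecopy(oanon->an_page, pg);	/* old -> new; pg now !PG_CLEAN */
	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);			/* drop the busy owner tag */
	return (pg);
}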
pg 311 uvm/uvm_km.c struct vm_page *pg;
pg 318 uvm/uvm_km.c pg = PHYS_TO_VM_PAGE(pa);
pg 319 uvm/uvm_km.c if (pg == NULL)
pg 321 uvm/uvm_km.c uvm_pagefree(pg);
pg 344 uvm/uvm_km.c struct vm_page *pg;
pg 396 uvm/uvm_km.c pg = uvm_pagealloc(obj, offset, NULL, 0);
pg 397 uvm/uvm_km.c if (pg) {
pg 398 uvm/uvm_km.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
pg 399 uvm/uvm_km.c UVM_PAGE_OWN(pg, NULL);
pg 402 uvm/uvm_km.c if (__predict_false(pg == NULL)) {
pg 421 uvm/uvm_km.c pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
pg 424 uvm/uvm_km.c pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
pg 480 uvm/uvm_km.c struct vm_page *pg;
pg 514 uvm/uvm_km.c pg = uvm_pagelookup(uvm.kernel_object, offset);
pg 520 uvm/uvm_km.c if (pg) {
pg 521 uvm/uvm_km.c if ((pg->pg_flags & PG_RELEASED) == 0)
pg 523 uvm/uvm_km.c atomic_setbits_int(&pg->pg_flags, PG_WANTED);
pg 524 uvm/uvm_km.c UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
pg 530 uvm/uvm_km.c pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
pg 531 uvm/uvm_km.c if (pg) {
pg 532 uvm/uvm_km.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
pg 533 uvm/uvm_km.c UVM_PAGE_OWN(pg, NULL);
pg 536 uvm/uvm_km.c if (__predict_false(pg == NULL)) {
pg 555 uvm/uvm_km.c pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
pg 679 uvm/uvm_km.c struct vm_page *pg;
pg 683 uvm/uvm_km.c pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
pg 684 uvm/uvm_km.c if (__predict_false(pg == NULL)) {
pg 691 uvm/uvm_km.c va = pmap_map_direct(pg);
pg 693 uvm/uvm_km.c uvm_pagefree(pg);
pg 325 uvm/uvm_loan.c struct vm_page *pg;
pg 335 uvm/uvm_loan.c pg = anon->an_page;
pg 336 uvm/uvm_loan.c if (pg && (pg->pg_flags & PQ_ANON) != 0 && anon->an_ref == 1)
pg 338 uvm/uvm_loan.c pmap_page_protect(pg, VM_PROT_READ);
pg 380 uvm/uvm_loan.c pg = anon->an_page;
pg 382 uvm/uvm_loan.c if (pg->loan_count == 0)
pg 383 uvm/uvm_loan.c pmap_page_protect(pg, VM_PROT_READ);
pg 384 uvm/uvm_loan.c pg->loan_count++;
pg 385 uvm/uvm_loan.c uvm_pagewire(pg); /* always wire it */
pg 387 uvm/uvm_loan.c **output = pg;
pg 391 uvm/uvm_loan.c if (pg->uobject)
pg 392 uvm/uvm_loan.c simple_unlock(&pg->uobject->vmobjlock);
pg 416 uvm/uvm_loan.c struct vm_page *pg;
pg 429 uvm/uvm_loan.c pg = NULL;
pg 431 uvm/uvm_loan.c &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
pg 456 uvm/uvm_loan.c &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, 0);
pg 484 uvm/uvm_loan.c if ((pg->pg_flags & PG_RELEASED) != 0 ||
pg 499 uvm/uvm_loan.c if (pg->pg_flags & PG_WANTED)
pg 501 uvm/uvm_loan.c wakeup(pg);
pg 503 uvm/uvm_loan.c if (pg->pg_flags & PG_RELEASED) {
pg 509 uvm/uvm_loan.c if (uobj->pgops->pgo_releasepg(pg, NULL))
pg 515 uvm/uvm_loan.c uvm_pageactivate(pg); /* make sure it is in queues */
pg 517 uvm/uvm_loan.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_WANTED);
pg 518 uvm/uvm_loan.c UVM_PAGE_OWN(pg, NULL);
pg 532 uvm/uvm_loan.c if (pg->loan_count == 0)
pg 533 uvm/uvm_loan.c pmap_page_protect(pg, VM_PROT_READ);
pg 534 uvm/uvm_loan.c pg->loan_count++;
pg 535 uvm/uvm_loan.c uvm_pagewire(pg);
pg 537 uvm/uvm_loan.c **output = pg;
pg 539 uvm/uvm_loan.c if (pg->pg_flags & PG_WANTED)
pg 540 uvm/uvm_loan.c wakeup(pg);
pg 541 uvm/uvm_loan.c atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
pg 542 uvm/uvm_loan.c UVM_PAGE_OWN(pg, NULL);
pg 553 uvm/uvm_loan.c if (pg->uanon) {
pg 554 uvm/uvm_loan.c anon = pg->uanon;
pg 561 uvm/uvm_loan.c uvm_pageactivate(pg); /* reactivate */
pg 563 uvm/uvm_loan.c if (pg->pg_flags & PG_WANTED)
pg 564 uvm/uvm_loan.c wakeup(pg);
pg 565 uvm/uvm_loan.c atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
pg 566 uvm/uvm_loan.c UVM_PAGE_OWN(pg, NULL);
pg 576 uvm/uvm_loan.c if (pg->pg_flags & PG_WANTED)
pg 577 uvm/uvm_loan.c wakeup(pg);
pg 578 uvm/uvm_loan.c atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
pg 579 uvm/uvm_loan.c UVM_PAGE_OWN(pg, NULL);
pg 583 uvm/uvm_loan.c anon->an_page = pg;
pg 584 uvm/uvm_loan.c pg->uanon = anon;
pg 586 uvm/uvm_loan.c if (pg->loan_count == 0)
pg 587 uvm/uvm_loan.c pmap_page_protect(pg, VM_PROT_READ);
pg 588 uvm/uvm_loan.c pg->loan_count++;
pg 589 uvm/uvm_loan.c uvm_pageactivate(pg);
pg 593 uvm/uvm_loan.c if (pg->pg_flags & PG_WANTED)
pg 594 uvm/uvm_loan.c wakeup(pg);
pg 595 uvm/uvm_loan.c atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
pg 596 uvm/uvm_loan.c UVM_PAGE_OWN(pg, NULL);
pg 617 uvm/uvm_loan.c struct vm_page *pg;
pg 621 uvm/uvm_loan.c while ((pg = uvm_pagealloc(NULL, 0, NULL,
pg 635 uvm/uvm_loan.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
pg 636 uvm/uvm_loan.c UVM_PAGE_OWN(pg, NULL);
pg 637 uvm/uvm_loan.c **output = pg;
pg 641 uvm/uvm_loan.c uvm_pagewire(pg);
pg 642 uvm/uvm_loan.c pg->loan_count = 1;
pg 649 uvm/uvm_loan.c (pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO)) == NULL) {
pg 673 uvm/uvm_loan.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
pg 674 uvm/uvm_loan.c UVM_PAGE_OWN(pg, NULL);
pg 676 uvm/uvm_loan.c uvm_pageactivate(pg);
pg 722 uvm/uvm_loan.c struct vm_page *pg;
pg 727 uvm/uvm_loan.c pg = *ploans++;
pg 729 uvm/uvm_loan.c if (pg->loan_count < 1)
pg 730 uvm/uvm_loan.c panic("uvm_unloanpage: page %p isn't loaned", pg);
pg 732 uvm/uvm_loan.c pg->loan_count--; /* drop loan */
pg 733 uvm/uvm_loan.c uvm_pageunwire(pg); /* and wire */
pg 739 uvm/uvm_loan.c if (pg->loan_count == 0 && pg->uobject == NULL &&
pg 740 uvm/uvm_loan.c pg->uanon == NULL) {
pg 742 uvm/uvm_loan.c if (pg->pg_flags & PG_BUSY)
pg 743 uvm/uvm_loan.c panic("uvm_unloanpage: page %p unowned but PG_BUSY!", pg);
pg 746 uvm/uvm_loan.c pmap_page_protect(pg, VM_PROT_NONE);
pg 747 uvm/uvm_loan.c uvm_pagefree(pg); /* pageq locked above */
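The uvm_loan.c entries ending here show the bookkeeping for dropping a loan: decrement loan_count, unwire, and free the page only once nothing owns it. An illustrative condensation under a hypothetical name:

void
example_unloan(struct vm_page *pg)	/* hypothetical helper */
{
	if (pg->loan_count < 1)
		panic("example_unloan: page %p isn't loaned", pg);
	pg->loan_count--;		/* drop our loan */
	uvm_pageunwire(pg);		/* loaned pages are kept wired */
	/* free only if no object, no anon, and no loans remain */
	if (pg->loan_count == 0 && pg->uobject == NULL && pg->uanon == NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		uvm_pagefree(pg);
	}
}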
pg 2865 uvm/uvm_map.c struct vm_page *pg;
pg 2930 uvm/uvm_map.c pg = anon->an_page;
pg 2931 uvm/uvm_map.c if (pg == NULL) {
pg 2950 uvm/uvm_map.c if (pg->loan_count != 0 ||
pg 2951 uvm/uvm_map.c pg->wire_count != 0) {
pg 2964 uvm/uvm_map.c if ((pg->pg_flags & PQ_ANON) == 0) {
pg 2965 uvm/uvm_map.c KASSERT(pg->uobject == NULL);
pg 2970 uvm/uvm_map.c KASSERT(pg->uanon == anon);
pg 2974 uvm/uvm_map.c pmap_clear_reference(pg);
pg 2977 uvm/uvm_map.c pmap_page_protect(pg, VM_PROT_NONE);
pg 2981 uvm/uvm_map.c uvm_pagedeactivate(pg);
pg 2998 uvm/uvm_map.c if (pg->wire_count != 0) {
pg 3660 uvm/uvm_map.c struct vm_page *pg;
pg 3674 uvm/uvm_map.c for (pg = TAILQ_FIRST(&uobj->memq);
pg 3675 uvm/uvm_map.c pg != NULL;
pg 3676 uvm/uvm_map.c pg = TAILQ_NEXT(pg, listq), cnt++) {
pg 3677 uvm/uvm_map.c (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
pg 3698 uvm/uvm_map.c uvm_page_printit(pg, full, pr)
pg 3699 uvm/uvm_map.c struct vm_page *pg;
pg 3709 uvm/uvm_map.c (*pr)("PAGE %p:\n", pg);
pg 3710 uvm/uvm_map.c snprintf(pgbuf, sizeof(pgbuf), "%b", pg->pg_flags, page_flagbits);
pg 3711 uvm/uvm_map.c snprintf(pqbuf, sizeof(pqbuf), "%b", pg->pg_flags, page_pqflagbits);
pg 3713 uvm/uvm_map.c pgbuf, pqbuf, pg->pg_version, pg->wire_count,
pg 3714 uvm/uvm_map.c (long long)pg->phys_addr);
pg 3716 uvm/uvm_map.c pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
pg 3718 uvm/uvm_map.c if (pg->pg_flags & PG_BUSY)
pg 3720 uvm/uvm_map.c pg->owner, pg->owner_tag);
pg 3731 uvm/uvm_map.c if ((pg->pg_flags & PQ_FREE) == 0) {
pg 3732 uvm/uvm_map.c if (pg->pg_flags & PQ_ANON) {
pg 3733 uvm/uvm_map.c if (pg->uanon == NULL || pg->uanon->an_page != pg)
pg 3735 uvm/uvm_map.c (pg->uanon) ? pg->uanon->an_page : NULL);
pg 3739 uvm/uvm_map.c uobj = pg->uobject;
pg 3743 uvm/uvm_map.c if (tpg == pg) {
pg 3756 uvm/uvm_map.c if (pg->pg_flags & PQ_FREE) {
pg 3757 uvm/uvm_map.c int fl = uvm_page_lookup_freelist(pg);
pg 3758 uvm/uvm_map.c pgl = &uvm.page_free[fl].pgfl_queues[((pg)->pg_flags & PG_ZERO) ?
pg 3760 uvm/uvm_map.c } else if (pg->pg_flags & PQ_INACTIVE) {
pg 3761 uvm/uvm_map.c pgl = (pg->pg_flags & PQ_SWAPBACKED) ?
pg 3763 uvm/uvm_map.c } else if (pg->pg_flags & PQ_ACTIVE) {
pg 3772 uvm/uvm_map.c if (tpg == pg) {
pg 154 uvm/uvm_page.c uvm_pageinsert(struct vm_page *pg)
pg 160 uvm/uvm_page.c KASSERT((pg->pg_flags & PG_TABLED) == 0);
pg 161 uvm/uvm_page.c buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
pg 164 uvm/uvm_page.c TAILQ_INSERT_TAIL(buck, pg, hashq); /* put in hash */
pg 168 uvm/uvm_page.c TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
pg 169 uvm/uvm_page.c atomic_setbits_int(&pg->pg_flags, PG_TABLED);
pg 170 uvm/uvm_page.c pg->uobject->uo_npages++;
pg 181 uvm/uvm_page.c uvm_pageremove(struct vm_page *pg)
pg 187 uvm/uvm_page.c KASSERT(pg->pg_flags & PG_TABLED);
pg 188 uvm/uvm_page.c buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
pg 191 uvm/uvm_page.c TAILQ_REMOVE(buck, pg, hashq);
pg 196 uvm/uvm_page.c if (pg->uobject->pgops == &uvm_vnodeops) {
pg 202 uvm/uvm_page.c TAILQ_REMOVE(&pg->uobject->memq, pg, listq);
pg 204 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
pg 205 uvm/uvm_page.c pg->uobject->uo_npages--;
pg 206 uvm/uvm_page.c pg->uobject = NULL;
pg 207 uvm/uvm_page.c pg->pg_version++;
pg 775 uvm/uvm_page.c struct vm_page *pg;
pg 829 uvm/uvm_page.c while ((pg = TAILQ_FIRST(&oldbuckets[lcv])) != NULL) {
pg 830 uvm/uvm_page.c TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
pg 832 uvm/uvm_page.c &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
pg 833 uvm/uvm_page.c pg, hashq);
pg 909 uvm/uvm_page.c struct vm_page *pg;
pg 978 uvm/uvm_page.c if ((pg = TAILQ_FIRST((freeq =
pg 980 uvm/uvm_page.c (pg = TAILQ_FIRST((freeq =
pg 993 uvm/uvm_page.c if ((pg = TAILQ_FIRST((freeq =
pg 995 uvm/uvm_page.c (pg = TAILQ_FIRST((freeq =
pg 1014 uvm/uvm_page.c TAILQ_REMOVE(freeq, pg, pageq);
pg 1018 uvm/uvm_page.c if (pg->pg_flags & PG_ZERO)
pg 1026 uvm/uvm_page.c if (pg->pg_flags & PG_ZERO) {
pg 1037 uvm/uvm_page.c pg->offset = off;
pg 1038 uvm/uvm_page.c pg->uobject = obj;
pg 1039 uvm/uvm_page.c pg->uanon = anon;
pg 1040 uvm/uvm_page.c pg->pg_flags = PG_BUSY|PG_CLEAN|PG_FAKE;
pg 1041 uvm/uvm_page.c pg->pg_version++;
pg 1043 uvm/uvm_page.c anon->an_page = pg;
pg 1044 uvm/uvm_page.c atomic_setbits_int(&pg->pg_flags, PQ_ANON);
pg 1050 uvm/uvm_page.c uvm_pageinsert(pg);
pg 1053 uvm/uvm_page.c pg->owner_tag = NULL;
pg 1055 uvm/uvm_page.c UVM_PAGE_OWN(pg, "new alloc");
pg 1062 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
pg 1064 uvm/uvm_page.c pmap_zero_page(pg);
pg 1067 uvm/uvm_page.c UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
pg 1068 uvm/uvm_page.c (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
pg 1069 uvm/uvm_page.c return(pg);
pg 1084 uvm/uvm_page.c uvm_pagerealloc(pg, newobj, newoff)
pg 1085 uvm/uvm_page.c struct vm_page *pg;
pg 1096 uvm/uvm_page.c if (pg->uobject) {
pg 1097 uvm/uvm_page.c uvm_pageremove(pg);
pg 1105 uvm/uvm_page.c pg->uobject = newobj;
pg 1106 uvm/uvm_page.c pg->offset = newoff;
pg 1107 uvm/uvm_page.c pg->pg_version++;
pg 1108 uvm/uvm_page.c uvm_pageinsert(pg);
pg 1124 uvm/uvm_page.c uvm_pagefree(struct vm_page *pg)
pg 1127 uvm/uvm_page.c int saved_loan_count = pg->loan_count;
pg 1131 uvm/uvm_page.c if (pg->uobject == (void *)0xdeadbeef &&
pg 1132 uvm/uvm_page.c pg->uanon == (void *)0xdeadbeef) {
pg 1133 uvm/uvm_page.c panic("uvm_pagefree: freeing free page %p", pg);
pg 1137 uvm/uvm_page.c UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
pg 1138 uvm/uvm_page.c (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
pg 1145 uvm/uvm_page.c if (pg->pg_flags & PG_TABLED) {
pg 1157 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
pg 1158 uvm/uvm_page.c uvm_pageremove(pg);
pg 1171 uvm/uvm_page.c } else if (saved_loan_count && pg->uanon) {
pg 1179 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
pg 1180 uvm/uvm_page.c pg->uanon->an_page = NULL;
pg 1181 uvm/uvm_page.c pg->uanon = NULL;
pg 1190 uvm/uvm_page.c if (pg->pg_flags & PQ_ACTIVE) {
pg 1191 uvm/uvm_page.c TAILQ_REMOVE(&uvm.page_active, pg, pageq);
pg 1192 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
pg 1195 uvm/uvm_page.c if (pg->pg_flags & PQ_INACTIVE) {
pg 1196 uvm/uvm_page.c if (pg->pg_flags & PQ_SWAPBACKED)
pg 1197 uvm/uvm_page.c TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
pg 1199 uvm/uvm_page.c TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
pg 1200 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
pg 1208 uvm/uvm_page.c if (pg->wire_count) {
pg 1209 uvm/uvm_page.c pg->wire_count = 0;
pg 1212 uvm/uvm_page.c if (pg->uanon) {
pg 1213 uvm/uvm_page.c pg->uanon->an_page = NULL;
pg 1223 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PG_ZERO);
pg 1227 uvm/uvm_page.c uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
pg 1228 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PQ_MASK);
pg 1229 uvm/uvm_page.c atomic_setbits_int(&pg->pg_flags, PQ_FREE);
pg 1231 uvm/uvm_page.c pg->uobject = (void *)0xdeadbeef;
pg 1232 uvm/uvm_page.c pg->offset = 0xdeadbeef;
pg 1233 uvm/uvm_page.c pg->uanon = (void *)0xdeadbeef;
pg 1256 uvm/uvm_page.c struct vm_page *pg;
pg 1262 uvm/uvm_page.c pg = pgs[i];
pg 1264 uvm/uvm_page.c if (pg == NULL || pg == PGO_DONTCARE) {
pg 1267 uvm/uvm_page.c if (pg->pg_flags & PG_WANTED) {
pg 1268 uvm/uvm_page.c wakeup(pg);
pg 1270 uvm/uvm_page.c if (pg->pg_flags & PG_RELEASED) {
pg 1271 uvm/uvm_page.c UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0);
pg 1272 uvm/uvm_page.c uobj = pg->uobject;
pg 1274 uvm/uvm_page.c uobj->pgops->pgo_releasepg(pg, NULL);
pg 1276 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
pg 1277 uvm/uvm_page.c UVM_PAGE_OWN(pg, NULL);
pg 1278 uvm/uvm_page.c uvm_anfree(pg->uanon);
pg 1281 uvm/uvm_page.c UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0);
pg 1282 uvm/uvm_page.c atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
pg 1283 uvm/uvm_page.c UVM_PAGE_OWN(pg, NULL);
pg 1299 uvm/uvm_page.c uvm_page_own(pg, tag)
pg 1300 uvm/uvm_page.c struct vm_page *pg;
pg 1305 uvm/uvm_page.c if (pg->owner_tag) {
pg 1307 uvm/uvm_page.c "by proc %d [%s]\n", pg,
pg 1308 uvm/uvm_page.c pg->owner, pg->owner_tag);
pg 1311 uvm/uvm_page.c pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
pg 1312 uvm/uvm_page.c pg->owner_tag = tag;
pg 1317 uvm/uvm_page.c if (pg->owner_tag == NULL) {
pg 1319 uvm/uvm_page.c "page (%p)\n", pg);
pg 1322 uvm/uvm_page.c pg->owner_tag = NULL;
pg 1337 uvm/uvm_page.c struct vm_page *pg;
pg 1353 uvm/uvm_page.c if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
pg 1358 uvm/uvm_page.c if (pg == NULL) {
pg 1368 uvm/uvm_page.c TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
pg 1373 uvm/uvm_page.c if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
pg 1382 uvm/uvm_page.c pg, pageq);
pg 1393 uvm/uvm_page.c pmap_zero_page(pg);
pg 1395 uvm/uvm_page.c atomic_setbits_int(&pg->pg_flags, PG_ZERO);
pg 1398 uvm/uvm_page.c TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
pg 126 uvm/uvm_page_i.h struct vm_page *pg;
pg 134 uvm/uvm_page_i.h TAILQ_FOREACH(pg, buck, hashq) {
pg 135 uvm/uvm_page_i.h if (pg->uobject == obj && pg->offset == off) {
pg 141 uvm/uvm_page_i.h return(pg);
pg 151 uvm/uvm_page_i.h uvm_pagewire(struct vm_page *pg)
pg 153 uvm/uvm_page_i.h if (pg->wire_count == 0) {
pg 154 uvm/uvm_page_i.h if (pg->pg_flags & PQ_ACTIVE) {
pg 155 uvm/uvm_page_i.h TAILQ_REMOVE(&uvm.page_active, pg, pageq);
pg 156 uvm/uvm_page_i.h atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
pg 159 uvm/uvm_page_i.h if (pg->pg_flags & PQ_INACTIVE) {
pg 160 uvm/uvm_page_i.h if (pg->pg_flags & PQ_SWAPBACKED)
pg 161 uvm/uvm_page_i.h TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
pg 163 uvm/uvm_page_i.h TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
pg 164 uvm/uvm_page_i.h atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
pg 169 uvm/uvm_page_i.h pg->wire_count++;
pg 180 uvm/uvm_page_i.h uvm_pageunwire(struct vm_page *pg)
pg 182 uvm/uvm_page_i.h pg->wire_count--;
pg 183 uvm/uvm_page_i.h if (pg->wire_count == 0) {
pg 184 uvm/uvm_page_i.h TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
pg 186 uvm/uvm_page_i.h atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
pg 200 uvm/uvm_page_i.h uvm_pagedeactivate(struct vm_page *pg)
pg 202 uvm/uvm_page_i.h if (pg->pg_flags & PQ_ACTIVE) {
pg 203 uvm/uvm_page_i.h TAILQ_REMOVE(&uvm.page_active, pg, pageq);
pg 204 uvm/uvm_page_i.h atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
pg 207 uvm/uvm_page_i.h if ((pg->pg_flags & PQ_INACTIVE) == 0) {
pg 208 uvm/uvm_page_i.h KASSERT(pg->wire_count == 0);
pg 209 uvm/uvm_page_i.h if (pg->pg_flags & PQ_SWAPBACKED)
pg 210 uvm/uvm_page_i.h TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
pg 212 uvm/uvm_page_i.h TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
pg 213 uvm/uvm_page_i.h atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
pg 215 uvm/uvm_page_i.h pmap_clear_reference(pg);
pg 222 uvm/uvm_page_i.h if ((pg->pg_flags & PG_CLEAN) != 0 &&
pg 223 uvm/uvm_page_i.h pmap_is_modified(pg))
pg 224 uvm/uvm_page_i.h atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
pg 235 uvm/uvm_page_i.h uvm_pageactivate(struct vm_page *pg)
pg 237 uvm/uvm_page_i.h if (pg->pg_flags & PQ_INACTIVE) {
pg 238 uvm/uvm_page_i.h if (pg->pg_flags & PQ_SWAPBACKED)
pg 239 uvm/uvm_page_i.h TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
pg 241 uvm/uvm_page_i.h TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
pg 242 uvm/uvm_page_i.h atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
pg 245 uvm/uvm_page_i.h if (pg->wire_count == 0) {
pg 252 uvm/uvm_page_i.h if (pg->pg_flags & PQ_ACTIVE)
pg 253 uvm/uvm_page_i.h TAILQ_REMOVE(&uvm.page_active, pg, pageq);
pg 255 uvm/uvm_page_i.h atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
pg 259 uvm/uvm_page_i.h TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
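The uvm_page_i.h entries above pair uvm_pagewire() with uvm_pageunwire(): wiring pulls a page off the active/inactive paging queues while wire_count is nonzero, and the final unwire puts it back on the active queue. A tiny illustrative use, assuming a hypothetical helper and that the caller holds the appropriate locks:

void
example_pin_for_io(struct vm_page *pg)	/* hypothetical helper */
{
	uvm_pagewire(pg);	/* off the paging queues; wire_count++ */
	/* ... access that must not be paged out ... */
	uvm_pageunwire(pg);	/* back on the active queue at wire_count 0 */
}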
pg 271 uvm/uvm_page_i.h uvm_pagezero(struct vm_page *pg)
pg 273 uvm/uvm_page_i.h atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
pg 274 uvm/uvm_page_i.h pmap_zero_page(pg);
pg 296 uvm/uvm_page_i.h uvm_page_lookup_freelist(struct vm_page *pg)
pg 300 uvm/uvm_page_i.h lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
pg 417 uvm/uvm_pager.c uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
pg 419 uvm/uvm_pager.c struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
pg 444 uvm/uvm_pager.c npages, pg, flags, start, stop);
pg 447 uvm/uvm_pager.c ppsp[0] = pg;
pg 510 uvm/uvm_pager.c if (*npages > 1 || pg == NULL)
pg 511 uvm/uvm_pager.c uvm_pager_dropcluster(uobj, pg, ppsp, npages,
pg 525 uvm/uvm_pager.c if (*npages > 1 || pg == NULL) {
pg 529 uvm/uvm_pager.c uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);
pg 538 uvm/uvm_pager.c if (uobj == NULL && pg != NULL) {
pg 541 uvm/uvm_pager.c if (pg->pg_flags & PQ_ANON) {
pg 542 uvm/uvm_pager.c simple_lock(&pg->uanon->an_lock);
pg 543 uvm/uvm_pager.c pg->uanon->an_swslot = nswblk;
pg 544 uvm/uvm_pager.c simple_unlock(&pg->uanon->an_lock);
pg 546 uvm/uvm_pager.c simple_lock(&pg->uobject->vmobjlock);
pg 547 uvm/uvm_pager.c uao_set_swslot(pg->uobject,
pg 548 uvm/uvm_pager.c pg->offset >> PAGE_SHIFT,
pg 550 uvm/uvm_pager.c simple_unlock(&pg->uobject->vmobjlock);
pg 561 uvm/uvm_pager.c if (pg) {
pg 569 uvm/uvm_pager.c if (pg) {
pg 570 uvm/uvm_pager.c ppsp[0] = pg;
pg 618 uvm/uvm_pager.c uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
pg 620 uvm/uvm_pager.c struct vm_page *pg, **ppsp; /* IN, IN/OUT */
pg 635 uvm/uvm_pager.c if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
pg 793 uvm/uvm_pager.c struct vm_page *pg, *pgs[npages];
pg 827 uvm/uvm_pager.c pg = pgs[i];
pg 830 uvm/uvm_pager.c swap = (pg->pg_flags & PQ_SWAPBACKED) != 0;
pg 832 uvm/uvm_pager.c uobj = pg->uobject;
pg 836 uvm/uvm_pager.c KASSERT(swap || pg->uobject == uobj);
pg 838 uvm/uvm_pager.c if (pg->pg_flags & PQ_ANON) {
pg 839 uvm/uvm_pager.c simple_lock(&pg->uanon->an_lock);
pg 841 uvm/uvm_pager.c simple_lock(&pg->uobject->vmobjlock);
pg 850 uvm/uvm_pager.c atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
pg 868 uvm/uvm_pager.c if (pg->pg_flags & PQ_ANON) {
pg 869 uvm/uvm_pager.c simple_unlock(&pg->uanon->an_lock);
pg 871 uvm/uvm_pager.c simple_unlock(&pg->uobject->vmobjlock);
pg 60 uvm/uvm_pager_i.h struct vm_page *pg;
pg 66 uvm/uvm_pager_i.h pg = PHYS_TO_VM_PAGE(pa);
pg 67 uvm/uvm_pager_i.h KASSERT(pg != NULL);
pg 68 uvm/uvm_pager_i.h return (pg);
pg 74 uvm/uvm_pglist.c struct vm_page *pg;
pg 103 uvm/uvm_pglist.c pg = &vm_physmem[psi].pgs[idx];
pg 104 uvm/uvm_pglist.c if (VM_PAGE_IS_FREE(pg) == 0)
pg 107 uvm/uvm_pglist.c free_list = uvm_page_lookup_freelist(pg);
pg 108 uvm/uvm_pglist.c pgflidx = (pg->pg_flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
pg 113 uvm/uvm_pglist.c if (tp == pg)
pg 119 uvm/uvm_pglist.c TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_queues[pgflidx], pg, pageq);
pg 121 uvm/uvm_pglist.c if (pg->pg_flags & PG_ZERO)
pg 123 uvm/uvm_pglist.c pg->pg_flags = PG_CLEAN;
pg 124 uvm/uvm_pglist.c pg->uobject = NULL;
pg 125 uvm/uvm_pglist.c pg->uanon = NULL;
pg 126 uvm/uvm_pglist.c pg->pg_version++;
pg 127 uvm/uvm_pglist.c TAILQ_INSERT_TAIL(rlist, pg, pageq);
pg 663 uvm/uvm_vnode.c uvn_releasepg(pg, nextpgp)
pg 664 uvm/uvm_vnode.c struct vm_page *pg;
pg 667 uvm/uvm_vnode.c struct uvm_vnode *uvn = (struct uvm_vnode *) pg->uobject;
pg 669 uvm/uvm_vnode.c if ((pg->pg_flags & PG_RELEASED) == 0)
pg 676 uvm/uvm_vnode.c pmap_page_protect(pg, VM_PROT_NONE);
pg 679 uvm/uvm_vnode.c *nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
pg 680 uvm/uvm_vnode.c uvm_pagefree(pg);
pg 1116 xfs/xfs_vnodeops-bsd.c struct vm_page *pg;
pg 1119 xfs/xfs_vnodeops-bsd.c pg = TAILQ_FIRST(&uobj->memq);
pg 1121 xfs/xfs_vnodeops-bsd.c while (pg && !dirty) {
pg 1122 xfs/xfs_vnodeops-bsd.c dirty = pmap_is_modified(pg) || (pg->flags & PG_CLEAN) == 0;
pg 1123 xfs/xfs_vnodeops-bsd.c pg = TAILQ_NEXT(pg, listq);
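Read as a whole, the index traces one recurring vm_page lifecycle: allocate with uvm_pagealloc(), clear PG_BUSY and drop the owner tag, enter a mapping for VM_PAGE_TO_PHYS(pg), and eventually uvm_pagefree(). A composite, illustrative sketch under a hypothetical name; the real callers (e.g. the gdt.c and uvm_km.c entries above) add waiting and error unwinding:

int
example_map_one_page(vaddr_t va)	/* hypothetical helper */
{
	struct vm_page *pg;

	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);	/* zeroed page */
	if (pg == NULL)
		return (ENOMEM);	/* caller decides whether to wait and retry */
	atomic_clearbits_int(&pg->pg_flags, PG_BUSY);		/* unbusy: we own it */
	UVM_PAGE_OWN(pg, NULL);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),			/* unmanaged mapping */
	    VM_PROT_READ | VM_PROT_WRITE);
	return (0);
}

Teardown reverses the sequence, ending in uvm_pagefree(pg) as the gdt.c, uvm_km.c, and uvm_vnode.c entries show.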