pgs               865 arch/i386/i386/pmapae.c 					uvm_pagefree(&vm_physmem[i].pgs[pn]);
pgs               314 uvm/uvm_page.c 		vm_physmem[lcv].pgs = pagearray;
pgs               317 uvm/uvm_page.c 		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
pgs               322 uvm/uvm_page.c 			vm_physmem[lcv].pgs[i].phys_addr = paddr;
pgs               324 uvm/uvm_page.c 			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
pgs               330 uvm/uvm_page.c 				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
pgs               624 uvm/uvm_page.c 	struct vm_page *pgs;
pgs               653 uvm/uvm_page.c 		if (vm_physmem[lcv].pgs)
pgs               668 uvm/uvm_page.c 		pgs = (struct vm_page *)uvm_km_alloc(kernel_map,
pgs               670 uvm/uvm_page.c 		if (pgs == NULL) {
pgs               677 uvm/uvm_page.c 		memset(pgs, 0, sizeof(struct vm_page) * npages);
pgs               680 uvm/uvm_page.c 			pgs[lcv].phys_addr = paddr;
pgs               681 uvm/uvm_page.c 			pgs[lcv].free_list = free_list;
pgs               684 uvm/uvm_page.c 				uvm_pagefree(&pgs[lcv]);
pgs               692 uvm/uvm_page.c 		pgs = NULL;
pgs               748 uvm/uvm_page.c 		ps->pgs = NULL;
pgs               750 uvm/uvm_page.c 		ps->pgs = pgs;
pgs               751 uvm/uvm_page.c 		ps->lastpg = pgs + npages - 1;
pgs              1252 uvm/uvm_page.c uvm_page_unbusy(pgs, npgs)
pgs              1253 uvm/uvm_page.c 	struct vm_page **pgs;
pgs              1262 uvm/uvm_page.c 		pg = pgs[i];
pgs               212 uvm/uvm_page.h 	struct	vm_page *pgs;		/* vm_page structures (from start) */
pgs               405 uvm/uvm_page.h 		return(&vm_physmem[psi].pgs[off]);
pgs               793 uvm/uvm_pager.c 	struct vm_page *pg, *pgs[npages];
pgs               813 uvm/uvm_pager.c 		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
pgs               814 uvm/uvm_pager.c 		UVMHIST_LOG(pdhist, "pgs[%ld] = %p", i, pgs[i],0,0);
pgs               821 uvm/uvm_pager.c 	if (pgs[0]->pg_flags & PQ_ENCRYPT) {
pgs               822 uvm/uvm_pager.c 		uvm_swap_freepages(pgs, npages);
pgs               827 uvm/uvm_pager.c 		pg = pgs[i];
pgs               853 uvm/uvm_pager.c 		KASSERT(!write || (pgs[i]->pg_flags & PG_FAKE) == 0);
pgs               861 uvm/uvm_pager.c 		if ((pgs[i]->pg_flags & PG_FAKE) || (write && error != ENOMEM)) {
pgs               862 uvm/uvm_pager.c 			pmap_clear_reference(pgs[i]);
pgs               863 uvm/uvm_pager.c 			pmap_clear_modify(pgs[i]);
pgs               864 uvm/uvm_pager.c 			atomic_setbits_int(&pgs[i]->pg_flags, PG_CLEAN);
pgs               865 uvm/uvm_pager.c 			atomic_clearbits_int(&pgs[i]->pg_flags, PG_FAKE);
pgs               875 uvm/uvm_pager.c 	uvm_page_unbusy(pgs, npages);
pgs               103 uvm/uvm_pglist.c 		pg = &vm_physmem[psi].pgs[idx];
pgs               183 uvm/uvm_pglist.c 	struct vm_page *pgs;
pgs               250 uvm/uvm_pglist.c 		pgs = vm_physmem[psi].pgs;
pgs               257 uvm/uvm_pglist.c 			if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
pgs               260 uvm/uvm_pglist.c 			idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
pgs               262 uvm/uvm_pglist.c 				lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
pgs               296 uvm/uvm_pglist.c 		m = &pgs[idx];
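
The hits above all revolve around the per-segment pgs array: uvm_page_physload() allocates and initializes it (uvm_page.c), struct vm_physseg carries the pgs/lastpg pointers, and PHYS_TO_VM_PAGE() resolves a physical address by indexing vm_physmem[psi].pgs[off] (uvm_page.h). A minimal standalone sketch of that lookup follows; the structures here are simplified stand-ins, not the real UVM definitions.

/*
 * Sketch only: how a vm_physseg-style "pgs" array maps a page frame
 * number to its vm_page, mirroring the vm_physmem[psi].pgs[off]
 * lookup cited above.  Simplified, non-kernel code.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT	12

struct vm_page {			/* stand-in: real struct has many more fields */
	unsigned long phys_addr;
	int free_list;
};

struct vm_physseg {			/* stand-in for the real vm_physseg */
	unsigned long start, end;	/* page frames [start, end) in this segment */
	struct vm_page *pgs;		/* vm_page structures (from start) */
	struct vm_page *lastpg;
};

#define VM_PHYSSEG_MAX	4
static struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
static int vm_nphysseg;

/* Linear segment search, then index into pgs, as PHYS_TO_VM_PAGE() does. */
static struct vm_page *
phys_to_vm_page(unsigned long pa)
{
	unsigned long pf = pa >> PAGE_SHIFT;
	int psi;

	for (psi = 0; psi < vm_nphysseg; psi++)
		if (pf >= vm_physmem[psi].start && pf < vm_physmem[psi].end)
			return &vm_physmem[psi].pgs[pf - vm_physmem[psi].start];
	return NULL;			/* not managed memory */
}

int
main(void)
{
	static struct vm_page pages[16];
	unsigned long pf;

	/* Register one fake segment covering page frames 0x100..0x10f. */
	vm_physmem[0].start = 0x100;
	vm_physmem[0].end = 0x110;
	vm_physmem[0].pgs = pages;
	vm_physmem[0].lastpg = pages + 16 - 1;
	vm_nphysseg = 1;

	for (pf = 0x100; pf < 0x110; pf++)
		pages[pf - 0x100].phys_addr = pf << PAGE_SHIFT;

	printf("%p\n", (void *)phys_to_vm_page(0x105UL << PAGE_SHIFT));
	return 0;
}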