/*	$OpenBSD: uvm_extern.h,v 1.66 2007/05/27 20:59:26 miod Exp $	*/
/*	$NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp
 */

/*-
 * Copyright (c) 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_extern.h	8.5 (Berkeley) 5/3/95
 */

#ifndef _UVM_UVM_EXTERN_H_
#define _UVM_UVM_EXTERN_H_

/*
 * uvm_extern.h: this file defines the external interface to the VM system.
 *
 * this should be the only file included by non-VM parts of the kernel
 * which need access to VM services. if you want to know the interface
 * to the MI VM layer without knowing the details, this is the file to
 * learn.
 *
 * NOTE: vm system calls are prototyped in syscallargs.h
 */

/*
 * typedefs, necessary for standard UVM headers.
 */

typedef unsigned int uvm_flag_t;
typedef int vm_fault_t;

typedef int vm_inherit_t;	/* XXX: inheritance codes */
typedef off_t voff_t;		/* XXX: offset within a uvm_object */

union vm_map_object;
typedef union vm_map_object vm_map_object_t;

struct vm_map_entry;
typedef struct vm_map_entry *vm_map_entry_t;

struct vm_map;
typedef struct vm_map *vm_map_t;

struct vm_page;
typedef struct vm_page *vm_page_t;

/*
 * defines
 */

/*
 * the following defines are for uvm_map and functions which call it.
 */

/* protection bits */
#define UVM_PROT_MASK	0x07	/* protection mask */
#define UVM_PROT_NONE	0x00	/* protection none */
#define UVM_PROT_ALL	0x07	/* everything */
#define UVM_PROT_READ	0x01	/* read */
#define UVM_PROT_WRITE	0x02	/* write */
#define UVM_PROT_EXEC	0x04	/* exec */

/* protection short codes */
#define UVM_PROT_R	0x01	/* read */
#define UVM_PROT_W	0x02	/* write */
#define UVM_PROT_RW	0x03	/* read-write */
#define UVM_PROT_X	0x04	/* exec */
#define UVM_PROT_RX	0x05	/* read-exec */
#define UVM_PROT_WX	0x06	/* write-exec */
#define UVM_PROT_RWX	0x07	/* read-write-exec */

/* 0x08: not used */

/* inherit codes */
#define UVM_INH_MASK	0x30	/* inherit mask */
#define UVM_INH_SHARE	0x00	/* "share" */
#define UVM_INH_COPY	0x10	/* "copy" */
#define UVM_INH_NONE	0x20	/* "none" */
#define UVM_INH_DONATE	0x30	/* "donate" << not used */

/* 0x40, 0x80: not used */

/* bits 0x700: max protection, 0x800: not used */

/* bits 0x7000: advice, 0x8000: not used */

typedef int vm_prot_t;

/*
 * Protection values, defined as bits within the vm_prot_t type
 *
 * These are funky definitions from old CMU VM and are kept
 * for compatibility reasons; one day they are going to die,
 * just like everybody else.
 */

#define VM_PROT_NONE	((vm_prot_t) 0x00)

#define VM_PROT_READ	((vm_prot_t) 0x01)	/* read permission */
#define VM_PROT_WRITE	((vm_prot_t) 0x02)	/* write permission */
#define VM_PROT_EXECUTE	((vm_prot_t) 0x04)	/* execute permission */

/*
 * The default protection for newly-created virtual memory
 */

#define VM_PROT_DEFAULT	(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)

/*
 * The maximum privileges possible, for parameter checking.
 */

#define VM_PROT_ALL	(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)

/* advice: matches MADV_* from sys/mman.h */
#define UVM_ADV_NORMAL		0x0	/* 'normal' */
#define UVM_ADV_RANDOM		0x1	/* 'random' */
#define UVM_ADV_SEQUENTIAL	0x2	/* 'sequential' */
/* 0x3: will need, 0x4: dontneed */
#define UVM_ADV_MASK		0x7	/* mask */

/* mapping flags */
#define UVM_FLAG_FIXED	 0x010000 /* map at a fixed address (don't find space) */
#define UVM_FLAG_OVERLAY 0x020000 /* establish overlay */
#define UVM_FLAG_NOMERGE 0x040000 /* don't merge map entries */
#define UVM_FLAG_COPYONW 0x080000 /* set copy_on_write flag */
#define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce malloc() */
#define UVM_FLAG_TRYLOCK 0x200000 /* fail if we cannot lock map */

/* macros to extract info */
#define UVM_PROTECTION(X)	((X) & UVM_PROT_MASK)
#define UVM_INHERIT(X)		(((X) & UVM_INH_MASK) >> 4)
#define UVM_MAXPROTECTION(X)	(((X) >> 8) & UVM_PROT_MASK)
#define UVM_ADVICE(X)		(((X) >> 12) & UVM_ADV_MASK)

#define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \
	(((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS))
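
/*
 * Example (illustrative only, not part of the original interface): a
 * uvm_map() flag word packs protection, maximum protection, inheritance,
 * advice and mapping flags into one uvm_flag_t with UVM_MAPFLAG(), and the
 * fields are recovered with the extraction macros above:
 *
 *	uvm_flag_t mapflags;
 *
 *	mapflags = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX, UVM_INH_COPY,
 *	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW);
 *
 *	UVM_PROTECTION(mapflags);	yields UVM_PROT_RW
 *	UVM_MAXPROTECTION(mapflags);	yields UVM_PROT_RWX
 *	UVM_INHERIT(mapflags);		yields 1 (UVM_INH_COPY >> 4)
 *	UVM_ADVICE(mapflags);		yields UVM_ADV_NORMAL
 */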

/* magic offset value */
#define UVM_UNKNOWN_OFFSET	((voff_t) -1)
				/* offset not known(obj) or don't care(!obj) */

/*
 * the following defines are for uvm_km_kmemalloc's flags
 */
#define UVM_KMF_NOWAIT	0x1	/* matches M_NOWAIT */
#define UVM_KMF_VALLOC	0x2	/* allocate VA only */
#define UVM_KMF_CANFAIL	0x4	/* caller handles failure */
#define UVM_KMF_TRYLOCK	UVM_FLAG_TRYLOCK	/* try locking only */

/*
 * the following defines the strategies for uvm_pagealloc_strat()
 */
#define UVM_PGA_STRAT_NORMAL	0	/* high -> low free list walk */
#define UVM_PGA_STRAT_ONLY	1	/* only specified free list */
#define UVM_PGA_STRAT_FALLBACK	2	/* ONLY falls back on NORMAL */

/*
 * flags for uvm_pagealloc_strat()
 */
#define UVM_PGA_USERESERVE	0x0001	/* ok to use reserve pages */
#define UVM_PGA_ZERO		0x0002	/* returned page must be zeroed */

/*
 * lockflags that control the locking behavior of various functions.
 */
#define UVM_LK_ENTER	0x00000001	/* map locked on entry */
#define UVM_LK_EXIT	0x00000002	/* leave map locked on exit */

/*
 * structures
 */

struct buf;
struct core;
struct mount;
struct pglist;
struct proc;
struct ucred;
struct uio;
struct uvm_object;
struct vm_anon;
struct vm_aref;
struct vm_map;
struct vmspace;
struct pmap;
struct vnode;
struct pool;
struct simplelock;

extern struct pool *uvm_aiobuf_pool;

/*
 * uvmexp: global data structure exported to parts of the kernel
 * other than the vm system.
 */

struct uvmexp {
	/* vm_page constants */
	int pagesize;	/* size of a page (PAGE_SIZE): must be power of 2 */
	int pagemask;	/* page mask */
	int pageshift;	/* page shift */

	/* vm_page counters */
	int npages;	/* number of pages we manage */
	int free;	/* number of free pages */
	int active;	/* number of active pages */
	int inactive;	/* number of pages that we free'd but may want back */
	int paging;	/* number of pages in the process of being paged out */
	int wired;	/* number of wired pages */

	int zeropages;		/* number of zero'd pages */
	int reserve_pagedaemon;	/* number of pages reserved for pagedaemon */
	int reserve_kernel;	/* number of pages reserved for kernel */
	int anonpages;		/* number of pages used by anon pagers */
	int vnodepages;		/* number of pages used by vnode page cache */
	int vtextpages;		/* number of pages used by vtext vnodes */

	/* pageout params */
	int freemin;	/* min number of free pages */
	int freetarg;	/* target number of free pages */
	int inactarg;	/* target number of inactive pages */
	int wiredmax;	/* max number of wired pages */
	int anonmin;	/* min threshold for anon pages */
	int vtextmin;	/* min threshold for vtext pages */
	int vnodemin;	/* min threshold for vnode pages */
	int anonminpct;	/* min percent anon pages */
	int vtextminpct;/* min percent vtext pages */
	int vnodeminpct;/* min percent vnode pages */

	/* swap */
	int nswapdev;	/* number of configured swap devices in system */
	int swpages;	/* number of PAGE_SIZE'ed swap pages */
	int swpginuse;	/* number of swap pages in use */
	int swpgonly;	/* number of swap pages in use, not also in RAM */
	int nswget;	/* number of times fault calls uvm_swap_get() */
	int nanon;	/* total number of anons in system */
	int nanonneeded;/* number of anons currently needed */
	int nfreeanon;	/* number of free anons */

	/* stat counters */
	int faults;		/* page fault count */
	int traps;		/* trap count */
	int intrs;		/* interrupt count */
	int swtch;		/* context switch count */
	int softs;		/* software interrupt count */
	int syscalls;		/* system calls */
	int pageins;		/* pagein operation count */
				/* pageouts are in pdpageouts below */
	int swapins;		/* swapins */
	int swapouts;		/* swapouts */
	int pgswapin;		/* pages swapped in */
	int pgswapout;		/* pages swapped out */
	int forks;		/* forks */
	int forks_ppwait;	/* forks where parent waits */
	int forks_sharevm;	/* forks where vmspace is shared */
	int pga_zerohit;	/* pagealloc where zero wanted and zero
				   was available */
	int pga_zeromiss;	/* pagealloc where zero wanted and zero
				   not available */
	int zeroaborts;		/* number of times page zeroing was
				   aborted */

	/* fault subcounters */
	int fltnoram;	/* number of times fault was out of ram */
	int fltnoanon;	/* number of times fault was out of anons */
	int fltpgwait;	/* number of times fault had to wait on a page */
	int fltpgrele;	/* number of times fault found a released page */
	int fltrelck;	/* number of times fault relock called */
	int fltrelckok;	/* number of times fault relock is a success */
	int fltanget;	/* number of times fault gets anon page */
	int fltanretry;	/* number of times fault retries an anon get */
	int fltamcopy;	/* number of times fault clears "needs copy" */
	int fltnamap;	/* number of times fault maps a neighbor anon page */
	int fltnomap;	/* number of times fault maps a neighbor obj page */
	int fltlget;	/* number of times fault does a locked pgo_get */
	int fltget;	/* number of times fault does an unlocked get */
	int flt_anon;	/* number of times fault anon (case 1a) */
	int flt_acow;	/* number of times fault anon cow (case 1b) */
	int flt_obj;	/* number of times fault is on object page (2a) */
	int flt_prcopy;	/* number of times fault promotes with copy (2b) */
	int flt_przero;	/* number of times fault promotes with zerofill (2b) */

	/* daemon counters */
	int pdwoke;	/* number of times daemon woke up */
	int pdrevs;	/* number of times daemon rev'd clock hand */
	int pdswout;	/* number of times daemon called for swapout */
	int pdfreed;	/* number of pages daemon freed since boot */
	int pdscans;	/* number of pages daemon scanned since boot */
	int pdanscan;	/* number of anonymous pages scanned by daemon */
	int pdobscan;	/* number of object pages scanned by daemon */
	int pdreact;	/* number of pages daemon reactivated since boot */
	int pdbusy;	/* number of times daemon found a busy page */
	int pdpageouts;	/* number of times daemon started a pageout */
	int pdpending;	/* number of times daemon got a pending pageout */
	int pddeact;	/* number of pages daemon deactivates */
	int pdreanon;	/* anon pages reactivated due to min threshold */
	int pdrevnode;	/* vnode pages reactivated due to min threshold */
	int pdrevtext;	/* vtext pages reactivated due to min threshold */

	int fpswtch;	/* FPU context switches */
	int kmapent;	/* number of kernel map entries */
};

#ifdef _KERNEL
extern struct uvmexp uvmexp;
#endif
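
/*
 * Example (hypothetical, for illustration only): kernel code outside the
 * VM system reads uvmexp for informational checks, e.g. a rough
 * "memory is tight" test before an optional allocation:
 *
 *	if (uvmexp.free - uvmexp.reserve_kernel < atop(size))
 *		return (ENOMEM);
 *
 * atop() converts a byte count to a page count and is provided by the
 * parameter headers included below; "size" is a hypothetical byte count.
 */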

/*
 * Finally, bring in standard UVM headers.
 */
#include <sys/vmmeter.h>
#include <sys/queue.h>
#include <sys/tree.h>
#include <uvm/uvm_param.h>
#include <sys/lock.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_fault.h>
#include <uvm/uvm_pager.h>

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	int	vm_refcnt;	/* number of references */
	caddr_t	vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_dused;	/* data segment length (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t	vm_taddr;	/* user virtual address of text XXX */
	caddr_t	vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at top of stack */
};

#ifdef _KERNEL

/*
 * the various kernel maps, owned by MD code
 */
extern struct vm_map *exec_map;
extern struct vm_map *kernel_map;
extern struct vm_map *kmem_map;
extern struct vm_map *phys_map;


/*
 * macros
 */

/* zalloc zeros memory, alloc does not */
#define uvm_km_zalloc(MAP,SIZE) uvm_km_alloc1(MAP,SIZE,0,TRUE)
#define uvm_km_alloc(MAP,SIZE)  uvm_km_alloc1(MAP,SIZE,0,FALSE)
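
/*
 * Example (illustrative sketch): allocating and later releasing wired
 * kernel memory through the wrappers above; uvm_km_zalloc() additionally
 * zero-fills the pages.  "sz" is a hypothetical byte count:
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_zalloc(kernel_map, round_page(sz));
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	uvm_km_free(kernel_map, va, round_page(sz));
 */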

#endif /* _KERNEL */

#ifdef pmap_resident_count
#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
#else
#define vm_resident_count(vm) ((vm)->vm_rssize)
#endif

/* XXX clean up later */
struct buf;
struct loadavg;
struct proc;
struct pmap;
struct vmspace;
struct vmtotal;
struct mount;
struct vnode;
struct core;

#ifdef _KERNEL

/* vm_machdep.c */
void		vmapbuf(struct buf *, vsize_t);
void		vunmapbuf(struct buf *, vsize_t);
void		cpu_fork(struct proc *, struct proc *, void *, size_t,
		    void (*)(void *), void *);

/* uvm_aobj.c */
struct uvm_object *uao_create(vsize_t, int);
void		uao_detach(struct uvm_object *);
void		uao_detach_locked(struct uvm_object *);
void		uao_reference(struct uvm_object *);
void		uao_reference_locked(struct uvm_object *);

/* uvm_fault.c */
int		uvm_fault(vm_map_t, vaddr_t,
		    vm_fault_t, vm_prot_t);
				/* handle a page fault */

/* uvm_glue.c */
#if defined(KGDB)
void		uvm_chgkprot(caddr_t, size_t, int);
#endif
void		uvm_fork(struct proc *, struct proc *, boolean_t,
		    void *, size_t, void (*)(void *), void *);
void		uvm_exit(struct proc *);
void		uvm_init_limits(struct proc *);
boolean_t	uvm_kernacc(caddr_t, size_t, int);
__dead void	uvm_scheduler(void);

int		uvm_vslock(struct proc *, caddr_t, size_t,
		    vm_prot_t);
void		uvm_vsunlock(struct proc *, caddr_t, size_t);


/* uvm_init.c */
void		uvm_init(void);
				/* init the uvm system */

/* uvm_io.c */
int		uvm_io(vm_map_t, struct uio *, int);

#define UVM_IO_FIXPROT	0x01

/* uvm_km.c */
vaddr_t		uvm_km_alloc1(vm_map_t, vsize_t, vsize_t, boolean_t);
void		uvm_km_free(vm_map_t, vaddr_t, vsize_t);
void		uvm_km_free_wakeup(vm_map_t, vaddr_t,
		    vsize_t);
vaddr_t		uvm_km_kmemalloc(vm_map_t, struct uvm_object *,
		    vsize_t, int);
struct vm_map	*uvm_km_suballoc(vm_map_t, vaddr_t *,
		    vaddr_t *, vsize_t, int,
		    boolean_t, vm_map_t);
vaddr_t		uvm_km_valloc(vm_map_t, vsize_t);
vaddr_t		uvm_km_valloc_align(vm_map_t, vsize_t, vsize_t);
vaddr_t		uvm_km_valloc_wait(vm_map_t, vsize_t);
vaddr_t		uvm_km_valloc_prefer_wait(vm_map_t, vsize_t,
		    voff_t);
vaddr_t		uvm_km_alloc_poolpage1(vm_map_t,
		    struct uvm_object *, boolean_t);
void		uvm_km_free_poolpage1(vm_map_t, vaddr_t);

void		*uvm_km_getpage(boolean_t);
void		uvm_km_putpage(void *);

/* uvm_map.c */
#define uvm_map(_m, _a, _sz, _u, _f, _al, _fl) uvm_map_p(_m, _a, _sz, _u, _f, _al, _fl, 0)
int		uvm_map_p(vm_map_t, vaddr_t *, vsize_t,
		    struct uvm_object *, voff_t, vsize_t,
		    uvm_flag_t, struct proc *);
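
/*
 * Example (illustrative sketch; a real caller would check the return value
 * and may need UVM_FLAG_FIXED when the address must not move): establishing
 * an anonymous, demand-zero-fill kernel mapping.  With no backing
 * uvm_object the offset is "don't care", so UVM_UNKNOWN_OFFSET is passed.
 * "sz" is a hypothetical byte count:
 *
 *	vaddr_t va = vm_map_min(kernel_map);
 *	int error;
 *
 *	error = uvm_map(kernel_map, &va, round_page(sz), NULL,
 *	    UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_NORMAL, 0));
 *
 * vm_map_min() comes from uvm_map.h.
 */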
int		uvm_map_pageable(vm_map_t, vaddr_t,
		    vaddr_t, boolean_t, int);
int		uvm_map_pageable_all(vm_map_t, int, vsize_t);
boolean_t	uvm_map_checkprot(vm_map_t, vaddr_t,
		    vaddr_t, vm_prot_t);
int		uvm_map_protect(vm_map_t, vaddr_t,
		    vaddr_t, vm_prot_t, boolean_t);
struct vmspace	*uvmspace_alloc(vaddr_t, vaddr_t,
		    boolean_t);
void		uvmspace_init(struct vmspace *, struct pmap *,
		    vaddr_t, vaddr_t, boolean_t);
void		uvmspace_exec(struct proc *, vaddr_t, vaddr_t);
struct vmspace	*uvmspace_fork(struct vmspace *);
void		uvmspace_free(struct vmspace *);
void		uvmspace_share(struct proc *, struct proc *);
void		uvmspace_unshare(struct proc *);


/* uvm_meter.c */
void		uvm_meter(void);
int		uvm_sysctl(int *, u_int, void *, size_t *,
		    void *, size_t, struct proc *);
void		uvm_total(struct vmtotal *);

/* uvm_mmap.c */
int		uvm_mmap(vm_map_t, vaddr_t *, vsize_t,
		    vm_prot_t, vm_prot_t, int,
		    caddr_t, voff_t, vsize_t, struct proc *);

/* uvm_page.c */
struct vm_page	*uvm_pagealloc_strat(struct uvm_object *,
		    voff_t, struct vm_anon *, int, int, int);
#define uvm_pagealloc(obj, off, anon, flags) \
	    uvm_pagealloc_strat((obj), (off), (anon), (flags), \
		UVM_PGA_STRAT_NORMAL, 0)
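
/*
 * Example (illustrative sketch): grabbing a single zeroed page that is not
 * attached to any object or anon.  uvm_pagealloc() never sleeps; it returns
 * NULL when no free page is available:
 *
 *	struct vm_page *pg;
 *
 *	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
 *	if (pg == NULL)
 *		return (ENOMEM);
 */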
vaddr_t		uvm_pagealloc_contig(vaddr_t, vaddr_t,
		    vaddr_t, vaddr_t);
void		uvm_pagerealloc(struct vm_page *,
		    struct uvm_object *, voff_t);
/* Actually, uvm_page_physload takes PF#s which need their own type */
void		uvm_page_physload(paddr_t, paddr_t,
		    paddr_t, paddr_t, int);
void		uvm_setpagesize(void);

/* uvm_pager.c */
void		uvm_aio_biodone1(struct buf *);
void		uvm_aio_biodone(struct buf *);
void		uvm_aio_aiodone(struct buf *);

/* uvm_pdaemon.c */
void		uvm_pageout(void *);
void		uvm_aiodone_daemon(void *);

/* uvm_pglist.c */
int		uvm_pglistalloc(psize_t, paddr_t,
		    paddr_t, paddr_t, paddr_t,
		    struct pglist *, int, int);
void		uvm_pglistfree(struct pglist *);
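
/*
 * Example (illustrative sketch, e.g. for a device that can only DMA to low
 * physical memory): asking for physically contiguous pages in the first
 * 16MB.  The trailing two arguments are taken here to be the number of
 * segments allowed and a "may wait" flag; "sz" is a hypothetical byte
 * count:
 *
 *	struct pglist plist;
 *
 *	TAILQ_INIT(&plist);
 *	if (uvm_pglistalloc(round_page(sz), 0, 0x1000000, PAGE_SIZE, 0,
 *	    &plist, 1, 0) != 0)
 *		return (ENOMEM);
 *	...
 *	uvm_pglistfree(&plist);
 */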

/* uvm_swap.c */
void		uvm_swap_init(void);

/* uvm_unix.c */
int		uvm_coredump(struct proc *, struct vnode *,
		    struct ucred *, struct core *);
void		uvm_grow(struct proc *, vaddr_t);

/* uvm_user.c */
void		uvm_deallocate(vm_map_t, vaddr_t, vsize_t);

/* uvm_vnode.c */
void		uvm_vnp_setsize(struct vnode *, voff_t);
void		uvm_vnp_sync(struct mount *);
void		uvm_vnp_terminate(struct vnode *);
				/* terminate a uvm/uvn object */
boolean_t	uvm_vnp_uncache(struct vnode *);
struct uvm_object *uvn_attach(void *, vm_prot_t);

/* kern_malloc.c */
void		kmeminit_nkmempages(void);
void		kmeminit(void);
extern u_int	nkmempages;

#endif /* _KERNEL */

#endif /* _UVM_UVM_EXTERN_H_ */