/* $OpenBSD: uvm_loan.c,v 1.28 2007/06/18 21:51:15 pedro Exp $ */
/* $NetBSD: uvm_loan.c,v 1.22 2000/06/27 17:29:25 mrg Exp $ */

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp
 */

/*
 * uvm_loan.c: page loanout handler
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <uvm/uvm.h>
/*
 * "loaned" pages are pages which are (read-only, copy-on-write) loaned
 * from the VM system to other parts of the kernel. this allows page
 * copying to be avoided (e.g. you can loan pages from objs/anons to
 * the mbuf system).
 *
 * there are 3 types of loans possible:
 *   O->K  uvm_object page to wired kernel page (e.g. mbuf data area)
 *   A->K  anon page to wired kernel page (e.g. mbuf data area)
 *   O->A  uvm_object to anon loan (e.g. vnode page to an anon)
 * note that it is possible to have an O page loaned to both an A and K
 * at the same time.
 *
 * loans are tracked by pg->loan_count. an O->A page will have both
 * a uvm_object and a vm_anon, but PQ_ANON will not be set. this sort
 * of page is considered "owned" by the uvm_object (not the anon).
 *
 * each loan of a page to the kernel bumps the pg->wire_count. the
 * kernel mappings for these pages will be read-only and wired. since
 * the page will also be wired, it will not be a candidate for pageout,
 * and thus will never be pmap_page_protect()'d with VM_PROT_NONE. a
 * write fault in the kernel to one of these pages will not cause
 * copy-on-write. instead, the page fault is considered fatal. this
 * is because the kernel mapping will have no way to look up the
 * object/anon which the page is owned by. this is a good side-effect,
 * since a kernel write to a loaned page is an error.
 *
 * owners that want to free their pages and discover that they are
 * loaned out simply "disown" them (the page becomes an orphan). these
 * pages should be freed when the last loan is dropped. in some cases
 * an anon may "adopt" an orphaned page.
 *
 * locking: to read pg->loan_count either the owner or the page queues
 * must be locked. to modify pg->loan_count, both the owner of the page
 * and the PQs must be locked. pg->flags is (as always) locked by
 * the owner of the page.
 *
 * note that locking from the "loaned" side is tricky since the object
 * getting the loaned page has no reference to the page's owner and thus
 * the owner could "die" at any time. in order to prevent the owner
 * from dying the page queues should be locked. this forces us to sometimes
 * use "try" locking.
 *
 * loans are typically broken by the following events:
 *   1. write fault to a loaned page
 *   2. pageout of clean+inactive O->A loaned page
 *   3. owner frees page (e.g. pager flush)
 *
 * note that loaning a page causes all mappings of the page to become
 * read-only (via pmap_page_protect). this could have an unexpected
 * effect on normal "wired" pages if one is not careful (XXX).
 */
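
/*
 * usage sketch (illustrative only, not part of the original code): a
 * typical O->K/A->K use is loaning the pages backing a user buffer to
 * the kernel, e.g. for zero-copy output, and dropping the loans when
 * the work is done. assuming "uva" and "len" describe a page-aligned
 * buffer in the current process' map, a caller might do something like:
 *
 *      struct vm_page *pgs[MAXPGS];    (MAXPGS >= len >> PAGE_SHIFT)
 *
 *      error = uvm_loan(&curproc->p_vmspace->vm_map, uva, len,
 *          (void **)pgs, UVM_LOAN_TOPAGE);
 *      if (error)
 *              return (error);
 *      ... use the wired, read-only pages ...
 *      uvm_unloanpage(pgs, len >> PAGE_SHIFT);
 *
 * the names "uva", "len" and "MAXPGS" above are placeholders.
 */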

/*
 * local prototypes
 */

static int uvm_loananon(struct uvm_faultinfo *, void ***,
    int, struct vm_anon *);
static int uvm_loanentry(struct uvm_faultinfo *, void ***, int);
static int uvm_loanuobj(struct uvm_faultinfo *, void ***,
    int, vaddr_t);
static int uvm_loanzero(struct uvm_faultinfo *, void ***, int);

/*
 * inlines
 */

/*
 * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
 *
 * => "ufi" is the result of a successful map lookup (meaning that
 *    the maps are locked by the caller)
 * => we may unlock the maps if needed (for I/O)
 * => we put our output result in "output"
 * => we return the number of pages we loaned, or -1 if we had an error
 */

static __inline int
uvm_loanentry(ufi, output, flags)
        struct uvm_faultinfo *ufi;
        void ***output;
        int flags;
{
        vaddr_t curaddr = ufi->orig_rvaddr;
        vsize_t togo = ufi->size;
        struct vm_aref *aref = &ufi->entry->aref;
        struct uvm_object *uobj = ufi->entry->object.uvm_obj;
        struct vm_anon *anon;
        int rv, result = 0;

        /*
         * lock us the rest of the way down
         */
        if (uobj)
                simple_lock(&uobj->vmobjlock);

        /*
         * loop until done
         */
        while (togo) {

                /*
                 * find the page we want. check the anon layer first.
                 */

                if (aref->ar_amap) {
                        anon = amap_lookup(aref, curaddr - ufi->entry->start);
                } else {
                        anon = NULL;
                }

                if (anon) {
                        rv = uvm_loananon(ufi, output, flags, anon);
                } else if (uobj) {
                        rv = uvm_loanuobj(ufi, output, flags, curaddr);
                } else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
                        rv = uvm_loanzero(ufi, output, flags);
                } else {
                        rv = -1;        /* null map entry... fail now */
                }

                /* total failure */
                if (rv < 0)
                        return(-1);

                /* relock failed, need to do another lookup */
                if (rv == 0)
                        return(result);

                /*
                 * got it... advance to next page
                 */
                result++;
                togo -= PAGE_SIZE;
                curaddr += PAGE_SIZE;
        }

        /*
         * unlock everything and return
         */
        uvmfault_unlockall(ufi, aref->ar_amap, uobj, NULL);
        return(result);
}

/*
 * normal functions
 */

/*
 * uvm_loan: loan pages out to anons or to the kernel
 *
 * => map should be unlocked
 * => start and len should be multiples of PAGE_SIZE
 * => result is either an array of anon's or vm_pages (depending on flags)
 * => flag values: UVM_LOAN_TOANON - loan to anons
 *                 UVM_LOAN_TOPAGE - loan to wired kernel page
 *    one and only one of these flags must be set!
 */

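/*
 * note: the caller's "result" array must have room for one entry per
 * page in the range (i.e. len >> PAGE_SHIFT entries). with
 * UVM_LOAN_TOANON the entries are struct vm_anon pointers that are
 * later dropped with uvm_unloananon(); with UVM_LOAN_TOPAGE they are
 * struct vm_page pointers that are dropped with uvm_unloanpage().
 */
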
int
uvm_loan(map, start, len, result, flags)
        struct vm_map *map;
        vaddr_t start;
        vsize_t len;
        void **result;
        int flags;
{
        struct uvm_faultinfo ufi;
        void **output;
        int rv;

#ifdef DIAGNOSTIC
        if (map->flags & VM_MAP_INTRSAFE)
                panic("uvm_loan: intrsafe map");
#endif

        /*
         * ensure that one and only one of the flags is set
         */

        if ((flags & (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE)) ==
            (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE) ||
            (flags & (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE)) == 0)
                return (EFAULT);

        /*
         * "output" is a pointer to the current place to put the loaned
         * page...
         */

        output = &result[0];    /* start at the beginning ... */

        /*
         * while we've got pages to do
         */

        while (len > 0) {

                /*
                 * fill in params for a call to uvmfault_lookup
                 */

                ufi.orig_map = map;
                ufi.orig_rvaddr = start;
                ufi.orig_size = len;

                /*
                 * do the lookup, the only time this will fail is if we hit on
                 * an unmapped region (an error)
                 */

                if (!uvmfault_lookup(&ufi, FALSE))
                        goto fail;

                /*
                 * map now locked. now do the loanout...
                 */
                rv = uvm_loanentry(&ufi, &output, flags);
                if (rv < 0)
                        goto fail;

                /*
                 * done! the map is unlocked. advance, if possible.
                 *
                 * XXXCDC: could be recoded to hold the map lock with
                 * smarter code (but it only happens on map entry
                 * boundaries, so it isn't that bad).
                 */
                if (rv) {
                        rv <<= PAGE_SHIFT;      /* pages loaned -> bytes */
                        len -= rv;
                        start += rv;
                }
        }

        /*
         * got it! return success.
         */

        return (0);

fail:
        /*
         * fail: failed to do it. drop our loans and return failure code.
         */
        if (output - result) {
                if (flags & UVM_LOAN_TOANON)
                        uvm_unloananon((struct vm_anon **)result,
                            output - result);
                else
                        uvm_unloanpage((struct vm_page **)result,
                            output - result);
        }
        return (EFAULT);
}

/*
 * uvm_loananon: loan a page from an anon out
 *
 * => return value:
 *    -1 = fatal error, everything is unlocked, abort.
 *     0 = lookup in ufi went stale, everything unlocked, relookup and
 *         try again
 *     1 = got it, everything still locked
 */

int
uvm_loananon(ufi, output, flags, anon)
        struct uvm_faultinfo *ufi;
        void ***output;
        int flags;
        struct vm_anon *anon;
{
        struct vm_page *pg;
        int result;

        /*
         * if we are loaning to another anon then it is easy, we just
         * bump the reference count on the current anon and return a
         * pointer to it.
         */
        if (flags & UVM_LOAN_TOANON) {
                simple_lock(&anon->an_lock);
                pg = anon->an_page;
                if (pg && (pg->pg_flags & PQ_ANON) != 0 && anon->an_ref == 1)
                        /* read protect it */
                        pmap_page_protect(pg, VM_PROT_READ);
                anon->an_ref++;
                **output = anon;
                *output = (*output) + 1;
                simple_unlock(&anon->an_lock);
                return(1);
        }

        /*
         * we are loaning to a kernel-page. we need to get the page
         * resident so we can wire it. uvmfault_anonget will handle
         * this for us.
         */

        simple_lock(&anon->an_lock);
        result = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);

        /*
         * if we were unable to get the anon, then uvmfault_anonget has
         * unlocked everything and returned an error code.
         */

        if (result != VM_PAGER_OK) {

                /* need to refault (i.e. refresh our lookup) ? */
                if (result == VM_PAGER_REFAULT)
                        return(0);

                /* "try again"? sleep a bit and retry ... */
                if (result == VM_PAGER_AGAIN) {
                        tsleep((caddr_t)&lbolt, PVM, "loanagain", 0);
                        return(0);
                }

                /* otherwise flag it as an error */
                return(-1);
        }

        /*
         * we have the page and its owner locked: do the loan now.
         */

        pg = anon->an_page;
        uvm_lock_pageq();
        if (pg->loan_count == 0)
                pmap_page_protect(pg, VM_PROT_READ);
        pg->loan_count++;
        uvm_pagewire(pg);       /* always wire it */
        uvm_unlock_pageq();
        **output = pg;
        *output = (*output) + 1;

        /* unlock anon and return success */
        if (pg->uobject)
                simple_unlock(&pg->uobject->vmobjlock);
        simple_unlock(&anon->an_lock);
        return(1);
}

/*
 * uvm_loanuobj: loan a page from a uobj out
 *
 * => return value:
 *    -1 = fatal error, everything is unlocked, abort.
 *     0 = lookup in ufi went stale, everything unlocked, relookup and
 *         try again
 *     1 = got it, everything still locked
 */

int
uvm_loanuobj(ufi, output, flags, va)
        struct uvm_faultinfo *ufi;
        void ***output;
        int flags;
        vaddr_t va;
{
        struct vm_amap *amap = ufi->entry->aref.ar_amap;
        struct uvm_object *uobj = ufi->entry->object.uvm_obj;
        struct vm_page *pg;
        struct vm_anon *anon;
        int result, npages;
        boolean_t locked;

        /*
         * first we must make sure the page is resident.
         *
         * XXXCDC: duplicate code with uvm_fault().
         */

        if (uobj->pgops->pgo_get) {
                npages = 1;
                pg = NULL;
                result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
                    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
        } else {
                result = VM_PAGER_ERROR;
        }

        /*
         * check the result of the locked pgo_get. if there is a problem,
         * then we fail the loan.
         */

        if (result != VM_PAGER_OK && result != VM_PAGER_UNLOCK) {
                uvmfault_unlockall(ufi, amap, uobj, NULL);
                return(-1);
        }

        /*
         * if we need to unlock for I/O, do so now.
         */

        if (result == VM_PAGER_UNLOCK) {
                uvmfault_unlockall(ufi, amap, NULL, NULL);

                npages = 1;
                /* locked: uobj */
                result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
                    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, 0);
                /* locked: <nothing> */

                /*
                 * check for errors
                 */

                if (result != VM_PAGER_OK) {
                        if (result == VM_PAGER_AGAIN) {
                                tsleep((caddr_t)&lbolt, PVM, "fltagain2", 0);
                                return(0); /* redo the lookup and try again */
                        }
                        return(-1); /* total failure */
                }

                /*
                 * pgo_get was a success. attempt to relock everything.
                 */

                locked = uvmfault_relock(ufi);
                simple_lock(&uobj->vmobjlock);

                /*
                 * verify that the page has not been released and re-verify
                 * that amap slot is still free. if there is a problem we
                 * drop our lock (thus force a lookup refresh/retry).
                 */

                if ((pg->pg_flags & PG_RELEASED) != 0 ||
                    (locked && amap && amap_lookup(&ufi->entry->aref,
                    ufi->orig_rvaddr - ufi->entry->start))) {

                        if (locked)
                                uvmfault_unlockall(ufi, amap, NULL, NULL);
                        locked = FALSE;
                }

                /*
                 * didn't get the lock? release the page and retry.
                 */

                if (locked == FALSE) {

                        if (pg->pg_flags & PG_WANTED)
                                /* still holding object lock */
                                wakeup(pg);

                        if (pg->pg_flags & PG_RELEASED) {
#ifdef DIAGNOSTIC
                                if (uobj->pgops->pgo_releasepg == NULL)
                                        panic("uvm_loanuobj: object has no releasepg function");
#endif
                                /* frees page */
                                if (uobj->pgops->pgo_releasepg(pg, NULL))
                                        simple_unlock(&uobj->vmobjlock);
                                return (0);
                        }

                        uvm_lock_pageq();
                        uvm_pageactivate(pg); /* make sure it is in queues */
                        uvm_unlock_pageq();
                        atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_WANTED);
                        UVM_PAGE_OWN(pg, NULL);
                        simple_unlock(&uobj->vmobjlock);
                        return (0);
                }
        }

        /*
         * at this point we have the page we want ("pg") marked PG_BUSY for us
         * and we have all data structures locked. do the loanout. page can
         * not be PG_RELEASED (we caught this above).
         */

        if ((flags & UVM_LOAN_TOANON) == 0) { /* loan to wired-kernel page? */
                uvm_lock_pageq();
                if (pg->loan_count == 0)
                        pmap_page_protect(pg, VM_PROT_READ);
                pg->loan_count++;
                uvm_pagewire(pg);
                uvm_unlock_pageq();
                **output = pg;
                *output = (*output) + 1;
                if (pg->pg_flags & PG_WANTED)
                        wakeup(pg);
                atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
                UVM_PAGE_OWN(pg, NULL);
                return(1);              /* got it! */
        }

        /*
         * must be a loan to an anon. check to see if there is already
         * an anon associated with this page. if so, bump its reference
         * count and return a pointer to that anon. the page should
         * already be mapped read-only because it is already on loan.
         */

        if (pg->uanon) {
                anon = pg->uanon;
                simple_lock(&anon->an_lock);
                anon->an_ref++;
                simple_unlock(&anon->an_lock);
                **output = anon;
                *output = (*output) + 1;
                uvm_lock_pageq();
                uvm_pageactivate(pg);   /* reactivate */
                uvm_unlock_pageq();
                if (pg->pg_flags & PG_WANTED)
                        wakeup(pg);
                atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
                UVM_PAGE_OWN(pg, NULL);
                return(1);
        }

        /*
         * need to allocate a new anon
         */

        anon = uvm_analloc();
        if (anon == NULL) {             /* out of VM! */
                if (pg->pg_flags & PG_WANTED)
                        wakeup(pg);
                atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
                UVM_PAGE_OWN(pg, NULL);
                uvmfault_unlockall(ufi, amap, uobj, NULL);
                return(-1);
        }
        anon->an_page = pg;
        pg->uanon = anon;
        uvm_lock_pageq();
        if (pg->loan_count == 0)
                pmap_page_protect(pg, VM_PROT_READ);
        pg->loan_count++;
        uvm_pageactivate(pg);
        uvm_unlock_pageq();
        **output = anon;
        *output = (*output) + 1;
        if (pg->pg_flags & PG_WANTED)
                wakeup(pg);
        atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
        UVM_PAGE_OWN(pg, NULL);
        return(1);
}

/*
 * uvm_loanzero: "loan" a zero-fill page out
 *
 * => return value:
 *    -1 = fatal error, everything is unlocked, abort.
 *     0 = lookup in ufi went stale, everything unlocked, relookup and
 *         try again
 *     1 = got it, everything still locked
 */

int
uvm_loanzero(ufi, output, flags)
        struct uvm_faultinfo *ufi;
        void ***output;
        int flags;
{
        struct vm_anon *anon;
        struct vm_page *pg;

        if ((flags & UVM_LOAN_TOANON) == 0) {   /* loaning to kernel-page */

                while ((pg = uvm_pagealloc(NULL, 0, NULL,
                    UVM_PGA_ZERO)) == NULL) {
                        uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
                            ufi->entry->object.uvm_obj, NULL);
                        uvm_wait("loanzero1");
                        if (!uvmfault_relock(ufi))
                                return(0);
                        if (ufi->entry->object.uvm_obj)
                                simple_lock(
                                    &ufi->entry->object.uvm_obj->vmobjlock);
                        /* ... and try again */
                }

                /* got a zero'd page; return */
                atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
                UVM_PAGE_OWN(pg, NULL);
                **output = pg;
                *output = (*output) + 1;
                uvm_lock_pageq();
                /* wire it as we are loaning to kernel-page */
                uvm_pagewire(pg);
                pg->loan_count = 1;
                uvm_unlock_pageq();
                return(1);
        }

        /* loaning to an anon */
        while ((anon = uvm_analloc()) == NULL ||
            (pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO)) == NULL) {

                /* unlock everything */
                uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
                    ufi->entry->object.uvm_obj, NULL);

                /* out of swap causes us to fail */
                if (anon == NULL)
                        return(-1);

                uvm_anfree(anon);
                uvm_wait("loanzero2");  /* wait for pagedaemon */

                if (!uvmfault_relock(ufi))
                        /* map changed while unlocked, need relookup */
                        return (0);

                /* relock everything else */
                if (ufi->entry->object.uvm_obj)
                        simple_lock(&ufi->entry->object.uvm_obj->vmobjlock);
                /* ... and try again */
        }

        /* got a zero'd page; return */
        atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
        UVM_PAGE_OWN(pg, NULL);
        uvm_lock_pageq();
        uvm_pageactivate(pg);
        uvm_unlock_pageq();
        **output = anon;
        *output = (*output) + 1;
        return(1);
}


/*
 * uvm_unloananon: kill loans on anons (basically a normal ref drop)
 *
 * => we expect all our resources to be unlocked
 */

void
uvm_unloananon(aloans, nanons)
        struct vm_anon **aloans;
        int nanons;
{
        struct vm_anon *anon;

        while (nanons-- > 0) {
                int refs;

                anon = *aloans++;
                simple_lock(&anon->an_lock);
                refs = --anon->an_ref;
                simple_unlock(&anon->an_lock);

                if (refs == 0) {
                        uvm_anfree(anon);       /* last reference: kill anon */
                }
        }
}

/*
 * uvm_unloanpage: kill loans on pages loaned out to the kernel
 *
 * => we expect all our resources to be unlocked
 */

void
uvm_unloanpage(ploans, npages)
        struct vm_page **ploans;
        int npages;
{
        struct vm_page *pg;

        uvm_lock_pageq();

        while (npages-- > 0) {
                pg = *ploans++;

                if (pg->loan_count < 1)
                        panic("uvm_unloanpage: page %p isn't loaned", pg);

                pg->loan_count--;               /* drop loan */
                uvm_pageunwire(pg);             /* and unwire */

                /*
                 * if page is unowned and we killed last loan, then we can
                 * free it
                 */
                if (pg->loan_count == 0 && pg->uobject == NULL &&
                    pg->uanon == NULL) {

                        if (pg->pg_flags & PG_BUSY)
                                panic("uvm_unloanpage: page %p unowned but PG_BUSY!", pg);

                        /* be safe */
                        pmap_page_protect(pg, VM_PROT_NONE);
                        uvm_pagefree(pg);       /* pageq locked above */

                }
        }

        uvm_unlock_pageq();
}