/*
 * uvm_loan.c: page loanout handler
 *
 * This source file includes the following definitions:
 *  - uvm_loanentry
 *  - uvm_loan
 *  - uvm_loananon
 *  - uvm_loanuobj
 *  - uvm_loanzero
 *  - uvm_unloananon
 *  - uvm_unloanpage
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <uvm/uvm.h>
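
/*
 * page loanout module.
 *
 * page loanout is a generic mechanism which allows the owner of a page
 * (a uvm_object or a vm_anon) to "loan" the page out read-only: either
 * to a wired kernel page or to an anon, depending on the flags passed
 * to uvm_loan().  loans are tracked in pg->loan_count, and a loaned
 * page is write-protected until all loans on it are dropped.
 */

/*
 * local prototypes
 */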
static int	uvm_loananon(struct uvm_faultinfo *, void ***,
		    int, struct vm_anon *);
static int	uvm_loanentry(struct uvm_faultinfo *, void ***, int);
static int	uvm_loanuobj(struct uvm_faultinfo *, void ***,
		    int, vaddr_t);
static int	uvm_loanzero(struct uvm_faultinfo *, void ***, int);
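
/*
 * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
 *
 * => "ufi" is the result of a successful map lookup (meaning that the
 *	maps are locked by the caller)
 * => we may unlock the maps if needed (for I/O)
 * => we put our output result in "output"
 * => we return the number of pages we loaned, or -1 if we had an error
 */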
static __inline int
uvm_loanentry(ufi, output, flags)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
{
	vaddr_t curaddr = ufi->orig_rvaddr;
	vsize_t togo = ufi->size;
	struct vm_aref *aref = &ufi->entry->aref;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_anon *anon;
	int rv, result = 0;

	/*
	 * lock us the rest of the way down (we unlock before return)
	 */
	if (uobj)
		simple_lock(&uobj->vmobjlock);

	/*
	 * loop until done
	 */
	while (togo) {

		/*
		 * find the page we want.  check the anon layer first.
		 */
		if (aref->ar_amap) {
			anon = amap_lookup(aref, curaddr - ufi->entry->start);
		} else {
			anon = NULL;
		}

		if (anon) {
			rv = uvm_loananon(ufi, output, flags, anon);
		} else if (uobj) {
			rv = uvm_loanuobj(ufi, output, flags, curaddr);
		} else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
			rv = uvm_loanzero(ufi, output, flags);
		} else {
			rv = -1;	/* null map entry... fail now */
		}

		/* total failure */
		if (rv < 0)
			return(-1);

		/* relock failed, need to do another lookup */
		if (rv == 0)
			return(result);

		/*
		 * got it... advance to next page
		 */
		result++;
		togo -= PAGE_SIZE;
		curaddr += PAGE_SIZE;
	}

	/*
	 * unlock everything and return
	 */
	uvmfault_unlockall(ufi, aref->ar_amap, uobj, NULL);
	return(result);
}
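
/*
 * normal functions
 */

/*
 * uvm_loan: loan pages in a map out to anons or to the kernel
 *
 * => map should be unlocked
 * => start and len should be multiples of PAGE_SIZE
 * => result is either an array of anons or of vm_pages (depending on flags)
 * => flag values: UVM_LOAN_TOANON - loan to anons
 *                 UVM_LOAN_TOPAGE - loan to wired kernel pages
 *    one and only one of these flags must be set!
 */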
int
uvm_loan(map, start, len, result, flags)
	struct vm_map *map;
	vaddr_t start;
	vsize_t len;
	void **result;
	int flags;
{
	struct uvm_faultinfo ufi;
	void **output;
	int rv;

#ifdef DIAGNOSTIC
	if (map->flags & VM_MAP_INTRSAFE)
		panic("uvm_loan: intrsafe map");
#endif

	/*
	 * ensure that one and only one of the flags is set
	 */
	if ((flags & (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE)) ==
	    (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE) ||
	    (flags & (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE)) == 0)
		return (EFAULT);

	/*
	 * "output" is a pointer to the current place to put the loaned
	 * page.
	 */
	output = &result[0];	/* start at the beginning ... */

	/*
	 * while we've got pages to do
	 */
	while (len > 0) {

		/*
		 * fill in params for a call to uvmfault_lookup
		 */
		ufi.orig_map = map;
		ufi.orig_rvaddr = start;
		ufi.orig_size = len;

		/*
		 * do the lookup.  this fails if we hit an unmapped part
		 * of the region, in which case we bail out.
		 */
		if (!uvmfault_lookup(&ufi, FALSE))
			goto fail;

		/*
		 * map is locked.  now do the loanout.
		 */
		rv = uvm_loanentry(&ufi, &output, flags);
		if (rv < 0)
			goto fail;

		/*
		 * done!  the map is now unlocked.  advance, if possible.
		 * note that rv == 0 means the relock failed and we must
		 * do another lookup of the same region.
		 */
		if (rv) {
			rv <<= PAGE_SHIFT;
			len -= rv;
			start += rv;
		}
	}

	/*
	 * got it!  return success.
	 */
	return (0);

fail:
	/*
	 * failed to complete loans.  drop any loans we took out and
	 * return failure code.
	 */
	if (output - result) {
		if (flags & UVM_LOAN_TOANON)
			uvm_unloananon((struct vm_anon **)result,
			    output - result);
		else
			uvm_unloanpage((struct vm_page **)result,
			    output - result);
	}
	return (EFAULT);
}
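
/*
 * uvm_loananon: loan a page from an anon out
 *
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */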
int
uvm_loananon(ufi, output, flags, anon)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
	struct vm_anon *anon;
{
	struct vm_page *pg;
	int result;

	/*
	 * if we are loaning to "another" anon then it is easy, we just
	 * bump the reference count on the anon and return a pointer to
	 * it (it becomes copy-on-write shared).
	 */
	if (flags & UVM_LOAN_TOANON) {
		simple_lock(&anon->an_lock);
		pg = anon->an_page;
		if (pg && (pg->pg_flags & PQ_ANON) != 0 && anon->an_ref == 1)
			/* read protect it */
			pmap_page_protect(pg, VM_PROT_READ);
		anon->an_ref++;
		**output = anon;
		*output = (*output) + 1;
		simple_unlock(&anon->an_lock);
		return(1);
	}

	/*
	 * we are loaning to a kernel page.  we need to get the page
	 * resident so we can wire it.  uvmfault_anonget will handle
	 * this for us.
	 */
	simple_lock(&anon->an_lock);
	result = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);

	/*
	 * if we were unable to get the anon, then uvmfault_anonget has
	 * unlocked everything and returned an error code.
	 */
	if (result != VM_PAGER_OK) {

		/* need to refault (i.e. refresh our lookup)? */
		if (result == VM_PAGER_REFAULT)
			return(0);

		/* "try again"?  sleep a bit and retry ... */
		if (result == VM_PAGER_AGAIN) {
			tsleep((caddr_t)&lbolt, PVM, "loanagain", 0);
			return(0);
		}

		/* otherwise flag it as an error */
		return(-1);
	}

	/*
	 * we have the page and its owner locked: do the loan now.
	 */
	pg = anon->an_page;
	uvm_lock_pageq();
	if (pg->loan_count == 0)
		pmap_page_protect(pg, VM_PROT_READ);
	pg->loan_count++;
	uvm_pagewire(pg);	/* always wire it */
	uvm_unlock_pageq();
	**output = pg;
	*output = (*output) + 1;

	/* unlock and return success */
	if (pg->uobject)
		simple_unlock(&pg->uobject->vmobjlock);
	simple_unlock(&anon->an_lock);
	return(1);
}
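
/*
 * uvm_loanuobj: loan a page from a uobj out
 *
 * => called with map, amap, uobj locked
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */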
int
uvm_loanuobj(ufi, output, flags, va)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
	vaddr_t va;
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_page *pg;
	struct vm_anon *anon;
	int result, npages;
	boolean_t locked;

	/*
	 * first we must make sure the page is resident.
	 *
	 * XXXCDC: duplicate code with uvm_fault().
	 */
	if (uobj->pgops->pgo_get) {	/* try locked pgo_get */
		npages = 1;
		pg = NULL;
		result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
	} else {
		result = VM_PAGER_ERROR;
	}

	/*
	 * check the result of the locked pgo_get.  if there is a problem,
	 * then we fail the loan.
	 */
	if (result != VM_PAGER_OK && result != VM_PAGER_UNLOCK) {
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		return(-1);
	}

	/*
	 * if we need to unlock for I/O, do so now.
	 */
	if (result == VM_PAGER_UNLOCK) {
		uvmfault_unlockall(ufi, amap, NULL, NULL);

		npages = 1;
		/* locked: uobj */
		result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, 0);
		/* locked: <nothing> */

		/*
		 * check for errors
		 */
		if (result != VM_PAGER_OK) {
			if (result == VM_PAGER_AGAIN) {
				tsleep((caddr_t)&lbolt, PVM, "fltagain2", 0);
				return(0);
			}
			return(-1);
		}

		/*
		 * pgo_get was a success.  attempt to relock everything.
		 */
		locked = uvmfault_relock(ufi);
		simple_lock(&uobj->vmobjlock);

		/*
		 * verify that the page has not been released and re-verify
		 * that the amap slot is still free.  if there is a problem,
		 * we drop our lock (thus forcing a lookup refresh/retry).
		 */
		if ((pg->pg_flags & PG_RELEASED) != 0 ||
		    (locked && amap && amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start))) {

			if (locked)
				uvmfault_unlockall(ufi, amap, NULL, NULL);
			locked = FALSE;
		}

		/*
		 * didn't get the lock?  release the page and retry.
		 */
		if (locked == FALSE) {

			if (pg->pg_flags & PG_WANTED)
				/* still holding object lock */
				wakeup(pg);

			if (pg->pg_flags & PG_RELEASED) {
#ifdef DIAGNOSTIC
				if (uobj->pgops->pgo_releasepg == NULL)
					panic("uvm_loanuobj: object has no releasepg function");
#endif
				/* pgo_releasepg frees the page for us */
				if (uobj->pgops->pgo_releasepg(pg, NULL))
					simple_unlock(&uobj->vmobjlock);
				return (0);
			}

			uvm_lock_pageq();
			uvm_pageactivate(pg);	/* make sure it is in queues */
			uvm_unlock_pageq();
			atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(pg, NULL);
			simple_unlock(&uobj->vmobjlock);
			return (0);
		}
	}

	/*
	 * at this point we have the page we want ("pg") marked PG_BUSY for
	 * us and we have all data structures locked.  do the loanout.  the
	 * page can not be PG_RELEASED (we caught this above).
	 */
	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loan to wired kernel page? */
		uvm_lock_pageq();
		if (pg->loan_count == 0)
			pmap_page_protect(pg, VM_PROT_READ);
		pg->loan_count++;
		uvm_pagewire(pg);
		uvm_unlock_pageq();
		**output = pg;
		*output = (*output) + 1;
		if (pg->pg_flags & PG_WANTED)
			wakeup(pg);
		atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		return(1);		/* got it! */
	}

	/*
	 * must be a loan to an anon.  check to see if there is already an
	 * anon associated with this page.  if so, then just return a
	 * reference to it.  the page should already be mapped read-only
	 * because it is already on loan.
	 */
	if (pg->uanon) {
		anon = pg->uanon;
		simple_lock(&anon->an_lock);
		anon->an_ref++;
		simple_unlock(&anon->an_lock);
		**output = anon;
		*output = (*output) + 1;
		uvm_lock_pageq();
		uvm_pageactivate(pg);	/* reactivate */
		uvm_unlock_pageq();
		if (pg->pg_flags & PG_WANTED)
			wakeup(pg);
		atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		return(1);
	}

	/*
	 * need to allocate a new anon
	 */
	anon = uvm_analloc();
	if (anon == NULL) {		/* out of VM? */
		if (pg->pg_flags & PG_WANTED)
			wakeup(pg);
		atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		return(-1);
	}
	anon->an_page = pg;
	pg->uanon = anon;
	uvm_lock_pageq();
	if (pg->loan_count == 0)
		pmap_page_protect(pg, VM_PROT_READ);
	pg->loan_count++;
	uvm_pageactivate(pg);
	uvm_unlock_pageq();
	**output = anon;
	*output = (*output) + 1;
	if (pg->pg_flags & PG_WANTED)
		wakeup(pg);
	atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	return(1);
}
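
/*
 * uvm_loanzero: "loan" a zero-fill page out
 *
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */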
int
uvm_loanzero(ufi, output, flags)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
{
	struct vm_anon *anon;
	struct vm_page *pg;

	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loaning to kernel page */

		while ((pg = uvm_pagealloc(NULL, 0, NULL,
		    UVM_PGA_ZERO)) == NULL) {
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    ufi->entry->object.uvm_obj, NULL);
			uvm_wait("loanzero1");
			if (!uvmfault_relock(ufi))
				return(0);
			if (ufi->entry->object.uvm_obj)
				simple_lock(
				    &ufi->entry->object.uvm_obj->vmobjlock);
			/* ... and try again */
		}

		/* got a zero'd page; return */
		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
		UVM_PAGE_OWN(pg, NULL);
		**output = pg;
		*output = (*output) + 1;
		uvm_lock_pageq();
		/* wire it as we are loaning to a kernel page */
		uvm_pagewire(pg);
		pg->loan_count = 1;
		uvm_unlock_pageq();
		return(1);
	}

	/* loaning to an anon */
	while ((anon = uvm_analloc()) == NULL ||
	    (pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO)) == NULL) {

		/* unlock everything */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
		    ufi->entry->object.uvm_obj, NULL);

		/* out of swap causes us to fail */
		if (anon == NULL)
			return(-1);

		uvm_anfree(anon);
		uvm_wait("loanzero2");		/* wait for pagedaemon */

		if (!uvmfault_relock(ufi))
			/* map changed while unlocked, need relookup */
			return (0);

		/* relock what we had locked */
		if (ufi->entry->object.uvm_obj)
			simple_lock(&ufi->entry->object.uvm_obj->vmobjlock);
		/* ... and try again */
	}

	/* got a zero'd page; return */
	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);
	uvm_lock_pageq();
	uvm_pageactivate(pg);
	uvm_unlock_pageq();
	**output = anon;
	*output = (*output) + 1;
	return(1);
}
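
/*
 * uvm_unloananon: kill loans on anons (basically a normal ref drop)
 *
 * => we expect all our resources to be unlocked
 */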
void
uvm_unloananon(aloans, nanons)
	struct vm_anon **aloans;
	int nanons;
{
	struct vm_anon *anon;

	while (nanons-- > 0) {
		int refs;

		anon = *aloans++;
		simple_lock(&anon->an_lock);
		refs = --anon->an_ref;
		simple_unlock(&anon->an_lock);

		if (refs == 0) {
			uvm_anfree(anon);
		}
	}
}
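
/*
 * uvm_unloanpage: kill loans on pages loaned out to the kernel
 *
 * => we expect all our resources to be unlocked
 */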
void
uvm_unloanpage(ploans, npages)
	struct vm_page **ploans;
	int npages;
{
	struct vm_page *pg;

	uvm_lock_pageq();

	while (npages-- > 0) {
		pg = *ploans++;

		if (pg->loan_count < 1)
			panic("uvm_unloanpage: page %p isn't loaned", pg);

		pg->loan_count--;		/* drop loan */
		uvm_pageunwire(pg);		/* and unwire */

		/*
		 * if the page is now unowned and we killed the last loan,
		 * then we can free it
		 */
		if (pg->loan_count == 0 && pg->uobject == NULL &&
		    pg->uanon == NULL) {

			if (pg->pg_flags & PG_BUSY)
				panic("uvm_unloanpage: page %p unowned but PG_BUSY!", pg);

			/* be safe */
			pmap_page_protect(pg, VM_PROT_NONE);
			uvm_pagefree(pg);
		}
	}

	uvm_unlock_pageq();
}