/*
 * uvm_fault.c: fault handler
 *
 * This source file includes the following definitions:
 *  - uvmfault_anonflush
 *  - uvmfault_amapcopy
 *  - uvmfault_anonget
 *  - uvm_fault
 *  - uvm_fault_wire
 *  - uvm_fault_unwire
 *  - uvm_fault_unwire_locked
 */
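
/*
 * Usage sketch (an assumption for illustration, not code from this file):
 * machine-dependent trap handlers resolve a page fault by calling
 * uvm_fault() on the map covering the faulting address, roughly:
 *
 *	rv = uvm_fault(map, trunc_page(va), VM_FAULT_INVALID, ftype);
 *	if (rv != 0)
 *		(deliver SIGSEGV/SIGBUS to the process based on rv)
 *
 * where "map", "va", and "ftype" (a vm_prot_t access type) come from the
 * trap frame; the exact call site is machine-dependent.
 */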

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/user.h>

#include <uvm/uvm.h>
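
/*
 * uvm_advice: read-ahead/read-behind cluster sizes, keyed by madvise(2)
 * advice.  on a fault, "nback" pages before and "nforw" pages after the
 * faulting page are considered for mapping in.
 */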
struct uvm_advice {
	int advice;
	int nback;
	int nforw;
};
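
/*
 * page range array: the index into this array must match the advice
 * value itself (e.g. MADV_NORMAL == 0), as the KASSERT in uvm_fault()
 * verifies.
 */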
static struct uvm_advice uvmadvice[] = {
	{ MADV_NORMAL, 3, 4 },
	{ MADV_RANDOM, 0, 0 },
	{ MADV_SEQUENTIAL, 8, 7 },
};

#define UVM_MAXRANGE 16	/* max number of pages we can map in per fault */
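
/*
 * private prototypes
 */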
static void uvmfault_amapcopy(struct uvm_faultinfo *);
static __inline void uvmfault_anonflush(struct vm_anon **, int);
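
/*
 * inline functions
 */

/*
 * uvmfault_anonflush: try to deactivate pages in the specified anons.
 *
 * => it is not an error to skip a page: busy, loaned, and wired pages
 *    are simply left alone.
 */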
static __inline void
uvmfault_anonflush(anons, n)
	struct vm_anon **anons;
	int n;
{
	int lcv;
	struct vm_page *pg;

	for (lcv = 0 ; lcv < n ; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		simple_lock(&anons[lcv]->an_lock);
		pg = anons[lcv]->an_page;
		if (pg && (pg->pg_flags & PG_BUSY) == 0 && pg->loan_count == 0) {
			uvm_lock_pageq();
			if (pg->wire_count == 0) {
#ifdef UBC
				pmap_clear_reference(pg);
#else
				pmap_page_protect(pg, VM_PROT_NONE);
#endif
				uvm_pagedeactivate(pg);
			}
			uvm_unlock_pageq();
		}
		simple_unlock(&anons[lcv]->an_lock);
	}
}
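
/*
 * normal functions
 */

/*
 * uvmfault_amapcopy: clear "needs_copy" in a map entry.
 *
 * => called with VM data structures unlocked.
 * => takes a write lock on the maps, copies the amap, and unlocks again
 *    before returning; if we are out of RAM it sleeps (waiting for more)
 *    and retries.
 */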
static void
uvmfault_amapcopy(ufi)
	struct uvm_faultinfo *ufi;
{
	/*
	 * loop until the job is done.
	 */
	while (1) {

		/*
		 * no mapping?  give up.
		 */
		if (uvmfault_lookup(ufi, TRUE) == FALSE)
			return;

		/*
		 * copy if needed.
		 */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
			amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
			    ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

		/*
		 * didn't work?  we must be out of RAM.  unlock and sleep,
		 * then try again.
		 */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
			uvmfault_unlockmaps(ufi, TRUE);
			uvm_wait("fltamapcopy");
			continue;
		}

		/*
		 * got it!  unlock and return.
		 */
		uvmfault_unlockmaps(ufi, TRUE);
		return;
	}
	/*NOTREACHED*/
}
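
/*
 * uvmfault_anonget: get data in an anon into a non-busy, non-released
 * page in that anon.
 *
 * => maps, amap, and anon must be locked by the caller.
 * => returns VM_PAGER_OK with everything still locked; VM_PAGER_REFAULT
 *    if the caller should drop back and re-fault (everything has been
 *    unlocked); or VM_PAGER_ERROR if the anon's backing swap slot turned
 *    out to be bad (also with everything unlocked).
 */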
int
uvmfault_anonget(ufi, amap, anon)
	struct uvm_faultinfo *ufi;
	struct vm_amap *amap;
	struct vm_anon *anon;
{
	boolean_t we_own;	/* we own anon's page? */
	boolean_t locked;	/* did we relock? */
	struct vm_page *pg;
	int result;
	UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);

	result = 0;
	uvmexp.fltanget++;
	/* bump rusage counters */
	if (anon->an_page)
		curproc->p_addr->u_stats.p_ru.ru_minflt++;
	else
		curproc->p_addr->u_stats.p_ru.ru_majflt++;

	/*
	 * loop until we get the anon data, or fail.
	 */
	while (1) {

		we_own = FALSE;		/* TRUE if we set PG_BUSY on a page */
		pg = anon->an_page;

		/*
		 * if there is a resident page and it is loaned, then anon
		 * may not own it.   call out to uvm_anon_lockloanpg() to
		 * identify and lock the real owner of the page.
		 */
		if (pg && pg->loan_count)
			pg = uvm_anon_lockloanpg(anon);

		/*
		 * page there?   make sure it is not busy/released.
		 */
		if (pg) {

			/*
			 * at this point, if the page has a uobject [meaning
			 * we have it on loan], then that uobject is locked
			 * by us!   if the page is busy, we drop all the
			 * locks (including uobject) and try again.
			 */
			if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) {
				UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
				return (VM_PAGER_OK);
			}
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			uvmexp.fltpgwait++;

			/*
			 * the last unlock must be an atomic unlock+wait on
			 * the owner of page.
			 */
			if (pg->uobject) {	/* owner is uobject ? */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
				    0,0,0);
				UVM_UNLOCK_AND_WAIT(pg,
				    &pg->uobject->vmobjlock,
				    FALSE, "anonget1",0);
			} else {
				/* anon owns page */
				uvmfault_unlockall(ufi, amap, NULL, NULL);
				UVMHIST_LOG(maphist, " unlock+wait on anon",0,
				    0,0,0);
				UVM_UNLOCK_AND_WAIT(pg,&anon->an_lock,0,
				    "anonget2",0);
			}
			/* ready to relock and try again */

		} else {

			/*
			 * no page, we must try and bring it in.
			 */
			pg = uvm_pagealloc(NULL, 0, anon, 0);

			if (pg == NULL) {		/* out of RAM. */

				uvmfault_unlockall(ufi, amap, NULL, anon);
				uvmexp.fltnoram++;
				UVMHIST_LOG(maphist, " noram -- UVM_WAIT",0,
				    0,0,0);
				uvm_wait("flt_noram1");
				/* ready to relock and try again */

			} else {

				/* we set the PG_BUSY bit */
				we_own = TRUE;
				uvmfault_unlockall(ufi, amap, NULL, anon);

				/*
				 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
				 * page into the uvm_swap_get function with
				 * all data structures unlocked.  note that
				 * it is ok to read an_swslot here because
				 * we hold PG_BUSY on the page.
				 */
				uvmexp.pageins++;
				result = uvm_swap_get(pg, anon->an_swslot,
				    PGO_SYNCIO);

				/*
				 * we clean up after the i/o below in the
				 * "we_own" case.
				 */
			}
		}

		/*
		 * now relock and try again.
		 */
		locked = uvmfault_relock(ufi);
		if (locked || we_own)
			simple_lock(&anon->an_lock);

		/*
		 * if we own the page (i.e. we set PG_BUSY), then we need
		 * to clean up after the I/O.  there are three cases to
		 * consider:
		 *   [1] page released during I/O: free anon and ReFault.
		 *   [2] I/O not OK.   free the page and cause the fault
		 *       to fail.
		 *   [3] I/O OK!   activate the page and sync with the
		 *       non-we_own case (i.e. drop anon lock if not locked).
		 */
		if (we_own) {

			if (pg->pg_flags & PG_WANTED) {
				/* still holding object lock */
				wakeup(pg);
			}
			/* un-busy! */
			atomic_clearbits_int(&pg->pg_flags,
			    PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);

			/*
			 * if we were RELEASED during I/O, then our anon is
			 * no longer part of an amap.   we need to free the
			 * anon and try again.
			 */
			if (pg->pg_flags & PG_RELEASED) {
				pmap_page_protect(pg, VM_PROT_NONE);
				simple_unlock(&anon->an_lock);
				uvm_anfree(anon);	/* frees page for us */
				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    NULL);
				uvmexp.fltpgrele++;
				UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
				return (VM_PAGER_REFAULT);
			}

			if (result != VM_PAGER_OK) {
				KASSERT(result != VM_PAGER_PEND);

				/* remove page from anon */
				anon->an_page = NULL;

				/*
				 * remove the swap slot from the anon and
				 * mark the anon as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				uvm_swap_markbad(anon->an_swslot, 1);
				anon->an_swslot = SWSLOT_BAD;

				/*
				 * note: page was never !PG_BUSY, so it
				 * can't be mapped and thus no need to
				 * pmap_page_protect it...
				 */
				uvm_lock_pageq();
				uvm_pagefree(pg);
				uvm_unlock_pageq();

				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    anon);
				else
					simple_unlock(&anon->an_lock);
				UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
				return (VM_PAGER_ERROR);
			}

			/*
			 * the I/O must be OK.   clear modify (already
			 * PG_CLEAN) and activate.
			 */
			pmap_clear_modify(pg);
			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
			if (!locked)
				simple_unlock(&anon->an_lock);
		}

		/*
		 * we were not able to relock.   restart fault.
		 */
		if (!locked) {
			UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
			return (VM_PAGER_REFAULT);
		}

		/*
		 * verify no one has touched the amap and moved the anon on us.
		 */
		if (ufi != NULL &&
		    amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start) != anon) {

			uvmfault_unlockall(ufi, amap, NULL, anon);
			UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
			return (VM_PAGER_REFAULT);
		}

		/*
		 * try it again!
		 */
		uvmexp.fltanretry++;
		continue;

	} /* while (1) */
	/*NOTREACHED*/
}
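
/*
 * F A U L T   -   m a i n   e n t r y   p o i n t
 */

/*
 * MASK: when the map entry is copy-on-write, mask out VM_PROT_WRITE so
 * that pages mapped straight from the backing object are never entered
 * writable.
 */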
#define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~VM_PROT_WRITE : VM_PROT_ALL)

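/*
 * uvm_fault: page fault handler.
 *
 * => called from machine-dependent code to resolve a page fault
 * => VM data structures must be unlocked by the caller
 * => returns 0 on success, or an errno (EFAULT, EACCES, ENOMEM) on failure
 */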
int
uvm_fault(orig_map, vaddr, fault_type, access_type)
	vm_map_t orig_map;
	vaddr_t vaddr;
	vm_fault_t fault_type;
	vm_prot_t access_type;
{
	struct uvm_faultinfo ufi;
	vm_prot_t enter_prot;
	boolean_t wired, narrow, promote, locked, shadowed;
	int npages, nback, nforw, centeridx, result, lcv, gotpages;
	vaddr_t startva, currva;
	voff_t uoff;
	paddr_t pa;
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
	struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;
	UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=%p, vaddr=0x%lx, ft=%ld, at=%ld)",
	    orig_map, vaddr, fault_type, access_type);

	anon = NULL;
	pg = NULL;

	uvmexp.faults++;

	/*
	 * init the IN parameters in the ufi
	 */
	ufi.orig_map = orig_map;
	ufi.orig_rvaddr = trunc_page(vaddr);
	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */
	if (fault_type == VM_FAULT_WIRE)
		narrow = TRUE;		/* don't look for neighborhood
					 * pages on wire */
	else
		narrow = FALSE;		/* normal fault */

	/*
	 * "goto ReFault" means restart the page fault from ground zero.
	 */
ReFault:

	/*
	 * lookup and lock the maps.
	 */
	if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
		UVMHIST_LOG(maphist, "<- no mapping @ 0x%lx", vaddr, 0,0,0);
		return (EFAULT);
	}
	/* locked: maps(read) */

#ifdef DIAGNOSTIC
	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
		panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)",
		    ufi.map, vaddr);
#endif

	/*
	 * check protection.
	 */
	if ((ufi.entry->protection & access_type) != access_type) {
		UVMHIST_LOG(maphist,
		    "<- protection failure (prot=0x%lx, access=0x%lx)",
		    ufi.entry->protection, access_type, 0, 0);
		uvmfault_unlockmaps(&ufi, FALSE);
		return (EACCES);
	}

	/*
	 * "enter_prot" is the protection we want to enter the page in at.
	 * for certain pages (e.g. copy-on-write pages) this protection can
	 * be more strict than ufi.entry->protection.  "wired" means either
	 * the entry is wired or we are fault-wiring the pg.
	 */
	enter_prot = ufi.entry->protection;
	wired = VM_MAPENT_ISWIRED(ufi.entry) || (fault_type == VM_FAULT_WIRE);
	if (wired)
		access_type = enter_prot;	/* full access for wired */

	/*
	 * handle "needs_copy" case.  if we need to copy the amap we will
	 * have to drop our readlock and relock it with a write lock.  (we
	 * need a write lock to change anything in a map entry [e.g.
	 * needs_copy]).
	 */
	if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
		if ((access_type & VM_PROT_WRITE) ||
		    (ufi.entry->object.uvm_obj == NULL)) {
			/* need to clear */
			UVMHIST_LOG(maphist,
			    " need to clear needs_copy and refault",0,0,0,0);
			uvmfault_unlockmaps(&ufi, FALSE);
			uvmfault_amapcopy(&ufi);
			uvmexp.fltamcopy++;
			goto ReFault;

		} else {
			/*
			 * ensure that we pmap_enter the page R/O since
			 * needs_copy is still true.
			 */
			enter_prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * identify the players.
	 */
	amap = ufi.entry->aref.ar_amap;		/* top layer */
	uobj = ufi.entry->object.uvm_obj;	/* bottom layer */

	/*
	 * check for a case 0 fault.  if nothing backs the entry then
	 * error now.
	 */
	if (amap == NULL && uobj == NULL) {
		uvmfault_unlockmaps(&ufi, FALSE);
		UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
		return (EFAULT);
	}

	/*
	 * establish range of interest based on advice from mapper and then
	 * clip to fit map entry.   note that we only want to do this the
	 * first time through the fault.   if we ReFault we will disable
	 * this by setting "narrow" to true.
	 */
	if (narrow == FALSE) {

		/* wide fault (!narrow) */
		KASSERT(uvmadvice[ufi.entry->advice].advice ==
		    ufi.entry->advice);
		nback = min(uvmadvice[ufi.entry->advice].nback,
		    (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
		startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
		nforw = min(uvmadvice[ufi.entry->advice].nforw,
		    ((ufi.entry->end - ufi.orig_rvaddr) >>
		    PAGE_SHIFT) - 1);
		/*
		 * note: "-1" because we don't want to count the
		 * faulting page as forw.
		 */
		npages = nback + nforw + 1;
		centeridx = nback;

		narrow = TRUE;	/* ensure only once per-fault */

	} else {

		/* narrow fault! */
		nback = nforw = 0;
		startva = ufi.orig_rvaddr;
		npages = 1;
		centeridx = 0;

	}

	/* locked: maps(read) */
	UVMHIST_LOG(maphist, " narrow=%ld, back=%ld, forw=%ld, startva=0x%lx",
	    narrow, nback, nforw, startva);
	UVMHIST_LOG(maphist, " entry=%p, amap=%p, obj=%p", ufi.entry,
	    amap, uobj, 0);

	/*
	 * if we've got an amap, extract the current anons.
	 */
	if (amap) {
		anons = anons_store;
		amap_lookups(&ufi.entry->aref, startva - ufi.entry->start,
		    anons, npages);
	} else {
		anons = NULL;	/* to be safe */
	}

	/*
	 * for MADV_SEQUENTIAL mappings we want to deactivate the back
	 * pages now (as we may no longer need them soon).
	 */
	if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {

		UVMHIST_LOG(maphist, " MADV_SEQUENTIAL: flushing backpages",
		    0,0,0,0);
		/* flush back-page anons? */
		if (amap)
			uvmfault_anonflush(anons, nback);

		/* flush object? */
		if (uobj) {
			uoff = (startva - ufi.entry->start) + ufi.entry->offset;
			simple_lock(&uobj->vmobjlock);
			(void) uobj->pgops->pgo_flush(uobj, uoff, uoff +
			    (nback << PAGE_SHIFT), PGO_DEACTIVATE);
			simple_unlock(&uobj->vmobjlock);
		}

		/* now forget about the backpages */
		if (amap)
			anons += nback;
		startva += (nback << PAGE_SHIFT);
		npages -= nback;
		nback = centeridx = 0;
	}

	/*
	 * map in the backpages and frontpages we found in the amap in hopes
	 * of preventing future faults.    we also init the pages[] array as
	 * we go.
	 */
	currva = startva;
	shadowed = FALSE;
	for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) {

		/*
		 * don't play with VAs that are already mapped
		 * (except for center).
		 */
		if (lcv != centeridx &&
		    pmap_extract(ufi.orig_map->pmap, currva, &pa)) {
			pages[lcv] = PGO_DONTCARE;
			continue;
		}

		/*
		 * unmapped or center page.   check if any anon at this level.
		 */
		if (amap == NULL || anons[lcv] == NULL) {
			pages[lcv] = NULL;
			continue;
		}

		/*
		 * check for present page and map if possible.   re-activate it.
		 */
		pages[lcv] = PGO_DONTCARE;
		if (lcv == centeridx) {		/* save center for later! */
			shadowed = TRUE;
			continue;
		}
		anon = anons[lcv];
		simple_lock(&anon->an_lock);
		/* ignore loaned pages */
		if (anon->an_page && anon->an_page->loan_count == 0 &&
		    (anon->an_page->pg_flags & (PG_RELEASED|PG_BUSY)) == 0) {
			uvm_lock_pageq();
			uvm_pageactivate(anon->an_page);	/* reactivate */
			uvm_unlock_pageq();
			UVMHIST_LOG(maphist,
			    " MAPPING: n anon: pm=%p, va=0x%lx, pg=%p",
			    ufi.orig_map->pmap, currva, anon->an_page, 0);
			uvmexp.fltnamap++;

			/*
			 * Since this isn't the page that's actually faulting,
			 * ignore pmap_enter() failures; it's not critical
			 * that these pages get mapped.
			 */
			(void) pmap_enter(ufi.orig_map->pmap, currva,
			    VM_PAGE_TO_PHYS(anon->an_page),
			    (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
			    enter_prot,
			    PMAP_CANFAIL |
			    (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
		}
		simple_unlock(&anon->an_lock);
		pmap_update(ufi.orig_map->pmap);
	}

	/* locked: maps(read), amap(if there) */

	UVMHIST_LOG(maphist, " shadowed=%ld, will_get=%ld", shadowed,
	    (uobj && shadowed == FALSE),0,0);

	/*
	 * if the desired page is not shadowed by the amap and we have a
	 * backing object, then we check to see if the backing object would
	 * prefer to handle the fault itself (rather than letting us do it
	 * with the usual pgo_get hook).  the backing object signals this by
	 * providing a pgo_fault routine.
	 */
	if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) {
		simple_lock(&uobj->vmobjlock);

		/* locked: maps(read), amap (if there), uobj */
		result = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
		    centeridx, fault_type, access_type,
		    PGO_LOCKED);

		/* locked: nothing, pgo_fault has unlocked everything */

		if (result == VM_PAGER_OK)
			return (0);		/* pgo_fault did the pmap enter */
		else if (result == VM_PAGER_REFAULT)
			goto ReFault;		/* try again! */
		else
			return (EACCES);
	}

	/*
	 * now, if the desired page is not shadowed by the amap and we have
	 * a backing object that does not have a special fault routine, then
	 * we ask (with pgo_get) the object for resident pages that we care
	 * about and attempt to map them in.  we do not let pgo_get block
	 * (PGO_LOCKED).
	 */
	if (uobj && shadowed == FALSE) {
		simple_lock(&uobj->vmobjlock);

		/* locked (!shadowed): maps(read), amap (if there), uobj */
		/*
		 * the following call to pgo_get does _not_ change locking state
		 */
		uvmexp.fltlget++;
		gotpages = npages;
		(void) uobj->pgops->pgo_get(uobj, ufi.entry->offset +
		    (startva - ufi.entry->start),
		    pages, &gotpages, centeridx,
		    access_type & MASK(ufi.entry),
		    ufi.entry->advice, PGO_LOCKED);

		/*
		 * check for pages to map, if we got any.
		 */
		uobjpage = NULL;

		if (gotpages) {
			currva = startva;
			for (lcv = 0 ; lcv < npages ;
			    lcv++, currva += PAGE_SIZE) {

				if (pages[lcv] == NULL ||
				    pages[lcv] == PGO_DONTCARE)
					continue;

				KASSERT((pages[lcv]->pg_flags & PG_RELEASED) == 0);

				/*
				 * if this is the center page, then pgo_get
				 * made it PG_BUSY for us and gave us a
				 * handle to it.   remember this page as
				 * "uobjpage." (for later use).
				 */
				if (lcv == centeridx) {
					uobjpage = pages[lcv];
					UVMHIST_LOG(maphist, " got uobjpage "
					    "(%p) with locked get",
					    uobjpage, 0,0,0);
					continue;
				}

				/*
				 * note: calling pgo_get with locked data
				 * structures returns us pages which are
				 * neither busy nor released, so we don't
				 * need to check for this.   we can just
				 * directly enter the pages.
				 */
				uvm_lock_pageq();
				uvm_pageactivate(pages[lcv]);	/* reactivate */
				uvm_unlock_pageq();
				UVMHIST_LOG(maphist,
				    " MAPPING: n obj: pm=%p, va=0x%lx, pg=%p",
				    ufi.orig_map->pmap, currva, pages[lcv], 0);
				uvmexp.fltnomap++;

				/*
				 * Since this isn't the page that's actually
				 * faulting, ignore pmap_enter() failures;
				 * it's not critical that these pages get
				 * mapped.
				 */
				(void) pmap_enter(ufi.orig_map->pmap, currva,
				    VM_PAGE_TO_PHYS(pages[lcv]),
				    enter_prot & MASK(ufi.entry),
				    PMAP_CANFAIL |
				    (wired ? PMAP_WIRED : 0));

				/*
				 * NOTE: page can't be PG_WANTED because we've
				 * held the lock the whole time we've had
				 * the handle.
				 */
				atomic_clearbits_int(&pages[lcv]->pg_flags,
				    PG_BUSY);
				UVM_PAGE_OWN(pages[lcv], NULL);
			}	/* for "lcv" loop */
			pmap_update(ufi.orig_map->pmap);
		}	/* "gotpages" != 0 */
		/* note: object still _locked_ */
	} else {
		uobjpage = NULL;
	}

	/* locked (shadowed): maps(read), amap */
	/*
	 * locked (!shadowed): maps(read), amap(if there),
	 *	uobj(if !null), uobjpage(if !null)
	 */

	/*
	 * note that at this point we are done with any front or back pages.
	 * we are now going to focus on the center page (i.e. the one we've
	 * faulted on).  if we have faulted on the top (anon) layer
	 * [i.e. case 1], then the anon we want is anons[centeridx].  if we
	 * have faulted on the bottom (uobj) layer [i.e. case 2] and the page
	 * was both present and available, then we've got a pointer to it as
	 * "uobjpage" and we've already made it BUSY.
	 */

	/*
	 * redirect case 2: if we are not shadowed, go to case 2.
	 */
	if (shadowed == FALSE)
		goto Case2;

	/* locked: maps(read), amap */

	/*
	 * handle case 1: fault on an anon in our amap.
	 */
	anon = anons[centeridx];
	UVMHIST_LOG(maphist, " case 1 fault: anon=%p", anon, 0,0,0);
	simple_lock(&anon->an_lock);

	/* locked: maps(read), amap, anon */

	/*
	 * no matter if we have case 1A or case 1B we are going to need to
	 * have the anon's memory resident.   ensure that now.
	 *
	 * let uvmfault_anonget do the dirty work.  if it fails (!OK) it will
	 * unlock everything for us.  if it succeeds, locks are still valid
	 * and locked.  also, if it is OK, then the anon's page is on the
	 * queues.  if the page is on loan from a uvm_object, then anonget
	 * will lock that object for us if it does not fail.
	 */
	result = uvmfault_anonget(&ufi, amap, anon);
	switch (result) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_REFAULT:
		goto ReFault;

	case VM_PAGER_ERROR:
		/*
		 * an error occurred while trying to bring in the page --
		 * this is the only error we return right now.
		 */
		return (EACCES);

	default:
#ifdef DIAGNOSTIC
		panic("uvm_fault: uvmfault_anonget -> %d", result);
#else
		return (EACCES);
#endif
	}

	/*
	 * uobj is non null if the page is on loan from an object (i.e. uobj).
	 */
	uobj = anon->an_page->uobject;	/* locked by anonget if !NULL */

	/* locked: maps(read), amap, anon, uobj(if one) */

	/*
	 * special handling for loaned pages.
	 */
	if (anon->an_page->loan_count) {

		if ((access_type & VM_PROT_WRITE) == 0) {

			/*
			 * for read faults on loaned pages we just cap the
			 * protection at read-only.
			 */
			enter_prot = enter_prot & ~VM_PROT_WRITE;

		} else {
			/*
			 * we can't allow writes to a loaned page unless we
			 * break the loan.   to do that we allocate a new
			 * page, copy the loaned page's data to it, and
			 * disconnect the old page from the loan.   note
			 * that if the anon has more than one reference, the
			 * copy-on-write case below makes the private copy
			 * for us, so we only break the loan here when we
			 * are the sole owner of the anon.
			 */
			if (anon->an_ref == 1) {

				/* get new un-owned replacement page */
				pg = uvm_pagealloc(NULL, 0, NULL, 0);
				if (pg == NULL) {
					uvmfault_unlockall(&ufi, amap, uobj,
					    anon);
					uvm_wait("flt_noram2");
					goto ReFault;
				}

				/*
				 * copy the data, kill the loan, and drop
				 * the uobj lock (if any).
				 */
				/* copy old -> new */
				uvm_pagecopy(anon->an_page, pg);

				/* force reload */
				pmap_page_protect(anon->an_page,
				    VM_PROT_NONE);
				uvm_lock_pageq();	/* KILL loan */
				if (uobj)
					/* if we were loaning */
					anon->an_page->loan_count--;
				anon->an_page->uanon = NULL;
				/* in case we owned */
				atomic_clearbits_int(
				    &anon->an_page->pg_flags, PQ_ANON);
				uvm_pageactivate(pg);
				uvm_unlock_pageq();
				if (uobj) {
					simple_unlock(&uobj->vmobjlock);
					uobj = NULL;
				}

				/* install new page in anon */
				anon->an_page = pg;
				pg->uanon = anon;
				atomic_setbits_int(&pg->pg_flags, PQ_ANON);
				atomic_clearbits_int(&pg->pg_flags,
				    PG_BUSY|PG_FAKE);
				UVM_PAGE_OWN(pg, NULL);

				/* done! */
			}	/* ref == 1 */
		}	/* write fault */
	}	/* loan count */

	/*
	 * if we are case 1B then we will need to allocate a new blank anon
	 * to transfer the data into.   note that we have a lock on anon, so
	 * no one can busy or release the page until we are done.   also note
	 * that the ref count can't drop to zero here because it is > 1 and
	 * we are holding a lock on it.
	 *
	 * if we are out of anon VM we fail, otherwise we wait for RAM.
	 */
	if ((access_type & VM_PROT_WRITE) != 0 && anon->an_ref > 1) {

		UVMHIST_LOG(maphist, " case 1B: COW fault",0,0,0,0);
		uvmexp.flt_acow++;
		oanon = anon;		/* oanon = old */
		anon = uvm_analloc();
		if (anon) {
			pg = uvm_pagealloc(NULL, 0, anon, 0);
		}

		/* check for out of RAM */
		if (anon == NULL || pg == NULL) {
			if (anon)
				uvm_anfree(anon);
			uvmfault_unlockall(&ufi, amap, uobj, oanon);
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
				UVMHIST_LOG(maphist,
				    "<- failed. out of VM",0,0,0,0);
				uvmexp.fltnoanon++;
				return (ENOMEM);
			}

			uvmexp.fltnoram++;
			uvm_wait("flt_noram3");	/* out of RAM, wait for more */
			goto ReFault;
		}

		/* got all resources, set up our new page */
		uvm_pagecopy(oanon->an_page, pg);	/* pg now !PG_CLEAN */
		/* un-busy! new page */
		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
		UVM_PAGE_OWN(pg, NULL);
		amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
		    anon, 1);

		/* deref: can not drop to zero here by defn! */
		oanon->an_ref--;

		/*
		 * note: oanon still locked.   anon is _not_ locked, but we
		 * have the sole reference to it from the amap which _is_
		 * locked.   thus, no one can get at it until we are done
		 * with it.
		 */
	} else {

		uvmexp.flt_anon++;
		oanon = anon;		/* old, locked anon is same as anon */
		pg = anon->an_page;
		if (anon->an_ref > 1)	/* disallow writes to ref > 1 anons */
			enter_prot = enter_prot & ~VM_PROT_WRITE;

	}

	/* locked: maps(read), amap, oanon */

	/*
	 * now map the page in ...
	 */
	UVMHIST_LOG(maphist, " MAPPING: anon: pm=%p, va=0x%lx, pg=%p",
	    ufi.orig_map->pmap, ufi.orig_rvaddr, pg, 0);
	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
	    enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
	    != 0) {
		/*
		 * No need to undo what we did; we can simply think of
		 * this as the pmap throwing away the mapping information.
		 *
		 * We do, however, have to go through the ReFault path,
		 * as the map may change while we're asleep.
		 */
		uvmfault_unlockall(&ufi, amap, uobj, oanon);
		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
		if (uvmexp.swpgonly == uvmexp.swpages) {
			UVMHIST_LOG(maphist,
			    "<- failed. out of VM",0,0,0,0);
			/* XXX instrumentation */
			return (ENOMEM);
		}
		/* XXX instrumentation */
		uvm_wait("flt_pmfail1");
		goto ReFault;
	}

	/*
	 * ... and update the page queues.
	 */
	uvm_lock_pageq();

	if (fault_type == VM_FAULT_WIRE) {
		uvm_pagewire(pg);
		/*
		 * since the now-wired page cannot be paged out, release
		 * its swap resources for others to use.   since an anon
		 * with no swap cannot be PG_CLEAN, clear its clean flag
		 * now.
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_anon_dropswap(anon);
	} else {
		/* activate it */
		uvm_pageactivate(pg);
	}

	uvm_unlock_pageq();

	/*
	 * done case 1!  finish up by unlocking everything and returning
	 * success.
	 */
	uvmfault_unlockall(&ufi, amap, uobj, oanon);
	pmap_update(ufi.orig_map->pmap);
	return (0);


Case2:
	/*
	 * handle case 2: faulting on backing object or zero fill.
	 */

	/*
	 * locked:
	 * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
	 */

	/*
	 * note that uobjpage can not be PGO_DONTCARE at this point.  we now
	 * set uobjpage to PGO_DONTCARE if we are doing a zero fill.  if we
	 * have a backing object, check and see if we are going to promote
	 * the data up to an anon during the fault.
	 */
	if (uobj == NULL) {
		uobjpage = PGO_DONTCARE;
		promote = TRUE;		/* always need anon here */
	} else {
		KASSERT(uobjpage != PGO_DONTCARE);
		promote = (access_type & VM_PROT_WRITE) &&
		    UVM_ET_ISCOPYONWRITE(ufi.entry);
	}
	UVMHIST_LOG(maphist, " case 2 fault: promote=%ld, zfill=%ld",
	    promote, (uobj == NULL), 0,0);

	/*
	 * if uobjpage is not null then we do not need to do I/O to get the
	 * uobjpage.
	 *
	 * if uobjpage is null, then we need to unlock and ask the pager to
	 * get the data for us.   once we have the data, we need to reverify
	 * the state of the world before using it.
	 */
	if (uobjpage) {
		/* update rusage counters */
		curproc->p_addr->u_stats.p_ru.ru_minflt++;
	} else {
		/* update rusage counters */
		curproc->p_addr->u_stats.p_ru.ru_majflt++;

		/* locked: maps(read), amap(if there), uobj */
		uvmfault_unlockall(&ufi, amap, NULL, NULL);
		/* locked: uobj */

		uvmexp.fltget++;
		gotpages = 1;
		uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset;
		result = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages,
		    0, access_type & MASK(ufi.entry), ufi.entry->advice,
		    PGO_SYNCIO);

		/* locked: uobjpage(if result OK) */

		/*
		 * recover from I/O
		 */
		if (result != VM_PAGER_OK) {
			KASSERT(result != VM_PAGER_PEND);

			if (result == VM_PAGER_AGAIN) {
				UVMHIST_LOG(maphist,
				    " pgo_get says TRY AGAIN!",0,0,0,0);
				tsleep((caddr_t)&lbolt, PVM, "fltagain2", 0);
				goto ReFault;
			}

			UVMHIST_LOG(maphist, "<- pgo_get failed (code %ld)",
			    result, 0,0,0);
			return (EACCES);
		}

		/* locked: uobjpage */

		/*
		 * re-verify the state of the world by first trying to
		 * relock the maps.  always relock the object.
		 */
		locked = uvmfault_relock(&ufi);
		simple_lock(&uobj->vmobjlock);

		/* locked(locked): maps(read), amap(if !null), uobj, uobjpage */
		/* locked(!locked): uobj, uobjpage */

		/*
		 * verify that the page has not been released and re-verify
		 * that the amap slot is still free.   if there is a problem,
		 * we unlock and clean up.
		 */
		if ((uobjpage->pg_flags & PG_RELEASED) != 0 ||
		    (locked && amap &&
		    amap_lookup(&ufi.entry->aref,
		    ufi.orig_rvaddr - ufi.entry->start))) {
			if (locked)
				uvmfault_unlockall(&ufi, amap, NULL, NULL);
			locked = FALSE;
		}

		/*
		 * didn't get the lock?   release the page and retry.
		 */
		if (locked == FALSE) {

			UVMHIST_LOG(maphist,
			    " wasn't able to relock after fault: retry",
			    0,0,0,0);
			if (uobjpage->pg_flags & PG_WANTED)
				/* still holding object lock */
				wakeup(uobjpage);

			if (uobjpage->pg_flags & PG_RELEASED) {
				uvmexp.fltpgrele++;
				KASSERT(uobj->pgops->pgo_releasepg != NULL);

				/* frees page */
				if (uobj->pgops->pgo_releasepg(uobjpage,NULL))
					/* unlock if still alive */
					simple_unlock(&uobj->vmobjlock);
				goto ReFault;
			}

			uvm_lock_pageq();
			/* make sure it is in queues */
			uvm_pageactivate(uobjpage);

			uvm_unlock_pageq();
			atomic_clearbits_int(&uobjpage->pg_flags,
			    PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(uobjpage, NULL);
			simple_unlock(&uobj->vmobjlock);
			goto ReFault;

		}

		/*
		 * we have the data in uobjpage which is PG_BUSY and we are
		 * holding the object lock (so the page can't be released
		 * on us).
		 */

		/* locked: maps(read), amap(if !null), uobj, uobjpage */
	}

	/*
	 * locked:
	 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
	 *
	 * notes:
	 *  - at this point uobjpage can not be NULL
	 *  - at this point uobjpage can not be PG_RELEASED (since we checked
	 *    for it above)
	 *  - at this point uobjpage could be PG_WANTED (handle later)
	 */
	if (promote == FALSE) {

		/*
		 * we are not promoting.   if the mapping is COW ensure that
		 * we don't give more access than we should (e.g. when doing
		 * a read fault on a COPYONWRITE mapping we want to map the
		 * COW page in R/O even though the entry protection could be
		 * more than R/O).
		 */
		uvmexp.flt_obj++;
		if (UVM_ET_ISCOPYONWRITE(ufi.entry))
			enter_prot &= ~VM_PROT_WRITE;
		pg = uobjpage;		/* map in the actual object */

		/* assert(uobjpage != PGO_DONTCARE) */

		/*
		 * we are faulting directly on the page.   be careful
		 * about writing to loaned pages...
		 */
		if (uobjpage->loan_count) {

			if ((access_type & VM_PROT_WRITE) == 0) {
				/* read fault: cap the protection at readonly */
				enter_prot = enter_prot & ~VM_PROT_WRITE;
			} else {
				/* write fault: must break the loan here */

				/* alloc new un-owned page */
				pg = uvm_pagealloc(NULL, 0, NULL, 0);

				if (pg == NULL) {
					/*
					 * drop ownership of page, it can't
					 * be released
					 */
					if (uobjpage->pg_flags & PG_WANTED)
						wakeup(uobjpage);
					atomic_clearbits_int(
					    &uobjpage->pg_flags,
					    PG_BUSY|PG_WANTED);
					UVM_PAGE_OWN(uobjpage, NULL);

					uvm_lock_pageq();
					/* activate: we will need it later */
					uvm_pageactivate(uobjpage);

					uvm_unlock_pageq();
					uvmfault_unlockall(&ufi, amap, uobj,
					    NULL);
					UVMHIST_LOG(maphist,
					    " out of RAM breaking loan, waiting",
					    0,0,0,0);
					uvmexp.fltnoram++;
					uvm_wait("flt_noram4");
					goto ReFault;
				}

				/*
				 * copy the data from the old page to the new
				 * one and clear the fake/clean flags on the
				 * new page (keep it busy).  force a reload
				 * of the old page by clearing it from all
				 * pmaps.  then lock the page queues to
				 * rename the pages.
				 */
				uvm_pagecopy(uobjpage, pg);	/* old -> new */
				atomic_clearbits_int(&pg->pg_flags,
				    PG_FAKE|PG_CLEAN);
				pmap_page_protect(uobjpage, VM_PROT_NONE);
				if (uobjpage->pg_flags & PG_WANTED)
					wakeup(uobjpage);
				/* uobj still locked */
				atomic_clearbits_int(&uobjpage->pg_flags,
				    PG_BUSY|PG_WANTED);
				UVM_PAGE_OWN(uobjpage, NULL);

				uvm_lock_pageq();
				uoff = uobjpage->offset;
				/* remove old page */
				uvm_pagerealloc(uobjpage, NULL, 0);

				/*
				 * at this point we have absolutely no
				 * control over uobjpage
				 */
				/* install new page */
				uvm_pagerealloc(pg, uobj, uoff);
				uvm_unlock_pageq();

				/*
				 * done!  loan is broken and "pg" is
				 * PG_BUSY.   it can now replace uobjpage.
				 */
				uobjpage = pg;

			}	/* write fault */
		}	/* if loan_count */

	} else {

		/*
		 * if we are going to promote the data to an anon we need
		 * to allocate a blank anon to hold it.
		 */
#ifdef DIAGNOSTIC
		if (amap == NULL)
			panic("uvm_fault: want to promote data, but no anon");
#endif

		anon = uvm_analloc();
		if (anon) {
			/*
			 * if this is a zero-fill fault, ask for a zeroed
			 * page up front so there is nothing to copy below.
			 */
			pg = uvm_pagealloc(NULL, 0, anon,
			    (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
		}

		/*
		 * out of memory resources?
		 */
		if (anon == NULL || pg == NULL) {
			/*
			 * arg!  must unbusy our page and fail or sleep.
			 */
			if (uobjpage != PGO_DONTCARE) {
				if (uobjpage->pg_flags & PG_WANTED)
					/* still holding object lock */
					wakeup(uobjpage);

				uvm_lock_pageq();
				uvm_pageactivate(uobjpage);
				uvm_unlock_pageq();
				atomic_clearbits_int(&uobjpage->pg_flags,
				    PG_BUSY|PG_WANTED);
				UVM_PAGE_OWN(uobjpage, NULL);
			}

			/* unlock and fail ... */
			uvmfault_unlockall(&ufi, amap, uobj, NULL);
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
				UVMHIST_LOG(maphist, " promote: out of VM",
				    0,0,0,0);
				uvmexp.fltnoanon++;
				return (ENOMEM);
			}

			UVMHIST_LOG(maphist, " out of RAM, waiting for more",
			    0,0,0,0);
			uvm_anfree(anon);
			uvmexp.fltnoram++;
			uvm_wait("flt_noram5");
			goto ReFault;
		}

		/*
		 * fill in the data.
		 */
		if (uobjpage != PGO_DONTCARE) {
			uvmexp.flt_prcopy++;
			/* copy page [pg now dirty] */
			uvm_pagecopy(uobjpage, pg);

			/*
			 * promote to shared amap?  make sure all sharing
			 * procs see it.
			 */
			if ((amap_flags(amap) & AMAP_SHARED) != 0) {
				pmap_page_protect(uobjpage, VM_PROT_NONE);
			}

			/*
			 * dispose of uobjpage.  it can't be PG_RELEASED
			 * since we still hold the object lock.   drop
			 * handle to uobj as well.
			 */
			if (uobjpage->pg_flags & PG_WANTED)
				/* still have the obj lock */
				wakeup(uobjpage);
			atomic_clearbits_int(&uobjpage->pg_flags,
			    PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(uobjpage, NULL);
			uvm_lock_pageq();
			uvm_pageactivate(uobjpage);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			uobj = NULL;

			UVMHIST_LOG(maphist,
			    " promote uobjpage %p to anon/page %p/%p",
			    uobjpage, anon, pg, 0);

		} else {
			uvmexp.flt_przero++;
			/*
			 * page was zeroed by uvm_pagealloc() above
			 * (UVM_PGA_ZERO), so there is nothing to copy.
			 */
			UVMHIST_LOG(maphist," zero fill anon/page %p/%p",
			    anon, pg, 0, 0);
		}

		amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
		    anon, 0);
	}

	/*
	 * locked:
	 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj),
	 *   anon(if !null), pg(if anon)
	 *
	 * note: pg is either the uobjpage or the new page in the new anon.
	 */

	/*
	 * all resources are present.   we can now map it in and free our
	 * resources.
	 */
	UVMHIST_LOG(maphist,
	    " MAPPING: case2: pm=%p, va=0x%lx, pg=%p, promote=%ld",
	    ufi.orig_map->pmap, ufi.orig_rvaddr, pg, promote);
	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
	    enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
	    != 0) {

		/*
		 * No need to undo what we did; we can simply think of
		 * this as the pmap throwing away the mapping information.
		 *
		 * We do, however, have to go through the ReFault path,
		 * as the map may change while we're asleep.
		 */
		if (pg->pg_flags & PG_WANTED)
			wakeup(pg);		/* lock still held */

		/*
		 * note that pg can't be PG_RELEASED since we did not drop
		 * the object lock since the last time we checked.
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		uvmfault_unlockall(&ufi, amap, uobj, NULL);
		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
		if (uvmexp.swpgonly == uvmexp.swpages) {
			UVMHIST_LOG(maphist,
			    "<- failed. out of VM",0,0,0,0);
			/* XXX instrumentation */
			return (ENOMEM);
		}
		/* XXX instrumentation */
		uvm_wait("flt_pmfail2");
		goto ReFault;
	}

	uvm_lock_pageq();

	if (fault_type == VM_FAULT_WIRE) {
		uvm_pagewire(pg);
		if (pg->pg_flags & PQ_AOBJ) {

			/*
			 * since the now-wired page cannot be paged out,
			 * release its swap resources for others to use.
			 * since an aobj page with no swap cannot be
			 * PG_CLEAN, clear its clean flag now.
			 */
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
		}
	} else {
		/* activate it */
		uvm_pageactivate(pg);
	}
	uvm_unlock_pageq();

	if (pg->pg_flags & PG_WANTED)
		wakeup(pg);		/* lock still held */

	/*
	 * note that pg can't be PG_RELEASED since we did not drop the object
	 * lock since the last time we checked.
	 */
	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);
	uvmfault_unlockall(&ufi, amap, uobj, NULL);
	pmap_update(ufi.orig_map->pmap);

	UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
	return (0);
}
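
/*
 * uvm_fault_wire: wire down a range of virtual addresses in a map by
 * faulting each page in with VM_FAULT_WIRE.
 */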
int
uvm_fault_wire(map, start, end, access_type)
	vm_map_t map;
	vaddr_t start, end;
	vm_prot_t access_type;
{
	vaddr_t va;
	pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);

	/*
	 * fault the region in one page at a time.   if any fault fails,
	 * we have to undo what we have done so far.
	 */
	for (va = start ; va < end ; va += PAGE_SIZE) {
		rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
		if (rv) {
			if (va != start) {
				uvm_fault_unwire(map, start, va);
			}
			return (rv);
		}
	}

	return (0);
}
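
/*
 * uvm_fault_unwire: unwire a range of virtual space.
 */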
void
uvm_fault_unwire(map, start, end)
	vm_map_t map;
	vaddr_t start, end;
{
	vm_map_lock_read(map);
	uvm_fault_unwire_locked(map, start, end);
	vm_map_unlock_read(map);
}
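
/*
 * uvm_fault_unwire_locked: the guts of uvm_fault_unwire().
 *
 * => map must be at least read-locked.
 */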
void
uvm_fault_unwire_locked(map, start, end)
	vm_map_t map;
	vaddr_t start, end;
{
	vm_map_entry_t entry;
	pmap_t pmap = vm_map_pmap(map);
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	/*
	 * we assume that the area we are unwiring has actually been wired
	 * in the first place.   this means that we should be able to
	 * extract the PAs from the pmap.   we also lock out the page
	 * daemon so that we can call uvm_pageunwire.
	 */
	uvm_lock_pageq();

	/*
	 * find the beginning map entry for the region.
	 */
	KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
	if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
		panic("uvm_fault_unwire_locked: address not in map");

	for (va = start; va < end ; va += PAGE_SIZE) {
		if (pmap_extract(pmap, va, &pa) == FALSE)
			continue;

		/*
		 * find the map entry for the current address.
		 */
		KASSERT(va >= entry->start);
		while (va >= entry->end) {
			KASSERT(entry->next != &map->header &&
			    entry->next->start <= entry->end);
			entry = entry->next;
		}

		/*
		 * if the entry is no longer wired, tell the pmap.
		 */
		if (VM_MAPENT_ISWIRED(entry) == 0)
			pmap_unwire(pmap, va);

		pg = PHYS_TO_VM_PAGE(pa);
		if (pg)
			uvm_pageunwire(pg);
	}

	uvm_unlock_pageq();
}