This source file includes the following definitions.
- LIST_HEAD
- amap_list_insert
- amap_list_remove
- pp_getreflen
- pp_setreflen
- amap_init
- amap_alloc1
- amap_alloc
- amap_free
- amap_extend
- amap_share_protect
- amap_wipeout
- amap_copy
- amap_cow_now
- amap_splitref
- amap_pp_establish
- amap_pp_adjref
- amap_wiperange
- amap_swap_off
/*
 * uvm_amap.c: amap operations
 *
 * this file contains functions that perform operations on amaps.  see
 * uvm_amap.h for a brief explanation of the role of amaps in uvm.
 */

#undef UVM_AMAP_INLINE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#define UVM_AMAP_C
#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * pool for allocation of vm_amap structures
 */
struct pool uvm_amap_pool;

/* list of all amaps in the system, used by amap_swap_off() */
LIST_HEAD(, vm_amap) amap_list;

/*
 * local functions
 */
static struct vm_amap *amap_alloc1(int, int, int);
static __inline void amap_list_insert(struct vm_amap *);
static __inline void amap_list_remove(struct vm_amap *);

static __inline void
amap_list_insert(struct vm_amap *amap)
{
	LIST_INSERT_HEAD(&amap_list, amap, am_list);
}

static __inline void
amap_list_remove(struct vm_amap *amap)
{
	LIST_REMOVE(amap, am_list);
}

#ifdef UVM_AMAP_PPREF

/*
 * what is ppref?   ppref is an _optional_ amap feature which is used
 * to keep track of reference counts on a per-page basis.  it is enabled
 * when UVM_AMAP_PPREF is defined.
 *
 * when enabled, an array of ints is allocated for the pprefs.  this
 * array is allocated only when a partial reference is added to the
 * map (either by unmapping part of the amap, or gaining a reference
 * to only a part of an amap).  if the allocation of the array fails
 * (M_NOWAIT), then we set the array pointer to PPREF_NONE to indicate
 * that we tried to do ppref's but couldn't alloc the array so just
 * give up (after all, this is an optional feature!).
 *
 * the array is divided into "chunks."   for chunks of length 1, the
 * chunk reference count plus one is stored in that chunk's slot.
 * for chunks of length > 1 the first slot contains (the reference
 * count plus one) * -1.    [the negative value indicates that the
 * length is greater than one.]   the second slot of the chunk contains
 * the length of the chunk.
 */

static __inline void pp_getreflen(int *, int, int *, int *);
static __inline void pp_setreflen(int *, int, int, int);

/*
 * pp_getreflen: get the reference and length for a specific offset
 */
static __inline void
pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
{

	if (ppref[offset] > 0) {		/* chunk size must be 1 */
		*refp = ppref[offset] - 1;	/* don't forget to adjust */
		*lenp = 1;
	} else {
		*refp = (ppref[offset] * -1) - 1;
		*lenp = ppref[offset+1];
	}
}

/*
 * pp_setreflen: set the reference and length for a specific offset
 */
static __inline void
pp_setreflen(int *ppref, int offset, int ref, int len)
{
	if (len == 1) {
		ppref[offset] = ref + 1;
	} else {
		ppref[offset] = (ref + 1) * -1;
		ppref[offset+1] = len;
	}
}
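
/*
 * worked example of the ppref encoding (illustrative, not from the
 * original source): a 16-slot amap where slots 0-2 have ref 5, slots
 * 3-9 have ref 1, slot 10 has ref 0, and slots 11-15 have ref 2 is
 * stored as:
 *
 *	offset:   0   1   2   3   4  ...  10   11   12  ...  15
 *	ppref:   -6   3   x  -2   7   x    1   -3    5   x
 *
 * ("x" slots are don't-cares covered by the length in the chunk's
 * second slot.)  decoding with pp_getreflen(ppref, 0, &ref, &len)
 * yields ref = 5, len = 3, exactly what pp_setreflen(ppref, 0, 5, 3)
 * stored: -6 decodes as (-6 * -1) - 1 = 5.
 */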
#endif

/*
 * amap_init: called at boot time to init global amap data structures
 */
void
amap_init(void)
{
	/*
	 * Initialize the vm_amap pool.
	 */
	pool_init(&uvm_amap_pool, sizeof(struct vm_amap), 0, 0, 0,
	    "amappl", &pool_allocator_nointr);
	pool_sethiwat(&uvm_amap_pool, 4096);
}

/*
 * amap_alloc1: internal function that allocates an amap, but does not
 *	init the overlay.
 */
static inline struct vm_amap *
amap_alloc1(int slots, int padslots, int waitf)
{
	struct vm_amap *amap;
	int totalslots;

	amap = pool_get(&uvm_amap_pool, (waitf == M_WAITOK) ? PR_WAITOK : 0);
	if (amap == NULL)
		return (NULL);

	/*
	 * round the allocation up to the next malloc bucket boundary
	 * so the slack becomes usable padding slots.
	 */
	totalslots = malloc_roundup((slots + padslots) * sizeof(int)) /
	    sizeof(int);
	amap->am_ref = 1;
	amap->am_flags = 0;
#ifdef UVM_AMAP_PPREF
	amap->am_ppref = NULL;
#endif
	amap->am_maxslot = totalslots;
	amap->am_nslot = slots;
	amap->am_nused = 0;

	amap->am_slots = malloc(totalslots * sizeof(int), M_UVMAMAP, waitf);
	if (amap->am_slots == NULL)
		goto fail1;

	amap->am_bckptr = malloc(totalslots * sizeof(int), M_UVMAMAP, waitf);
	if (amap->am_bckptr == NULL)
		goto fail2;

	amap->am_anon = malloc(totalslots * sizeof(struct vm_anon *),
	    M_UVMAMAP, waitf);
	if (amap->am_anon == NULL)
		goto fail3;

	return (amap);

fail3:
	free(amap->am_bckptr, M_UVMAMAP);
fail2:
	free(amap->am_slots, M_UVMAMAP);
fail1:
	pool_put(&uvm_amap_pool, amap);
	return (NULL);
}
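
/*
 * note on the three bookkeeping arrays allocated above (summary
 * inferred from their use in this file, e.g. amap_wiperange() and
 * amap_copy()):
 *
 *	am_anon[slot]	pointer to the anon at that slot (or NULL)
 *	am_slots[i]	dense array of the slot numbers currently in use
 *	am_bckptr[slot]	back pointer: index of "slot" within am_slots
 *
 * for every in-use slot the invariant
 *	am_slots[am_bckptr[slot]] == slot
 * holds, which lets a slot be removed in O(1) by moving the last
 * am_slots[] entry into its place (see amap_wiperange()).
 */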

/*
 * amap_alloc: allocate an amap to manage "sz" bytes of anonymous VM
 *
 * => caller should ensure sz is a multiple of PAGE_SIZE
 * => reference count to new amap is set to one
 */
struct vm_amap *
amap_alloc(vaddr_t sz, vaddr_t padsz, int waitf)
{
	struct vm_amap *amap;
	int slots, padslots;
	UVMHIST_FUNC("amap_alloc"); UVMHIST_CALLED(maphist);

	AMAP_B2SLOT(slots, sz);		/* load slots */
	AMAP_B2SLOT(padslots, padsz);

	amap = amap_alloc1(slots, padslots, waitf);
	if (amap) {
		memset(amap->am_anon, 0,
		    amap->am_maxslot * sizeof(struct vm_anon *));
		amap_list_insert(amap);
	}

	UVMHIST_LOG(maphist,"<- done, amap = %p, sz=%lu", amap, sz, 0, 0);
	return (amap);
}

/*
 * amap_free: free an amap
 *
 * => the amap should have a zero reference count and be empty
 */
void
amap_free(struct vm_amap *amap)
{
	UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);

	KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
	KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);

	free(amap->am_slots, M_UVMAMAP);
	free(amap->am_bckptr, M_UVMAMAP);
	free(amap->am_anon, M_UVMAMAP);
#ifdef UVM_AMAP_PPREF
	if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
		free(amap->am_ppref, M_UVMAMAP);
#endif
	pool_put(&uvm_amap_pool, amap);

	UVMHIST_LOG(maphist,"<- done, freed amap = %p", amap, 0, 0, 0);
}

/*
 * amap_extend: extend the size of an amap (forwards)
 *
 * => called from uvm_map when we want to extend an amap to cover
 *    a new mapping (rather than allocate a new one)
 * => to safely extend an amap it should have a reference count of
 *    one (thus it can't be shared)
 */
int
amap_extend(struct vm_map_entry *entry, vsize_t addsize)
{
	struct vm_amap *amap = entry->aref.ar_amap;
	int slotoff = entry->aref.ar_pageoff;
	int slotmapped, slotadd, slotneed, slotalloc;
#ifdef UVM_AMAP_PPREF
	int *newppref, *oldppref;
#endif
	u_int *newsl, *newbck, *oldsl, *oldbck;
	struct vm_anon **newover, **oldover;
	int slotadded;
	UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (entry=%p, addsize=%lu)", entry, addsize, 0, 0);

	/*
	 * first, determine how many slots we need in the amap.  don't
	 * forget that ar_pageoff could be non-zero: this means that
	 * there are some unused slots before us in the amap.
	 */
	AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
	AMAP_B2SLOT(slotadd, addsize);			/* slots to add */
	slotneed = slotoff + slotmapped + slotadd;

	/*
	 * case 1: we already have enough slots in the map and thus
	 * only need to bump the reference counts on the slots we are
	 * adding.
	 */
	if (amap->am_nslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
		if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
			amap_pp_adjref(amap, slotoff + slotmapped, slotadd, 1);
		}
#endif
		UVMHIST_LOG(maphist,"<- done (case 1), amap = %p, slotneed=%ld",
		    amap, slotneed, 0, 0);
		return (0);
	}

	/*
	 * case 2: we pre-allocated slots for use and we just need to
	 * bump nslot up to take account for these slots.
	 */
	if (amap->am_maxslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
		if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
			if ((slotoff + slotmapped) < amap->am_nslot)
				amap_pp_adjref(amap, slotoff + slotmapped,
				    (amap->am_nslot - (slotoff + slotmapped)),
				    1);
			pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
			    slotneed - amap->am_nslot);
		}
#endif
		amap->am_nslot = slotneed;

		/*
		 * no need to zero am_anon since that was done at
		 * alloc time and we never shrink an allocation.
		 */
		UVMHIST_LOG(maphist,"<- done (case 2), amap = %p, slotneed=%ld",
		    amap, slotneed, 0, 0);
		return (0);
	}

	/*
	 * case 3: we need to allocate new arrays and copy all the amap
	 * data over from the old amap to the new one.
	 */
	slotalloc = malloc_roundup(slotneed * sizeof(int)) / sizeof(int);
#ifdef UVM_AMAP_PPREF
	newppref = NULL;
	if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
		newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP,
		    M_WAITOK | M_CANFAIL);
		if (newppref == NULL) {
			/* give up on ppref if the allocation fails */
			free(amap->am_ppref, M_UVMAMAP);
			amap->am_ppref = PPREF_NONE;
		}
	}
#endif
	newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP,
	    M_WAITOK | M_CANFAIL);
	newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP,
	    M_WAITOK | M_CANFAIL);
	newover = malloc(slotalloc * sizeof(struct vm_anon *), M_UVMAMAP,
	    M_WAITOK | M_CANFAIL);
	if (newsl == NULL || newbck == NULL || newover == NULL) {
		if (newsl != NULL) {
			free(newsl, M_UVMAMAP);
		}
		if (newbck != NULL) {
			free(newbck, M_UVMAMAP);
		}
		if (newover != NULL) {
			free(newover, M_UVMAMAP);
		}
		return (ENOMEM);
	}
	KASSERT(amap->am_maxslot < slotneed);

	/*
	 * now copy everything over to the new allocated areas
	 */
	slotadded = slotalloc - amap->am_nslot;

	/* do am_slots */
	oldsl = amap->am_slots;
	memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
	amap->am_slots = newsl;

	/* do am_anon */
	oldover = amap->am_anon;
	memcpy(newover, oldover, sizeof(struct vm_anon *) * amap->am_nslot);
	memset(newover + amap->am_nslot, 0, sizeof(struct vm_anon *) *
	    slotadded);
	amap->am_anon = newover;

	/* do am_bckptr */
	oldbck = amap->am_bckptr;
	memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
	memset(newbck + amap->am_nslot, 0, sizeof(int) * slotadded);
	amap->am_bckptr = newbck;

#ifdef UVM_AMAP_PPREF
	/* do ppref */
	oldppref = amap->am_ppref;
	if (newppref) {
		memcpy(newppref, oldppref, sizeof(int) * amap->am_nslot);
		memset(newppref + amap->am_nslot, 0, sizeof(int) * slotadded);
		amap->am_ppref = newppref;
		if ((slotoff + slotmapped) < amap->am_nslot)
			amap_pp_adjref(amap, slotoff + slotmapped,
			    (amap->am_nslot - (slotoff + slotmapped)), 1);
		pp_setreflen(newppref, amap->am_nslot, 1,
		    slotneed - amap->am_nslot);
	}
#endif

	/* update master values */
	amap->am_nslot = slotneed;
	amap->am_maxslot = slotalloc;

	/* and free the old arrays */
	free(oldsl, M_UVMAMAP);
	free(oldbck, M_UVMAMAP);
	free(oldover, M_UVMAMAP);
#ifdef UVM_AMAP_PPREF
	if (oldppref && oldppref != PPREF_NONE)
		free(oldppref, M_UVMAMAP);
#endif
	UVMHIST_LOG(maphist,"<- done (case 3), amap = %p, slotneed=%ld",
	    amap, slotneed, 0, 0);
	return (0);
}
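
/*
 * worked example of the three amap_extend() cases (illustrative, not
 * from the original source): take an entry with ar_pageoff = 0 that
 * currently maps 4 slots (slotmapped = 4) and is being extended by 2
 * slots (slotadd = 2), so slotneed = 6.
 *
 *	am_nslot = 8, am_maxslot = 8	-> case 1: enough slots already
 *					   exist; only ppref counts bumped
 *	am_nslot = 4, am_maxslot = 8	-> case 2: padding slots from
 *					   malloc_roundup() are put to use
 *					   by raising am_nslot to 6
 *	am_nslot = 4, am_maxslot = 4	-> case 3: all three arrays (and
 *					   ppref) are reallocated and copied
 */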

/*
 * amap_share_protect: change protection of anons in a shared amap
 *
 * for shared amaps, given the current data structure layout, it is
 * not possible for us to directly locate all maps referencing the
 * shared anon (to change the protection).  in order to protect data
 * in shared maps we use pmap_page_protect() on each page of each
 * anon in the range, which changes the protection of all of the
 * page's mappings.  we walk whichever is shorter: the slot range
 * covered by the entry, or the list of in-use slots.
 *
 * => entry's map and amap must be locked by the caller
 */
void
amap_share_protect(struct vm_map_entry *entry, vm_prot_t prot)
{
	struct vm_amap *amap = entry->aref.ar_amap;
	int slots, lcv, slot, stop;

	AMAP_B2SLOT(slots, (entry->end - entry->start));
	stop = entry->aref.ar_pageoff + slots;

	if (slots < amap->am_nused) {
		/* cheaper to traverse am_anon */
		for (lcv = entry->aref.ar_pageoff ; lcv < stop ; lcv++) {
			if (amap->am_anon[lcv] == NULL)
				continue;
			if (amap->am_anon[lcv]->an_page != NULL)
				pmap_page_protect(amap->am_anon[lcv]->an_page,
				    prot);
		}
		return;
	}

	/* cheaper to traverse am_slots */
	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
		slot = amap->am_slots[lcv];
		if (slot < entry->aref.ar_pageoff || slot >= stop)
			continue;
		if (amap->am_anon[slot]->an_page != NULL)
			pmap_page_protect(amap->am_anon[slot]->an_page, prot);
	}
	return;
}

/*
 * amap_wipeout: wipeout all anons in an amap; then free the amap!
 *
 * => called when the final reference to an amap is dropped;
 *	no one else should be using the amap after this
 */
void
amap_wipeout(struct vm_amap *amap)
{
	int lcv, slot;
	struct vm_anon *anon;
	UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(amap=%p)", amap, 0,0,0);

	KASSERT(amap->am_ref == 0);

	if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
		/*
		 * amap_swap_off() will call us again.
		 */
		return;
	}
	amap_list_remove(amap);

	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
		int refs;

		slot = amap->am_slots[lcv];
		anon = amap->am_anon[slot];

		if (anon == NULL || anon->an_ref == 0)
			panic("amap_wipeout: corrupt amap");

		simple_lock(&anon->an_lock);

		UVMHIST_LOG(maphist," processing anon %p, ref=%ld", anon,
		    anon->an_ref, 0, 0);

		refs = --anon->an_ref;
		simple_unlock(&anon->an_lock);
		if (refs == 0) {
			/*
			 * we had the last reference to a vm_anon. free it.
			 */
			uvm_anfree(anon);
		}
	}

	/*
	 * now we free the map
	 */
	amap->am_ref = 0;
	amap->am_nused = 0;
	amap_free(amap);
	UVMHIST_LOG(maphist,"<- done!", 0,0,0,0);
}

/*
 * amap_copy: ensure that a map entry's "needs_copy" flag is false
 *	by copying the amap if necessary.
 *
 * => an entry with a null amap pointer will get a new (blank) one.
 * => the map that the map entry belongs to must be locked by caller.
 * => the amap currently attached to "entry" (if any) must be unlocked.
 * => if canchunk is true, then we may clip the entry into a chunk
 * => "startva" and "endva" are used only if canchunk is true.  they are
 *	used to limit chunking (e.g. if you have a large space that you
 *	know you are going to need to allocate amaps for, there is no
 *	point in allowing that to be chunked)
 */
void
amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
    boolean_t canchunk, vaddr_t startva, vaddr_t endva)
{
	struct vm_amap *amap, *srcamap;
	int slots, lcv;
	vaddr_t chunksize;
	UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, " (map=%p, entry=%p, waitf=%ld)",
	    map, entry, waitf, 0);

	/*
	 * is there a map to copy?   if not, create one from scratch.
	 */
	if (entry->aref.ar_amap == NULL) {
		/*
		 * check to see if we have a large amap that we can
		 * chunk.  we align startva/endva to chunk-sized
		 * boundaries and then clip to them.
		 */
		if (canchunk && atop(entry->end - entry->start) >=
		    UVM_AMAP_LARGE) {
			/* convert slots to bytes */
			chunksize = UVM_AMAP_CHUNK << PAGE_SHIFT;
			startva = (startva / chunksize) * chunksize;
			endva = roundup(endva, chunksize);
			UVMHIST_LOG(maphist, " chunk amap ==> clip "
			    "0x%lx->0x%lx to 0x%lx->0x%lx",
			    entry->start, entry->end, startva, endva);
			UVM_MAP_CLIP_START(map, entry, startva);
			/* watch out for endva wrap-around! */
			if (endva >= startva)
				UVM_MAP_CLIP_END(map, entry, endva);
		}

		UVMHIST_LOG(maphist, "<- done [creating new amap 0x%lx->0x%lx]",
		    entry->start, entry->end, 0, 0);
		entry->aref.ar_pageoff = 0;
		entry->aref.ar_amap = amap_alloc(entry->end - entry->start, 0,
		    waitf);
		if (entry->aref.ar_amap != NULL)
			entry->etype &= ~UVM_ET_NEEDSCOPY;
		return;
	}

	/*
	 * first check and see if we are the only map entry referencing
	 * the amap we currently have.  if so, then we can just take it
	 * over rather than copying it.  the reference count can only be
	 * one if we have the only reference to the amap.
	 */
	if (entry->aref.ar_amap->am_ref == 1) {
		entry->etype &= ~UVM_ET_NEEDSCOPY;
		UVMHIST_LOG(maphist, "<- done [ref cnt = 1, took it over]",
		    0, 0, 0, 0);
		return;
	}

	/*
	 * looks like we need to copy the map.
	 */
	UVMHIST_LOG(maphist," amap=%p, ref=%ld, must copy it",
	    entry->aref.ar_amap, entry->aref.ar_amap->am_ref, 0, 0);
	AMAP_B2SLOT(slots, entry->end - entry->start);
	amap = amap_alloc1(slots, 0, waitf);
	if (amap == NULL) {
		UVMHIST_LOG(maphist, " amap_alloc1 failed", 0,0,0,0);
		return;
	}
	srcamap = entry->aref.ar_amap;

	/*
	 * need to double check the reference count now.  it could have
	 * changed while we were in malloc.  if the count dropped down
	 * to one we take over the existing map rather than copying the
	 * amap we just allocated.
	 */
	if (srcamap->am_ref == 1) {		/* take it over? */
		entry->etype &= ~UVM_ET_NEEDSCOPY;
		amap->am_ref--;		/* drop final reference to map */
		amap_free(amap);	/* dispose of new (unused) amap */
		return;
	}

	/*
	 * we must copy it now.
	 */
	UVMHIST_LOG(maphist, " copying amap now",0, 0, 0, 0);
	for (lcv = 0 ; lcv < slots; lcv++) {
		amap->am_anon[lcv] =
		    srcamap->am_anon[entry->aref.ar_pageoff + lcv];
		if (amap->am_anon[lcv] == NULL)
			continue;
		simple_lock(&amap->am_anon[lcv]->an_lock);
		amap->am_anon[lcv]->an_ref++;
		simple_unlock(&amap->am_anon[lcv]->an_lock);
		amap->am_bckptr[lcv] = amap->am_nused;
		amap->am_slots[amap->am_nused] = lcv;
		amap->am_nused++;
	}
	memset(&amap->am_anon[lcv], 0,
	    (amap->am_maxslot - lcv) * sizeof(struct vm_anon *));

	/*
	 * drop our reference to the old amap (srcamap).  we know that
	 * the reference count on srcamap is greater than one (we
	 * checked above), so there is no way we could drop the count
	 * to zero.  [and no need to worry about freeing it]
	 */
	srcamap->am_ref--;
	if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0)
		srcamap->am_flags &= ~AMAP_SHARED;	/* clear shared flag */
#ifdef UVM_AMAP_PPREF
	if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
		amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
		    (entry->end - entry->start) >> PAGE_SHIFT, -1);
	}
#endif

	/*
	 * install new amap.
	 */
	entry->aref.ar_pageoff = 0;
	entry->aref.ar_amap = amap;
	entry->etype &= ~UVM_ET_NEEDSCOPY;

	amap_list_insert(amap);

	/*
	 * done!
	 */
	UVMHIST_LOG(maphist, "<- done",0, 0, 0, 0);
}

/*
 * amap_cow_now: resolve all copy-on-write faults in an amap now for fork(2)
 *
 *	called during fork(2) when the parent process has a wired map
 *	entry.   in that case we want to avoid write-protecting pages
 *	in the parent's map (e.g. like what you'd do for COW pages)
 *	so we resolve the COW here.
 *
 * => assume parent's entry was wired, thus all pages are resident.
 * => assume pages that are loaned out (loan_count) are already mapped
 *	read-only in all maps, and thus no need for us to worry about them
 * => caller passes child's map/entry in to us
 * => if we run out of memory we sleep and then restart; there is no
 *	easy way to make fork fail from here.
 */
void
amap_cow_now(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_amap *amap = entry->aref.ar_amap;
	int lcv, slot;
	struct vm_anon *anon, *nanon;
	struct vm_page *pg, *npg;

	/*
	 * note that if we wait (e.g. on memory) we must ReStart the
	 * "lcv" for loop because the anons in the amap->am_anon[]
	 * array could have been reordered in the meantime.
	 */
ReStart:
	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
		/*
		 * get the page
		 */
		slot = amap->am_slots[lcv];
		anon = amap->am_anon[slot];
		simple_lock(&anon->an_lock);
		pg = anon->an_page;

		/*
		 * page must be resident since parent is wired
		 */
		if (pg == NULL)
			panic("amap_cow_now: non-resident wired page in anon %p",
			    anon);

		/*
		 * if the anon ref count is one and the page is not
		 * loaned, then we are safe (the child has exclusive
		 * access to the page).  if it is greater than one and
		 * the page is not loaned, then we must allocate a new
		 * page, copy the data over, and replace the anon.
		 * [loaned pages are already mapped read-only, per the
		 * assumption above, so they need no copy.]
		 */
		if (anon->an_ref > 1 && pg->loan_count == 0) {
			/*
			 * if the page is busy then we have to unlock,
			 * wait for it and then restart.
			 */
			if (pg->pg_flags & PG_BUSY) {
				atomic_setbits_int(&pg->pg_flags, PG_WANTED);
				UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, FALSE,
				    "cownow", 0);
				goto ReStart;
			}

			/*
			 * ok, time to do a copy-on-write to a new anon
			 */
			nanon = uvm_analloc();
			if (nanon) {
				npg = uvm_pagealloc(NULL, 0, nanon, 0);
			} else
				npg = NULL;	/* XXX: quiet gcc warning */

			if (nanon == NULL || npg == NULL) {
				/*
				 * out of memory: unlock, sleep until
				 * more memory is free, then restart.
				 */
				if (nanon) {
					simple_lock(&nanon->an_lock);
					uvm_anfree(nanon);
				}
				simple_unlock(&anon->an_lock);
				uvm_wait("cownowpage");
				goto ReStart;
			}

			/*
			 * got it... now we can copy the data and
			 * replace anon with our new one...
			 */
			uvm_pagecopy(pg, npg);		/* old -> new */
			anon->an_ref--;			/* can't drop to zero */
			amap->am_anon[slot] = nanon;	/* replace */

			/*
			 * drop PG_BUSY on new page ... since we have had
			 * its owner locked the whole time it can't be
			 * PG_RELEASED | PG_WANTED.
			 */
			atomic_clearbits_int(&npg->pg_flags, PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(npg, NULL);
			uvm_lock_pageq();
			uvm_pageactivate(npg);
			uvm_unlock_pageq();
		}

		simple_unlock(&anon->an_lock);
		/*
		 * done with this anon, next ...!
		 */
	}
}

/*
 * amap_splitref: split a single reference into two separate references
 *
 * => called from uvm_map's clip routines
 */
void
amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
{
	int leftslots;

	AMAP_B2SLOT(leftslots, offset);
	if (leftslots == 0)
		panic("amap_splitref: split at zero offset");

	/*
	 * make sure we don't split beyond the end of the amap
	 */
	if (origref->ar_amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
		panic("amap_splitref: map size check failed");

#ifdef UVM_AMAP_PPREF
	/*
	 * establish ppref before we add a duplicate reference to the amap
	 */
	if (origref->ar_amap->am_ppref == NULL)
		amap_pp_establish(origref->ar_amap);
#endif

	splitref->ar_amap = origref->ar_amap;
	splitref->ar_amap->am_ref++;		/* not a share reference */
	splitref->ar_pageoff = origref->ar_pageoff + leftslots;
}

#ifdef UVM_AMAP_PPREF

/*
 * amap_pp_establish: add a ppref array to an amap, if possible
 */
void
amap_pp_establish(struct vm_amap *amap)
{

	amap->am_ppref = malloc(sizeof(int) * amap->am_maxslot,
	    M_UVMAMAP, M_NOWAIT);

	/*
	 * if we fail then we just won't use ppref for this amap
	 */
	if (amap->am_ppref == NULL) {
		amap->am_ppref = PPREF_NONE;	/* not using it */
		return;
	}

	/*
	 * init ppref
	 */
	memset(amap->am_ppref, 0, sizeof(int) * amap->am_maxslot);
	pp_setreflen(amap->am_ppref, 0, amap->am_ref, amap->am_nslot);
}

/*
 * amap_pp_adjref: adjust reference count to a part of an amap using the
 * per-page reference count array.
 *
 * => caller must check that ppref != PPREF_NONE before calling
 */
void
amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
{
	int stopslot, *ppref, lcv, prevlcv;
	int ref, len, prevref, prevlen;

	stopslot = curslot + slotlen;
	ppref = amap->am_ppref;
	prevlcv = 0;

	/*
	 * first advance to the correct place in the ppref array,
	 * fragment if needed.
	 */
	for (lcv = 0 ; lcv < curslot ; lcv += len) {
		pp_getreflen(ppref, lcv, &ref, &len);
		if (lcv + len > curslot) {	/* goes past start? */
			pp_setreflen(ppref, lcv, ref, curslot - lcv);
			pp_setreflen(ppref, curslot, ref, len - (curslot - lcv));
			len = curslot - lcv;
		}
		prevlcv = lcv;
	}
	if (lcv != 0)
		pp_getreflen(ppref, prevlcv, &prevref, &prevlen);
	else {
		/*
		 * Ensure that the "prevref == ref" test below always
		 * fails, since we're starting from the beginning of
		 * the ppref array; i.e. there is no previous chunk.
		 */
		prevref = -1;
		prevlen = 0;
	}

	/*
	 * now adjust reference counts in range.  merge the first
	 * chunk with the previous chunk if the adjusted count matches,
	 * and fragment the last chunk if the range ends inside it.
	 * any chunk whose count drops to zero has its anons wiped.
	 */
	if (lcv != curslot)
		panic("amap_pp_adjref: overshot target");

	for (; lcv < stopslot ; lcv += len) {
		pp_getreflen(ppref, lcv, &ref, &len);
		if (lcv + len > stopslot) {	/* goes past end? */
			pp_setreflen(ppref, lcv, ref, stopslot - lcv);
			pp_setreflen(ppref, stopslot, ref,
			    len - (stopslot - lcv));
			len = stopslot - lcv;
		}
		ref += adjval;
		if (ref < 0)
			panic("amap_pp_adjref: negative reference count");
		if (lcv == prevlcv + prevlen && ref == prevref) {
			pp_setreflen(ppref, prevlcv, ref, prevlen + len);
		} else {
			pp_setreflen(ppref, lcv, ref, len);
		}
		if (ref == 0)
			amap_wiperange(amap, lcv, len);
	}
}
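
/*
 * illustrative walk-through of amap_pp_adjref() (not from the original
 * source): suppose a 10-slot amap has a uniform ppref of [-2, 10, ...]
 * (ref 1 over all 10 slots) and we call
 * amap_pp_adjref(amap, 4, 6, 1).  the first loop fragments the chunk
 * into [0..3] and [4..9], giving [-2, 4, x, x, -2, 6, ...]; the second
 * loop then bumps the [4..9] chunk to ref 2: [-2, 4, x, x, -3, 6, ...].
 * an adjval of -1 over the same range would instead drop the chunk to
 * ref 0 and call amap_wiperange() on slots 4-9.
 */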

/*
 * amap_wiperange: wipe out a range of an amap
 * [different from amap_wipeout because the amap is kept intact]
 */
void
amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
{
	int byanon, lcv, stop, curslot, ptr, slotend;
	struct vm_anon *anon;

	/*
	 * we can either traverse the amap by am_anon or by am_slots
	 * depending on which is cheaper.    decide now.
	 */
	if (slots < amap->am_nused) {
		byanon = TRUE;
		lcv = slotoff;
		stop = slotoff + slots;
	} else {
		byanon = FALSE;
		lcv = 0;
		stop = amap->am_nused;
		slotend = slotoff + slots;
	}

	while (lcv < stop) {
		int refs;

		if (byanon) {
			curslot = lcv++;	/* lcv advances here */
			if (amap->am_anon[curslot] == NULL)
				continue;
		} else {
			curslot = amap->am_slots[lcv];
			if (curslot < slotoff || curslot >= slotend) {
				lcv++;		/* lcv advances here */
				continue;
			}
			stop--;	/* drop stop, since anon will be removed */
		}
		anon = amap->am_anon[curslot];

		/*
		 * remove it from the amap: move the last am_slots entry
		 * into the vacated position so the array stays dense.
		 */
		amap->am_anon[curslot] = NULL;
		ptr = amap->am_bckptr[curslot];
		if (ptr != (amap->am_nused - 1)) {
			amap->am_slots[ptr] =
			    amap->am_slots[amap->am_nused - 1];
			amap->am_bckptr[amap->am_slots[ptr]] =
			    ptr;	/* back ptr. */
		}
		amap->am_nused--;

		/*
		 * drop anon reference count
		 */
		simple_lock(&anon->an_lock);
		refs = --anon->an_ref;
		simple_unlock(&anon->an_lock);
		if (refs == 0) {
			/*
			 * we just eliminated the last reference to an
			 * anon.  free it.
			 */
			uvm_anfree(anon);
		}
	}
}

#endif

/*
 * amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
 *
 * => called with swap_syscall_lock held.
 * => note that we don't always traverse all anons.
 *    eg. amaps being wiped out, released anons.
 * => return TRUE if failed.
 */
boolean_t
amap_swap_off(int startslot, int endslot)
{
	struct vm_amap *am;
	struct vm_amap *am_next;
	struct vm_amap marker_prev;
	struct vm_amap marker_next;
	boolean_t rv = FALSE;

#if defined(DIAGNOSTIC)
	memset(&marker_prev, 0, sizeof(marker_prev));
	memset(&marker_next, 0, sizeof(marker_next));
#endif /* defined(DIAGNOSTIC) */

	for (am = LIST_FIRST(&amap_list); am != NULL && !rv; am = am_next) {
		int i;

		/*
		 * bracket "am" with markers so that we can find our
		 * place again if the list is modified while we sleep
		 * in uvm_anon_pagein().
		 */
		LIST_INSERT_BEFORE(am, &marker_prev, am_list);
		LIST_INSERT_AFTER(am, &marker_next, am_list);

		if (am->am_nused <= 0) {
			goto next;
		}

		for (i = 0; i < am->am_nused; i++) {
			int slot;
			int swslot;
			struct vm_anon *anon;

			slot = am->am_slots[i];
			anon = am->am_anon[slot];
			simple_lock(&anon->an_lock);

			swslot = anon->an_swslot;
			if (swslot < startslot || endslot <= swslot) {
				simple_unlock(&anon->an_lock);
				continue;
			}

			am->am_flags |= AMAP_SWAPOFF;

			rv = uvm_anon_pagein(anon);

			am->am_flags &= ~AMAP_SWAPOFF;
			if (amap_refs(am) == 0) {
				amap_wipeout(am);
				am = NULL;
				break;
			}
			if (rv) {
				break;
			}

			/*
			 * uvm_anon_pagein() may have modified am_slots,
			 * so restart the scan.
			 */
			i = 0;
		}

next:
		KASSERT(LIST_NEXT(&marker_prev, am_list) == &marker_next ||
		    LIST_NEXT(LIST_NEXT(&marker_prev, am_list), am_list) ==
		    &marker_next);
		am_next = LIST_NEXT(&marker_next, am_list);
		LIST_REMOVE(&marker_prev, am_list);
		LIST_REMOVE(&marker_next, am_list);
	}

	return rv;
}