This source file includes the following definitions:
- LIST_HEAD
- uao_find_swslot
- uao_set_swslot
- uao_free
- uao_create
- uao_init
- uao_reference
- uao_reference_locked
- uao_detach
- uao_detach_locked
- uao_flush
- uao_get
- uao_releasepg
- uao_dropswap
- uao_swap_off
- uao_pagein
- uao_pagein_page
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>
/*
 * an aobj manages anonymous-memory-backed uvm_objects.  in addition to
 * keeping the list of resident pages, it also keeps a list of allocated
 * swap blocks.  depending on the size of the object, this list is either
 * stored in an array (small objects) or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.  we require the cluster size to be
 * a power of two.
 */
#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
	    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */
#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	    UAO_SWHASH_MAXBUCKETS))
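/*
 * Worked example (illustrative only, not part of the original source):
 * with UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x37 has tag
 * 0x37 >> 4 == 0x3, occupies slots[0x37 & 0xf] == slots[7] within its
 * hash element, and that element lives in bucket (0x3 & u_swhashmask).
 */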
/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */
struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */
LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */
struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */
struct uvm_aobj {
	struct uvm_object u_obj;	/* has: lock, pgops, memq, #pages, #refs */
	int u_pages;			/* number of pages in entire object */
	int u_flags;			/* the flags (see uvm_aobj.h) */
	int *u_swslots;			/* array of offset->swapslot mappings */
	struct uao_swhash *u_swhash;	/* hashtable of offset->swapslot mappings */
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */
struct pool uvm_aobj_pool;

/*
 * local functions
 */
static struct uao_swhash_elt	*uao_find_swhash_elt(struct uvm_aobj *,
				     int, boolean_t);
static int			 uao_find_swslot(struct uvm_aobj *, int);
static boolean_t		 uao_flush(struct uvm_object *,
				     voff_t, voff_t, int);
static void			 uao_free(struct uvm_aobj *);
static int			 uao_get(struct uvm_object *, voff_t,
				     vm_page_t *, int *, int,
				     vm_prot_t, int, int);
static boolean_t		 uao_releasepg(struct vm_page *,
				     struct vm_page **);
static boolean_t		 uao_pagein(struct uvm_aobj *, int, int);
static boolean_t		 uao_pagein_page(struct uvm_aobj *, int);
/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */
struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */
static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;
/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */
static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag)
			return (elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return (NULL);

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return (elt);
}
/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */
	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return (0);

	/*
	 * if hashing, look in hash table.
	 */
	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return (UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return (0);
	}

	/*
	 * otherwise, look in the array
	 */
	return (aobj->u_swslots[pageidx]);
}
/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %ld slot %ld",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */
	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return (0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */
	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
			KASSERT(slot == 0);
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
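/*
 * Illustrative (hypothetical) use of the slot interface, assuming a
 * locked aobj "uobj" and a page index "idx" within it:
 *
 *	old = uao_set_swslot(uobj, idx, newslot);  // returns previous slot
 *	...
 *	slot = uao_set_swslot(uobj, idx, 0);       // clear the mapping
 *	if (slot)
 *		uvm_swap_free(slot, 1);            // what uao_dropswap() does
 */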
/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead (i.e. no more references)
 * => the aobj should be locked (the lock is dropped here)
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	simple_unlock(&aobj->u_obj.vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot == 0) {
						continue;
					}
					uvm_swap_free(slot, 1);

					/*
					 * this page is no longer
					 * only in swap.
					 */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly--;
					simple_unlock(&uvm.swap_data_lock);
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array of swap slots
		 */
		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}
/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object (can also
 *	    only happen once)
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;		/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * allocate a new aobj, unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return (&aobj->u_obj);
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return (&aobj->u_obj);
}
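/*
 * Usage sketch (hypothetical caller; a minimal illustration, not part of
 * the original source).  Create a 16-page anonymous object, then take
 * and drop references:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(16 * PAGE_SIZE, 0);
 *	uao_reference(uobj);	// now 2 refs
 *	uao_detach(uobj);	// back to 1 ref
 *	uao_detach(uobj);	// last ref: pages and swap slots are freed
 */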
/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must NOT come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", &pool_allocator_nointr);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", &pool_allocator_nointr);
}
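/*
 * Illustrative boot-time ordering (hypothetical caller, assuming the
 * usual UVM constants; not part of the original source):
 *
 *	uao_init();		// set up the pools and the global aobj list
 *	kobj = uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
 *	    UAO_FLAG_KERNOBJ);	// the one and only kernel object
 *	...
 *	// later, once swap is configured, the kernel object can be
 *	// re-passed with UAO_FLAG_KERNSWAP to enable swapping for it.
 */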
/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}
/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must already be locked
 * => caller must still drop the lock when done
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=%p, ref = %ld)",
	    uobj, uobj->uo_refs,0,0);
}
/*
 * uao_detach: drop a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 *    (the lock is dropped by uao_detach_locked)
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}
/*
 * uao_detach_locked: drop a ref to an aobj
 *
 * => aobj must be locked; it is unlocked on return
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *next;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist," (uobj=%p) ref=%ld", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = TAILQ_FIRST(&uobj->memq); pg != NULL; pg = next) {
		next = TAILQ_NEXT(pg, listq);
		if (pg->pg_flags & PG_BUSY) {
			atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(pg, VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}
/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller
 * => if PGO_ALLPAGES is set, then all pages in the object are valid
 *	targets for flushing
 * => we return TRUE unless we encountered some sort of I/O error
 *	(currently this never happens, as we never directly initiate I/O)
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list
 * => NOTE: we are allowed to lock the page queues, so the caller must
 *	not be holding the lock on them
 *
 * UAO_HASH_PENALTY: a page lookup in the object hash is assumed to cost
 * roughly this many times a single memq traversal step, so we walk the
 * list only when the object's page count is small enough relative to
 * the range being flushed.
 */
#define UAO_HASH_PENALTY 4

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* quiet compiler */

	retval = TRUE;	/* default to success */

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%lx, by_list=%ld, flags=0x%lx",
	    (u_long)start, (u_long)stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of the loop
	 * or we will get stuck.  we need ppnext because we may free "pp"
	 * before doing the next loop iteration.
	 */
	if (by_list) {
		pp = TAILQ_FIRST(&uobj->memq);
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;		/* quiet compiler */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			ppnext = TAILQ_NEXT(pp, listq);

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just deactivate
		 * the page.  We may want to handle this differently
		 * someday.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

#ifdef UBC
			/* ...and deactivate the page. */
			pmap_clear_reference(pp);
#else
			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			/* ...and deactivate the page. */
#endif
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if it's busy.
			 */
			if (pp->pg_flags & PG_BUSY) {
				atomic_setbits_int(&pp->pg_flags, PG_RELEASED);
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%ld",retval,0,0,0);
	return (retval);
}
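/*
 * Illustrative (hypothetical) calls, assuming a locked aobj "uobj"
 * (not part of the original source):
 *
 *	// deactivate every page in the object
 *	uao_flush(uobj, 0, 0, PGO_ALLPAGES|PGO_DEACTIVATE);
 *
 *	// free the pages backing byte range [off, off+len)
 *	uao_flush(uobj, off, off + len, PGO_FREE);
 */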
/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%ld, flags=%ld",
	    aobj, (u_long)offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */
	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */
		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					atomic_clearbits_int(&ptmp->pg_flags,
					    PG_BUSY|PG_FAKE);
					atomic_setbits_int(&ptmp->pg_flags,
					    PQ_AOBJ);
					UVM_PAGE_OWN(ptmp, NULL);
				}
			}

			/*
			 * to be useful we must get a non-busy,
			 * non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->pg_flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy it and plug it in our
			 * result array
			 */
			atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */
		UVMHIST_LOG(pdhist, "<- done (done=%ld)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return (VM_PAGER_OK);
		else
			/* EEK!   Need to unlock and I/O */
			return (VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */
	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current
		 * offset.  if we find a page, we check its state: if it
		 * is busy we wait for it to leave the busy state, and if
		 * it is absent we allocate a fresh one.  we loop until
		 * pps[lcv] is set or we hold a freshly allocated page.
		 */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQs unlocked: we just
				 * alloc'd the page
				 */
				atomic_setbits_int(&ptmp->pg_flags, PQ_AOBJ);

				/*
				 * got new page ready for I/O.  break pps
				 * while loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->pg_flags & (PG_BUSY|PG_RELEASED)) != 0) {
				atomic_setbits_int(&ptmp->pg_flags, PG_WANTED);
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%lx\n",
				    ptmp->pg_flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page is resident and
			 * unbusy.  we busy it now (so we own it) and set
			 * pps[lcv] (so that we exit the while loop).
			 */
			atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv]
		 * points to it.  nothing more to do except go to the next
		 * page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * otherwise we have a "fake/busy/clean" page that we just
		 * allocated.  do the needed "I/O", either reading from
		 * swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %ld",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for I/O, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%ld)",
				    rv,0,0,0);
				if (ptmp->pg_flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
				    SWSLOT_BAD);
				uvm_swap_markbad(swslot, 1);

				atomic_clearbits_int(&ptmp->pg_flags,
				    PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that the page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */
		atomic_clearbits_int(&ptmp->pg_flags, PG_FAKE);
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */
	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return (VM_PAGER_OK);
}
/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY, PG_RELEASED page that we need to dispose of.
 * => returns TRUE if page's object is still alive, FALSE if we killed
 *	the page's object.  if we return FALSE, then the next page on
 *	the object's page queue is returned via nextpgp (if nextpgp is
 *	non-NULL), and the page queues are left locked for the caller.
 * => we kill the aobj only if it is unreferenced and marked with
 *	UAO_FLAG_KILLME.
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;

	KASSERT(pg->pg_flags & PG_RELEASED);

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(pg, VM_PROT_NONE);
	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;
	KASSERT(aobj->u_obj.uo_refs == 0);

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

	KASSERT(TAILQ_EMPTY(&aobj->u_obj.memq));

	/*
	 * finally, free the object.
	 */
	uao_free(aobj);

	return FALSE;
}
/*
 * uao_dropswap: release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */
void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}
/*
 * uao_swap_off: page in every page in every aobj that is paged out to a
 * range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;

	/*
	 * walk the list of all aobjs.
	 */
restart:
	simple_lock(&uao_list_lock);

	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {
		boolean_t rv;

		/*
		 * try to get the object lock, start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */
		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */
		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */
		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */
		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */
		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}
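/*
 * Illustrative (hypothetical) caller, e.g. the swap subsystem removing
 * a swap device whose slots span [startslot, endslot); not part of the
 * original source:
 *
 *	if (uao_swap_off(startslot, endslot))
 *		return ENOMEM;	// pagein aborted for lack of memory
 *	// otherwise all aobj pages in that slot range are now resident
 */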
/*
 * uao_pagein: page in any pages from aobj in the given slot range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */
					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}
/*
 * uao_pagein_page: page in a single page from an aobj.  used for swap_off.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, slot, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */
	simple_lock(&aobj->u_obj.vmobjlock);

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;
	}
	KASSERT((pg->pg_flags & PG_RELEASED) == 0);

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	uvm_swap_free(slot, 1);
	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/*
	 * deactivate the page (to put it on a page queue).
	 */
	pmap_clear_reference(pg);
#ifndef UBC
	pmap_page_protect(pg, VM_PROT_NONE);
#endif
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}