This source file includes the following definitions:
- uvm_pager_init
- uvm_pagermapin
- uvm_pagermapout
- uvm_mk_pcluster
- uvm_pager_put
- uvm_pager_dropcluster
- uvm_aio_biodone1
- uvm_aio_biodone
- uvm_aio_aiodone
- uvm_errno2vmerror
/*
 * uvm_pager.c: generic functions used to assist the pagers
 */

#define UVM_PAGER
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */
struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */
vm_map_t pager_map;
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* sleepers waiting for pager_map space */
static vaddr_t emergva;		/* emergency KVA reserved for the pagedaemon */
static boolean_t emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */
void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */
	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
	emergva = uvm_km_valloc(kernel_map, MAXBSIZE);
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */
	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into kernel virtual address space (pager_map)
 * so the pager can do I/O on them.
 *
 * => we reserve space in pager_map with a blank map entry and then enter
 *    the page mappings by hand with pmap_enter().
 * => all pages must be busy.
 * => returns 0 on failure, which can only happen if
 *    UVMPAGER_MAPIN_WAITOK is not set.
 */
vaddr_t
uvm_pagermapin(pps, npages, flags)
	struct vm_page **pps;
	int npages;
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=%p, npages=%ld)", pps, npages,0,0);

	/*
	 * compute protection.  we always need read access to the pages;
	 * "read" I/O (pagein) has the device storing into the pages, so
	 * the mapping needs write access as well.
	 */
	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != 0) {
		/*
		 * pager_map is full.  the pagedaemon must not sleep here,
		 * so it falls back to the emergency VA, waiting for it to
		 * come free if another pagedaemon I/O is using it.
		 */
		if (curproc == uvm.pagedaemon_proc) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			KASSERT(npages <= MAXBSIZE >> PAGE_SHIFT);
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->pg_flags & PG_BUSY);
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%lx)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove the KVA mapping set up by uvm_pagermapin
 *
 * => we remove our mappings by hand and then remove the map entry,
 *    waking up anyone waiting for pager_map space.
 */
void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%lx, npages=%ld)", kva, npages,0,0);

	/*
	 * the emergency VA is not in pager_map, so just release it and
	 * wake any pagedaemon I/O waiting for it.
	 */
	if (kva == emergva) {
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		entries = NULL;
		goto remove;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
remove:
	pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT));
	if (entries)
		uvm_unmap_detach(entries, 0);

	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
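
/*
 * Example: the usual pairing of the two functions above.  This is a
 * hypothetical caller sketch only; the "pps"/"npages" setup and the
 * device I/O itself are assumed, not part of this file:
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, 0);
 *	if (kva == 0)
 *		return (VM_PAGER_AGAIN);    (can only fail without WAITOK)
 *	... do device I/O on [kva, kva + (npages << PAGE_SHIFT)) ...
 *	uvm_pagermapout(kva, npages);
 */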

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ page queues (we look at active vs.
 *    inactive bits)
 * => caller must make the center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must un-busy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:	all pages in object are valid targets
 *	!PGO_ALLPAGES:	use "mlo" and "mhi" to limit range of cluster
 *	PGO_DOACTCLUST:	include active pages in cluster
 */
struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;
	struct vm_page **pps, *center;
	int *npages, flags;
	voff_t mlo, mhi;
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * get cluster boundaries from the pager, clip them with the
	 * caller's limits, and sanity check that the result still fits
	 * in the pps array.  if not, pageout just the center page.
	 */
	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) {  /* pps too small, bail out! */
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */
	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap layer with
	 * pmap_is_modified since there are no mappings.]
	 */
	for (forward = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		    curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}

			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pg_flags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* dont want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->pg_flags & PG_CLEANCHK) == 0) {
					if ((pclust->pg_flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(pclust))
						atomic_clearbits_int(
						    &pclust->pg_flags,
						    PG_CLEAN);
					/* now checked */
					atomic_setbits_int(&pclust->pg_flags,
					    PG_CLEANCHK);
				}
			}

			/* is page available for cleaning and does it need it */
			if ((pclust->pg_flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!  enroll the page in our array */
			atomic_setbits_int(&pclust->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/* write-protect the page for the pageout */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller.
	 */
	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
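
/*
 * Example: a pager opts in to the generic clustering above through its
 * uvm_pagerops.  A sketch only; "mypager_ops" and "mypager_cluster" are
 * hypothetical names:
 *
 *	mypager_ops.pgo_cluster = mypager_cluster;     (computes lo/hi range)
 *	mypager_ops.pgo_mk_pcluster = uvm_mk_pcluster; (use generic code)
 *
 * a pager that never clusters instead leaves pgo_mk_pcluster NULL, in
 * which case uvm_pager_put() below pages out the center page alone.
 */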

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.  this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN.
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr64_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) we return with uobj locked _only_ if
 *		PGO_PDFREECLUST is set AND result != VM_PAGER_PEND;
 *		in all other cases we return with uobj unlocked
 *	4. on errors we always drop the cluster, so the caller only has
 *		the center page ("pg") to worry about
 */
int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;
	struct vm_page *pg, ***ppsp_ptr;
	int *npages;
	int flags;
	voff_t start, stop;
{
	int result;
	daddr64_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;
	UVMHIST_FUNC("uvm_pager_put"); UVMHIST_CALLED(pdhist);

	/*
	 * note that uobj is null if we are doing a swap-backed pageout,
	 * and !null if we are doing normal object pageout.  note that
	 * the page queues must be locked to cluster.
	 */
	if (uobj) {	/* if !swap-backed */
		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */
		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;	/* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */
	} else {
		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start".
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr64_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */
ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		UVMHIST_LOG(pdhist, "put -> %ld", result, 0,0,0);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */
	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.  if we built a cluster, drop it and
	 * clean up the swap slot bookkeeping; after this the caller only
	 * has the center page ("pg") to worry about.
	 */
	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg", we need to
		 * reset pg's swslot to either "swblk" (for transient
		 * errors, so we can retry) or 0 (for hard errors).
		 */
		if (uobj == NULL && pg != NULL) {
			/* XXX: daddr64_t -> int */
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pg_flags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT,
				    nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {
			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */
			if (uobj == NULL) {
				if (pg) {
					/* retry with just the center page */
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					/* no page of interest: drop it all */
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {
			/*
			 * for hard failures on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */
			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * relock uobj if the return-state contract (item #3) requires it,
	 * then return the error.
	 */
	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
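
/*
 * Example: a hypothetical pagedaemon-style call of uvm_pager_put() for a
 * swap-backed cluster.  A sketch only; "pg", "swslot" and the code that
 * fills "pps"/"npages" with busy pages are assumed, not part of this file:
 *
 *	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp = pps;
 *	int npages, result;
 *
 *	(... fill pps/npages, make the pages PG_BUSY ...)
 *	uvm_lock_pageq();
 *	result = uvm_pager_put(NULL, pg, &ppsp, &npages,
 *	    PGO_PDFREECLUST, (voff_t)swslot, 0);
 *
 * the page queues come back unlocked; on VM_PAGER_OK the cluster has
 * already been dropped for us because of PGO_PDFREECLUST.
 */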

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, we are doing PGO_PDFREECLUST)
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around), if any
 * => ppsp/npages is our current cluster
 * => flags (either PGO_PDFREECLUST or PGO_REALLOCSWAP):
 *	PGO_PDFREECLUST: pageout succeeded: un-busy the cluster pages
 *		for the pagedaemon and mark them clean
 *	PGO_REALLOCSWAP: pageout failed: un-busy the cluster pages and
 *		dispose of their swap slots
 */
void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;
	struct vm_page *pg, **ppsp;
	int *npages;
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */
	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pg_flags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->pg_flags & PG_WANTED) {
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->pg_flags & PG_RELEASED) {

			if (ppsp[lcv]->pg_flags & PQ_ANON) {
				/* so that anfree will free */
				atomic_clearbits_int(&ppsp[lcv]->pg_flags,
				    PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			KASSERT(!uobj || obj_is_alive);

			/* only unlock the object if it is still alive */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXX: if the object died in pgo_releasepg, that
			 * fact is not passed up to the caller; we are
			 * currently ignoring it.
			 */
			continue;		/* next page */
		} else {
			atomic_clearbits_int(&ppsp[lcv]->pg_flags,
			    PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			atomic_setbits_int(&ppsp[lcv]->pg_flags, PG_CLEAN);
		}

		/* if swap-backed, unlock the object that owns the page */
		if (!uobj) {
			if (ppsp[lcv]->pg_flags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}

#ifdef UBC
/*
 * uvm_aio_biodone1: aiodone for the sub-buffers of a "nested" async i/o.
 * accumulate the result in the master buffer and arrange for the
 * master's biodone to run once every sub-buffer has finished.
 */
void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

	splassert(IPL_BIO);

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
#endif
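
/*
 * Sketch of the master/sub-buffer relationship uvm_aio_biodone1() expects
 * (hypothetical setup code; the field usage matches the function above):
 *
 *	sbp->b_private = mbp;			(sub buffer points at master)
 *	mbp->b_resid = mbp->b_bcount;		(counts down as subs finish)
 *
 * each completing sub buffer subtracts its b_bcount from the master's
 * b_resid; the master's biodone() runs when b_resid reaches zero.
 */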

/*
 * uvm_aio_biodone: iodone routine for async i/os.
 * this runs at interrupt time from biodone; rather than doing the page
 * processing here, queue the buf for the aiodone daemon and wake it up.
 */
void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	splassert(IPL_BIO);

	/* reset b_iodone for when the daemon processes this buf */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}
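
/*
 * Example: how an async pager write might arrange for the handlers above
 * to run.  A hypothetical sketch; the buffer setup, flags, and strategy
 * call vary by caller and are assumptions here:
 *
 *	bp->b_flags |= B_CALL;			(have biodone call b_iodone)
 *	bp->b_iodone = uvm_aio_biodone;		(defer to aiodone daemon)
 *	VOP_STRATEGY(bp);
 *
 * biodone() runs uvm_aio_biodone() at interrupt time, which queues the
 * buffer on uvm.aio_done; the aiodone daemon later calls b_iodone, by
 * then uvm_aio_aiodone(), outside of interrupt context.
 */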

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this is normally run by the aiodone daemon at splbio.
 */
void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	int i, error;
	boolean_t write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "bp %p", bp, 0,0,0);

	splassert(IPL_BIO);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
#ifdef UBC
	/* let the file system know a cached pageout has completed */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}
#endif

	/*
	 * get the pages that were mapped for the i/o and unmap them.
	 */
	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(pdhist, "pgs[%ld] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
#ifdef UVM_SWAP_ENCRYPT
	/*
	 * encrypted swap i/o uses pages allocated just for the transfer;
	 * simply free them.
	 */
	if (pgs[0]->pg_flags & PQ_ENCRYPT) {
		uvm_swap_freepages(pgs, npages);
		goto freed;
	}
#endif
	for (i = 0; i < npages; i++) {
		pg = pgs[i];

		if (i == 0) {
			swap = (pg->pg_flags & PQ_SWAPBACKED) != 0;
			if (!swap) {
				uobj = pg->uobject;
				simple_lock(&uobj->vmobjlock);
			}
		}
		KASSERT(swap || pg->uobject == uobj);
		if (swap) {
			if (pg->pg_flags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
			}
		}

		/*
		 * if this is a read and an error occurred, mark the page
		 * PG_RELEASED so that uvm_page_unbusy() will free it.
		 */
		if (!write && error) {
			atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
			continue;
		}
		KASSERT(!write || (pgs[i]->pg_flags & PG_FAKE) == 0);

		/*
		 * if this is a read and the page is PG_FAKE,
		 * or this was a successful write,
		 * mark the page PG_CLEAN and not PG_FAKE.
		 */
		if ((pgs[i]->pg_flags & PG_FAKE) || (write && error != ENOMEM)) {
			pmap_clear_reference(pgs[i]);
			pmap_clear_modify(pgs[i]);
			atomic_setbits_int(&pgs[i]->pg_flags, PG_CLEAN);
			atomic_clearbits_int(&pgs[i]->pg_flags, PG_FAKE);
		}
		if (swap) {
			if (pg->pg_flags & PQ_ANON) {
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
	uvm_page_unbusy(pgs, npages);
	if (!swap) {
		simple_unlock(&uobj->vmobjlock);
	}

#ifdef UVM_SWAP_ENCRYPT
freed:
#endif
	if (write && (bp->b_flags & B_AGE) != 0 && bp->b_vp != NULL) {
		vwakeup(bp->b_vp);
	}
	pool_put(&bufpool, bp);
}

/*
 * uvm_errno2vmerror: translate unix errno values into VM_PAGER_* codes.
 */
int
uvm_errno2vmerror(errno)
	int errno;
{
	switch (errno) {
	case 0:
		return VM_PAGER_OK;
	case EINVAL:
		return VM_PAGER_BAD;
	case EINPROGRESS:
		return VM_PAGER_PEND;
	case EIO:
		return VM_PAGER_ERROR;
	case EAGAIN:
		return VM_PAGER_AGAIN;
	case EBUSY:
		return VM_PAGER_UNLOCK;
	default:
		return VM_PAGER_ERROR;
	}
}
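
/*
 * Example: a pager translating a VOP error into a pager return code
 * (hypothetical caller sketch; "vp" and "uio" setup are assumed):
 *
 *	error = VOP_READ(vp, &uio, 0, curproc->p_ucred);
 *	return (uvm_errno2vmerror(error));
 */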