This source file includes the following definitions:
- uvn_init
- uvn_attach
- uvn_reference
- uvn_detach
- uvm_vnp_terminate
- uvn_releasepg
- uvn_flush
- uvn_cluster
- uvn_put
- uvn_get
- uvn_io
- uvm_vnp_uncache
- uvm_vnp_setsize
- uvm_vnp_sync
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/rwlock.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_vnode.h>
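
/*
 * private global data structures
 *
 * we keep a list of writeable active vnode-backed VM objects for sync op,
 * and a simpleq of vnodes that are currently being sync'd.
 */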
LIST_HEAD(uvn_list_struct, uvm_vnode);
struct uvn_list_struct uvn_wlist;	/* writeable uvns */

SIMPLEQ_HEAD(uvn_sq_struct, uvm_vnode);
struct uvn_sq_struct uvn_sync_q;	/* sync'ing uvns */
struct rwlock uvn_sync_lock;		/* locks sync operation */
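
/*
 * local prototypes
 */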
void		 uvn_cluster(struct uvm_object *, voff_t, voff_t *,
		     voff_t *);
void		 uvn_detach(struct uvm_object *);
boolean_t	 uvn_flush(struct uvm_object *, voff_t, voff_t, int);
int		 uvn_get(struct uvm_object *, voff_t, vm_page_t *, int *,
		     int, vm_prot_t, int, int);
void		 uvn_init(void);
int		 uvn_io(struct uvm_vnode *, vm_page_t *, int, int, int);
int		 uvn_put(struct uvm_object *, vm_page_t *, int, boolean_t);
void		 uvn_reference(struct uvm_object *);
boolean_t	 uvn_releasepg(struct vm_page *, struct vm_page **);
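
/*
 * master pager structure: the vnode pager's entry points
 */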
struct uvm_pagerops uvm_vnodeops = {
	uvn_init,
	uvn_reference,
	uvn_detach,
	NULL,			/* no specialized fault routine required */
	uvn_flush,
	uvn_get,
	uvn_put,
	uvn_cluster,
	uvm_mk_pcluster,	/* use generic version of this: see uvm_pager.c */
	uvn_releasepg,
};
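
/*
 * uvn_init
 *
 * init pager private data structures.
 */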
void
uvn_init(void)
{

	LIST_INIT(&uvn_wlist);
	/* note: uvn_sync_q is init'd in uvm_vnp_sync() */
	rw_init(&uvn_sync_lock, "uvnsync");
}
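
/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.   if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.   if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the max access the attaching thread wants to
 * our pages.
 *
 * => nothing should be locked so that we can sleep here.
 * => returns NULL if the vnode could not be attached (e.g. getting the
 *	size failed, or the vnode is a block device that is not a disk).
 */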
struct uvm_object *
uvn_attach(arg, accessprot)
	void *arg;
	vm_prot_t accessprot;
{
	struct vnode *vp = arg;
	struct uvm_vnode *uvn = &vp->v_uvm;
	struct vattr vattr;
	int oldflags, result;
	struct partinfo pi;
	u_quad_t used_vnode_size;
	UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(vn=%p)", arg,0,0,0);

	used_vnode_size = (u_quad_t)0;

	/*
	 * first get a lock on the uvn.
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	while (uvn->u_flags & UVM_VNODE_BLOCKED) {
		printf("uvn_attach: blocked at %p flags 0x%x\n",
		    uvn, uvn->u_flags);
		uvn->u_flags |= UVM_VNODE_WANTED;
		UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(uvn, &uvn->u_obj.vmobjlock, FALSE,
		    "uvn_attach", 0);
		simple_lock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
	}

	/*
	 * if we're mapping a BLK device, make sure it is a disk.
	 */
	if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
		return(NULL);
	}

	/*
	 * now we have lock and uvn must not be in a blocked state.
	 * first check to see if it is already active, in which case
	 * we can bump the reference count, check to see if we need to
	 * add it to the writeable list, and then return.
	 */
	if (uvn->u_flags & UVM_VNODE_VALID) {	/* already active? */

		/* regain vref if we were persisting */
		if (uvn->u_obj.uo_refs == 0) {
			VREF(vp);
			UVMHIST_LOG(maphist," VREF (reclaim persisting vnode)",
			    0,0,0,0);
		}
		uvn->u_obj.uo_refs++;		/* bump uvn ref! */

		/* check for new writeable uvn */
		if ((accessprot & VM_PROT_WRITE) != 0 &&
		    (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) {
			LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist);
			/* we are now on wlist! */
			uvn->u_flags |= UVM_VNODE_WRITEABLE;
		}

		/* unlock and return */
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"<- done, refcnt=%ld", uvn->u_obj.uo_refs,
		    0, 0, 0);
		return (&uvn->u_obj);
	}

	/*
	 * need to call VOP_GETATTR() to get the attributes, but that could
	 * block (due to I/O), so we want to unlock the object before calling.
	 * however, we want to keep anyone else from playing with the object
	 * while it is unlocked.   to do this we set UVM_VNODE_ALOCK which
	 * prevents anyone from attaching to the vnode until we are done with
	 * it.
	 */
	uvn->u_flags = UVM_VNODE_ALOCK;
	simple_unlock(&uvn->u_obj.vmobjlock);	/* drop lock in case we sleep */

	if (vp->v_type == VBLK) {
		/*
		 * get size from the partition table rather than going
		 * through VOP_GETATTR: VOP_GETATTR() would give us the
		 * file system vnode operation, not the specfs operation,
		 * and all we want is the size anyhow.
		 */
		result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
		    DIOCGPART, (caddr_t)&pi, FREAD, curproc);
		if (result == 0) {
			used_vnode_size = (u_quad_t)pi.disklab->d_secsize *
			    (u_quad_t)DL_GETPSIZE(pi.part);
		}
	} else {
		result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc);
		if (result == 0)
			used_vnode_size = vattr.va_size;
	}

	/* relock object */
	simple_lock(&uvn->u_obj.vmobjlock);

	if (result != 0) {
		if (uvn->u_flags & UVM_VNODE_WANTED)
			wakeup(uvn);
		uvn->u_flags = 0;
		simple_unlock(&uvn->u_obj.vmobjlock);	/* drop lock */
		UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
		return(NULL);
	}

#ifdef DEBUG
	if (vp->v_type == VBLK)
		printf("used_vnode_size = %llu\n", (long long)used_vnode_size);
#endif

	/*
	 * now set up the uvn.
	 */
	uvn->u_obj.pgops = &uvm_vnodeops;
	TAILQ_INIT(&uvn->u_obj.memq);
	uvn->u_obj.uo_npages = 0;
	uvn->u_obj.uo_refs = 1;			/* just us... */
	oldflags = uvn->u_flags;
	uvn->u_flags = UVM_VNODE_VALID|UVM_VNODE_CANPERSIST;
	uvn->u_nio = 0;
	uvn->u_size = used_vnode_size;

	/* if write access, we need to add it to the wlist */
	if (accessprot & VM_PROT_WRITE) {
		LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist);
		uvn->u_flags |= UVM_VNODE_WRITEABLE;	/* we are on wlist! */
	}

	/*
	 * add a reference to the vnode.   this reference will stay as long
	 * as there is a valid mapping of the vnode.   dropped when the
	 * reference count goes to zero [and we either free or persist].
	 */
	VREF(vp);
	simple_unlock(&uvn->u_obj.vmobjlock);
	if (oldflags & UVM_VNODE_WANTED)
		wakeup(uvn);

	UVMHIST_LOG(maphist,"<- done/VREF, ret %p", &uvn->u_obj,0,0,0);
	return(&uvn->u_obj);
}
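
/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed out here.
 */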
void
uvn_reference(uobj)
	struct uvm_object *uobj;
{
#ifdef DEBUG
	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
#endif
	UVMHIST_FUNC("uvn_reference"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);
#ifdef DEBUG
	if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
		printf("uvn_reference: ref=%d, flags=0x%x\n",
		    uobj->uo_refs, uvn->u_flags);
		panic("uvn_reference: invalid state");
	}
#endif
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=%p, ref = %ld)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}
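
/*
 * uvn_detach
 *
 * remove a reference to a VM object.  if the reference count drops to
 * zero then flush the object's pages and either free it (if it does
 * not persist) or arm it so uvn_releasepg can kill it later.
 *
 * => caller must call with object unlocked; we take the lock here.
 */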
void
uvn_detach(uobj)
	struct uvm_object *uobj;
{
	struct uvm_vnode *uvn;
	struct vnode *vp;
	int oldflags;
	UVMHIST_FUNC("uvn_detach"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(maphist,"  (uobj=%p)  ref=%ld", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;			/* drop ref! */
	if (uobj->uo_refs) {			/* still more refs */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * get other pointers ...
	 */
	uvn = (struct uvm_vnode *) uobj;
	vp = (struct vnode *) uobj;

	/*
	 * clear VTEXT flag now that there are no mappings left (VTEXT is used
	 * to keep an active text file from being overwritten).
	 */
	vp->v_flag &= ~VTEXT;

	/*
	 * we just dropped the last reference to the uvn.   see if we can
	 * let it "stick around".
	 */
	if (uvn->u_flags & UVM_VNODE_CANPERSIST) {
		/* won't block */
		uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES);
		simple_unlock(&uobj->vmobjlock);
		vrele(vp);			/* drop vnode reference */
		UVMHIST_LOG(maphist,"<- done/vrele!  (persist)", 0,0,0,0);
		return;
	}

	/*
	 * its a goner!
	 */
	UVMHIST_LOG(maphist,"  its a goner (flushing)!", 0,0,0,0);

	uvn->u_flags |= UVM_VNODE_DYING;

	/*
	 * even though we may unlock in flush, no one can gain a reference
	 * to us until we clear the "dying" flag [because it blocks
	 * attaches].  we will not do that until after we've disposed of all
	 * the pages with uvn_flush().
	 */
	(void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);

	UVMHIST_LOG(maphist,"  its a goner (done flush)!", 0,0,0,0);

	/*
	 * after the flush, all the pages that were in the object have
	 * either been freed or are marked PG_BUSY and in the middle of an
	 * async I/O.  if we still have pages we set the "relkill" state,
	 * so that uvn_releasepg can kill the object once the I/O is done.
	 */
	if (uobj->uo_npages) {		/* I/O pending.  iodone will free */
#ifdef DEBUG
		printf("uvn_detach: vn %p has pages left after flush - "
		    "relkill mode\n", uobj);
#endif
		uvn->u_flags |= UVM_VNODE_RELKILL;
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist,"<- done! (releasepg will kill obj)", 0, 0,
		    0, 0);
		return;
	}

	/*
	 * kill object now.   note that we can't be on the sync q because
	 * all references are gone.
	 */
	if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
		LIST_REMOVE(uvn, u_wlist);
	}
#ifdef DIAGNOSTIC
	if (!TAILQ_EMPTY(&uobj->memq))
		panic("uvn_detach: vnode VM object still has pages after "
		    "syncio/free flush");
#endif
	oldflags = uvn->u_flags;
	uvn->u_flags = 0;
	simple_unlock(&uobj->vmobjlock);

	/* wake up any sleepers */
	if (oldflags & UVM_VNODE_WANTED)
		wakeup(uvn);

	/*
	 * drop our reference to the vnode.
	 */
	vrele(vp);
	UVMHIST_LOG(maphist,"<- done (vrele) final", 0,0,0,0);

	return;
}
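
/*
 * uvm_vnp_terminate: external hook to clear out a vnode's VM state
 *
 * called when a vnode is being cleaned so that the VM layer can
 * dispose of any pages and state it has for the vnode.  this covers
 * persisting objects (valid with a zero reference count) as well as
 * objects that are still actively referenced.
 *
 * => the caller must VOP_LOCK the vnode before calling us
 * => unlike uvn_detach, this function must not return until all the
 *	uvn's pages are disposed of.
 */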
void
uvm_vnp_terminate(vp)
	struct vnode *vp;
{
	struct uvm_vnode *uvn = &vp->v_uvm;
	int oldflags;
	UVMHIST_FUNC("uvm_vnp_terminate"); UVMHIST_CALLED(maphist);

	/*
	 * lock object and check if it is valid
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	UVMHIST_LOG(maphist, "  vp=%p, ref=%ld, flag=0x%lx", vp,
	    uvn->u_obj.uo_refs, uvn->u_flags, 0);
	if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist, "<- done (not active)", 0, 0, 0, 0);
		return;
	}

#ifdef DEBUG
	/*
	 * debug check: are we yanking the vnode out from under our uvn?
	 */
	if (uvn->u_obj.uo_refs) {
		printf("uvm_vnp_terminate(%p): terminating active vnode "
		    "(refs=%d)\n", uvn, uvn->u_obj.uo_refs);
	}
#endif

	/*
	 * it is possible that the uvn was detached and is in the relkill
	 * state [i.e. waiting for async I/O to finish so that releasepg can
	 * kill the object].  we take over the vnode now and cancel the
	 * relkill, since we want to know when the I/O is done so we can
	 * recycle right away.  note that a uvn can only be in the RELKILL
	 * state if it has a zero reference count.
	 */
	if (uvn->u_flags & UVM_VNODE_RELKILL)
		uvn->u_flags &= ~UVM_VNODE_RELKILL;	/* cancel RELKILL! */

	/*
	 * block the uvn by setting the dying flag, and then flush the
	 * pages.
	 *
	 * also, note that we tell I/O that we are already VOP_LOCK'd so
	 * that uvn_io doesn't attempt to VOP_LOCK again.
	 */
	uvn->u_flags |= UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED;

	(void) uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);

	/*
	 * as we just did a flush we expect all the pages to be gone or in
	 * the process of going.  sleep to wait for the rest to go [via iosync].
	 */
	while (uvn->u_obj.uo_npages) {
#ifdef DEBUG
		struct vm_page *pp;
		TAILQ_FOREACH(pp, &uvn->u_obj.memq, listq) {
			if ((pp->pg_flags & PG_BUSY) == 0)
				panic("uvm_vnp_terminate: detected unbusy pg");
		}
		if (uvn->u_nio == 0)
			panic("uvm_vnp_terminate: no I/O to wait for?");
		printf("uvm_vnp_terminate: waiting for I/O to fin.\n");
#endif
		uvn->u_flags |= UVM_VNODE_IOSYNC;
		UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, FALSE,
		    "uvn_term",0);
		simple_lock(&uvn->u_obj.vmobjlock);
	}

	/*
	 * done.   now we free the uvn if its reference count is zero
	 * (true if we are zapping a persisting uvn).   however, if we are
	 * terminating a uvn with active mappings we let it live ... future
	 * calls down to the vnode layer will fail.
	 */
	oldflags = uvn->u_flags;
	if (uvn->u_obj.uo_refs) {
		/*
		 * uvn must live on in its dead-vnode state until all
		 * references are gone.   restore flags.   clear CANPERSIST
		 * state.
		 */
		uvn->u_flags &= ~(UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED|
		    UVM_VNODE_WANTED|UVM_VNODE_CANPERSIST);
	} else {
		/*
		 * free the uvn now.   note that the vnode reference is
		 * already gone [it is dropped when we enter the persist
		 * state].
		 */
		if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED)
			panic("uvm_vnp_terminate: io sync wanted bit set");

		if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
			LIST_REMOVE(uvn, u_wlist);
		}
		uvn->u_flags = 0;	/* uvn is history, clear all bits */
	}

	if (oldflags & UVM_VNODE_WANTED)
		wakeup(uvn);		/* object lock still held */

	simple_unlock(&uvn->u_obj.vmobjlock);
	UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
}
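
/*
 * uvn_releasepg: handle released page in a uvn
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *	to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *	killed the page's object.   if we return TRUE, then we
 *	return with the object locked.
 * => if (nextpgp != NULL) => we return the next page on the queue in
 *	*nextpgp, and return with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 */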
boolean_t
uvn_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_vnode *uvn = (struct uvm_vnode *) pg->uobject;
#ifdef DIAGNOSTIC
	if ((pg->pg_flags & PG_RELEASED) == 0)
		panic("uvn_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED]
	 */
	pmap_page_protect(pg, VM_PROT_NONE);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();

	/*
	 * now see if we need to kill the object
	 */
	if (uvn->u_flags & UVM_VNODE_RELKILL) {
		if (uvn->u_obj.uo_refs)
			panic("uvn_releasepg: kill flag set on referenced "
			    "object!");
		if (uvn->u_obj.uo_npages == 0) {
			if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
				LIST_REMOVE(uvn, u_wlist);
			}
#ifdef DIAGNOSTIC
			if (!TAILQ_EMPTY(&uvn->u_obj.memq))
				panic("uvn_releasepg: pages in object with npages == 0");
#endif
			if (uvn->u_flags & UVM_VNODE_WANTED)
				/* still holding object lock */
				wakeup(uvn);

			uvn->u_flags = 0;		/* DEAD! */
			simple_unlock(&uvn->u_obj.vmobjlock);
			return (FALSE);
		}
	}
	return (TRUE);
}
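
/*
 * uvn_flush: flush pages out of a uvm object.
 *
 * => object should be locked by caller.   we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 * => if PGO_CLEANIT is set, we may block (due to I/O), so a caller
 *	might want to unlock higher level resources (e.g. vm_map)
 *	before calling flush.
 * => if PGO_CLEANIT is not set, then we will not block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid
 *	targets for flushing.
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 * => we return TRUE unless we encountered some sort of I/O error.
 */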
#define UVN_HASH_PENALTY 4	/* a guess: relative cost of a page lookup
				   vs. walking the object's page list */

boolean_t
uvn_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
	struct vm_page *pp, *ppnext, *ptmp;
	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
	int npages, result, lcv;
	boolean_t retval, need_iosync, by_list, needs_clean, all;
	voff_t curoff;
	u_short pp_version;
	UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;

	/*
	 * get init vals and determine how we are going to traverse object
	 */
	need_iosync = FALSE;
	retval = TRUE;		/* return value */
	if (flags & PGO_ALLPAGES) {
		all = TRUE;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
#ifdef DEBUG
		if (stop > round_page(uvn->u_size))
			printf("uvn_flush: strange, got an out of range "
			    "flush (fixed)\n");
#endif
		all = FALSE;
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVN_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%lx, by_list=%ld, flags=0x%lx",
	    (u_long)start, (u_long)stop, by_list, flags);

	/*
	 * PG_CLEANCHK: this bit is used by the pgo_mk_pcluster function as
	 * a _hint_ as to how up to date the PG_CLEAN bit is.   if the hint
	 * is wrong it will only waste a little CPU time, so we allow sloppy
	 * hints.   here we clear the CLEANCHK hint on the pages in the
	 * flush range so the bit gets recomputed before it is trusted.
	 */
	if ((flags & PGO_CLEANIT) != 0 &&
	    uobj->pgops->pgo_mk_pcluster != NULL) {
		if (by_list) {
			TAILQ_FOREACH(pp, &uobj->memq, listq) {
				if (!all &&
				    (pp->offset < start || pp->offset >= stop))
					continue;
				atomic_clearbits_int(&pp->pg_flags,
				    PG_CLEANCHK);
			}

		} else {   /* by hash */
			for (curoff = start ; curoff < stop;
			    curoff += PAGE_SIZE) {
				pp = uvm_pagelookup(uobj, curoff);
				if (pp)
					atomic_clearbits_int(&pp->pg_flags,
					    PG_CLEANCHK);
			}
		}
	}

	/*
	 * now do it.   note: we must update ppnext in the body of loop or we
	 * will get stuck.   we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */
	if (by_list) {
		pp = TAILQ_FIRST(&uobj->memq);
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;
	ppsp = NULL;
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {

		if (by_list) {
			/*
			 * range check
			 */
			if (!all &&
			    (pp->offset < start || pp->offset >= stop)) {
				ppnext = TAILQ_NEXT(pp, listq);
				continue;
			}

		} else {
			/*
			 * null check
			 */
			curoff += PAGE_SIZE;
			if (pp == NULL) {
				if (curoff < stop)
					ppnext = uvm_pagelookup(uobj, curoff);
				continue;
			}

		}

		/*
		 * handle case where we do not need to clean page (either
		 * because we are not cleaning or because the page is not
		 * dirty or is busy):
		 *
		 * NOTE: we are allowed to deactivate a non-wired active
		 * PG_BUSY page, but once a PG_BUSY page is on the inactive
		 * queue it must stay put until it is !PG_BUSY (so as not to
		 * confuse pagedaemon).
		 */
		if ((flags & PGO_CLEANIT) == 0 || (pp->pg_flags & PG_BUSY) != 0) {
			needs_clean = FALSE;
			if ((pp->pg_flags & PG_BUSY) != 0 &&
			    (flags & (PGO_CLEANIT|PGO_SYNCIO)) ==
			             (PGO_CLEANIT|PGO_SYNCIO))
				need_iosync = TRUE;
		} else {
			/*
			 * freeing: nuke all mappings so we can sync
			 * PG_CLEAN bit with no race
			 */
			if ((pp->pg_flags & PG_CLEAN) != 0 &&
			    (flags & PGO_FREE) != 0 &&
			    (pp->pg_flags & PQ_ACTIVE) != 0)
				pmap_page_protect(pp, VM_PROT_NONE);
			if ((pp->pg_flags & PG_CLEAN) != 0 &&
			    pmap_is_modified(pp))
				atomic_clearbits_int(&pp->pg_flags, PG_CLEAN);
			atomic_setbits_int(&pp->pg_flags, PG_CLEANCHK);

			needs_clean = ((pp->pg_flags & PG_CLEAN) == 0);
		}

		/*
		 * if we don't need a clean... load ppnext and dispose of pp
		 */
		if (!needs_clean) {
			/* load ppnext */
			if (by_list)
				ppnext = TAILQ_NEXT(pp, listq);
			else {
				if (curoff < stop)
					ppnext = uvm_pagelookup(uobj, curoff);
			}

			/* now dispose of pp */
			if (flags & PGO_DEACTIVATE) {
				if ((pp->pg_flags & PQ_INACTIVE) == 0 &&
				    pp->wire_count == 0) {
					pmap_page_protect(pp, VM_PROT_NONE);
					uvm_pagedeactivate(pp);
				}

			} else if (flags & PGO_FREE) {
				if (pp->pg_flags & PG_BUSY) {
					/* release busy pages */
					atomic_setbits_int(&pp->pg_flags,
					    PG_RELEASED);
				} else {
					pmap_page_protect(pp, VM_PROT_NONE);
					/* removed page from object */
					uvm_pagefree(pp);
				}
			}
			/* ppnext is valid so we can continue... */
			continue;
		}

		/*
		 * pp points to a page in the locked object that we are
		 * working on.   it is !PG_CLEAN and !PG_BUSY and we asked
		 * for cleaning (PGO_CLEANIT), so we clean it now.
		 *
		 * let uvm_pager_put attempt a clustered page out.
		 * note: locked: uobj and page queues.
		 */
		atomic_setbits_int(&pp->pg_flags, PG_BUSY);
		UVM_PAGE_OWN(pp, "uvn_flush");
		pmap_page_protect(pp, VM_PROT_READ);
		pp_version = pp->pg_version;
ReTry:
		ppsp = pps;
		npages = sizeof(pps) / sizeof(struct vm_page *);

		/* locked: page queues, uobj */
		result = uvm_pager_put(uobj, pp, &ppsp, &npages,
			   flags | PGO_DOACTCLUST, start, stop);
		/* unlocked: page queues, uobj */

		/*
		 * at this point nothing is locked.   if we did an async I/O
		 * it is remotely possible for the async i/o to complete and
		 * the page "pp" be freed or what not before we get a chance
		 * to relock the object.   in order to detect this, we have
		 * saved the version number of the page in "pp_version".
		 */

		/* relock! */
		simple_lock(&uobj->vmobjlock);
		uvm_lock_pageq();

		/*
		 * VM_PAGER_AGAIN: given the structure of this pager, this
		 * can only happen when we are doing async I/O and can't
		 * map the pages into kernel memory (pager_map) due to lack
		 * of vm space.   if this happens we drop back to sync I/O.
		 */
		if (result == VM_PAGER_AGAIN) {
			/*
			 * it is unlikely, but the page could have been
			 * released while we had the object lock dropped.
			 * we ignore this now and retry the I/O.   we will
			 * detect and handle the released page after the
			 * syncio I/O completes.
			 */
#ifdef DIAGNOSTIC
			if (flags & PGO_SYNCIO)
	panic("uvn_flush: PGO_SYNCIO return 'try again' error (impossible)");
#endif
			flags |= PGO_SYNCIO;
			goto ReTry;
		}

		/*
		 * the cleaning operation is now done.   finish up.   note
		 * that on error (!OK, !PEND) uvm_pager_put drops the cluster
		 * for us.   on success (OK, PEND) uvm_pager_put returns the
		 * cluster to us in ppsp/npages.
		 */

		/*
		 * for pending requests, we must drop the lock on the
		 * page in question.   the last page in the cluster is
		 * the one we took the lock on, so if we aren't freeing
		 * or deactivating we can load ppnext and move on to the
		 * next page right away.
		 */
		if (result == VM_PAGER_PEND) {

			if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
				/*
				 * no per-page ops: refresh ppnext and continue
				 */
				if (by_list) {
					if (pp->pg_version == pp_version)
						ppnext = TAILQ_NEXT(pp, listq);
					else
						/* reset */
						ppnext = TAILQ_FIRST(&uobj->memq);
				} else {
					if (curoff < stop)
						ppnext = uvm_pagelookup(uobj,
						    curoff);
				}
				continue;
			}

			/* PGO_FREE or PGO_DEACTIVATE: fall through */
		}

		/*
		 * need to look at each page of the I/O operation.   we defer
		 * processing "pp" until the last trip through this "for" loop
		 * so that we can load "ppnext" for the main loop after we
		 * play with the cluster pages [thus the "npages + 1" in the
		 * loop below].
		 */
		for (lcv = 0 ; lcv < npages + 1 ; lcv++) {

			/*
			 * handle ppnext for outside loop, and saving pp
			 * until the end.
			 */
			if (lcv < npages) {
				if (ppsp[lcv] == pp)
					continue; /* skip pp until the end */
				ptmp = ppsp[lcv];
			} else {
				ptmp = pp;

				/* set up next page for outer loop */
				if (by_list) {
					if (pp->pg_version == pp_version)
						ppnext = TAILQ_NEXT(pp, listq);
					else
						/* reset */
						ppnext = TAILQ_FIRST(&uobj->memq);
				} else {
					if (curoff < stop)
						ppnext = uvm_pagelookup(uobj, curoff);
				}
			}

			/*
			 * verify the page didn't get moved while obj was
			 * unlocked
			 */
			if (result == VM_PAGER_PEND && ptmp->uobject != uobj)
				continue;

			/*
			 * unbusy the page if I/O is done.   note that for
			 * pending I/O it is possible that the I/O op
			 * finished before we relocked the object (in
			 * which case the page is no longer busy).
			 */
			if (result != VM_PAGER_PEND) {
				if (ptmp->pg_flags & PG_WANTED)
					/* still holding object lock */
					wakeup(ptmp);

				atomic_clearbits_int(&ptmp->pg_flags,
				    PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				if (ptmp->pg_flags & PG_RELEASED) {

					/* pgo_releasepg wants this */
					uvm_unlock_pageq();
					if (!uvn_releasepg(ptmp, NULL))
						return (TRUE);

					uvm_lock_pageq();	/* relock */
					continue;		/* next page */

				} else {
					atomic_setbits_int(&ptmp->pg_flags,
					    PG_CLEAN|PG_CLEANCHK);
					if ((flags & PGO_FREE) == 0)
						pmap_clear_modify(ptmp);
				}
			}

			/*
			 * dispose of page
			 */
			if (flags & PGO_DEACTIVATE) {
				if ((pp->pg_flags & PQ_INACTIVE) == 0 &&
				    pp->wire_count == 0) {
					pmap_page_protect(ptmp, VM_PROT_NONE);
					uvm_pagedeactivate(ptmp);
				}

			} else if (flags & PGO_FREE) {
				if (result == VM_PAGER_PEND) {
					if ((ptmp->pg_flags & PG_BUSY) != 0)
						/* signal for i/o done */
						atomic_setbits_int(
						    &ptmp->pg_flags,
						    PG_RELEASED);
				} else {
					if (result != VM_PAGER_OK) {
						printf("uvn_flush: obj=%p, "
						   "offset=0x%llx.  error "
						   "during pageout.\n",
						    pp->uobject,
						    (long long)pp->offset);
						printf("uvn_flush: WARNING: "
						    "changes to page may be "
						    "lost!\n");
						retval = FALSE;
					}
					pmap_page_protect(ptmp, VM_PROT_NONE);
					uvm_pagefree(ptmp);
				}
			}

		}		/* end of "lcv" for loop */

	}		/* end of "pp" for loop */

	/*
	 * done with pagequeues: unlock
	 */
	uvm_unlock_pageq();

	/*
	 * now wait for all I/O if required.
	 */
	if (need_iosync) {

		UVMHIST_LOG(maphist,"  <<DOING IOSYNC>>",0,0,0,0);
		while (uvn->u_nio != 0) {
			uvn->u_flags |= UVM_VNODE_IOSYNC;
			UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock,
			    FALSE, "uvn_flush",0);
			simple_lock(&uvn->u_obj.vmobjlock);
		}
		if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED)
			wakeup(&uvn->u_flags);
		uvn->u_flags &= ~(UVM_VNODE_IOSYNC|UVM_VNODE_IOSYNCWANTED);
	}

	/* return, with object locked! */
	UVMHIST_LOG(maphist,"<- done (retval=0x%lx)",retval,0,0,0);
	return(retval);
}
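
/*
 * uvn_cluster
 *
 * we are about to do I/O in an object at offset.   this function is called
 * to establish a range of offsets around "offset" in which we can cluster
 * I/O.
 */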
void
uvn_cluster(uobj, offset, loffset, hoffset)
	struct uvm_object *uobj;
	voff_t offset;
	voff_t *loffset, *hoffset;	/* OUT */
{
	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
	*loffset = offset;

	if (*loffset >= uvn->u_size)
		panic("uvn_cluster: offset out of range");

	/*
	 * cluster an entire MAXBSIZE, but don't go past end of file.
	 */
	*hoffset = *loffset + MAXBSIZE;
	if (*hoffset > round_page(uvn->u_size))	/* past end? */
		*hoffset = round_page(uvn->u_size);

	return;
}
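
/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked!   we will _unlock_ it before starting I/O.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */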
int
uvn_put(uobj, pps, npages, flags)
	struct uvm_object *uobj;
	struct vm_page **pps;
	int npages, flags;
{
	int retval;

	/* note: object locked */
	retval = uvn_io((struct uvm_vnode*)uobj, pps, npages, flags, UIO_WRITE);
	/* note: object unlocked */

	return(retval);
}
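
/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => object must be locked!   we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */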
int
uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;		/* IN/OUT */
	int *npagesp;			/* IN (OUT if PGO_LOCKED) */
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	voff_t current_offset;
	struct vm_page *ptmp;
	int lcv, result, gotpages;
	boolean_t done;
	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "flags=%ld", flags,0,0,0);

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */
	if (flags & PGO_LOCKED) {

		/*
		 * gotpages is the current number of pages we've gotten (which
		 * we pass back up to caller via *npagesp).
		 */
		gotpages = 0;

		/*
		 * step 1a: get pages that are already resident.   only do this
		 * if the data structures are locked (i.e. the first time
		 * through).
		 */
		done = TRUE;	/* be optimistic */

		for (lcv = 0, current_offset = offset ; lcv < *npagesp ;
		    lcv++, current_offset += PAGE_SIZE) {

			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			/* lookup page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* to be useful must get a non-busy, non-released pg */
			if (ptmp == NULL ||
			    (ptmp->pg_flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx || (flags & PGO_ALLPAGES)
				    != 0)
					done = FALSE;	/* need to wait or I/O! */
				continue;
			}

			/*
			 * useful page: busy it and plug it in our
			 * result array
			 */
			atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(ptmp, "uvn_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we need
		 * to unlock and do some waiting or I/O.
		 */

		*npagesp = gotpages;		/* let caller know */
		if (done)
			return(VM_PAGER_OK);		/* bingo! */
		else
			/* need to unlock and do I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 *
	 * note: because we can't do async I/O at this level we get things
	 * page at a time (otherwise we'd chunk).   the VOP_READ() will do
	 * async-read-ahead for us at a lower level.
	 */
	for (lcv = 0, current_offset = offset;
	    lcv < *npagesp ; lcv++, current_offset += PAGE_SIZE) {

		/* skip over pages we've already gotten or don't want */
		/* skip over pages we don't _have_ to get */
		if (pps[lcv] != NULL || (lcv != centeridx &&
		    (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.   if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the
		 * lookup.   if the page we found is neither busy nor
		 * released, then we busy it (so we own it) and plug it into
		 * pps[lcv].   this breaks the following while loop and
		 * indicates we are ready to move on to the next page in the
		 * "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the
		 * data.
		 */
		while (pps[lcv] == NULL) {	/* top of "pps" while loop */

			/* look for a current page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* nope?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("uvn_getpage");
					simple_lock(&uobj->vmobjlock);

					/* goto top of pps while loop */
					continue;
				}

				/*
				 * got new page ready for I/O.   break pps
				 * while loop.   pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->pg_flags & (PG_BUSY|PG_RELEASED)) != 0) {
				atomic_setbits_int(&ptmp->pg_flags, PG_WANTED);
				UVM_UNLOCK_AND_WAIT(ptmp,
				    &uobj->vmobjlock, FALSE, "uvn_get",0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page is resident and
			 * unbusy.   we busy it now (so we own it) and set
			 * pps[lcv] (so that we exit the while loop).
			 */
			atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(ptmp, "uvn_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own a valid page at the correct offset, pps[lcv]
		 * will point to it.   nothing more to do except go to the
		 * next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed I/O to fill it with valid data.
		 */
		result = uvn_io((struct uvm_vnode *) uobj, &ptmp, 1,
		    PGO_SYNCIO, UIO_READ);

		/*
		 * I/O done.   object is unlocked (by uvn_io).   because we
		 * used syncio the result can not be PEND or AGAIN.   we must
		 * relock and check for errors.
		 */

		/* lock object.   check for errors.   */
		simple_lock(&uobj->vmobjlock);
		if (result != VM_PAGER_OK) {
			if (ptmp->pg_flags & PG_WANTED)
				/* object lock still held */
				wakeup(ptmp);

			atomic_clearbits_int(&ptmp->pg_flags,
			    PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(ptmp, NULL);
			uvm_lock_pageq();
			uvm_pagefree(ptmp);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			return(result);
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that the page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */
		atomic_clearbits_int(&ptmp->pg_flags, PG_FAKE);	/* data is valid ... */
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */
	simple_unlock(&uobj->vmobjlock);
	return (VM_PAGER_OK);
}
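
/*
 * uvn_io: do I/O to a vnode
 *
 * => object must be locked!   we will _unlock_ it before starting I/O.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: we use VOP_READ/VOP_WRITE, which are only synchronous.
 */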
int
uvn_io(uvn, pps, npages, flags, rw)
	struct uvm_vnode *uvn;
	vm_page_t *pps;
	int npages, flags, rw;
{
	struct vnode *vn;
	struct uio uio;
	struct iovec iov;
	vaddr_t kva;
	off_t file_offset;
	int waitf, result, mapinflags;
	size_t got, wanted;
	UVMHIST_FUNC("uvn_io"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "rw=%ld", rw,0,0,0);

	/*
	 * init values
	 */
	waitf = (flags & PGO_SYNCIO) ? M_WAITOK : M_NOWAIT;
	vn = (struct vnode *) uvn;
	file_offset = pps[0]->offset;

	/*
	 * check for sync'ing I/O.
	 */
	while (uvn->u_flags & UVM_VNODE_IOSYNC) {
		if (waitf == M_NOWAIT) {
			simple_unlock(&uvn->u_obj.vmobjlock);
			UVMHIST_LOG(maphist,"<- try again (iosync)",0,0,0,0);
			return(VM_PAGER_AGAIN);
		}
		uvn->u_flags |= UVM_VNODE_IOSYNCWANTED;
		UVM_UNLOCK_AND_WAIT(&uvn->u_flags, &uvn->u_obj.vmobjlock,
		    FALSE, "uvn_iosync",0);
		simple_lock(&uvn->u_obj.vmobjlock);
	}

	/*
	 * check size
	 */
	if (file_offset >= uvn->u_size) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"<- BAD (size check)",0,0,0,0);
		return(VM_PAGER_BAD);
	}

	/*
	 * first try and map the pages in (without waiting)
	 */
	mapinflags = (rw == UIO_READ) ?
	    UVMPAGER_MAPIN_READ : UVMPAGER_MAPIN_WRITE;

	kva = uvm_pagermapin(pps, npages, mapinflags);
	if (kva == 0 && waitf == M_NOWAIT) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"<- mapin failed (try again)",0,0,0,0);
		return(VM_PAGER_AGAIN);
	}

	/*
	 * ok, now bump u_nio up.   at this point we are done with uvn
	 * and can unlock it.   if we still don't have a kva, try again
	 * (this time with sleep ok).
	 */
	uvn->u_nio++;			/* we have an I/O in progress! */
	simple_unlock(&uvn->u_obj.vmobjlock);
	/* NOTE: object now unlocked */
	if (kva == 0)
		kva = uvm_pagermapin(pps, npages,
		    mapinflags | UVMPAGER_MAPIN_WAITOK);

	/*
	 * ok, mapped in.   our pages are PG_BUSY so they are not going to
	 * get touched (so we can look at "offset" without having to lock
	 * the object).   set up for I/O.
	 */

	/* fill out uio/iov */
	iov.iov_base = (caddr_t) kva;
	wanted = npages << PAGE_SHIFT;
	if (file_offset + wanted > uvn->u_size)
		wanted = uvn->u_size - file_offset;	/* don't go past EOF */
	iov.iov_len = wanted;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = file_offset;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = rw;
	uio.uio_resid = wanted;
	uio.uio_procp = curproc;

	/* do the I/O! */
	UVMHIST_LOG(maphist, "calling VOP",0,0,0,0);

	/*
	 * This process may already have this vnode locked, if we faulted in
	 * copyin() or copyout() on a region backed by this vnode
	 * while doing I/O to the vnode.  If this is the case, don't
	 * panic.. instead, return the error to the user.
	 *
	 * XXX this is a stopgap to prevent a panic.
	 * Ideally, this kind of operation *should* work.
	 */
	result = 0;
	if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
		result = vn_lock(vn, LK_EXCLUSIVE | LK_RECURSEFAIL, curproc);

	if (result == 0) {
		/* NOTE: vnode now locked! */
		if (rw == UIO_READ)
			result = VOP_READ(vn, &uio, 0, curproc->p_ucred);
		else
			result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred);

		if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
			VOP_UNLOCK(vn, 0, curproc);
	}

	/* NOTE: vnode now unlocked (unless vnislocked) */
	UVMHIST_LOG(maphist, "done calling VOP",0,0,0,0);

	/*
	 * result == unix style errno (0 == OK!)
	 *
	 * zero out rest of buffer (if needed)
	 */
	if (result == 0) {
		got = wanted - uio.uio_resid;

		if (wanted && got == 0) {
			result = EIO;
		} else if (got < PAGE_SIZE * npages && rw == UIO_READ) {
			memset((void *) (kva + got), 0,
			    (npages << PAGE_SHIFT) - got);
		}
	}

	/*
	 * now remove pager mapping
	 */
	uvm_pagermapout(kva, npages);

	/*
	 * now clean up the object (i.e. drop I/O count)
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	/* NOTE: object now locked! */

	uvn->u_nio--;			/* I/O DONE! */
	if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0) {
		wakeup(&uvn->u_nio);
	}
	simple_unlock(&uvn->u_obj.vmobjlock);
	/* NOTE: object now unlocked! */

	/*
	 * done!
	 */
	UVMHIST_LOG(maphist, "<- done (result %ld)", result,0,0,0);
	if (result == 0)
		return(VM_PAGER_OK);
	else
		return(VM_PAGER_ERROR);
}
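
/*
 * uvm_vnp_uncache: disable "persisting" in a vnode... when the last
 * reference is gone we will kill the object (flushing dirty pages back
 * to the vnode if needed).
 *
 * => returns TRUE if there is no active uvn (so the vnode can be
 *	recycled), FALSE if the object still has live mappings.
 * => caller must VOP_LOCK the vnode before calling us (the DEBUG code
 *	below checks this); we may temporarily unlock and relock it.
 */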
boolean_t
uvm_vnp_uncache(vp)
	struct vnode *vp;
{
	struct uvm_vnode *uvn = &vp->v_uvm;

	/*
	 * lock uvn part of the vnode and check if we need to do anything
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	if ((uvn->u_flags & UVM_VNODE_VALID) == 0 ||
	    (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		return(TRUE);
	}

	/*
	 * we have a valid, non-blocked uvn.   clear persist flag.
	 * if uvn is currently active we can return now.
	 */
	uvn->u_flags &= ~UVM_VNODE_CANPERSIST;
	if (uvn->u_obj.uo_refs) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		return(FALSE);
	}

	/*
	 * uvn is currently persisting!   we have to gain a reference to
	 * it so that we can call uvn_detach to kill the uvn.
	 */
	VREF(vp);			/* seems ok, even with VOP_LOCK */
	uvn->u_obj.uo_refs++;		/* value is now 1 */
	simple_unlock(&uvn->u_obj.vmobjlock);

#ifdef DEBUG
	/*
	 * the vnode should be VOP_LOCK'd; verify that here.
	 */
	if (!VOP_ISLOCKED(vp)) {
		boolean_t is_ok_anyway = FALSE;
#if defined(NFSCLIENT)
		extern int (**nfsv2_vnodeop_p)(void *);
		extern int (**spec_nfsv2nodeop_p)(void *);
#if defined(FIFO)
		extern int (**fifo_nfsv2nodeop_p)(void *);
#endif

		/* vnode is NOT VOP_LOCKed: some vnode types _never_ lock */
		if (vp->v_op == nfsv2_vnodeop_p ||
		    vp->v_op == spec_nfsv2nodeop_p) {
			is_ok_anyway = TRUE;
		}
#if defined(FIFO)
		if (vp->v_op == fifo_nfsv2nodeop_p) {
			is_ok_anyway = TRUE;
		}
#endif	/* FIFO */
#endif	/* NFSCLIENT */
		if (!is_ok_anyway)
			panic("uvm_vnp_uncache: vnode not locked!");
	}
#endif	/* DEBUG */

	/*
	 * now drop our reference to the vnode.   if we have the sole
	 * reference to the vnode then this will cause it to die [as we
	 * just cleared the persist flag].   we have to unlock the vnode
	 * while we are doing this as it may trigger I/O.
	 *
	 * note: it might be possible for uvn to get reclaimed while we are
	 * unlocked causing us to return TRUE when we should not.   we
	 * ignore this as a false-positive return value doesn't hurt us.
	 */
	VOP_UNLOCK(vp, 0, curproc);
	uvn_detach(&uvn->u_obj);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);

	/*
	 * and return in the locked state.
	 */
	return(TRUE);
}
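
/*
 * uvm_vnp_setsize: grow or shrink a vnode uvn
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */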
void
uvm_vnp_setsize(vp, newsize)
	struct vnode *vp;
	voff_t newsize;
{
	struct uvm_vnode *uvn = &vp->v_uvm;

	/*
	 * lock uvn and check for valid object, and if valid: do it!
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	if (uvn->u_flags & UVM_VNODE_VALID) {

		/*
		 * now check if the size has changed: if we shrink we had
		 * better toss some pages...
		 */
		if (uvn->u_size > newsize) {
			(void)uvn_flush(&uvn->u_obj, newsize,
			    uvn->u_size, PGO_FREE);
		}
		uvn->u_size = newsize;
	}
	simple_unlock(&uvn->u_obj.vmobjlock);

	/*
	 * done
	 */
	return;
}
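
/*
 * uvm_vnp_sync: flush all dirty VM pages back to their backing vnodes.
 *
 * => called with no VM structures locked
 * => only one process can do a sync at a time (because the uvn
 *	structure only has one queue for sync'ing).   we ensure this
 *	by holding uvn_sync_lock across the operation.
 */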
void
uvm_vnp_sync(mp)
	struct mount *mp;
{
	struct uvm_vnode *uvn;
	struct vnode *vp;
	boolean_t got_lock;

	/*
	 * step 1: ensure we are only ones using the uvn_sync_q by locking
	 * our lock...
	 */
	rw_enter_write(&uvn_sync_lock);

	/*
	 * step 2: build up a simpleq of uvns of interest based on the
	 * write list.   we gain a reference to the uvns of interest.
	 */
	SIMPLEQ_INIT(&uvn_sync_q);
	LIST_FOREACH(uvn, &uvn_wlist, u_wlist) {

		vp = (struct vnode *) uvn;
		if (mp && vp->v_mount != mp)
			continue;

		/* attempt to gain reference */
		while ((got_lock = simple_lock_try(&uvn->u_obj.vmobjlock)) ==
		    FALSE &&
		    (uvn->u_flags & UVM_VNODE_BLOCKED) == 0)
			/* spin */ ;

		/*
		 * we will exit the loop if we got the lock, or if we failed
		 * to get the lock but noticed the vnode was "blocked" -- in
		 * the latter case the vnode must be a dying vnode, and since
		 * dying vnodes are in the process of being flushed out, we
		 * can safely skip it.
		 *
		 * note that uvn must already be valid because we found it on
		 * the wlist (this also means it can't be ALOCK'd).
		 */
		if (!got_lock || (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
			if (got_lock)
				simple_unlock(&uvn->u_obj.vmobjlock);
			continue;		/* skip it */
		}

		/*
		 * gain reference.   watch out for persisting uvns (need to
		 * regain vnode REF).
		 */
		if (uvn->u_obj.uo_refs == 0)
			VREF(vp);
		uvn->u_obj.uo_refs++;
		simple_unlock(&uvn->u_obj.vmobjlock);

		/*
		 * got reference.   put it on our list.
		 */
		SIMPLEQ_INSERT_HEAD(&uvn_sync_q, uvn, u_syncq);
	}

	/*
	 * step 3: we now have a list of uvn's that may need cleaning.
	 * we are holding the uvn_sync_lock, but have dropped the other
	 * locks, so we can sleep in this loop if we want to.
	 */
	SIMPLEQ_FOREACH(uvn, &uvn_sync_q, u_syncq) {
		simple_lock(&uvn->u_obj.vmobjlock);
#ifdef DEBUG
		if (uvn->u_flags & UVM_VNODE_DYING) {
			printf("uvm_vnp_sync: dying vnode on sync list\n");
		}
#endif
		uvn_flush(&uvn->u_obj, 0, 0,
		    PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST);

		/*
		 * if we have the only reference and we just cleaned the uvn,
		 * then we can pull it out of the UVM_VNODE_WRITEABLE state
		 * thus allowing us to avoid thinking about flushing it again
		 * on later sync ops.
		 */
		if (uvn->u_obj.uo_refs == 1 &&
		    (uvn->u_flags & UVM_VNODE_WRITEABLE)) {
			LIST_REMOVE(uvn, u_wlist);
			uvn->u_flags &= ~UVM_VNODE_WRITEABLE;
		}

		simple_unlock(&uvn->u_obj.vmobjlock);

		/* now drop our reference to the uvn */
		uvn_detach(&uvn->u_obj);
	}

	/*
	 * done!   release sync lock.
	 */
	rw_exit_write(&uvn_sync_lock);
}