This source file includes the following definitions:
- uvm_km_init
- uvm_km_suballoc
- uvm_km_pgremove
- uvm_km_pgremove_intrsafe
- uvm_km_kmemalloc
- uvm_km_free
- uvm_km_free_wakeup
- uvm_km_alloc1
- uvm_km_valloc
- uvm_km_valloc_align
- uvm_km_valloc_prefer_wait
- uvm_km_valloc_wait
- uvm_km_alloc_poolpage1
- uvm_km_free_poolpage1
- uvm_km_page_init
- uvm_km_getpage
- uvm_km_putpage
- uvm_km_page_init
- uvm_km_createthread
- uvm_km_thread
- uvm_km_getpage
- uvm_km_putpage
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>

/*
 * global data: the kernel map, exported to the rest of the kernel
 */

struct vm_map *kernel_map = NULL;

/*
 * local data: static storage backing kernel_map
 */

static struct vm_map kernel_map_store;

/*
 * uvm_km_init: init the kernel map and kernel object.  kernel virtual
 * address space that is already in use ([base, start)) is reserved with
 * a fixed mapping so it is never handed out again.
 */
void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, initialize the aobj subsystem and create the kernel
	 * object: it spans the entire kernel virtual address range.
	 */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve the kernel space already in use
	 * (between "base" and "start").
	 */
	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (base != start && uvm_map(&kernel_map_store, &base, start - base,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */
	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  the submap's
 * address range is returned in *min/*max; if "submap" is NULL a new
 * vm_map is allocated, otherwise the caller-supplied one is set up.
 */
struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
    int flags, boolean_t fixed, struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */
	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set bounds (*min was filled in by uvm_map)
	 */
	*max = *min + size;

	/*
	 * add a reference to the parent pmap, then create or init the submap
	 */
	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now plug the submap into the parent map
	 */
	if (uvm_map_submap(map, *min, *max, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
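
/*
 * Usage sketch (hypothetical, not part of this file): callers typically
 * use uvm_km_suballoc() at boot time to carve a submap out of
 * kernel_map, passing static storage for the submap so no allocation is
 * needed.  the names below ("sub_map", "sub_map_store", "minva",
 * "maxva") are illustrative only:
 *
 *	static struct vm_map sub_map_store;
 *	struct vm_map *sub_map;
 *	vaddr_t minva, maxva;
 *
 *	sub_map = uvm_km_suballoc(kernel_map, &minva, &maxva,
 *	    16 * PAGE_SIZE, VM_MAP_PAGEABLE, FALSE, &sub_map_store);
 *
 * on return, [minva, maxva) is the kernel virtual range now managed by
 * sub_map rather than directly by kernel_map.
 */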

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap part of anonymous kernel memory you want to toss
 *    the backing pages right away.
 */
void
uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end)
{
	struct vm_page *pp;
	voff_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist," page %p, busy=%ld", pp,
		    pp->pg_flags & PG_BUSY, 0, 0);

		if (pp->pg_flags & PG_BUSY) {
			/* owner must check for PG_RELEASED when done */
			atomic_setbits_int(&pp->pg_flags, PG_RELEASED);
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, curoff >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
	}
}

/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 * mappings.  the pages were entered with pmap_kenter_pa(), so they are
 * looked up through the kernel pmap rather than through an object, and
 * they can never be busy or have swap slots, so freeing is simple.
 */
void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;

	for (va = start; va < end; va += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), va, &pa))
			continue;
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("uvm_km_pgremove_intrsafe: no page");
		uvm_pagefree(pg);
	}
}

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *    free VM space in the map... the caller should be prepared to
 *    handle this case.
 * => we return the KVA of memory allocated
 */
vaddr_t
uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj, vsize_t size,
    int flags)
{
	vaddr_t kva, loopva;
	voff_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist," (map=%p, obj=%p, size=0x%lx, flags=%d)",
	    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */
	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */
	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */
	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%lx)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover the object offset from the virtual address
	 */
	if (obj != NULL)
		offset = kva - vm_map_min(kernel_map);
	else
		offset = 0;

	UVMHIST_LOG(maphist, " kva=0x%lx, offset=0x%lx", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only
	 * ones who should ever get a handle on this area of VM.
	 */
	loopva = kva;
	while (loopva != kva + size) {
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (pg) {
			atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) &&
			    uvmexp.swpgonly == uvmexp.swpages)) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: an intrsafe (obj == NULL) allocation uses
		 * pmap_kenter_pa(), otherwise a normal wired pmap_enter().
		 */
		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_RW);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_RW,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%lx)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */
void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up anyone
 * sleeping on the map waiting for space to become available.
 */
void
uvm_km_free_wakeup(struct vm_map *map, vaddr_t addr, vsize_t size)
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
	    &dead_entries, NULL);
	wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */
vaddr_t
uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
{
	vaddr_t kva, loopva;
	voff_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=%p, size=0x%lx)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */
	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover the object offset from the virtual address
	 */
	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist," kva=0x%lx, offset=0x%lx", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */
	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released; wait for the owner and retry.
		 */
		if (pg) {
			if ((pg->pg_flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    FALSE, "km_alloc", 0);
			continue;
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (__predict_false(pg == NULL)) {
			if (curproc == uvm.pagedaemon_proc) {
				/*
				 * the pagedaemon cannot sleep waiting for
				 * memory, so give back what we have mapped
				 * so far and fail.
				 */
				uvm_unmap(map, kva, loopva - kva);
				return (NULL);
			} else {
				uvm_wait("km_alloc1w");	/* wait for memory */
				continue;
			}
		}

		/*
		 * map it in; we are never called with an intrsafe object,
		 * so we always use regular old pmap_enter().
		 */
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);

		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(map->pmap);

	/*
	 * zero on request (note that "size" is now zero due to the loop
	 * above, so we reconstruct the length as loopva - kva).
	 */
	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%lx)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */
vaddr_t
uvm_km_valloc(struct vm_map *map, vsize_t size)
{
	return(uvm_km_valloc_align(map, size, 0));
}

vaddr_t
uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align)
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=%p, size=0x%lx)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space; it will be demand filled from
	 * kernel_object.
	 */
	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%lx)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_prefer_wait: allocate zero-fill memory in the kernel's
 * address space, preferring the given offset; sleep until space is
 * available if necessary.
 *
 * => memory is not allocated until fault time
 */
vaddr_t
uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=%p, size=0x%lx)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space; it will be demand filled
		 * from kernel_object.
		 */
		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == 0)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%lx)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on the map), then retry.
		 */
		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

vaddr_t
uvm_km_valloc_wait(struct vm_map *map, vsize_t size)
{
	return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
}

/*
 * uvm_km_alloc_poolpage1: allocate a single page for the pool allocator.
 *
 * => if the pmap can direct-map physical memory we use that and bypass
 *    kernel_map entirely; otherwise we fall back on uvm_km_kmemalloc().
 */
vaddr_t
uvm_km_alloc_poolpage1(struct vm_map *map, struct uvm_object *obj,
    boolean_t waitok)
{
#if defined(__HAVE_PMAP_DIRECT)
	struct vm_page *pg;
	vaddr_t va;

again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = pmap_map_direct(pg);
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * raise to splvm around the allocation: the map may also be used
	 * from interrupt context, and blocking interrupts here does not
	 * hurt maps that are not.
	 */
	s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif
}

/*
 * uvm_km_free_poolpage1: free a page previously allocated for the pool
 * allocator, undoing whichever mapping method was used to allocate it.
 */
void
uvm_km_free_poolpage1(struct vm_map *map, vaddr_t addr)
{
#if defined(__HAVE_PMAP_DIRECT)
	uvm_pagefree(pmap_unmap_direct(addr));
#else
	int s;

	/*
	 * raise to splvm for the same reason as in uvm_km_alloc_poolpage1().
	 */
	s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif
}

#if defined(__HAVE_PMAP_DIRECT)
/*
 * uvm_km_page allocator, __HAVE_PMAP_DIRECT archs: physical memory is
 * direct-mapped into kernel virtual address space, so single pages can
 * be handed out without consuming kernel_map entries.
 */
void
uvm_km_page_init(void)
{
	/* nothing to do */
}

void *
uvm_km_getpage(boolean_t waitok)
{
	return ((void *)uvm_km_alloc_poolpage1(NULL, NULL, waitok));
}

void
uvm_km_putpage(void *v)
{
	uvm_km_free_poolpage1(NULL, (vaddr_t)v);
}

#else
/*
 * uvm_km_page allocator, non __HAVE_PMAP_DIRECT archs: keep a reserve of
 * pre-allocated, pre-mapped kernel pages on a simple free list so that
 * single pages can be handed out quickly (e.g. as a pool backend)
 * without going through the full allocator.  a kernel thread refills
 * the list whenever it drops below the low-water mark.
 */
int uvm_km_pages_lowat;		/* low-water mark for the free list */
int uvm_km_pages_free;		/* number of pages currently on the list */
struct km_page {
	struct km_page *next;
} *uvm_km_pages_head;

void uvm_km_createthread(void *);
void uvm_km_thread(void *);

/*
 * uvm_km_page_init: pre-allocate the initial reserve of pages and
 * arrange for the maintenance thread to be created once kthreads work.
 */
void
uvm_km_page_init(void)
{
	struct km_page *page;
	int i;

	if (!uvm_km_pages_lowat) {
		/* based on physmem, pick a reasonable low-water mark */
		uvm_km_pages_lowat = physmem / 256;
		if (uvm_km_pages_lowat > 2048)
			uvm_km_pages_lowat = 2048;
		if (uvm_km_pages_lowat < 128)
			uvm_km_pages_lowat = 128;
	}

	for (i = 0; i < uvm_km_pages_lowat * 4; i++) {
		page = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE);
		page->next = uvm_km_pages_head;
		uvm_km_pages_head = page;
	}
	uvm_km_pages_free = i;

	/* tone down if really high */
	if (uvm_km_pages_lowat > 512)
		uvm_km_pages_lowat = 512;

	kthread_create_deferred(uvm_km_createthread, NULL);
}

void
uvm_km_createthread(void *arg)
{
	kthread_create(uvm_km_thread, NULL, NULL, "kmthread");
}

/*
 * uvm_km_thread: endless loop that keeps the page reserve topped up.
 * pages are grabbed from kernel_map in batches of 16 and spliced onto
 * the front of the free list at splvm.
 */
void
uvm_km_thread(void *arg)
{
	struct km_page *head, *tail, *page;
	int i, s, want;

	for (i = want = 16; ; ) {
		if (i < want || uvm_km_pages_free >= uvm_km_pages_lowat)
			tsleep(&uvm_km_pages_head, PVM, "kmalloc", 0);
		for (i = 0; i < want; i++) {
			page = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE);
			if (i == 0)
				head = tail = page;
			if (page == NULL)
				break;
			page->next = head;
			head = page;
		}
		if (head != NULL) {
			s = splvm();
			tail->next = uvm_km_pages_head;
			uvm_km_pages_head = head;
			uvm_km_pages_free += i;
			splx(s);
		}
		if (uvm_km_pages_free)
			wakeup(&uvm_km_pages_free);
	}
}

/*
 * uvm_km_getpage: pop one page off the free list.  if the list is empty
 * and "waitok" is set, sleep until the maintenance thread refills it.
 * kick the thread if we have dropped below the low-water mark.
 */
void *
uvm_km_getpage(boolean_t waitok)
{
	struct km_page *page = NULL;
	int s;

	s = splvm();
	for (;;) {
		page = uvm_km_pages_head;
		if (page) {
			uvm_km_pages_head = page->next;
			uvm_km_pages_free--;
			break;
		}
		if (!waitok)
			break;
		tsleep(&uvm_km_pages_free, PVM, "getpage", 0);
	}
	splx(s);
	if (uvm_km_pages_free < uvm_km_pages_lowat)
		wakeup(&uvm_km_pages_head);
	return (page);
}

/*
 * uvm_km_putpage: push a page back onto the free list.
 */
void
uvm_km_putpage(void *v)
{
	struct km_page *page = v;
	int s;

	s = splvm();
	page->next = uvm_km_pages_head;
	uvm_km_pages_head = page;
	uvm_km_pages_free++;
	splx(s);
}
#endif
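
/*
 * Usage sketch (hypothetical, not part of this file): uvm_km_getpage()
 * and uvm_km_putpage() hand out and take back single, page-sized,
 * already-mapped kernel allocations, which is how a pool-style consumer
 * would typically use them; "va" below is an illustrative name only:
 *
 *	void *va;
 *
 *	va = uvm_km_getpage(TRUE);	(TRUE: may sleep for a free page)
 *	if (va != NULL) {
 *		(use the PAGE_SIZE bytes at va)
 *		uvm_km_putpage(va);	(give the page back)
 *	}
 */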