This source file includes the following definitions:
- TAILQ_HEAD
- lkmopen
- lkmalloc
- lkmfree
- lkm_list
- lkmlookup
- lkmunreserve
- lkmclose
- lkmioctl
- sys_lkmnosys
- lkmenodev
- lkm_nofunc
- lkmexists
- _lkm_syscall
- _lkm_vfs
- _lkm_dev
- _lkm_exec
- lkmdispatch
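For orientation, the ioctl protocol implemented by lkmioctl() below is normally driven from userland by modload(8). The fragment that follows is only a minimal sketch of that sequence under stated assumptions: the device node is taken to be /dev/lkm, the module image is assumed to be already linked and relocated for the address returned by LMRESERV, and symbol loading (LMLOADSYMS) and detailed error reporting are omitted; load_module() and entryoff are illustrative names, not part of this file.

/*
 * Sketch of the load sequence against lkmioctl(): LMRESERV reserves
 * kernel memory, LMLOADBUF copies the image in MODIOBUF-sized chunks,
 * LMREADY calls the module's entry point, LMUNRESRV backs out on error.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/lkm.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
load_module(char *image, size_t len, unsigned long entryoff)
{
	struct lmc_resrv resrv;
	struct lmc_loadbuf buf;
	unsigned long entry;
	size_t off, n;
	int fd;

	if ((fd = open("/dev/lkm", O_RDWR)) == -1)	/* device node is an assumption */
		return (-1);

	memset(&resrv, 0, sizeof(resrv));
	resrv.size = len;				/* no symbol table in this sketch */
	if (ioctl(fd, LMRESERV, &resrv) == -1)		/* kernel picks a slot and an address */
		goto bad;

	for (off = 0; off < len; off += n) {
		n = len - off;
		if (n > MODIOBUF)
			n = MODIOBUF;
		buf.cnt = n;
		buf.data = image + off;
		if (ioctl(fd, LMLOADBUF, &buf) == -1)	/* copied to resrv.addr + off */
			goto bad;
	}

	entry = resrv.addr + entryoff;			/* assumes caller knows the entry offset */
	if (ioctl(fd, LMREADY, &entry) == -1)		/* runs entry(curp, LKM_E_LOAD, ver) */
		goto bad;

	close(fd);
	return (0);
bad:
	ioctl(fd, LMUNRESRV, NULL);
	close(fd);
	return (-1);
}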
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/exec.h>
#include <sys/syscallargs.h>
#include <sys/conf.h>

#include <sys/lkm.h>
#include <sys/syscall.h>

#include <uvm/uvm_extern.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#endif
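/* lkm_v flags: device in use, a sleeper waiting for it, lkminit() has run */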
#define	LKM_ALLOC	0x01
#define	LKM_WANT	0x02
#define	LKM_INIT	0x04
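/* lkm_state: where the current load sequence is in the reserve/load/ready cycle */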
#define	LKMS_IDLE		0x00
#define	LKMS_RESERVED		0x01
#define	LKMS_LOADING		0x02
#define	LKMS_LOADING_SYMS	0x03
#define	LKMS_LOADED		0x04
#define	LKMS_UNLOADING		0x08
static int lkm_v = 0;
static int lkm_state = LKMS_IDLE;

static TAILQ_HEAD(lkmods, lkm_table) lkmods;	/* list of loaded modules */
static struct lkm_table *curp;			/* module the current operation uses */

static struct lkm_table *lkmalloc(void);
static void lkmfree(struct lkm_table *);
static struct lkm_table *lkmlookup(int, char *, int *);
static void lkmunreserve(void);
static int _lkm_syscall(struct lkm_table *, int);
static int _lkm_vfs(struct lkm_table *, int);
static int _lkm_dev(struct lkm_table *, int);
static int _lkm_exec(struct lkm_table *, int);

void lkminit(void);
int lkmexists(struct lkm_table *);

void init_exec(void);
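/*
 * Initialize the module list.  Called on first open of the lkm device
 * rather than at boot, so the list is set up lazily.
 */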
void
lkminit(void)
{

	TAILQ_INIT(&lkmods);
	lkm_v |= LKM_INIT;
}
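/*
 * Open the loadable module device; only one process may hold it at a time.
 */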
int
lkmopen(dev_t dev, int flag, int devtype, struct proc *p)
{
	int error;

	if (minor(dev) != 0)
		return (ENXIO);

	if (!(lkm_v & LKM_INIT))
		lkminit();

	/*
	 * If another process already holds the device, either fail the
	 * non-blocking open or wait for it to be released.
	 */
	while (lkm_v & LKM_ALLOC) {
		if (flag & FNONBLOCK)
			return (EBUSY);
		lkm_v |= LKM_WANT;

		/* sleep until the current holder closes the device */
		error = tsleep(&lkm_v, TTIPRI|PCATCH, "lkmopn", 0);
		if (error)
			return (error);
	}
	lkm_v |= LKM_ALLOC;

	return (0);
}
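/*
 * Allocate a new module table entry and assign it the lowest unused id.
 */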
static struct lkm_table *
lkmalloc(void)
{
	struct lkm_table *p, *ret = NULL;
	int id = 0;

	MALLOC(ret, struct lkm_table *, sizeof(*ret), M_DEVBUF, M_WAITOK);
	ret->refcnt = ret->depcnt = 0;
	ret->sym_id = -1;

	/*
	 * The list is kept sorted by id: walk it to find the first unused
	 * id and insert the new entry in front of the first gap.
	 */
	TAILQ_FOREACH(p, &lkmods, list) {
		if (id == p->id)
			id++;
		else
			break;
	}
	ret->id = id;
	if (p == NULL)
		TAILQ_INSERT_TAIL(&lkmods, ret, list);
	else
		TAILQ_INSERT_BEFORE(p, ret, list);

	return ret;
}
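/*
 * Remove a module table entry from the list and free it.
 */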
static void
lkmfree(struct lkm_table *p)
{

	TAILQ_REMOVE(&lkmods, p, list);
	free(p, M_DEVBUF);
	curp = NULL;
}
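/*
 * Step through the module list; pass NULL to get the first entry.
 */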
struct lkm_table *
lkm_list(struct lkm_table *p)
{

	if (p == NULL)
		p = TAILQ_FIRST(&lkmods);
	else
		p = TAILQ_NEXT(p, list);

	return (p);
}
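/*
 * Look a module up by id, or by name if the id is negative.
 */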
static struct lkm_table *
lkmlookup(int i, char *name, int *error)
{
	struct lkm_table *p = NULL;
	char istr[MAXLKMNAME];

	/*
	 * The list is sorted by id, so anything past the highest id in use
	 * (or any request at all if the list is empty) cannot match.
	 */
	p = TAILQ_LAST(&lkmods, lkmods);
	if (p == NULL || i > p->id) {
		*error = EINVAL;
		return NULL;
	}

	if (i < 0) {
		/* a negative id means lookup by name; copy it in from userland */
		*error = copyinstr(name, istr, MAXLKMNAME-1, NULL);
		if (*error)
			return NULL;
		istr[MAXLKMNAME-1] = '\0';

		TAILQ_FOREACH(p, &lkmods, list) {
			if (!strcmp(istr, p->private.lkm_any->lkm_name))
				break;
		}
	} else
		TAILQ_FOREACH(p, &lkmods, list)
			if (i == p->id)
				break;

	if (p == NULL)
		*error = ENOENT;

	return p;
}
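/*
 * Undo a pending reservation: drop any DDB symbol table and release the
 * memory reserved for the module image and its symbols.
 */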
static void
lkmunreserve(void)
{

	if (lkm_state == LKMS_IDLE)
		return;

#ifdef DDB
	if (curp && curp->sym_id != -1)
		db_del_symbol_table(curp->private.lkm_any->lkm_name);
#endif

	if (curp && curp->syms) {
		uvm_km_free(kernel_map, (vaddr_t)curp->syms, curp->sym_size);
		curp->syms = NULL;
	}

	/* also give back the memory that held the module image itself */
	if (curp && curp->area) {
		uvm_km_free(kernel_map, curp->area, curp->size);
		curp->area = 0;
	}
	lkm_state = LKMS_IDLE;
}
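/*
 * Close the loadable module device and wake up anyone waiting to open it.
 */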
int
lkmclose(dev_t dev, int flag, int mode, struct proc *p)
{

	if (minor(dev) != 0)
		return (ENXIO);

	if (!(lkm_v & LKM_ALLOC))
		return (EBADF);

	if (curp != NULL && !curp->refcnt) {
		/*
		 * A load was started but never completed (no successful
		 * LMREADY); back out whatever was reserved.
		 */
		lkmunreserve();
		lkmfree(curp);
	}
	lkm_v &= ~LKM_ALLOC;
	wakeup(&lkm_v);

	return (0);
}
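/*
 * Control the loadable module device: reserve space, load an image and
 * its symbols, activate it, unload it, or report its status.
 */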
int
lkmioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	int error = 0;

	if (securelevel > 0) {
		switch (cmd) {
		case LMSTAT:
			break;
		default:
			return (EPERM);
		}
	}

	if (!(flags & FWRITE)) {
		switch (cmd) {
		case LMSTAT:
			break;
		default:
			return (EACCES);
		}
	}

	switch (cmd) {
	case LMRESERV:
	case LMRESERV_O: {
		struct lmc_resrv *resrvp = (struct lmc_resrv *)data;

		if ((curp = lkmalloc()) == NULL) {
			error = ENOMEM;
			break;
		}
		curp->ver = (cmd == LMRESERV) ? LKM_VERSION : LKM_OLDVERSION;
		resrvp->slot = curp->id;

		/* allocate kernel memory to hold the module image */
		curp->size = resrvp->size;
		curp->area = uvm_km_zalloc(kernel_map, curp->size);
		curp->offset = 0;
		resrvp->addr = curp->area;

		if (cmd == LMRESERV && resrvp->sym_size) {
			curp->sym_size = resrvp->sym_size;
			curp->sym_symsize = resrvp->sym_symsize;
			curp->syms = (caddr_t)uvm_km_zalloc(kernel_map,
			    curp->sym_size);
			curp->sym_offset = 0;
			resrvp->sym_addr = curp->syms;
		} else {
			curp->sym_size = 0;
			curp->syms = 0;
			curp->sym_offset = 0;
			if (cmd == LMRESERV)
				resrvp->sym_addr = 0;
		}
#ifdef LKM_DEBUG
		printf("LKM: LMRESERV (actual = 0x%08lx)\n", curp->area);
		printf("LKM: LMRESERV (syms = 0x%08x)\n", curp->syms);
		printf("LKM: LMRESERV (adjusted = 0x%08lx)\n",
		    trunc_page(curp->area));
#endif

		lkm_state = LKMS_RESERVED;
		break;
	}

	case LMLOADBUF: {
		struct lmc_loadbuf *loadbufp = (struct lmc_loadbuf *)data;

		if ((lkm_state != LKMS_RESERVED && lkm_state != LKMS_LOADING)
		    || loadbufp->cnt < 0
		    || loadbufp->cnt > MODIOBUF
		    || loadbufp->cnt > (curp->size - curp->offset)) {
			error = ENOMEM;
			break;
		}

		/* copy the next chunk of the image into the reserved area */
		error = copyin(loadbufp->data,
		    (caddr_t)curp->area + curp->offset, loadbufp->cnt);
		if (error)
			break;

		if ((curp->offset + loadbufp->cnt) < curp->size)
			lkm_state = LKMS_LOADING;
		else
			lkm_state = LKMS_LOADING_SYMS;

		curp->offset += loadbufp->cnt;
		break;
	}

	case LMLOADSYMS: {
		struct lmc_loadbuf *loadbufp = (struct lmc_loadbuf *)data;

		if ((lkm_state != LKMS_LOADING &&
		    lkm_state != LKMS_LOADING_SYMS)
		    || loadbufp->cnt < 0
		    || loadbufp->cnt > MODIOBUF
		    || loadbufp->cnt > (curp->sym_size - curp->sym_offset)) {
			error = ENOMEM;
			break;
		}

		/* copy the next chunk of the symbol table */
		error = copyin(loadbufp->data, curp->syms +
		    curp->sym_offset, loadbufp->cnt);
		if (error)
			break;

		if ((curp->sym_offset + loadbufp->cnt) < curp->sym_size)
			lkm_state = LKMS_LOADING_SYMS;
		else
			lkm_state = LKMS_LOADED;

		curp->sym_offset += loadbufp->cnt;
		break;
	}

	case LMUNRESRV:
		lkmunreserve();
		if (curp)
			lkmfree(curp);
		break;

	case LMREADY:
		switch (lkm_state) {
		case LKMS_LOADED:
			break;
		case LKMS_LOADING:
		case LKMS_LOADING_SYMS:
			if ((curp->size - curp->offset) > 0)
				/* zero any part of the image that was never written */
				bzero((caddr_t)curp->area + curp->offset,
				    curp->size - curp->offset);
			break;
		default:
			return (ENXIO);
		}

		curp->entry = (int (*)(struct lkm_table *, int, int))
		    (*((long *) (data)));

#ifdef LKM_DEBUG
		printf("LKM: call entrypoint %x\n", curp->entry);
#endif

		/* call the module's entry point to let it initialize itself */
		error = (*(curp->entry))(curp, LKM_E_LOAD, curp->ver);
		if (error) {
			/* the module refused to load; back out the reservation */
			lkm_state = LKMS_UNLOADING;
			lkmunreserve();
			lkmfree(curp);
			break;
		}

#ifdef LKM_DEBUG
		printf("LKM: LMREADY, id=%d, dev=%d\n", curp->id,
		    curp->private.lkm_any->lkm_offset);
#endif

#ifdef DDB
		if (curp->syms && curp->sym_offset >= curp->sym_size) {
			curp->sym_id = db_add_symbol_table(curp->syms,
			    curp->syms + curp->sym_symsize,
			    curp->private.lkm_any->lkm_name,
			    curp->syms);
			printf("DDB symbols added: %ld bytes\n",
			    curp->sym_symsize);
		}
#endif

		curp->refcnt++;
		lkm_state = LKMS_IDLE;
		break;

	case LMUNLOAD: {
		struct lmc_unload *unloadp = (struct lmc_unload *)data;

		curp = lkmlookup(unloadp->id, unloadp->name, &error);
		if (curp == NULL)
			break;

		/* ask the module to clean up; a nonzero return vetoes the unload */
		if ((*(curp->entry))(curp, LKM_E_UNLOAD, curp->ver)) {
			error = EBUSY;
			break;
		}

		lkm_state = LKMS_UNLOADING;
		lkmunreserve();
		lkmfree(curp);
		break;
	}

	case LMSTAT: {
		struct lmc_stat *statp = (struct lmc_stat *)data;

		if ((curp = lkmlookup(statp->id, statp->name, &error)) == NULL)
			break;

		if ((error = (*curp->entry)(curp, LKM_E_STAT, curp->ver)))
			break;

		/* copy the module status back to the user's buffer */
		statp->id = curp->id;
		statp->offset = curp->private.lkm_any->lkm_offset;
		statp->type = curp->private.lkm_any->lkm_type;
		statp->area = curp->area;
		statp->size = curp->size / PAGE_SIZE;
		statp->private = (unsigned long)curp->private.lkm_any;
		statp->ver = curp->private.lkm_any->lkm_ver;
		copyoutstr(curp->private.lkm_any->lkm_name,
		    statp->name, MAXLKMNAME, NULL);

		break;
	}

	default:
		error = ENODEV;
		break;
	}

	return (error);
}
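/*
 * Stub for unused system call slots; _lkm_syscall() below looks for
 * sysent entries that point here when it needs a free slot.
 */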
int
sys_lkmnosys(struct proc *p, void *v, register_t *retval)
{

	return (sys_nosys(p, v, retval));
}
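/*
 * Stub for unused device switch entries; _lkm_dev() below looks for
 * bdevsw/cdevsw entries whose open routine points here.
 */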
int
lkmenodev(void)
{

	return (enodev());
}
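/*
 * Default entry routine for modules that need no work on load, unload
 * or stat.
 */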
int
lkm_nofunc(struct lkm_table *lkmtp, int cmd)
{

	return (0);
}
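/*
 * Return nonzero if a module with the same name is already loaded and
 * active.
 */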
int
lkmexists(struct lkm_table *lkmtp)
{
	struct lkm_table *p;

	TAILQ_FOREACH(p, &lkmods, list) {
		if (!strcmp(lkmtp->private.lkm_any->lkm_name,
		    p->private.lkm_any->lkm_name) && p->refcnt)
			return (1);
	}
	return (0);
}
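/*
 * Load, unload or stat a system call module by patching the sysent table.
 */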
static int
_lkm_syscall(struct lkm_table *lkmtp, int cmd)
{
	struct lkm_syscall *args = lkmtp->private.lkm_syscall;
	int i;
	int error = 0;

	switch (cmd) {
	case LKM_E_LOAD:
		/* don't load the same module twice */
		if (lkmexists(lkmtp))
			return (EEXIST);

		if ((i = args->lkm_offset) == -1) {
			/* search the table for a free slot */
			for (i = 0; i < SYS_MAXSYSCALL; i++)
				if (sysent[i].sy_call == sys_lkmnosys)
					break;

			if (i == SYS_MAXSYSCALL) {
				error = ENFILE;
				break;
			}
		} else {
			if (i < 0 || i >= SYS_MAXSYSCALL) {
				error = EINVAL;
				break;
			}
		}

		/* save the old system call entry */
		bcopy(&sysent[i], &args->lkm_oldent, sizeof(struct sysent));

		/* replace it with the module's entry */
		bcopy(args->lkm_sysent, &sysent[i], sizeof(struct sysent));

		/* report the slot that was used back to the module */
		args->lkm_offset = i;

		break;

	case LKM_E_UNLOAD:
		i = args->lkm_offset;

		/* put the old system call entry back */
		bcopy(&args->lkm_oldent, &sysent[i], sizeof(struct sysent));

		break;

	case LKM_E_STAT:
		break;
	}

	return (error);
}
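/*
 * Load, unload or stat a file system module via vfs_register() and
 * vfs_unregister().
 */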
static int
_lkm_vfs(struct lkm_table *lkmtp, int cmd)
{
	int error = 0;
	struct lkm_vfs *args = lkmtp->private.lkm_vfs;

	switch (cmd) {
	case LKM_E_LOAD:
		/* don't load the same module twice */
		if (lkmexists(lkmtp))
			return (EEXIST);
		error = vfs_register(args->lkm_vfsconf);
		break;

	case LKM_E_UNLOAD:
		error = vfs_unregister(args->lkm_vfsconf);
		break;

	case LKM_E_STAT:
		break;
	}

	return (error);
}
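/*
 * Load, unload or stat a device driver module by patching the block or
 * character device switch table.
 */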
static int
_lkm_dev(struct lkm_table *lkmtp, int cmd)
{
	struct lkm_dev *args = lkmtp->private.lkm_dev;
	int i;
	int error = 0;

	switch (cmd) {
	case LKM_E_LOAD:
		/* don't load the same module twice */
		if (lkmexists(lkmtp))
			return (EEXIST);

		switch (args->lkm_devtype) {
		case LM_DT_BLOCK:
			if ((i = args->lkm_offset) == -1) {
				/* search the table for a free slot */
				for (i = 0; i < nblkdev; i++)
					if (bdevsw[i].d_open ==
					    (dev_type_open((*))) lkmenodev)
						break;

				if (i == nblkdev) {
					error = ENFILE;
					break;
				}
			} else {
				if (i < 0 || i >= nblkdev) {
					error = EINVAL;
					break;
				}
			}

			/* save the old entry */
			bcopy(&bdevsw[i], &args->lkm_olddev.bdev,
			    sizeof(struct bdevsw));

			/* replace it with the module's entry */
			bcopy(args->lkm_dev.bdev, &bdevsw[i],
			    sizeof(struct bdevsw));

			/* report the major number used back to the module */
			args->lkm_offset = i;
			break;

		case LM_DT_CHAR:
			if ((i = args->lkm_offset) == -1) {
				/* search the table for a free slot */
				for (i = 0; i < nchrdev; i++)
					if (cdevsw[i].d_open ==
					    (dev_type_open((*))) lkmenodev)
						break;

				if (i == nchrdev) {
					error = ENFILE;
					break;
				}
			} else {
				if (i < 0 || i >= nchrdev) {
					error = EINVAL;
					break;
				}
			}

			/* save the old entry */
			bcopy(&cdevsw[i], &args->lkm_olddev.cdev,
			    sizeof(struct cdevsw));

			/* replace it with the module's entry */
			bcopy(args->lkm_dev.cdev, &cdevsw[i],
			    sizeof(struct cdevsw));

			/* report the major number used back to the module */
			args->lkm_offset = i;

			break;

		default:
			error = ENODEV;
			break;
		}
		break;

	case LKM_E_UNLOAD:
		i = args->lkm_offset;

		switch (args->lkm_devtype) {
		case LM_DT_BLOCK:
			/* put the old block device entry back */
			bcopy(&args->lkm_olddev.bdev, &bdevsw[i],
			    sizeof(struct bdevsw));
			break;

		case LM_DT_CHAR:
			/* put the old character device entry back */
			bcopy(&args->lkm_olddev.cdev, &cdevsw[i],
			    sizeof(struct cdevsw));
			break;

		default:
			error = ENODEV;
			break;
		}
		break;

	case LKM_E_STAT:
		break;
	}

	return (error);
}
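/*
 * Load, unload or stat an executable format module by patching the execsw
 * table and re-running init_exec().
 */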
static int
_lkm_exec(struct lkm_table *lkmtp, int cmd)
{
	struct lkm_exec *args = lkmtp->private.lkm_exec;
	int i;
	int error = 0;

	switch (cmd) {
	case LKM_E_LOAD:
		/* don't load the same module twice */
		if (lkmexists(lkmtp))
			return (EEXIST);

		if ((i = args->lkm_offset) == -1) {
			/* search the table for an empty slot */
			for (i = 0; i < nexecs; i++)
				if (execsw[i].es_check == NULL)
					break;

			if (i == nexecs) {
				error = ENFILE;
				break;
			}
		} else {
			if (i < 0 || i >= nexecs) {
				error = EINVAL;
				break;
			}
		}

		/* save the old entry */
		bcopy(&execsw[i], &args->lkm_oldexec, sizeof(struct execsw));

		/* replace it with the module's entry */
		bcopy(args->lkm_exec, &execsw[i], sizeof(struct execsw));

		/* let the exec code recompute its parameters over the new execsw */
		init_exec();

		/* report the slot used back to the module */
		args->lkm_offset = i;

		break;

	case LKM_E_UNLOAD:
		i = args->lkm_offset;

		/* put the old entry back */
		bcopy(&args->lkm_oldexec, &execsw[i], sizeof(struct execsw));

		/* recompute exec parameters over the restored execsw */
		init_exec();

		break;

	case LKM_E_STAT:
		break;
	}

	return (error);
}
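/*
 * Dispatch a load/unload/stat command to the handler for the module's type.
 */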
int
lkmdispatch(struct lkm_table *lkmtp, int cmd)
{
	int error = 0;

#ifdef LKM_DEBUG
	printf("lkmdispatch: %x %d\n", lkmtp, cmd);
#endif

	switch (lkmtp->private.lkm_any->lkm_type) {
	case LM_SYSCALL:
		error = _lkm_syscall(lkmtp, cmd);
		break;

	case LM_VFS:
		error = _lkm_vfs(lkmtp, cmd);
		break;

	case LM_DEV:
		error = _lkm_dev(lkmtp, cmd);
		break;

	case LM_EXEC:
		error = _lkm_exec(lkmtp, cmd);
		break;

	case LM_MISC:
		break;

	default:
		error = ENXIO;
		break;
	}

	return (error);
}