1 /* $OpenBSD: kern_lkm.c,v 1.43 2006/11/15 17:25:40 jmc Exp $ */
2 /* $NetBSD: kern_lkm.c,v 1.31 1996/03/31 21:40:27 christos Exp $ */
3
4 /*
5 * Copyright (c) 1994 Christopher G. Demetriou
6 * Copyright (c) 1992 Terrence R. Lambert.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Terrence R. Lambert.
20 * 4. The name Terrence R. Lambert may not be used to endorse or promote
21 * products derived from this software without specific prior written
22 * permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY TERRENCE R. LAMBERT ``AS IS'' AND ANY
25 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE TERRENCE R. LAMBERT BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 */
36
/*
 * XXX it's not really safe to unload *any* of the types which are
 * currently loadable; e.g. you could unload a syscall which a process
 * was blocked in, etc.  In the long term, a solution should be worked
 * out, but "not right now." -- cgd
 */
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/ioctl.h>
47 #include <sys/tty.h>
48 #include <sys/file.h>
49 #include <sys/proc.h>
50 #include <sys/uio.h>
51 #include <sys/kernel.h>
52 #include <sys/vnode.h>
53 #include <sys/malloc.h>
54 #include <sys/mount.h>
55 #include <sys/exec.h>
56 #include <sys/syscallargs.h>
57 #include <sys/conf.h>
58
59 #include <sys/lkm.h>
60 #include <sys/syscall.h>
61
62 #include <uvm/uvm_extern.h>
63
64 #ifdef DDB
65 #include <machine/db_machdep.h>
66 #include <ddb/db_sym.h>
67 #endif
68
69 /* flags */
70 #define LKM_ALLOC 0x01
71 #define LKM_WANT 0x02
72 #define LKM_INIT 0x04
73
74 #define LKMS_IDLE 0x00
75 #define LKMS_RESERVED 0x01
76 #define LKMS_LOADING 0x02
77 #define LKMS_LOADING_SYMS 0x03
78 #define LKMS_LOADED 0x04
79 #define LKMS_UNLOADING 0x08
80
81 static int lkm_v = 0;
82 static int lkm_state = LKMS_IDLE;
83
84 static TAILQ_HEAD(lkmods, lkm_table) lkmods; /* table of loaded modules */
85 static struct lkm_table *curp; /* global for in-progress ops */
86
87 static struct lkm_table *lkmalloc(void);
88 static void lkmfree(struct lkm_table *);
89 static struct lkm_table *lkmlookup(int, char *, int *);
90 static void lkmunreserve(void);
91 static int _lkm_syscall(struct lkm_table *, int);
92 static int _lkm_vfs(struct lkm_table *, int);
93 static int _lkm_dev(struct lkm_table *, int);
94 static int _lkm_exec(struct lkm_table *, int);
95
96 void lkminit(void);
97 int lkmexists(struct lkm_table *);
98
99 void init_exec(void);
100
101 void
102 lkminit(void)
103 {
104
105 TAILQ_INIT(&lkmods);
106 lkm_v |= LKM_INIT;
107 }
108
109 /*ARGSUSED*/
/*
 * Open the LKM control device.  Only minor 0 exists.  Access is
 * exclusive: while another process holds the device open, this open
 * either fails with EBUSY (non-blocking) or sleeps until lkmclose()
 * wakes us.  Returns 0 on success or an errno value.
 */
int
lkmopen(dev_t dev, int flag, int devtype, struct proc *p)
{
	int error;

	if (minor(dev) != 0)
		return (ENXIO);

	/* First open ever: set up the module list lazily. */
	if (!(lkm_v & LKM_INIT))
		lkminit();

	/*
	 * Use of the loadable kernel module device must be exclusive; we
	 * may try to remove this restriction later, but it's really no
	 * hardship.
	 */
	while (lkm_v & LKM_ALLOC) {
		if (flag & FNONBLOCK)		/* don't hang */
			return (EBUSY);
		lkm_v |= LKM_WANT;
		/*
		 * Sleep pending unlock; we use tsleep() to allow
		 * an alarm out of the open.
		 */
		error = tsleep(&lkm_v, TTIPRI|PCATCH, "lkmopn", 0);
		if (error)
			return (error);	/* leave LKM_WANT set -- no problem */
	}
	lkm_v |= LKM_ALLOC;	/* we now own the device */

	return (0);		/* pseudo-device open */
}
142
/*
 * Allocates a new LKM table entry, fills in the module id, and inserts
 * it into the list.  Returns NULL on failure.
 */
static struct lkm_table *
lkmalloc(void)
{
	struct lkm_table *p, *ret = NULL;
	int id = 0;

	/* M_WAITOK sleeps rather than failing, so ret is always valid. */
	MALLOC(ret, struct lkm_table *, sizeof(*ret), M_DEVBUF, M_WAITOK);
	ret->refcnt = ret->depcnt = 0;
	ret->sym_id = -1;	/* no DDB symbol table registered yet */
	/*
	 * walk the list finding the first free id. as long as the list is
	 * kept sorted this is not too inefficient, which is why we insert in
	 * order below.
	 */
	TAILQ_FOREACH(p, &lkmods, list) {
		if (id == p->id)
			id++;
		else
			break;	/* gap found; p is the first entry past it */
	}
	ret->id = id;
	if (p == NULL)		/* either first or last entry */
		TAILQ_INSERT_TAIL(&lkmods, ret, list);
	else
		TAILQ_INSERT_BEFORE(p, ret, list);

	return ret;
}
176
177 /*
178 * Frees the slot, decreases the number of modules.
179 */
180 static void
181 lkmfree(struct lkm_table *p)
182 {
183
184 TAILQ_REMOVE(&lkmods, p, list);
185 free(p, M_DEVBUF);
186 curp = NULL;
187 }
188
189 struct lkm_table *
190 lkm_list(struct lkm_table *p)
191 {
192
193 if (p == NULL)
194 p = TAILQ_FIRST(&lkmods);
195 else
196 p = TAILQ_NEXT(p, list);
197
198 return (p);
199 }
200
/*
 * Look up a loaded module either by id (i >= 0) or, when i is negative,
 * by a name copied in from userland.  On failure returns NULL with
 * *error set; on success *error is left untouched.
 */
static struct lkm_table *
lkmlookup(int i, char *name, int *error)
{
	struct lkm_table *p = NULL;
	char istr[MAXLKMNAME];

	/*
	 * p being NULL here implies the list is empty, so any lookup is
	 * invalid (name based or otherwise). Since the list of modules is
	 * kept sorted by id, lowest to highest, the id of the last entry
	 * will be the highest in use.
	 */
	p = TAILQ_LAST(&lkmods, lkmods);
	if (p == NULL || i > p->id) {
		*error = EINVAL;
		return NULL;
	}

	if (i < 0) {		/* unload by name */
		/*
		 * Copy name and lookup id from all loaded
		 * modules. May fail.
		 */
		*error = copyinstr(name, istr, MAXLKMNAME-1, NULL);
		if (*error)
			return NULL;
		istr[MAXLKMNAME-1] = '\0';	/* force NUL termination */

		TAILQ_FOREACH(p, &lkmods, list) {
			if (!strcmp(istr, p->private.lkm_any->lkm_name))
				break;
		}
	} else
		TAILQ_FOREACH(p, &lkmods, list)
			if (i == p->id)
				break;

	if (p == NULL)		/* ran off the end: no such module */
		*error = ENOENT;

	return p;
}
243
244 /*
245 * Unreserve the memory associated with the current loaded module; done on
246 * a coerced close of the lkm device (close on premature exit of modload)
247 * or explicitly by modload as a result of a link failure.
248 */
static void
lkmunreserve(void)
{

	/* Nothing reserved: nothing to undo. */
	if (lkm_state == LKMS_IDLE)
		return;

#ifdef DDB
	/* Drop the module's DDB symbol table if one was registered. */
	if (curp && curp->sym_id != -1)
		db_del_symbol_table(curp->private.lkm_any->lkm_name);
#endif

	/* Release the in-kernel copy of the symbol table, if any. */
	if (curp && curp->syms) {
		uvm_km_free(kernel_map, (vaddr_t)curp->syms, curp->sym_size);
		curp->syms = NULL;
	}

	/*
	 * Actually unreserve the memory
	 */
	if (curp && curp->area) {
		uvm_km_free(kernel_map, curp->area, curp->size);
		curp->area = 0;
	}
	lkm_state = LKMS_IDLE;
}
275
/*
 * Close the LKM control device.  Any load that was still in progress
 * (module never marked in use) is torn down, then the exclusive-access
 * flag is cleared and sleepers in lkmopen() are woken.
 */
int
lkmclose(dev_t dev, int flag, int mode, struct proc *p)
{

	if (minor(dev) != 0)
		return (ENXIO);

	if (!(lkm_v & LKM_ALLOC))
		return (EBADF);		/* device was not open */

	/* do this before waking the herd... */
	if (curp != NULL && !curp->refcnt) {
		/*
		 * If we close before setting used, we have aborted
		 * by way of error or by way of close-on-exit from
		 * a premature exit of "modload".
		 */
		lkmunreserve();
		lkmfree(curp);
	}
	lkm_v &= ~LKM_ALLOC;
	wakeup(&lkm_v);		/* thundering herd "problem" here */

	return (0);
}
301
302 /*ARGSUSED*/
/*
 * Control interface for module loading, driven by modload(8)/modunload(8):
 *   LMRESERV[_O] - allocate a slot and kernel memory for the module
 *   LMLOADBUF    - copy in a chunk of module text/data
 *   LMLOADSYMS   - copy in a chunk of the symbol table
 *   LMREADY      - call the module's entry point and commit the load
 *   LMUNRESRV    - abort an in-progress load
 *   LMUNLOAD     - unload a committed module
 *   LMSTAT       - report status of a loaded module
 * The load sequence is serialized through the globals curp/lkm_state,
 * protected by the exclusive open in lkmopen().
 */
int
lkmioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	int error = 0;

	/* At securelevel > 0 only status queries are permitted. */
	if (securelevel > 0) {
		switch (cmd) {
		case LMSTAT:
			break;
		default:
			return (EPERM);
		}
	}

	/* All commands except LMSTAT modify state: require write access. */
	if (!(flags & FWRITE)) {
		switch (cmd) {
		case LMSTAT:
			break;
		default:
			return (EACCES);
		}
	}

	switch (cmd) {
	case LMRESERV:
	case LMRESERV_O: {
		struct lmc_resrv *resrvp = (struct lmc_resrv *)data;

		if ((curp = lkmalloc()) == NULL) {
			error = ENOMEM;
			break;
		}
		curp->ver = (cmd == LMRESERV) ? LKM_VERSION : LKM_OLDVERSION;
		resrvp->slot = curp->id;		/* return slot */

		/*
		 * Get memory for module
		 * NOTE(review): uvm_km_zalloc() results are not checked
		 * here -- presumably it sleeps until memory is available;
		 * confirm against uvm(9).
		 */
		curp->size = resrvp->size;
		curp->area = uvm_km_zalloc(kernel_map, curp->size);
		curp->offset = 0;
		resrvp->addr = curp->area;

		/* Old-version reservations carry no symbol table. */
		if (cmd == LMRESERV && resrvp->sym_size) {
			curp->sym_size = resrvp->sym_size;
			curp->sym_symsize = resrvp->sym_symsize;
			curp->syms = (caddr_t)uvm_km_zalloc(kernel_map,
			    curp->sym_size);
			curp->sym_offset = 0;
			resrvp->sym_addr = curp->syms;
		} else {
			curp->sym_size = 0;
			curp->syms = 0;
			curp->sym_offset = 0;
			if (cmd == LMRESERV)
				resrvp->sym_addr = 0;
		}
#ifdef LKM_DEBUG
		printf("LKM: LMRESERV (actual = 0x%08lx)\n", curp->area);
		printf("LKM: LMRESERV (syms = 0x%08x)\n", curp->syms);
		printf("LKM: LMRESERV (adjusted = 0x%08lx)\n",
		    trunc_page(curp->area));
#endif /* LKM_DEBUG */

		lkm_state = LKMS_RESERVED;
		break;
	}

	case LMLOADBUF: {
		struct lmc_loadbuf *loadbufp = (struct lmc_loadbuf *)data;

		/*
		 * Reject out-of-sequence calls and chunks that would
		 * overrun either the transfer buffer or the reservation.
		 */
		if ((lkm_state != LKMS_RESERVED && lkm_state != LKMS_LOADING)
		    || loadbufp->cnt < 0
		    || loadbufp->cnt > MODIOBUF
		    || loadbufp->cnt > (curp->size - curp->offset)) {
			error = ENOMEM;
			break;
		}

		/* copy in buffer full of data */
		error = copyin(loadbufp->data,
		    (caddr_t)curp->area + curp->offset, loadbufp->cnt);
		if (error)
			break;

		/* Last chunk of text/data moves us on to symbol loading. */
		if ((curp->offset + loadbufp->cnt) < curp->size)
			lkm_state = LKMS_LOADING;
		else
			lkm_state = LKMS_LOADING_SYMS;

		curp->offset += loadbufp->cnt;
		break;
	}

	case LMLOADSYMS: {
		struct lmc_loadbuf *loadbufp = (struct lmc_loadbuf *)data;

		/* Same sequencing/bounds checks as LMLOADBUF, for syms. */
		if ((lkm_state != LKMS_LOADING &&
		    lkm_state != LKMS_LOADING_SYMS)
		    || loadbufp->cnt < 0
		    || loadbufp->cnt > MODIOBUF
		    || loadbufp->cnt > (curp->sym_size - curp->sym_offset)) {
			error = ENOMEM;
			break;
		}

		/* copy in buffer full of data*/
		error = copyin(loadbufp->data, curp->syms +
		    curp->sym_offset, loadbufp->cnt);
		if (error)
			break;

		if ((curp->sym_offset + loadbufp->cnt) < curp->sym_size)
			lkm_state = LKMS_LOADING_SYMS;
		else
			lkm_state = LKMS_LOADED;

		curp->sym_offset += loadbufp->cnt;
		break;
	}

	case LMUNRESRV:
		/* Abort an in-progress load; frees memory and slot. */
		lkmunreserve();
		if (curp)
			lkmfree(curp);
		break;

	case LMREADY:
		switch (lkm_state) {
		case LKMS_LOADED:
			break;
		case LKMS_LOADING:
		case LKMS_LOADING_SYMS:
			if ((curp->size - curp->offset) > 0)
				/* The remainder must be bss, so we clear it */
				bzero((caddr_t)curp->area + curp->offset,
				    curp->size - curp->offset);
			break;
		default:
			/* Nothing staged: reject without touching state. */
			return (ENXIO);
		}

		/* Userland hands us the module's entry point address. */
		curp->entry = (int (*)(struct lkm_table *, int, int))
		    (*((long *) (data)));

#ifdef LKM_DEBUG
		printf("LKM: call entrypoint %x\n", curp->entry);
#endif /* LKM_DEBUG */

		/* call entry(load)... (assigns "private" portion) */
		error = (*(curp->entry))(curp, LKM_E_LOAD, curp->ver);
		if (error) {
			/*
			 * Module may refuse loading or may have a
			 * version mismatch...
			 */
			lkm_state = LKMS_UNLOADING;	/* for lkmunreserve */
			lkmunreserve();			/* free memory */
			lkmfree(curp);			/* free slot */
			break;
		}

#ifdef LKM_DEBUG
		printf("LKM: LMREADY, id=%d, dev=%d\n", curp->id,
		    curp->private.lkm_any->lkm_offset);
#endif /* LKM_DEBUG */

#ifdef DDB
		/* Register the symbol table with DDB if fully loaded. */
		if (curp->syms && curp->sym_offset >= curp->sym_size) {
			curp->sym_id = db_add_symbol_table(curp->syms,
			    curp->syms + curp->sym_symsize,
			    curp->private.lkm_any->lkm_name,
			    curp->syms);
			printf("DDB symbols added: %ld bytes\n",
			    curp->sym_symsize);
		}
#endif /* DDB */

		curp->refcnt++;		/* commits the module (see lkmclose) */
		lkm_state = LKMS_IDLE;
		break;

	case LMUNLOAD: {
		struct lmc_unload *unloadp = (struct lmc_unload *)data;

		curp = lkmlookup(unloadp->id, unloadp->name, &error);
		if (curp == NULL)
			break;

		/* call entry(unload) */
		if ((*(curp->entry))(curp, LKM_E_UNLOAD, curp->ver)) {
			error = EBUSY;	/* module refused to go away */
			break;
		}

		lkm_state = LKMS_UNLOADING;	/* non-idle for lkmunreserve */
		lkmunreserve();			/* free memory */
		lkmfree(curp);			/* free slot */
		break;
	}

	case LMSTAT: {
		struct lmc_stat *statp = (struct lmc_stat *)data;

		if ((curp = lkmlookup(statp->id, statp->name, &error)) == NULL)
			break;

		/* Give the module a chance to veto or update status. */
		if ((error = (*curp->entry)(curp, LKM_E_STAT, curp->ver)))
			break;

		/*
		 * Copy out stat information for this module...
		 */
		statp->id = curp->id;
		statp->offset = curp->private.lkm_any->lkm_offset;
		statp->type = curp->private.lkm_any->lkm_type;
		statp->area = curp->area;
		statp->size = curp->size / PAGE_SIZE;
		statp->private = (unsigned long)curp->private.lkm_any;
		statp->ver = curp->private.lkm_any->lkm_ver;
		copyoutstr(curp->private.lkm_any->lkm_name,
		    statp->name, MAXLKMNAME, NULL);

		break;
	}

	default:		/* bad ioctl for device */
		error = ENODEV;
		break;
	}

	return (error);
}
536
537 /*
538 * Acts like "nosys" but can be identified in sysent for dynamic call
539 * number assignment for a limited number of calls.
540 *
541 * Place holder for system call slots reserved for loadable modules.
542 */
543 int
544 sys_lkmnosys(struct proc *p, void *v, register_t *retval)
545 {
546
547 return (sys_nosys(p, v, retval));
548 }
549
550 /*
551 * Acts like "enodev", but can be identified in cdevsw and bdevsw for
552 * dynamic driver major number assignment for a limited number of
553 * drivers.
554 *
555 * Place holder for device switch slots reserved for loadable modules.
556 */
int
lkmenodev(void)
{
	/* Same behaviour as enodev(), but a distinct, findable address. */
	return (enodev());
}
563
564 /*
565 * A placeholder function for load/unload/stat calls; simply returns zero.
566 * Used where people don't want to specify a special function.
567 */
/*
 * Do-nothing load/unload/stat handler: always reports success.
 */
int
lkm_nofunc(struct lkm_table *lkmtp, int cmd)
{
	return (0);
}
574
/*
 * Return non-zero if a module with the same name as lkmtp's is already
 * present in the module list with a non-zero reference count (i.e. a
 * committed load), else zero.
 */
int
lkmexists(struct lkm_table *lkmtp)
{
	struct lkm_table *p;

	TAILQ_FOREACH(p, &lkmods, list) {
		if (!strcmp(lkmtp->private.lkm_any->lkm_name,
		    p->private.lkm_any->lkm_name) && p->refcnt)
			return (1);
	}
	return (0);
}
587
588 /*
589 * For the loadable system call described by the structure pointed to
590 * by lkmtp, load/unload/stat it depending on the cmd requested.
591 */
static int
_lkm_syscall(struct lkm_table *lkmtp, int cmd)
{
	struct lkm_syscall *args = lkmtp->private.lkm_syscall;
	int i;
	int error = 0;

	switch (cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return (EEXIST);

		if ((i = args->lkm_offset) == -1) {	/* auto */
			/*
			 * Search the table looking for a slot...
			 * Free slots are those still pointing at the
			 * sys_lkmnosys placeholder.
			 */
			for (i = 0; i < SYS_MAXSYSCALL; i++)
				if (sysent[i].sy_call == sys_lkmnosys)
					break;	/* found it! */
			/* out of allocable slots? */
			if (i == SYS_MAXSYSCALL) {
				error = ENFILE;
				break;
			}
		} else {	/* assign */
			if (i < 0 || i >= SYS_MAXSYSCALL) {
				error = EINVAL;
				break;
			}
		}

		/* save old */
		bcopy(&sysent[i], &args->lkm_oldent, sizeof(struct sysent));

		/* replace with new */
		bcopy(args->lkm_sysent, &sysent[i], sizeof(struct sysent));

		/* done! */
		args->lkm_offset = i;	/* slot in sysent[] */

		break;

	case LKM_E_UNLOAD:
		/*
		 * current slot... assumes lkm_offset still holds the
		 * slot assigned at load time; it is not re-validated here.
		 */
		i = args->lkm_offset;

		/* replace current slot contents with old contents */
		bcopy(&args->lkm_oldent, &sysent[i], sizeof(struct sysent));

		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}

	return (error);
}
650
651 /*
652 * For the loadable virtual file system described by the structure pointed
653 * to by lkmtp, load/unload/stat it depending on the cmd requested.
654 */
655 static int
656 _lkm_vfs(struct lkm_table *lkmtp, int cmd)
657 {
658 int error = 0;
659 struct lkm_vfs *args = lkmtp->private.lkm_vfs;
660
661 switch (cmd) {
662 case LKM_E_LOAD:
663 /* don't load twice! */
664 if (lkmexists(lkmtp))
665 return (EEXIST);
666 error = vfs_register(args->lkm_vfsconf);
667 break;
668
669 case LKM_E_UNLOAD:
670 error = vfs_unregister(args->lkm_vfsconf);
671 break;
672
673 case LKM_E_STAT: /* no special handling... */
674 break;
675 }
676
677 return (error);
678 }
679
680 /*
681 * For the loadable device driver described by the structure pointed to
682 * by lkmtp, load/unload/stat it depending on the cmd requested.
683 */
static int
_lkm_dev(struct lkm_table *lkmtp, int cmd)
{
	struct lkm_dev *args = lkmtp->private.lkm_dev;
	int i;
	int error = 0;

	switch (cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return (EEXIST);

		/* Block and character majors live in separate tables. */
		switch (args->lkm_devtype) {
		case LM_DT_BLOCK:
			if ((i = args->lkm_offset) == -1) {	/* auto */
				/*
				 * Search the table looking for a slot...
				 * Free slots still have the lkmenodev
				 * placeholder as their open routine.
				 */
				for (i = 0; i < nblkdev; i++)
					if (bdevsw[i].d_open ==
					    (dev_type_open((*))) lkmenodev)
						break;	/* found it! */
				/* out of allocable slots? */
				if (i == nblkdev) {
					error = ENFILE;
					break;
				}
			} else {	/* assign */
				if (i < 0 || i >= nblkdev) {
					error = EINVAL;
					break;
				}
			}

			/* save old */
			bcopy(&bdevsw[i], &args->lkm_olddev.bdev,
			    sizeof(struct bdevsw));

			/* replace with new */
			bcopy(args->lkm_dev.bdev, &bdevsw[i],
			    sizeof(struct bdevsw));

			/* done! */
			args->lkm_offset = i;	/* slot in bdevsw[] */
			break;

		case LM_DT_CHAR:
			if ((i = args->lkm_offset) == -1) {	/* auto */
				/*
				 * Search the table looking for a slot...
				 */
				for (i = 0; i < nchrdev; i++)
					if (cdevsw[i].d_open ==
					    (dev_type_open((*))) lkmenodev)
						break;	/* found it! */
				/* out of allocable slots? */
				if (i == nchrdev) {
					error = ENFILE;
					break;
				}
			} else {	/* assign */
				if (i < 0 || i >= nchrdev) {
					error = EINVAL;
					break;
				}
			}

			/* save old */
			bcopy(&cdevsw[i], &args->lkm_olddev.cdev,
			    sizeof(struct cdevsw));

			/* replace with new */
			bcopy(args->lkm_dev.cdev, &cdevsw[i],
			    sizeof(struct cdevsw));

			/* done! */
			args->lkm_offset = i;	/* slot in cdevsw[] */

			break;

		default:
			error = ENODEV;		/* unknown device type */
			break;
		}
		break;

	case LKM_E_UNLOAD:
		/*
		 * current slot... assumes lkm_offset still holds the
		 * slot assigned at load time; it is not re-validated here.
		 */
		i = args->lkm_offset;

		switch (args->lkm_devtype) {
		case LM_DT_BLOCK:
			/* replace current slot contents with old contents */
			bcopy(&args->lkm_olddev.bdev, &bdevsw[i],
			    sizeof(struct bdevsw));
			break;

		case LM_DT_CHAR:
			/* replace current slot contents with old contents */
			bcopy(&args->lkm_olddev.cdev, &cdevsw[i],
			    sizeof(struct cdevsw));
			break;

		default:
			error = ENODEV;
			break;
		}
		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}

	return (error);
}
800
801 /*
802 * For the loadable execution class described by the structure pointed to
803 * by lkmtp, load/unload/stat it depending on the cmd requested.
804 */
static int
_lkm_exec(struct lkm_table *lkmtp, int cmd)
{
	struct lkm_exec *args = lkmtp->private.lkm_exec;
	int i;
	int error = 0;

	switch (cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return (EEXIST);

		if ((i = args->lkm_offset) == -1) {	/* auto */
			/*
			 * Search the table looking for a slot...
			 * Free execsw slots have a NULL check routine.
			 */
			for (i = 0; i < nexecs; i++)
				if (execsw[i].es_check == NULL)
					break;	/* found it! */
			/* out of allocable slots? */
			if (i == nexecs) {
				error = ENFILE;
				break;
			}
		} else {	/* assign */
			if (i < 0 || i >= nexecs) {
				error = EINVAL;
				break;
			}
		}

		/* save old */
		bcopy(&execsw[i], &args->lkm_oldexec, sizeof(struct execsw));

		/* replace with new */
		bcopy(args->lkm_exec, &execsw[i], sizeof(struct execsw));

		/* need to recompute max header size */
		init_exec();

		/* done! */
		args->lkm_offset = i;	/* slot in execsw[] */

		break;

	case LKM_E_UNLOAD:
		/*
		 * current slot... assumes lkm_offset still holds the
		 * slot assigned at load time; it is not re-validated here.
		 */
		i = args->lkm_offset;

		/* replace current slot contents with old contents */
		bcopy(&args->lkm_oldexec, &execsw[i], sizeof(struct execsw));

		/* need to recompute max header size */
		init_exec();

		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}

	return (error);
}
869
870 /*
871 * This code handles the per-module type "wiring-in" of loadable modules
872 * into existing kernel tables. For "LM_MISC" modules, wiring and unwiring
873 * is assumed to be done in their entry routines internal to the module
874 * itself.
875 */
876 int
877 lkmdispatch(struct lkm_table *lkmtp, int cmd)
878 {
879 int error = 0;
880
881 #ifdef LKM_DEBUG
882 printf("lkmdispatch: %x %d\n", lkmtp, cmd);
883 #endif /* LKM_DEBUG */
884
885 switch (lkmtp->private.lkm_any->lkm_type) {
886 case LM_SYSCALL:
887 error = _lkm_syscall(lkmtp, cmd);
888 break;
889
890 case LM_VFS:
891 error = _lkm_vfs(lkmtp, cmd);
892 break;
893
894 case LM_DEV:
895 error = _lkm_dev(lkmtp, cmd);
896 break;
897
898 case LM_EXEC:
899 error = _lkm_exec(lkmtp, cmd);
900 break;
901
902 case LM_MISC: /* ignore content -- no "misc-specific" procedure */
903 break;
904
905 default:
906 error = ENXIO; /* unknown type */
907 break;
908 }
909
910 return (error);
911 }