This source file includes the following definitions:
- spec_vnoperate
- spec_lookup
- spec_open
- spec_read
- spec_inactive
- spec_write
- spec_ioctl
- spec_poll
- spec_kqfilter
- spec_fsync
- spec_strategy
- spec_bmap
- spec_close
- spec_print
- spec_pathconf
- spec_advlock
- spec_ebadf
- spec_badop

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>
#include <sys/poll.h>

#include <miscfs/specfs/specdev.h>

#define v_lastr	v_specinfo->si_lastr

struct vnode *speclisth[SPECHSZ];

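/*
 * Vnode operations vector for special device nodes.
 */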
int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },
	{ &vop_create_desc, spec_create },
	{ &vop_mknod_desc, spec_mknod },
	{ &vop_open_desc, spec_open },
	{ &vop_close_desc, spec_close },
	{ &vop_access_desc, spec_access },
	{ &vop_getattr_desc, spec_getattr },
	{ &vop_setattr_desc, spec_setattr },
	{ &vop_read_desc, spec_read },
	{ &vop_write_desc, spec_write },
	{ &vop_ioctl_desc, spec_ioctl },
	{ &vop_poll_desc, spec_poll },
	{ &vop_kqfilter_desc, spec_kqfilter },
	{ &vop_revoke_desc, spec_revoke },
	{ &vop_fsync_desc, spec_fsync },
	{ &vop_remove_desc, spec_remove },
	{ &vop_link_desc, spec_link },
	{ &vop_rename_desc, spec_rename },
	{ &vop_mkdir_desc, spec_mkdir },
	{ &vop_rmdir_desc, spec_rmdir },
	{ &vop_symlink_desc, spec_symlink },
	{ &vop_readdir_desc, spec_readdir },
	{ &vop_readlink_desc, spec_readlink },
	{ &vop_abortop_desc, spec_abortop },
	{ &vop_inactive_desc, spec_inactive },
	{ &vop_reclaim_desc, spec_reclaim },
	{ &vop_lock_desc, spec_lock },
	{ &vop_unlock_desc, spec_unlock },
	{ &vop_bmap_desc, spec_bmap },
	{ &vop_strategy_desc, spec_strategy },
	{ &vop_print_desc, spec_print },
	{ &vop_islocked_desc, spec_islocked },
	{ &vop_pathconf_desc, spec_pathconf },
	{ &vop_advlock_desc, spec_advlock },
	{ &vop_bwrite_desc, spec_bwrite },
	{ NULL, NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
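
/*
 * Dispatch a vnode operation through the specfs operation vector.
 */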
int
spec_vnoperate(void *v)
{
	struct vop_generic_args *ap = v;

	return (VOCALL(spec_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}
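
/*
 * Trivial lookup routine that always fails.
 */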
int
spec_lookup(void *v)
{
	struct vop_lookup_args *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}
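
/*
 * Open a special file.
 */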
int
spec_open(void *v)
{
	struct vop_open_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	struct vnode *bvp;
	dev_t bdev;
	dev_t dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		if (cdevsw[maj].d_flags & D_CLONE)
			return (spec_open_clone(ap));
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		break;
	}
	return (0);
}
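
/*
 * Vnode op for read.
 */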
int
spec_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr64_t bn, nextbn, bscale;
	int bsize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		/*
		 * If the partition holds a BSD FFS file system, use its
		 * block size instead of the default transfer size.
		 */
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((bsize - on), uio->uio_resid);
			/* Read ahead when the access pattern is sequential. */
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, bsize, &nextbn, &bsize,
				    1, NOCRED, &bp);
			} else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
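
/*
 * On inactivation, just release the vnode lock; special files keep
 * no state that needs to be written back.
 */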
int
spec_inactive(void *v)
{
	struct vop_inactive_args *ap = v;

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}
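
/*
 * Vnode op for write.
 */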
int
spec_write(void *v)
{
	struct vop_write_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr64_t bn, bscale;
	int bsize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		/*
		 * If the partition holds a BSD FFS file system, use its
		 * block size instead of the default transfer size.
		 */
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((bsize - on), uio->uio_resid);
			/* Read the block first so partial writes merge. */
			error = bread(vp, bn, bsize, NOCRED, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			/* Write full blocks async; delay partial ones. */
			if (n + on == bsize)
				bawrite(bp);
			else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
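
/*
 * Device ioctl operation.
 */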
int
spec_ioctl(void *v)
{
	struct vop_ioctl_args *ap = v;
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}
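
/*
 * Poll a special file; character devices are handed to the driver's
 * d_poll routine, everything else is always ready.
 */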
int
spec_poll(void *v)
{
	struct vop_poll_args *ap = v;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (ap->a_events &
		    (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p);
	}
}
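
/*
 * Attach a kqueue filter via the character device's d_kqfilter routine.
 */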
int
spec_kqfilter(void *v)
{
	struct vop_kqfilter_args *ap = v;
	dev_t dev;

	dev = ap->a_vp->v_rdev;
	if (cdevsw[major(dev)].d_flags & D_KQFILTER)
		return (*cdevsw[major(dev)].d_kqfilter)(dev, ap->a_kn);
	return (1);
}
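
/*
 * Synch buffers associated with a block device.
 */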
int
spec_fsync(void *v)
{
	struct vop_fsync_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		bawrite(bp);
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		vwaitforio(vp, 0, "spec_fsync", 0);

#ifdef DIAGNOSTIC
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			vprint("spec_fsync: dirty", vp);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}
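
/*
 * Just call the device strategy routine.
 */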
int
spec_strategy(void *v)
{
	struct vop_strategy_args *ap = v;
	struct buf *bp = ap->a_bp;
	int maj = major(bp->b_dev);

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	(*bdevsw[maj].d_strategy)(bp);
	return (0);
}
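
/*
 * This is a noop, simply returning what one has been given.
 */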
int
spec_bmap(void *v)
{
	struct vop_bmap_args *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;

	return (0);
}
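
/*
 * Device close routine.
 */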
int
spec_close(void *v)
{
	struct vop_close_args *ap = v;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.  We
		 * cannot easily tell that a character device is a
		 * controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case, if
		 * the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the
		 * session.
		 */
		if (vcount(vp) == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, we are in the midst of
		 * forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		if (cdevsw[major(dev)].d_flags & D_CLONE)
			return (spec_close_clone(ap));
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that we
		 * can, for instance, change floppy disks.  During a
		 * forced close (VXLOCK) the vnode is already locked.
		 */
		if (!(vp->v_flag & VXLOCK))
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (!(vp->v_flag & VXLOCK))
			VOP_UNLOCK(vp, 0, ap->a_p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it is
		 * still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap,
		 * cmap) holds a reference to the vnode, and because
		 * we mark any other vnodes that alias this device,
		 * when the sum of the reference counts on all the
		 * aliased vnodes descends to one, we are on last
		 * close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}
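
/*
 * Print out the contents of a special device vnode.
 */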
int
spec_print(void *v)
{
	struct vop_print_args *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return (0);
}
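
/*
 * Return POSIX pathconf information applicable to special devices.
 */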
int
spec_pathconf(void *v)
{
	struct vop_pathconf_args *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
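
/*
 * Special device advisory byte-level locks.
 */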
int
spec_advlock(void *v)
{
	struct vop_advlock_args *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id,
	    ap->a_op, ap->a_fl, ap->a_flags));
}
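
/*
 * Special device failed operation.
 */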
int
spec_ebadf(void *v)
{

	return (EBADF);
}
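
/*
 * Special device bad operation.
 */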
int
spec_badop(void *v)
{

	panic("spec_badop called");
	/* NOTREACHED */
}