vp                172 arch/i386/i386/vm_machdep.c cpu_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
vp                198 arch/i386/i386/vm_machdep.c 	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
vp                204 arch/i386/i386/vm_machdep.c 	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core),
vp                480 arch/i386/stand/installboot/installboot.c 	char		*vp;
vp                505 arch/i386/stand/installboot/installboot.c 		vp = proto + nl->n_value;
vp                509 arch/i386/stand/installboot/installboot.c 			lp = (u_int32_t *) vp;
vp                516 arch/i386/stand/installboot/installboot.c 			wp = (u_int16_t *) vp;
vp                523 arch/i386/stand/installboot/installboot.c 			bp = (u_int8_t *) vp;
vp                 61 compat/common/compat_dir.c 	struct vnode *vp;
vp                 67 compat/common/compat_dir.c 	vp = (struct vnode *)fp->f_data;
vp                 69 compat/common/compat_dir.c 	if (vp->v_type != VDIR)
vp                 72 compat/common/compat_dir.c 	if ((error = VOP_GETATTR(vp, &va, fp->f_cred, curproc)) != 0)
vp                 78 compat/common/compat_dir.c 	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
vp                 98 compat/common/compat_dir.c 	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies,
vp                169 compat/common/compat_dir.c 	VOP_UNLOCK(vp, 0, curproc);
vp                192 compat/common/vfs_syscalls_35.c 	struct vnode *vp;
vp                205 compat/common/vfs_syscalls_35.c 	if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
vp                207 compat/common/vfs_syscalls_35.c 	error = vn_stat(vp, &ub, p);
vp                208 compat/common/vfs_syscalls_35.c 	vput(vp);
vp                340 compat/common/vfs_syscalls_43.c 	struct vnode *vp;
vp                357 compat/common/vfs_syscalls_43.c 	vp = (struct vnode *)fp->f_data;
vp                358 compat/common/vfs_syscalls_43.c 	if (vp->v_type != VDIR) {
vp                371 compat/common/vfs_syscalls_43.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                374 compat/common/vfs_syscalls_43.c 		if (vp->v_mount->mnt_maxsymlinklen <= 0) {
vp                375 compat/common/vfs_syscalls_43.c 			error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag,
vp                392 compat/common/vfs_syscalls_43.c 		error = VOP_READDIR(vp, &kuio, fp->f_cred, &eofflag,
vp                429 compat/common/vfs_syscalls_43.c 	VOP_UNLOCK(vp, 0, p);
vp                142 compat/freebsd/freebsd_misc.c 	struct vnode *vp;
vp                150 compat/freebsd/freebsd_misc.c 	vp = (struct vnode *)fp->f_data;
vp               1058 compat/hpux/hpux_compat.c 	struct vnode *vp;
vp               1135 compat/hpux/hpux_compat.c 	vp = nd.ni_vp;
vp               1137 compat/hpux/hpux_compat.c 	if (VOP_ACCESS(vp, VREAD, cred, p) == 0)
vp               1139 compat/hpux/hpux_compat.c 	if (vn_writechk(vp) == 0 && VOP_ACCESS(vp, VWRITE, cred, p) == 0)
vp               1141 compat/hpux/hpux_compat.c 	if (VOP_ACCESS(vp, VEXEC, cred, p) == 0)
vp               1143 compat/hpux/hpux_compat.c 	vput(vp);
vp               1340 compat/hpux/hpux_compat.c 	struct vnode *vp;
vp               1362 compat/hpux/hpux_compat.c 	vp = nd.ni_vp;
vp               1363 compat/hpux/hpux_compat.c 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp               1366 compat/hpux/hpux_compat.c 		error = VOP_SETATTR(vp, &vattr, nd.ni_cnd.cn_cred, p);
vp               1367 compat/hpux/hpux_compat.c 	vput(vp);
vp                215 compat/hpux/hpux_file.c 	struct vnode *vp;
vp                262 compat/hpux/hpux_file.c 		vp = (struct vnode *)fp->f_data;
vp                286 compat/hpux/hpux_file.c 			error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg);
vp                295 compat/hpux/hpux_file.c 			error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg);
vp                299 compat/hpux/hpux_file.c 			error = VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &fl,
vp                315 compat/hpux/hpux_file.c 		vp = (struct vnode *)fp->f_data;
vp                332 compat/hpux/hpux_file.c 		    VOP_ADVLOCK(vp, (caddr_t)p, F_GETLK, &fl, F_POSIX)))
vp                288 compat/ibcs2/ibcs2_exec.c coff_find_section(p, vp, fp, sh, s_type)
vp                290 compat/ibcs2/ibcs2_exec.c 	struct vnode *vp;
vp                301 compat/ibcs2/ibcs2_exec.c 		error = vn_rdwr(UIO_READ, vp, (caddr_t) sh,
vp                250 compat/ibcs2/ibcs2_fcntl.c 	register struct vnode *vp;
vp                261 compat/ibcs2/ibcs2_fcntl.c         vp = nd.ni_vp;
vp                272 compat/ibcs2/ibcs2_fcntl.c                 if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
vp                273 compat/ibcs2/ibcs2_fcntl.c                         error = VOP_ACCESS(vp, flags, cred, p);
vp                275 compat/ibcs2/ibcs2_fcntl.c         vput(vp);
vp                456 compat/ibcs2/ibcs2_misc.c 	struct vnode *vp;
vp                471 compat/ibcs2/ibcs2_misc.c 	vp = (struct vnode *)fp->f_data;
vp                472 compat/ibcs2/ibcs2_misc.c 	if (vp->v_type != VDIR) {
vp                531 compat/linux/linux_exec.c 	struct vnode *vp;
vp                545 compat/linux/linux_exec.c 	vp = ni.ni_vp;
vp                547 compat/linux/linux_exec.c 	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t) &hdr, LINUX_AOUT_HDR_SIZE,
vp                550 compat/linux/linux_exec.c 		vrele(vp);
vp                555 compat/linux/linux_exec.c 		vrele(vp);
vp                568 compat/linux/linux_exec.c 	if ((hdr.a_text != 0 || hdr.a_data != 0) && vp->v_writecount != 0) {
vp                569 compat/linux/linux_exec.c 		vrele(vp);
vp                572 compat/linux/linux_exec.c 	vn_marktext(vp);
vp                578 compat/linux/linux_exec.c 	    hdr.a_text + hdr.a_data, taddr, vp, LINUX_N_TXTOFF(hdr, magic),
vp                597 compat/linux/linux_exec.c 	vrele(vp);
vp                319 compat/linux/linux_file.c 	struct vnode *vp;
vp                402 compat/linux/linux_file.c 		vp = (struct vnode *)fp->f_data;
vp                403 compat/linux/linux_file.c 		if (vp->v_type != VCHR)
vp                406 compat/linux/linux_file.c 		error = VOP_GETATTR(vp, &va, p->p_ucred, p);
vp               1318 compat/linux/linux_socket.c 	struct vnode *vp;
vp               1329 compat/linux/linux_socket.c 		vp = (struct vnode *)fp->f_data;
vp               1330 compat/linux/linux_socket.c 		isdev = vp->v_type == VCHR;
vp                577 compat/ossaudio/ossaudio.c 	struct vnode *vp;
vp                585 compat/ossaudio/ossaudio.c 	vp = (struct vnode *)fp->f_data;
vp                586 compat/ossaudio/ossaudio.c 	if (vp->v_type != VCHR)
vp                588 compat/ossaudio/ossaudio.c 	if (VOP_GETATTR(vp, &va, p->p_ucred, p))
vp                503 compat/sunos/sunos_ioctl.c 		struct vnode *vp;
vp                506 compat/sunos/sunos_ioctl.c 			vp = (struct vnode *)fp->f_data;
vp                507 compat/sunos/sunos_ioctl.c 			if (error == EIO && vp != NULL &&
vp                508 compat/sunos/sunos_ioctl.c 			    vp->v_type == VCHR && major(vp->v_rdev) == 21)
vp                425 compat/sunos/sunos_misc.c 	struct vnode *vp;
vp                433 compat/sunos/sunos_misc.c 	vp = (struct vnode *)fp->f_data;
vp                435 compat/sunos/sunos_misc.c 	if (vp->v_type != VDIR) {
vp                587 compat/sunos/sunos_misc.c 	register struct vnode *vp;
vp                595 compat/sunos/sunos_misc.c 	vp = (struct vnode *)fp->f_data;
vp                596 compat/sunos/sunos_misc.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                597 compat/sunos/sunos_misc.c 	if (vp->v_type != VDIR)
vp                600 compat/sunos/sunos_misc.c 		error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
vp                601 compat/sunos/sunos_misc.c 	VOP_UNLOCK(vp, 0, p);
vp                606 compat/sunos/sunos_misc.c 	VREF(vp);
vp                609 compat/sunos/sunos_misc.c 	fdp->fd_rdir = vp;
vp                260 compat/svr4/svr4_fcntl.c 	struct vnode *vp;
vp                271 compat/svr4/svr4_fcntl.c 	vp = (struct vnode *)fp->f_data;
vp                272 compat/svr4/svr4_fcntl.c 	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO)
vp                277 compat/svr4/svr4_fcntl.c 	if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) != 0)
vp                423 compat/svr4/svr4_misc.c 	struct vnode	*vp;
vp                432 compat/svr4/svr4_misc.c 	vp = (struct vnode *) fp->f_data;
vp                433 compat/svr4/svr4_misc.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                434 compat/svr4/svr4_misc.c 	if (vp->v_type != VDIR)
vp                437 compat/svr4/svr4_misc.c 		error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
vp                438 compat/svr4/svr4_misc.c 	VOP_UNLOCK(vp, 0, p);
vp                443 compat/svr4/svr4_misc.c 	VREF(vp);
vp                446 compat/svr4/svr4_misc.c 	fdp->fd_rdir = vp;
vp                 58 ddb/db_variables.c 	struct db_variable *vp;
vp                 62 ddb/db_variables.c 	    for (vp = db_vars; vp < db_evars; vp++) {
vp                 63 ddb/db_variables.c 		if (!strcmp(db_tok_string, vp->name)) {
vp                 64 ddb/db_variables.c 		    *varp = vp;
vp                 68 ddb/db_variables.c 	    for (vp = db_regs; vp < db_eregs; vp++) {
vp                 69 ddb/db_variables.c 		if (!strcmp(db_tok_string, vp->name)) {
vp                 70 ddb/db_variables.c 		    *varp = vp;
vp                 83 ddb/db_variables.c 	struct db_variable *vp;
vp                 85 ddb/db_variables.c 	if (!db_find_variable(&vp))
vp                 88 ddb/db_variables.c 	db_read_variable(vp, valuep);
vp                 96 ddb/db_variables.c 	struct db_variable *vp;
vp                 98 ddb/db_variables.c 	if (!db_find_variable(&vp))
vp                101 ddb/db_variables.c 	db_write_variable(vp, &value);
vp                108 ddb/db_variables.c db_read_variable(struct db_variable *vp, db_expr_t *valuep)
vp                110 ddb/db_variables.c 	int	(*func)(struct db_variable *, db_expr_t *, int) = vp->fcn;
vp                113 ddb/db_variables.c 	    *valuep = *(vp->valuep);
vp                115 ddb/db_variables.c 	    (*func)(vp, valuep, DB_VAR_GET);
vp                119 ddb/db_variables.c db_write_variable(struct db_variable *vp, db_expr_t *valuep)
vp                121 ddb/db_variables.c 	int	(*func)(struct db_variable *, db_expr_t *, int) = vp->fcn;
vp                124 ddb/db_variables.c 	    *(vp->valuep) = *valuep;
vp                126 ddb/db_variables.c 	    (*func)(vp, valuep, DB_VAR_SET);
vp                134 ddb/db_variables.c 	struct db_variable *vp;
vp                142 ddb/db_variables.c 	if (!db_find_variable(&vp)) {
vp                160 ddb/db_variables.c 	db_write_variable(vp, &value);
vp                272 dev/ccd.c      	struct vnode *vp;
vp                306 dev/ccd.c      		vp = ccd->ccd_vpp[ix];
vp                308 dev/ccd.c      		ci->ci_vp = vp;
vp                329 dev/ccd.c      		if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
vp                342 dev/ccd.c      		error = VOP_IOCTL(vp, DIOCGPART, (caddr_t)&dpart,
vp               1309 dev/ccd.c      	struct vnode *vp;
vp               1319 dev/ccd.c      	vp = nd.ni_vp;
vp               1321 dev/ccd.c      	if (vp->v_usecount > 1) {
vp               1322 dev/ccd.c      		VOP_UNLOCK(vp, 0, p);
vp               1323 dev/ccd.c      		(void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
vp               1327 dev/ccd.c      	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
vp               1330 dev/ccd.c      		VOP_UNLOCK(vp, 0, p);
vp               1331 dev/ccd.c      		(void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
vp               1337 dev/ccd.c      		VOP_UNLOCK(vp, 0, p);
vp               1338 dev/ccd.c      		(void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
vp               1343 dev/ccd.c      	CCD_DCALL(CCDB_VNODE, vprint("ccdlookup: vnode info", vp));
vp               1346 dev/ccd.c      	VOP_UNLOCK(vp, 0, p);
vp               1347 dev/ccd.c      	*vpp = vp;
vp                 67 dev/clock_subr.h #define todr_getcal(ct, vp)	((*(ct)->todr_gettime)(ct, vp))
vp                 90 dev/cons.c     	struct vnode *vp;
vp                106 dev/cons.c     	if (vfinddev(dev, VCHR, &vp) && vcount(vp))
vp                530 dev/ic/bt485.c bt485_update(vp)
vp                531 dev/ic/bt485.c 	void *vp;
vp                533 dev/ic/bt485.c 	struct bt485data *data = vp;
vp                422 dev/ic/ibm561.c ibm561_update(vp)
vp                423 dev/ic/ibm561.c 	void *vp;
vp                425 dev/ic/ibm561.c 	struct ibm561data *data = (struct ibm561data *)vp;
vp                129 dev/ic/isp_target.c 		void *		*vp;
vp                145 dev/ic/isp_target.c 	unp.vp = vptr;
vp                234 dev/ic/mk48txx.c mk48txx_getcal(handle, vp)
vp                236 dev/ic/mk48txx.c 	int *vp;
vp                250 dev/ic/mk48txx.c mk48txx_get_nvram_size(handle, vp)
vp                252 dev/ic/mk48txx.c 	bus_size_t *vp;
vp                255 dev/ic/mk48txx.c 	*vp = mk->mk_nvramsz;
vp                264 dev/pci/ami_pci.c 		const struct ami_pci_vendor *vp;
vp                267 dev/pci/ami_pci.c 		for (vp = ami_pci_vendors;
vp                268 dev/pci/ami_pci.c 		     vp->id && vp->id != (csr & 0xffff); vp++);
vp                269 dev/pci/ami_pci.c 		if (vp->id)
vp                270 dev/pci/ami_pci.c 			snprintf(modelbuf, sizeof(modelbuf), "%s %x", vp->name,
vp                509 dev/pci/if_che.c che_read_flash_reg(struct cheg_softc *sc, size_t bcnt, int cont, u_int32_t *vp)
vp                520 dev/pci/if_che.c 	*vp = che_read(sc, CHE_REG_SF_DATA);
vp               1176 dev/pci/pciide.c 	const struct pciide_vendor_desc *vp;
vp               1179 dev/pci/pciide.c 	for (i = 0, vp = pciide_vendors;
vp               1181 dev/pci/pciide.c 	    vp++, i++)
vp               1182 dev/pci/pciide.c 		if (PCI_VENDOR(id) == vp->ide_vendor)
vp               1188 dev/pci/pciide.c 	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
vp               1192 dev/pci/pciide.c 	if (i == vp->ide_nproducts)
vp               3026 dev/pcmcia/if_ray.c ray_read_region(struct ray_softc *sc, bus_size_t off, void *vp, size_t c)
vp               3032 dev/pcmcia/if_ray.c 	p = vp;
vp               3072 dev/pcmcia/if_ray.c 	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, off, vp, c);
vp               3084 dev/pcmcia/if_ray.c ray_write_region(struct ray_softc *sc, bus_size_t off, void *vp, size_t c)
vp               3090 dev/pcmcia/if_ray.c 	p = vp;
vp               3129 dev/pcmcia/if_ray.c 	bus_space_write_region_1(sc->sc_memt, sc->sc_memh, off, vp, c);
vp                100 dev/raidframe/rf_copyback.c 	struct vnode *vp;
vp                138 dev/raidframe/rf_copyback.c  		vp = raidPtr->raid_cinfo[frow][fcol].ci_vp;
vp                140 dev/raidframe/rf_copyback.c  		rf_close_component(raidPtr, vp, ac);
vp                150 dev/raidframe/rf_copyback.c 	retcode = raidlookup(raidPtr->Disks[frow][fcol].devname, proc, &vp);
vp                169 dev/raidframe/rf_copyback.c 		if ((retcode = VOP_GETATTR(vp, &va, proc->p_ucred, proc)) != 0)
vp                173 dev/raidframe/rf_copyback.c 		retcode = VOP_IOCTL(vp, DIOCGPART, (caddr_t) &dpart, FREAD,
vp                183 dev/raidframe/rf_copyback.c 		raidPtr->raid_cinfo[frow][fcol].ci_vp = vp;
vp                516 dev/raidframe/rf_disks.c 				raidPtr->raid_cinfo[r][c].ci_vp = ac->vp;
vp                607 dev/raidframe/rf_disks.c 			VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
vp                608 dev/raidframe/rf_disks.c 			vput(ac->vp);
vp                609 dev/raidframe/rf_disks.c 			ac->vp = NULL;
vp                645 dev/raidframe/rf_disks.c 	struct vnode *vp;
vp                666 dev/raidframe/rf_disks.c 	error = raidlookup(diskPtr->devname, curproc, &vp);
vp                678 dev/raidframe/rf_disks.c 		if ((error = VOP_GETATTR(vp, &va, proc->p_ucred, proc)) != 0) {
vp                681 dev/raidframe/rf_disks.c 		error = VOP_IOCTL(vp, DIOCGPART, (caddr_t) & dpart, FREAD,
vp                691 dev/raidframe/rf_disks.c 		raidPtr->raid_cinfo[row][col].ci_vp = vp;
vp                190 dev/raidframe/rf_openbsd.h 	struct vnode		*vp;		/* Master Vnode Pointer. */
vp               2186 dev/raidframe/rf_openbsdkintf.c 	struct vnode *vp;
vp               2197 dev/raidframe/rf_openbsdkintf.c 	vp = nd.ni_vp;
vp               2198 dev/raidframe/rf_openbsdkintf.c 	if (vp->v_usecount > 1) {
vp               2199 dev/raidframe/rf_openbsdkintf.c 		VOP_UNLOCK(vp, 0, p);
vp               2200 dev/raidframe/rf_openbsdkintf.c 		(void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
vp               2203 dev/raidframe/rf_openbsdkintf.c 	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
vp               2204 dev/raidframe/rf_openbsdkintf.c 		VOP_UNLOCK(vp, 0, p);
vp               2205 dev/raidframe/rf_openbsdkintf.c 		(void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
vp               2210 dev/raidframe/rf_openbsdkintf.c 		VOP_UNLOCK(vp, 0, p);
vp               2211 dev/raidframe/rf_openbsdkintf.c 		(void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
vp               2214 dev/raidframe/rf_openbsdkintf.c 	VOP_UNLOCK(vp, 0, p);
vp               2215 dev/raidframe/rf_openbsdkintf.c 	*vpp = vp;
vp               2558 dev/raidframe/rf_openbsdkintf.c rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
vp               2562 dev/raidframe/rf_openbsdkintf.c 	if (vp != NULL) {
vp               2565 dev/raidframe/rf_openbsdkintf.c 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, p);
vp               2566 dev/raidframe/rf_openbsdkintf.c 			vrele(vp);
vp               2569 dev/raidframe/rf_openbsdkintf.c 			(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
vp               2580 dev/raidframe/rf_openbsdkintf.c 	struct vnode *vp;
vp               2590 dev/raidframe/rf_openbsdkintf.c 			vp = raidPtr->raid_cinfo[r][c].ci_vp;
vp               2592 dev/raidframe/rf_openbsdkintf.c 			rf_close_component(raidPtr, vp, acd);
vp               2599 dev/raidframe/rf_openbsdkintf.c 		vp = raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp;
vp               2601 dev/raidframe/rf_openbsdkintf.c 		rf_close_component(raidPtr, vp, acd);
vp               2704 dev/raidframe/rf_openbsdkintf.c 	struct vnode *vp;
vp               2743 dev/raidframe/rf_openbsdkintf.c 		if (bdevvp(dev, &vp))
vp               2746 dev/raidframe/rf_openbsdkintf.c 		error = VOP_OPEN(vp, FREAD, NOCRED, 0);
vp               2753 dev/raidframe/rf_openbsdkintf.c 			vput(vp);
vp               2758 dev/raidframe/rf_openbsdkintf.c 		error = VOP_IOCTL(vp, DIOCGDINFO, (caddr_t)&label,
vp               2773 dev/raidframe/rf_openbsdkintf.c 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
vp               2774 dev/raidframe/rf_openbsdkintf.c 		vrele(vp);
vp               2793 dev/raidframe/rf_openbsdkintf.c 			if (bdevvp(dev, &vp))
vp               2796 dev/raidframe/rf_openbsdkintf.c 			error = VOP_OPEN(vp, FREAD, NOCRED, 0);
vp               2799 dev/raidframe/rf_openbsdkintf.c 				vput(vp);
vp               2814 dev/raidframe/rf_openbsdkintf.c 			if (!raidread_component_label(dev, vp, clabel)) {
vp               2841 dev/raidframe/rf_openbsdkintf.c 					ac->vp = vp;
vp               2851 dev/raidframe/rf_openbsdkintf.c 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
vp               2852 dev/raidframe/rf_openbsdkintf.c 				vrele(vp);
vp               3301 dev/raidframe/rf_openbsdkintf.c 	struct vnode *vp;
vp               3311 dev/raidframe/rf_openbsdkintf.c 				vp = raidPtr->raid_cinfo[row][column].ci_vp;
vp               3312 dev/raidframe/rf_openbsdkintf.c 				raidread_component_label(dev, vp, &clabel);
vp               3314 dev/raidframe/rf_openbsdkintf.c 				raidwrite_component_label(dev, vp, &clabel);
vp               3325 dev/raidframe/rf_openbsdkintf.c 	struct vnode *vp;
vp               3335 dev/raidframe/rf_openbsdkintf.c 				vp = raidPtr->raid_cinfo[row][column].ci_vp;
vp               3336 dev/raidframe/rf_openbsdkintf.c 				raidread_component_label(dev, vp, &clabel);
vp               3338 dev/raidframe/rf_openbsdkintf.c 				raidwrite_component_label(dev, vp, &clabel);
vp               3353 dev/raidframe/rf_openbsdkintf.c 		if (ac->vp) {
vp               3354 dev/raidframe/rf_openbsdkintf.c 			VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
vp               3355 dev/raidframe/rf_openbsdkintf.c 			vrele(ac->vp);
vp               3356 dev/raidframe/rf_openbsdkintf.c 			ac->vp = NULL;
vp                475 dev/raidframe/rf_reconstruct.c 	struct vnode *vp;
vp                573 dev/raidframe/rf_reconstruct.c 			vp = raidPtr->raid_cinfo[row][col].ci_vp;
vp                575 dev/raidframe/rf_reconstruct.c 			rf_close_component(raidPtr, vp, ac);
vp                587 dev/raidframe/rf_reconstruct.c 		    proc, &vp);
vp                610 dev/raidframe/rf_reconstruct.c 			     VOP_GETATTR(vp, &va, proc->p_ucred, proc)) != 0) {
vp                615 dev/raidframe/rf_reconstruct.c 			retcode = VOP_IOCTL(vp, DIOCGPART, (caddr_t) & dpart,
vp                628 dev/raidframe/rf_reconstruct.c 			raidPtr->raid_cinfo[row][col].ci_vp = vp;
vp                503 dev/vnd.c      		struct vnode *vp;
vp                509 dev/vnd.c      		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
vp                526 dev/vnd.c      		    vnd->sc_vp, vp, bn, nbn, sz);
vp                535 dev/vnd.c      		if (vp->v_type == VBLK || vp->v_type == VCHR)
vp                536 dev/vnd.c      			nbp->vb_buf.b_dev = vp->v_rdev;
vp                543 dev/vnd.c      		nbp->vb_buf.b_vp = vp;
vp                981 dev/vnd.c      	struct vnode *vp = vnd->sc_vp;
vp                984 dev/vnd.c      	DNPRINTF(VDB_FOLLOW, "vndclear(%p): vp %p\n", vnd, vp);
vp                987 dev/vnd.c      	if (vp == NULL)
vp                989 dev/vnd.c      	(void) vn_close(vp, VNDRW(vnd), vnd->sc_cred, p);
vp                437 isofs/cd9660/cd9660_lookup.c 	struct vnode *vp = ITOV(ip);
vp                443 isofs/cd9660/cd9660_lookup.c 	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
vp                159 isofs/cd9660/cd9660_node.c 	struct vnode *vp;
vp                165 isofs/cd9660/cd9660_node.c                        vp = ITOV(ip);
vp                167 isofs/cd9660/cd9660_node.c                        if (vget(vp, LK_EXCLUSIVE, p))
vp                169 isofs/cd9660/cd9660_node.c                        return (vp);
vp                238 isofs/cd9660/cd9660_node.c 	struct vnode *vp = ap->a_vp;
vp                240 isofs/cd9660/cd9660_node.c 	register struct iso_node *ip = VTOI(vp);
vp                244 isofs/cd9660/cd9660_node.c 	if (prtactive && vp->v_usecount != 0)
vp                245 isofs/cd9660/cd9660_node.c 		vprint("cd9660_inactive: pushing active", vp);
vp                249 isofs/cd9660/cd9660_node.c 	VOP_UNLOCK(vp, 0, p);
vp                255 isofs/cd9660/cd9660_node.c 		vrecycle(vp, p);
vp                268 isofs/cd9660/cd9660_node.c 	register struct vnode *vp = ap->a_vp;
vp                269 isofs/cd9660/cd9660_node.c 	register struct iso_node *ip = VTOI(vp);
vp                272 isofs/cd9660/cd9660_node.c 	if (prtactive && vp->v_usecount != 0)
vp                273 isofs/cd9660/cd9660_node.c 		vprint("cd9660_reclaim: pushing active", vp);
vp                283 isofs/cd9660/cd9660_node.c 	cache_purge(vp);
vp                288 isofs/cd9660/cd9660_node.c 	FREE(vp->v_data, M_ISOFSNODE);
vp                289 isofs/cd9660/cd9660_node.c 	vp->v_data = NULL;
vp                101 isofs/cd9660/cd9660_node.h #define VTOI(vp) ((struct iso_node *)(vp)->v_data)
vp                753 isofs/cd9660/cd9660_vfsops.c 	struct vnode *vp, *nvp;
vp                764 isofs/cd9660/cd9660_vfsops.c 	if ((error = getnewvnode(VT_ISOFS, mp, cd9660_vnodeop_p, &vp)) != 0) {
vp                772 isofs/cd9660/cd9660_vfsops.c 	vp->v_data = ip;
vp                773 isofs/cd9660/cd9660_vfsops.c 	ip->i_vnode = vp;
vp                786 isofs/cd9660/cd9660_vfsops.c 		vrele(vp);
vp                799 isofs/cd9660/cd9660_vfsops.c 			vput(vp);
vp                807 isofs/cd9660/cd9660_vfsops.c 			vput(vp);
vp                817 isofs/cd9660/cd9660_vfsops.c 			vput(vp);
vp                826 isofs/cd9660/cd9660_vfsops.c 			vput(vp);
vp                863 isofs/cd9660/cd9660_vfsops.c 			vput(vp);
vp                876 isofs/cd9660/cd9660_vfsops.c 	vp->v_type = VNON;
vp                905 isofs/cd9660/cd9660_vfsops.c 	switch (vp->v_type = IFTOVT(ip->inode.iso_mode)) {
vp                908 isofs/cd9660/cd9660_vfsops.c 		vp->v_op = cd9660_fifoop_p;
vp                911 isofs/cd9660/cd9660_vfsops.c 		vput(vp);
vp                923 isofs/cd9660/cd9660_vfsops.c 		vp->v_op = cd9660_specop_p;
vp                924 isofs/cd9660/cd9660_vfsops.c 		if ((nvp = checkalias(vp, ip->inode.iso_rdev, mp)) != NULL) {
vp                929 isofs/cd9660/cd9660_vfsops.c 			nvp->v_data = vp->v_data;
vp                930 isofs/cd9660/cd9660_vfsops.c 			vp->v_data = NULL;
vp                931 isofs/cd9660/cd9660_vfsops.c 			vp->v_op = spec_vnodeop_p;
vp                932 isofs/cd9660/cd9660_vfsops.c 			vrele(vp);
vp                933 isofs/cd9660/cd9660_vfsops.c 			vgone(vp);
vp                937 isofs/cd9660/cd9660_vfsops.c 			vp = nvp;
vp                938 isofs/cd9660/cd9660_vfsops.c 			ip->i_vnode = vp;
vp                948 isofs/cd9660/cd9660_vfsops.c 		uvm_vnp_setsize(vp, ip->i_size);
vp                953 isofs/cd9660/cd9660_vfsops.c 		vp->v_flag |= VROOT;
vp                959 isofs/cd9660/cd9660_vfsops.c 	*vpp = vp;
vp                968 isofs/cd9660/cd9660_vfsops.c cd9660_vptofh(vp, fhp)
vp                969 isofs/cd9660/cd9660_vfsops.c 	struct vnode *vp;
vp                972 isofs/cd9660/cd9660_vfsops.c 	register struct iso_node *ip = VTOI(vp);
vp                106 isofs/cd9660/cd9660_vnops.c 	register struct vnode *vp;
vp                111 isofs/cd9660/cd9660_vnops.c 	vp = ndp->ni_vp;
vp                112 isofs/cd9660/cd9660_vnops.c 	ip = VTOI(vp);
vp                115 isofs/cd9660/cd9660_vnops.c 	    || vap->va_type != vp->v_type
vp                137 isofs/cd9660/cd9660_vnops.c 	vput(vp);
vp                138 isofs/cd9660/cd9660_vnops.c 	vp->v_type = VNON;
vp                139 isofs/cd9660/cd9660_vnops.c 	vgone(vp);
vp                153 isofs/cd9660/cd9660_vnops.c 	struct vnode *vp = ap->a_vp;
vp                161 isofs/cd9660/cd9660_vnops.c 		switch (vp->v_type) {
vp                227 isofs/cd9660/cd9660_vnops.c 	struct vnode *vp = ap->a_vp;
vp                229 isofs/cd9660/cd9660_vnops.c 	register struct iso_node *ip = VTOI(vp);
vp                244 isofs/cd9660/cd9660_vnops.c 	if (ip->i_size == 0 && vp->v_type  == VLNK) {
vp                271 isofs/cd9660/cd9660_vnops.c 	vap->va_type	= vp->v_type;
vp                283 isofs/cd9660/cd9660_vnops.c 	struct vnode *vp = ap->a_vp;
vp                285 isofs/cd9660/cd9660_vnops.c 	register struct iso_node *ip = VTOI(vp);
vp                329 isofs/cd9660/cd9660_vnops.c 			error = breadn(vp, lbn, size, ra->blks,
vp                333 isofs/cd9660/cd9660_vnops.c 			error = bread(vp, lbn, size, NOCRED, &bp);
vp                795 isofs/cd9660/cd9660_vnops.c 	struct vnode *vp = ap->a_vp;
vp                797 isofs/cd9660/cd9660_vnops.c 	return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags, NULL));
vp                808 isofs/cd9660/cd9660_vnops.c 	struct vnode *vp = ap->a_vp;
vp                810 isofs/cd9660/cd9660_vnops.c 	return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags | LK_RELEASE, NULL));
vp                823 isofs/cd9660/cd9660_vnops.c 	struct vnode *vp = bp->b_vp;
vp                828 isofs/cd9660/cd9660_vnops.c 	ip = VTOI(vp);
vp                829 isofs/cd9660/cd9660_vnops.c 	if (vp->v_type == VBLK || vp->v_type == VCHR)
vp                832 isofs/cd9660/cd9660_vnops.c 		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
vp                850 isofs/cd9660/cd9660_vnops.c 	vp = ip->i_devvp;
vp                851 isofs/cd9660/cd9660_vnops.c 	bp->b_dev = vp->v_rdev;
vp                852 isofs/cd9660/cd9660_vnops.c 	VOCALL (vp->v_op, VOFFSET(vop_strategy), ap);
vp                 94 isofs/udf/udf.h #define	VTOU(vp)	((struct unode *)((vp)->v_data))
vp                190 isofs/udf/udf_subr.c 	struct vnode *vp;
vp                194 isofs/udf/udf_subr.c 	error = udf_vget(ump->um_mountp, lb - ump->um_start - 3, &vp);
vp                198 isofs/udf/udf_subr.c 	up = VTOU(vp);
vp                207 isofs/udf/udf_subr.c 	vput(vp);
vp                456 isofs/udf/udf_vfsops.c 	struct vnode *vp;
vp                468 isofs/udf/udf_vfsops.c 	vp = *vpp;
vp                469 isofs/udf/udf_vfsops.c 	vp->v_flag |= VROOT;
vp                512 isofs/udf/udf_vfsops.c 	struct vnode *vp;
vp                571 isofs/udf/udf_vfsops.c 	if ((error = udf_allocv(mp, &vp, p))) {
vp                577 isofs/udf/udf_vfsops.c 	up->u_vnode = vp;
vp                582 isofs/udf/udf_vfsops.c 	vp->v_data = up;
vp                594 isofs/udf/udf_vfsops.c 		vp->v_type = VBAD;
vp                597 isofs/udf/udf_vfsops.c 		vp->v_type = VDIR;
vp                600 isofs/udf/udf_vfsops.c 		vp->v_type = VREG;
vp                603 isofs/udf/udf_vfsops.c 		vp->v_type = VBLK;
vp                606 isofs/udf/udf_vfsops.c 		vp->v_type = VCHR;
vp                609 isofs/udf/udf_vfsops.c 		vp->v_type = VFIFO;
vp                612 isofs/udf/udf_vfsops.c 		vp->v_type = VSOCK;
vp                615 isofs/udf/udf_vfsops.c 		vp->v_type = VLNK;
vp                618 isofs/udf/udf_vfsops.c 		vp->v_type = VREG;
vp                622 isofs/udf/udf_vfsops.c 	*vpp = vp;
vp                654 isofs/udf/udf_vfsops.c udf_vptofh(struct vnode *vp, struct fid *fhp)
vp                659 isofs/udf/udf_vfsops.c 	up = VTOU(vp);
vp                166 isofs/udf/udf_vnops.c 	struct vnode *vp;
vp                168 isofs/udf/udf_vnops.c 	error = getnewvnode(VT_UDF, mp, udf_vnodeop_p, &vp);
vp                174 isofs/udf/udf_vnops.c 	*vpp = vp;
vp                203 isofs/udf/udf_vnops.c 	struct vnode *vp;
vp                207 isofs/udf/udf_vnops.c 	vp = ap->a_vp;
vp                208 isofs/udf/udf_vnops.c 	up = VTOU(vp);
vp                212 isofs/udf/udf_vnops.c 		switch (vp->v_type) {
vp                310 isofs/udf/udf_vnops.c 	struct vnode *vp;
vp                318 isofs/udf/udf_vnops.c 	vp = ap->a_vp;
vp                320 isofs/udf/udf_vnops.c 	up = VTOU(vp);
vp                337 isofs/udf/udf_vnops.c 	if (vp->v_type & VDIR) {
vp                358 isofs/udf/udf_vnops.c 	vap->va_type = vp->v_type;
vp                417 isofs/udf/udf_vnops.c 	struct vnode *vp = ap->a_vp;
vp                419 isofs/udf/udf_vnops.c 	struct unode *up = VTOU(vp);
vp                698 isofs/udf/udf_vnops.c 	struct vnode *vp;
vp                713 isofs/udf/udf_vnops.c 	vp = ap->a_vp;
vp                715 isofs/udf/udf_vnops.c 	up = VTOU(vp);
vp                833 isofs/udf/udf_vnops.c 	struct vnode *vp;
vp                838 isofs/udf/udf_vnops.c 	vp = bp->b_vp;
vp                839 isofs/udf/udf_vnops.c 	up = VTOU(vp);
vp                853 isofs/udf/udf_vnops.c 		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
vp                872 isofs/udf/udf_vnops.c 		bp->b_dev = vp->v_rdev;
vp                884 isofs/udf/udf_vnops.c 	struct vnode *vp = ap->a_vp;
vp                886 isofs/udf/udf_vnops.c 	return (lockmgr(&VTOU(vp)->u_lock, ap->a_flags, NULL));
vp                894 isofs/udf/udf_vnops.c 	struct vnode *vp = ap->a_vp;
vp                896 isofs/udf/udf_vnops.c 	return (lockmgr(&VTOU(vp)->u_lock, ap->a_flags | LK_RELEASE, NULL));
vp                911 isofs/udf/udf_vnops.c 	struct vnode *vp = ap->a_vp;
vp                912 isofs/udf/udf_vnops.c 	struct unode *up = VTOU(vp);
vp               1120 isofs/udf/udf_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1126 isofs/udf/udf_vnops.c 	VOP_UNLOCK(vp, 0, p);
vp               1135 isofs/udf/udf_vnops.c 	struct vnode *vp;
vp               1138 isofs/udf/udf_vnops.c 	vp = ap->a_vp;
vp               1139 isofs/udf/udf_vnops.c 	up = VTOU(vp);
vp               1152 isofs/udf/udf_vnops.c 		vp->v_data = NULL;
vp                245 kern/exec_elf.c ELFNAME(load_psection)(struct exec_vmcmd_set *vcset, struct vnode *vp,
vp                292 kern/exec_elf.c 			NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp,
vp                296 kern/exec_elf.c 			    base + psize, vp, offset + psize, *prot, flags);
vp                299 kern/exec_elf.c 		NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp, offset,
vp                320 kern/exec_elf.c ELFNAME(read_from)(struct proc *p, struct vnode *vp, u_long off, caddr_t buf,
vp                326 kern/exec_elf.c 	if ((error = vn_rdwr(UIO_READ, vp, buf, size, off, UIO_SYSSPACE,
vp                351 kern/exec_elf.c 	struct vnode *vp;
vp                368 kern/exec_elf.c 	vp = nd.ni_vp;
vp                369 kern/exec_elf.c 	if (vp->v_type != VREG) {
vp                373 kern/exec_elf.c 	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
vp                375 kern/exec_elf.c 	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
vp                379 kern/exec_elf.c 	if ((error = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) != 0)
vp                431 kern/exec_elf.c 			uobj = &vp->v_uvm.u_obj;
vp                 57 kern/exec_subr.c new_vmcmd(evsp, proc, len, addr, vp, offset, prot, flags)
vp                 62 kern/exec_subr.c 	struct	vnode *vp;
vp                 75 kern/exec_subr.c 	if ((vcp->ev_vp = vp) != NULL)
vp                 76 kern/exec_subr.c 		vref(vp);
vp                165 kern/kern_acct.c 	struct vnode *vp;
vp                170 kern/kern_acct.c 	vp = acctp;
vp                171 kern/kern_acct.c 	if (vp == NULL)
vp                230 kern/kern_acct.c 	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&acct, sizeof (acct),
vp                303 kern/kern_descrip.c 	struct vnode *vp;
vp                412 kern/kern_descrip.c 		vp = (struct vnode *)fp->f_data;
vp                434 kern/kern_descrip.c 			error = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg));
vp                443 kern/kern_descrip.c 			error = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg));
vp                447 kern/kern_descrip.c 			error = (VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &fl,
vp                461 kern/kern_descrip.c 		vp = (struct vnode *)fp->f_data;
vp                482 kern/kern_descrip.c 		error = VOP_ADVLOCK(vp, (caddr_t)p, F_GETLK, &fl, F_POSIX);
vp                638 kern/kern_descrip.c 	struct vnode *vp;
vp                656 kern/kern_descrip.c 		vp = (struct vnode *)fp->f_data;
vp                657 kern/kern_descrip.c 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                658 kern/kern_descrip.c 		error = VOP_PATHCONF(vp, SCARG(uap, name), retval);
vp                659 kern/kern_descrip.c 		VOP_UNLOCK(vp, 0, p);
vp               1018 kern/kern_descrip.c 	struct vnode *vp;
vp               1038 kern/kern_descrip.c 		vp = (struct vnode *)fp->f_data;
vp               1039 kern/kern_descrip.c 		(void) VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX);
vp               1073 kern/kern_descrip.c 		vp = (struct vnode *)fp->f_data;
vp               1074 kern/kern_descrip.c 		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
vp               1112 kern/kern_descrip.c 	struct vnode *vp;
vp               1120 kern/kern_descrip.c 	vp = (struct vnode *)fp->f_data;
vp               1127 kern/kern_descrip.c 		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
vp               1140 kern/kern_descrip.c 		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK);
vp               1142 kern/kern_descrip.c 		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK|F_WAIT);
vp                115 kern/kern_exec.c 	struct vnode *vp;
vp                125 kern/kern_exec.c 	epp->ep_vp = vp = ndp->ni_vp;
vp                128 kern/kern_exec.c 	if (vp->v_type == VDIR) {
vp                132 kern/kern_exec.c 	if (vp->v_type != VREG) {
vp                138 kern/kern_exec.c 	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
vp                142 kern/kern_exec.c 	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
vp                147 kern/kern_exec.c 	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
vp                151 kern/kern_exec.c 	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
vp                159 kern/kern_exec.c 	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
vp                163 kern/kern_exec.c 	VOP_UNLOCK(vp, 0, p);
vp                166 kern/kern_exec.c 	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
vp                216 kern/kern_exec.c 	vn_close(vp, FREAD, p->p_ucred, p);
vp                226 kern/kern_exec.c 	vput(vp);
vp                543 kern/kern_exec.c 				struct vnode *vp;
vp                552 kern/kern_exec.c 				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
vp                557 kern/kern_exec.c 				if ((error = VOP_OPEN(vp, flags, p->p_ucred, p)) != 0) {
vp                560 kern/kern_exec.c 					vrele(vp);
vp                564 kern/kern_exec.c 					vp->v_writecount++;
vp                568 kern/kern_exec.c 				fp->f_data = (caddr_t)vp;
vp                 68 kern/kern_ktrace.c 	struct vnode *vp;
vp                 76 kern/kern_ktrace.c 	vp = p->p_tracep;
vp                 79 kern/kern_ktrace.c 	if (vp != NULL)
vp                 80 kern/kern_ktrace.c 		vrele(vp);
vp                291 kern/kern_ktrace.c 	struct vnode *vp = NULL;
vp                312 kern/kern_ktrace.c 		vp = nd.ni_vp;
vp                314 kern/kern_ktrace.c 		VOP_UNLOCK(vp, 0, curp);
vp                315 kern/kern_ktrace.c 		if (vp->v_type != VREG) {
vp                316 kern/kern_ktrace.c 			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
vp                326 kern/kern_ktrace.c 			if (p->p_tracep == vp) {
vp                357 kern/kern_ktrace.c 				ret |= ktrsetchildren(curp, p, ops, facs, vp);
vp                359 kern/kern_ktrace.c 				ret |= ktrops(curp, p, ops, facs, vp);
vp                371 kern/kern_ktrace.c 			ret |= ktrsetchildren(curp, p, ops, facs, vp);
vp                373 kern/kern_ktrace.c 			ret |= ktrops(curp, p, ops, facs, vp);
vp                378 kern/kern_ktrace.c 	if (vp != NULL)
vp                379 kern/kern_ktrace.c 		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
vp                385 kern/kern_ktrace.c ktrops(struct proc *curp, struct proc *p, int ops, int facs, struct vnode *vp)
vp                391 kern/kern_ktrace.c 		ktrsettracevnode(p, vp);
vp                416 kern/kern_ktrace.c     struct vnode *vp)
vp                423 kern/kern_ktrace.c 		ret |= ktrops(curp, p, ops, facs, vp);
vp                450 kern/kern_ktrace.c 	struct vnode *vp = p->p_tracep;
vp                452 kern/kern_ktrace.c 	if (vp == NULL)
vp                469 kern/kern_ktrace.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                470 kern/kern_ktrace.c 	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
vp                471 kern/kern_ktrace.c 	VOP_UNLOCK(vp, 0, p);
vp                480 kern/kern_ktrace.c 		if (p->p_tracep == vp) {
vp               1354 kern/kern_sig.c 	struct vnode *vp;
vp               1406 kern/kern_sig.c 	vp = nd.ni_vp;
vp               1407 kern/kern_sig.c 	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0)
vp               1410 kern/kern_sig.c 	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
vp               1417 kern/kern_sig.c 	VOP_SETATTR(vp, &vattr, cred, p);
vp               1431 kern/kern_sig.c 	error = cpu_coredump(p, vp, cred, &core);
vp               1438 kern/kern_sig.c 	error = uvm_coredump(p, vp, cred, &core);
vp               1441 kern/kern_sig.c 	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&core,
vp               1445 kern/kern_sig.c 	VOP_UNLOCK(vp, 0, p);
vp               1446 kern/kern_sig.c 	error1 = vn_close(vp, FWRITE, cred, p);
vp               1353 kern/kern_sysctl.c 	struct proc *vp;
vp               1384 kern/kern_sysctl.c 	if ((vp = pfind(pid)) == NULL)
vp               1395 kern/kern_sysctl.c 	if (P_ZOMBIE(vp) || (vp->p_flag & P_SYSTEM))
vp               1399 kern/kern_sysctl.c 	if ((vp->p_flag & P_WEXIT))
vp               1403 kern/kern_sysctl.c 	if ((vp->p_flag & P_INEXEC))
vp               1406 kern/kern_sysctl.c 	vp->p_vmspace->vm_refcnt++;	/* XXX */
vp               1419 kern/kern_sysctl.c 	if ((error = uvm_io(&vp->p_vmspace->vm_map, &uio, 0)) != 0)
vp               1476 kern/kern_sysctl.c 		if ((error = uvm_io(&vp->p_vmspace->vm_map, &uio, 0)) != 0)
vp               1498 kern/kern_sysctl.c 		if ((error = uvm_io(&vp->p_vmspace->vm_map, &uio, 0)) != 0)
vp               1546 kern/kern_sysctl.c 	uvmspace_free(vp->p_vmspace);
vp                658 kern/subr_prf.c 		if ((vp != NULL) && (sbuf == tailp)) {			\
vp                664 kern/subr_prf.c 		kputchar(chr, oflags, (struct tty *)vp);			\
vp                669 kern/subr_prf.c kprintf(const char *fmt0, int oflags, void *vp, char *sbuf, va_list ap)
vp                690 kern/subr_prf.c 	if ((oflags & TOBUFONLY) && (vp != NULL))
vp                691 kern/subr_prf.c 		tailp = *(char **)vp;
vp               1116 kern/subr_prf.c 	if ((oflags & TOBUFONLY) && (vp != NULL))
vp               1117 kern/subr_prf.c 		*(char **)vp = sbuf;
vp               1015 kern/tty_pty.c 	struct vnode *vp;
vp               1020 kern/tty_pty.c 	vp = ndp->ni_vp;
vp               1021 kern/tty_pty.c 	if (vp->v_type != VCHR) {
vp               1030 kern/tty_pty.c 	error = VOP_OPEN(vp, FREAD|FWRITE, cred, p);
vp               1037 kern/tty_pty.c 		(void)VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               1044 kern/tty_pty.c 	vp->v_writecount++;
vp               1048 kern/tty_pty.c 	vput(vp);
vp                399 kern/uipc_usrreq.c 	struct vnode *vp;
vp                416 kern/uipc_usrreq.c 	vp = nd.ni_vp;
vp                417 kern/uipc_usrreq.c 	if (vp != NULL) {
vp                419 kern/uipc_usrreq.c 		if (nd.ni_dvp == vp)
vp                423 kern/uipc_usrreq.c 		vrele(vp);
vp                432 kern/uipc_usrreq.c 	vp = nd.ni_vp;
vp                433 kern/uipc_usrreq.c 	vp->v_socket = unp->unp_socket;
vp                434 kern/uipc_usrreq.c 	unp->unp_vnode = vp;
vp                439 kern/uipc_usrreq.c 	VOP_UNLOCK(vp, 0, p);
vp                447 kern/uipc_usrreq.c 	struct vnode *vp;
vp                461 kern/uipc_usrreq.c 	vp = nd.ni_vp;
vp                462 kern/uipc_usrreq.c 	if (vp->v_type != VSOCK) {
vp                466 kern/uipc_usrreq.c 	if ((error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) != 0)
vp                468 kern/uipc_usrreq.c 	so2 = vp->v_socket;
vp                501 kern/uipc_usrreq.c 	vput(vp);
vp                639 kern/uipc_usrreq.c 				struct vnode *vp = (struct vnode *)fp->f_data;
vp                640 kern/uipc_usrreq.c 				if ((vp->v_type == VDIR) &&
vp                641 kern/uipc_usrreq.c 				    !vn_isunder(vp, p->p_cwdi->cwdi_rdir, p)) {
vp                222 kern/vfs_bio.c buf_stub(struct vnode *vp, daddr64_t lblkno)
vp                255 kern/vfs_bio.c 	bgetvp(vp, bp);
vp                377 kern/vfs_bio.c bio_doread(struct vnode *vp, daddr64_t blkno, int size, int async)
vp                381 kern/vfs_bio.c 	bp = getblk(vp, blkno, size, 0, 0);
vp                406 kern/vfs_bio.c bread(struct vnode *vp, daddr64_t blkno, int size, struct ucred *cred,
vp                412 kern/vfs_bio.c 	bp = *bpp = bio_doread(vp, blkno, size, 0);
vp                423 kern/vfs_bio.c breadn(struct vnode *vp, daddr64_t blkno, int size, daddr64_t rablks[],
vp                429 kern/vfs_bio.c 	bp = *bpp = bio_doread(vp, blkno, size, 0);
vp                436 kern/vfs_bio.c 		if (incore(vp, rablks[i]))
vp                440 kern/vfs_bio.c 		(void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
vp                470 kern/vfs_bio.c bread_cluster(struct vnode *vp, daddr64_t blkno, int size, struct buf **rbpp)
vp                477 kern/vfs_bio.c 	*rbpp = bio_doread(vp, blkno, size, 0);
vp                482 kern/vfs_bio.c 	if (VOP_BMAP(vp, blkno + 1, NULL, &sblkno, &maxra))
vp                498 kern/vfs_bio.c 		if (incore(vp, blkno + i + 1)) {
vp                506 kern/vfs_bio.c 		xbpp[i] = buf_stub(vp, blkno + i + 1);
vp                533 kern/vfs_bio.c 		binshash(xbpp[i], BUFHASH(vp, xbpp[i]->b_lblkno));
vp                544 kern/vfs_bio.c 	bp->b_vp = vp;
vp                564 kern/vfs_bio.c 	struct vnode *vp;
vp                567 kern/vfs_bio.c 	vp = bp->b_vp;
vp                568 kern/vfs_bio.c 	if (vp != NULL)
vp                569 kern/vfs_bio.c 		mp = vp->v_type == VBLK? vp->v_specmountpoint : vp->v_mount;
vp                842 kern/vfs_bio.c incore(struct vnode *vp, daddr64_t blkno)
vp                847 kern/vfs_bio.c 	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
vp                848 kern/vfs_bio.c 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
vp                865 kern/vfs_bio.c getblk(struct vnode *vp, daddr64_t blkno, int size, int slpflag, int slptimeo)
vp                881 kern/vfs_bio.c 	bh = BUFHASH(vp, blkno);
vp                883 kern/vfs_bio.c 	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
vp                884 kern/vfs_bio.c 		if (bp->b_lblkno != blkno || bp->b_vp != vp)
vp                931 kern/vfs_bio.c 		bgetvp(vp, bp);
vp                 83 kern/vfs_cache.c #define NCVHASH(vp) (vp)->v_id & ncvhash
vp                111 kern/vfs_cache.c 	struct vnode *vp;
vp                165 kern/vfs_cache.c 	vp = ncp->nc_vp;
vp                166 kern/vfs_cache.c 	vpid = vp->v_id;
vp                167 kern/vfs_cache.c 	if (vp == dvp) {	/* lookup on "." */
vp                173 kern/vfs_cache.c 		error = vget(vp, LK_EXCLUSIVE, p);
vp                180 kern/vfs_cache.c 				vput(vp);
vp                186 kern/vfs_cache.c 		error = vget(vp, LK_EXCLUSIVE, p);
vp                201 kern/vfs_cache.c 	if (error || vpid != vp->v_id) {
vp                203 kern/vfs_cache.c 			vput(vp);
vp                212 kern/vfs_cache.c 		if (vp == dvp || error ||
vp                229 kern/vfs_cache.c 	*vpp = vp;
vp                268 kern/vfs_cache.c cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
vp                278 kern/vfs_cache.c 	nvcpp = &ncvhashtbl[NCVHASH(vp)];
vp                281 kern/vfs_cache.c 		if (ncp->nc_vp == vp &&
vp                282 kern/vfs_cache.c 		    ncp->nc_vpid == vp->v_id &&
vp                285 kern/vfs_cache.c 		    dvp != vp && ncp->nc_dvpid == dvp->v_id) {
vp                335 kern/vfs_cache.c cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
vp                364 kern/vfs_cache.c 	ncp->nc_vp = vp;
vp                365 kern/vfs_cache.c 	if (vp)
vp                366 kern/vfs_cache.c 		ncp->nc_vpid = vp->v_id;
vp                384 kern/vfs_cache.c 	if (vp && vp != dvp && vp->v_type == VDIR &&
vp                388 kern/vfs_cache.c 		nvcpp = &ncvhashtbl[NCVHASH(vp)];
vp                412 kern/vfs_cache.c cache_purge(struct vnode *vp)
vp                417 kern/vfs_cache.c 	vp->v_id = ++nextvnodeid;
vp                426 kern/vfs_cache.c 	vp->v_id = ++nextvnodeid;
vp                 64 kern/vfs_cluster.c 	struct vnode *vp;
vp                 68 kern/vfs_cluster.c 	vp = bp->b_vp;
vp                 77 kern/vfs_cluster.c 		maxclen = MAXBSIZE / vp->v_mount->mnt_stat.f_iosize - 1;
vp                 91 kern/vfs_cluster.c 				cluster_wbuild(vp, NULL, bp->b_bcount,
vp                 97 kern/vfs_cluster.c 				buflist = cluster_collectbufs(vp, ci, bp);
vp                100 kern/vfs_cluster.c 				if (VOP_REALLOCBLKS(vp, buflist)) {
vp                108 kern/vfs_cluster.c 					cluster_wbuild(vp, NULL, bp->b_bcount,
vp                130 kern/vfs_cluster.c 		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
vp                151 kern/vfs_cluster.c 		cluster_wbuild(vp, bp, bp->b_bcount, ci->ci_cstart,
vp                171 kern/vfs_cluster.c cluster_wbuild(struct vnode *vp, struct buf *last_bp, long size,
vp                177 kern/vfs_cluster.c 	if (size != vp->v_mount->mnt_stat.f_iosize)
vp                179 kern/vfs_cluster.c 			size, vp->v_mount->mnt_stat.f_iosize);
vp                182 kern/vfs_cluster.c 	while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
vp                192 kern/vfs_cluster.c 			bp = getblk(vp, start_lbn, size, 0, 0);
vp                206 kern/vfs_cluster.c 	bp = getblk(vp, start_lbn, size, 0, 0);
vp                225 kern/vfs_cluster.c cluster_collectbufs(struct vnode *vp, struct cluster_info *ci,
vp                238 kern/vfs_cluster.c 		(void)bread(vp, lbn, last_bp->b_bcount, NOCRED,
vp                 61 kern/vfs_default.c 	struct vnode *vp, *vq;
vp                 69 kern/vfs_default.c 	vp = ap->a_vp;
vp                 71 kern/vfs_default.c 	if (vp->v_flag & VALIASED) {
vp                 76 kern/vfs_default.c 		if (vp->v_flag & VXLOCK) {
vp                 77 kern/vfs_default.c 			vp->v_flag |= VXWANT;
vp                 78 kern/vfs_default.c 			tsleep(vp, PINOD, "vop_generic_revokeall", 0);
vp                 87 kern/vfs_default.c 		vp->v_flag |= VXLOCK;
vp                 88 kern/vfs_default.c 		while (vp->v_flag & VALIASED) {
vp                 89 kern/vfs_default.c 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
vp                 90 kern/vfs_default.c 				if (vq->v_rdev != vp->v_rdev ||
vp                 91 kern/vfs_default.c 				    vq->v_type != vp->v_type || vp == vq)
vp                103 kern/vfs_default.c 		vp->v_flag &= ~VXLOCK;
vp                106 kern/vfs_default.c 	vgonel(vp, p);
vp                343 kern/vfs_subr.c 	struct vnode *vp;
vp                370 kern/vfs_subr.c 		vp = pool_get(&vnode_pool, PR_WAITOK);
vp                371 kern/vfs_subr.c 		bzero((char *)vp, sizeof *vp);
vp                374 kern/vfs_subr.c 		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
vp                375 kern/vfs_subr.c 		    vp = TAILQ_NEXT(vp, v_freelist)) {
vp                376 kern/vfs_subr.c 			if (VOP_ISLOCKED(vp) == 0)
vp                384 kern/vfs_subr.c 		if (vp == NULL) {
vp                392 kern/vfs_subr.c 		if (vp->v_usecount) {
vp                393 kern/vfs_subr.c 			vprint("free vnode", vp);
vp                398 kern/vfs_subr.c 		TAILQ_REMOVE(listhd, vp, v_freelist);
vp                399 kern/vfs_subr.c 		vp->v_bioflag &= ~VBIOONFREELIST;
vp                402 kern/vfs_subr.c 		if (vp->v_type != VBAD)
vp                403 kern/vfs_subr.c 			vgonel(vp, p);
vp                405 kern/vfs_subr.c 		if (vp->v_data) {
vp                406 kern/vfs_subr.c 			vprint("cleaned vnode", vp);
vp                410 kern/vfs_subr.c 		if (vp->v_numoutput)
vp                414 kern/vfs_subr.c 		vp->v_flag = 0;
vp                415 kern/vfs_subr.c 		vp->v_socket = 0;
vp                417 kern/vfs_subr.c 	vp->v_type = VNON;
vp                418 kern/vfs_subr.c 	cache_purge(vp);
vp                419 kern/vfs_subr.c 	vp->v_tag = tag;
vp                420 kern/vfs_subr.c 	vp->v_op = vops;
vp                421 kern/vfs_subr.c 	insmntque(vp, mp);
vp                422 kern/vfs_subr.c 	*vpp = vp;
vp                423 kern/vfs_subr.c 	vp->v_usecount = 1;
vp                424 kern/vfs_subr.c 	vp->v_data = 0;
vp                425 kern/vfs_subr.c 	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
vp                433 kern/vfs_subr.c insmntque(struct vnode *vp, struct mount *mp)
vp                438 kern/vfs_subr.c 	if (vp->v_mount != NULL)
vp                439 kern/vfs_subr.c 		LIST_REMOVE(vp, v_mntvnodes);
vp                443 kern/vfs_subr.c 	if ((vp->v_mount = mp) != NULL)
vp                444 kern/vfs_subr.c 		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
vp                476 kern/vfs_subr.c 	struct vnode *vp;
vp                489 kern/vfs_subr.c 	vp = nvp;
vp                490 kern/vfs_subr.c 	vp->v_type = type;
vp                491 kern/vfs_subr.c 	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
vp                492 kern/vfs_subr.c 		vput(vp);
vp                493 kern/vfs_subr.c 		vp = nvp;
vp                495 kern/vfs_subr.c 	*vpp = vp;
vp                511 kern/vfs_subr.c 	struct vnode *vp;
vp                519 kern/vfs_subr.c 	for (vp = *vpp; vp; vp = vp->v_specnext) {
vp                520 kern/vfs_subr.c 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
vp                526 kern/vfs_subr.c 		if (vp->v_usecount == 0) {
vp                527 kern/vfs_subr.c 			vgonel(vp, p);
vp                530 kern/vfs_subr.c 		if (vget(vp, LK_EXCLUSIVE, p)) {
vp                539 kern/vfs_subr.c 	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
vp                549 kern/vfs_subr.c 		if (vp != NULLVP) {
vp                551 kern/vfs_subr.c 			vp->v_flag |= VALIASED;
vp                552 kern/vfs_subr.c 			vput(vp);
vp                568 kern/vfs_subr.c 	VOP_UNLOCK(vp, 0, p);
vp                569 kern/vfs_subr.c 	vclean(vp, 0, p);
vp                570 kern/vfs_subr.c 	vp->v_op = nvp->v_op;
vp                571 kern/vfs_subr.c 	vp->v_tag = nvp->v_tag;
vp                573 kern/vfs_subr.c 	insmntque(vp, mp);
vp                574 kern/vfs_subr.c 	return (vp);
vp                587 kern/vfs_subr.c vget(struct vnode *vp, int flags, struct proc *p)
vp                598 kern/vfs_subr.c 	if (vp->v_flag & VXLOCK) {
vp                603 kern/vfs_subr.c 		vp->v_flag |= VXWANT;
vp                604 kern/vfs_subr.c 		ltsleep(vp, PINOD | PNORELOCK, "vget", 0, NULL);
vp                608 kern/vfs_subr.c 	onfreelist = vp->v_bioflag & VBIOONFREELIST;
vp                609 kern/vfs_subr.c 	if (vp->v_usecount == 0 && onfreelist) {
vp                611 kern/vfs_subr.c 		if (vp->v_holdcnt > 0)
vp                612 kern/vfs_subr.c 			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
vp                614 kern/vfs_subr.c 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
vp                615 kern/vfs_subr.c 		vp->v_bioflag &= ~VBIOONFREELIST;
vp                619 kern/vfs_subr.c  	vp->v_usecount++;
vp                621 kern/vfs_subr.c 		if ((error = vn_lock(vp, flags, p)) != 0) {
vp                622 kern/vfs_subr.c 			vp->v_usecount--;
vp                623 kern/vfs_subr.c 			if (vp->v_usecount == 0 && onfreelist)
vp                624 kern/vfs_subr.c 				vputonfreelist(vp);
vp                635 kern/vfs_subr.c vref(struct vnode *vp)
vp                638 kern/vfs_subr.c 	if (vp->v_usecount == 0)
vp                641 kern/vfs_subr.c 	vp->v_usecount++;
vp                645 kern/vfs_subr.c vputonfreelist(struct vnode *vp)
vp                652 kern/vfs_subr.c 	if (vp->v_usecount != 0)
vp                655 kern/vfs_subr.c 	if (vp->v_bioflag & VBIOONFREELIST) {
vp                656 kern/vfs_subr.c 		vprint("vnode already on free list: ", vp);
vp                661 kern/vfs_subr.c 	vp->v_bioflag |= VBIOONFREELIST;
vp                663 kern/vfs_subr.c 	if (vp->v_holdcnt > 0)
vp                668 kern/vfs_subr.c 	if (vp->v_type == VBAD)
vp                669 kern/vfs_subr.c 		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
vp                671 kern/vfs_subr.c 		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
vp                680 kern/vfs_subr.c vput(struct vnode *vp)
vp                685 kern/vfs_subr.c 	if (vp == NULL)
vp                690 kern/vfs_subr.c 	if (vp->v_usecount == 0) {
vp                691 kern/vfs_subr.c 		vprint("vput: bad ref count", vp);
vp                695 kern/vfs_subr.c 	vp->v_usecount--;
vp                696 kern/vfs_subr.c 	if (vp->v_usecount > 0) {
vp                697 kern/vfs_subr.c 		VOP_UNLOCK(vp, 0, p);
vp                702 kern/vfs_subr.c 	if (vp->v_writecount != 0) {
vp                703 kern/vfs_subr.c 		vprint("vput: bad writecount", vp);
vp                708 kern/vfs_subr.c 	VOP_INACTIVE(vp, p);
vp                710 kern/vfs_subr.c 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
vp                711 kern/vfs_subr.c 		vputonfreelist(vp);
vp                719 kern/vfs_subr.c vrele(struct vnode *vp)
vp                724 kern/vfs_subr.c 	if (vp == NULL)
vp                728 kern/vfs_subr.c 	if (vp->v_usecount == 0) {
vp                729 kern/vfs_subr.c 		vprint("vrele: bad ref count", vp);
vp                733 kern/vfs_subr.c 	vp->v_usecount--;
vp                734 kern/vfs_subr.c 	if (vp->v_usecount > 0) {
vp                739 kern/vfs_subr.c 	if (vp->v_writecount != 0) {
vp                740 kern/vfs_subr.c 		vprint("vrele: bad writecount", vp);
vp                745 kern/vfs_subr.c 	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
vp                747 kern/vfs_subr.c 		vprint("vrele: cannot lock", vp);
vp                752 kern/vfs_subr.c 	VOP_INACTIVE(vp, p);
vp                754 kern/vfs_subr.c 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
vp                755 kern/vfs_subr.c 		vputonfreelist(vp);
vp                758 kern/vfs_subr.c void vhold(struct vnode *vp);
vp                764 kern/vfs_subr.c vhold(struct vnode *vp)
vp                770 kern/vfs_subr.c 	if ((vp->v_bioflag & VBIOONFREELIST) &&
vp                771 kern/vfs_subr.c 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
vp                772 kern/vfs_subr.c 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
vp                773 kern/vfs_subr.c 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
vp                775 kern/vfs_subr.c 	vp->v_holdcnt++;
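
The vget(), vref(), vputonfreelist(), vput(), vrele() and vhold() hits above from kern/vfs_subr.c implement the vnode use-count discipline: vget() acquires a reference (and usually the lock), vput()/vrele() drop it, and a vnode whose count reaches zero is parked on the free or hold list. A minimal caller-side sketch; use_vnode() is a hypothetical name, not code from this tree:

int
use_vnode(struct vnode *vp, struct proc *p)
{
	int error;

	/* vget() adds a use reference, pulls the vnode off the free or
	 * hold list if it sat there, and with LK_EXCLUSIVE also locks it. */
	if ((error = vget(vp, LK_EXCLUSIVE, p)) != 0)
		return (error);

	/* ... operate on the referenced, locked vnode ... */

	/* vput() unlocks and drops the reference; when v_usecount hits
	 * zero the vnode goes back on the free list via vputonfreelist().
	 * vrele() is the counterpart for a vnode already unlocked. */
	vput(vp);
	return (0);
}
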
vp                794 kern/vfs_subr.c 	struct vnode *vp, *nvp;
vp                798 kern/vfs_subr.c 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
vp                799 kern/vfs_subr.c 		if (vp->v_mount != mp)
vp                801 kern/vfs_subr.c 		nvp = LIST_NEXT(vp, v_mntvnodes);
vp                803 kern/vfs_subr.c 		error = func(vp, arg);
vp                819 kern/vfs_subr.c vflush_vnode(struct vnode *vp, void *arg) {
vp                823 kern/vfs_subr.c 	if (vp == va->skipvp) {
vp                827 kern/vfs_subr.c 	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
vp                836 kern/vfs_subr.c 	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
vp                844 kern/vfs_subr.c 	if (vp->v_usecount == 0) {
vp                845 kern/vfs_subr.c 		vgonel(vp, p);
vp                855 kern/vfs_subr.c 		if (vp->v_type != VBLK && vp->v_type != VCHR) {
vp                856 kern/vfs_subr.c 			vgonel(vp, p);
vp                858 kern/vfs_subr.c 			vclean(vp, 0, p);
vp                859 kern/vfs_subr.c 			vp->v_op = spec_vnodeop_p;
vp                860 kern/vfs_subr.c 			insmntque(vp, (struct mount *)0);
vp                867 kern/vfs_subr.c 		vprint("vflush: busy vnode", vp);
vp                892 kern/vfs_subr.c vclean(struct vnode *vp, int flags, struct proc *p)
vp                902 kern/vfs_subr.c 	if ((active = vp->v_usecount) != 0)
vp                903 kern/vfs_subr.c 		vp->v_usecount++;
vp                909 kern/vfs_subr.c 	if (vp->v_flag & VXLOCK)
vp                911 kern/vfs_subr.c 	vp->v_flag |= VXLOCK;
vp                919 kern/vfs_subr.c 	VOP_LOCK(vp, LK_DRAIN, p);
vp                924 kern/vfs_subr.c 	uvm_vnp_terminate(vp);
vp                929 kern/vfs_subr.c 		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
vp                937 kern/vfs_subr.c 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
vp                938 kern/vfs_subr.c 		VOP_INACTIVE(vp, p);
vp                944 kern/vfs_subr.c 		VOP_UNLOCK(vp, 0, p);
vp                950 kern/vfs_subr.c 	if (VOP_RECLAIM(vp, p))
vp                953 kern/vfs_subr.c 		vp->v_usecount--;
vp                954 kern/vfs_subr.c 		if (vp->v_usecount == 0) {
vp                955 kern/vfs_subr.c 			if (vp->v_holdcnt > 0)
vp                957 kern/vfs_subr.c 			vputonfreelist(vp);
vp                960 kern/vfs_subr.c 	cache_purge(vp);
vp                965 kern/vfs_subr.c 	vp->v_op = dead_vnodeop_p;
vp                966 kern/vfs_subr.c 	VN_KNOTE(vp, NOTE_REVOKE);
vp                967 kern/vfs_subr.c 	vp->v_tag = VT_NON;
vp                968 kern/vfs_subr.c 	vp->v_flag &= ~VXLOCK;
vp                970 kern/vfs_subr.c 	vp->v_flag &= ~VLOCKSWORK;
vp                972 kern/vfs_subr.c 	if (vp->v_flag & VXWANT) {
vp                973 kern/vfs_subr.c 		vp->v_flag &= ~VXWANT;
vp                974 kern/vfs_subr.c 		wakeup(vp);
vp                982 kern/vfs_subr.c vrecycle(struct vnode *vp, struct proc *p)
vp                984 kern/vfs_subr.c 	if (vp->v_usecount == 0) {
vp                985 kern/vfs_subr.c 		vgonel(vp, p);
vp                996 kern/vfs_subr.c vgone(struct vnode *vp)
vp                999 kern/vfs_subr.c 	vgonel(vp, p);
vp               1006 kern/vfs_subr.c vgonel(struct vnode *vp, struct proc *p)
vp               1017 kern/vfs_subr.c 	if (vp->v_flag & VXLOCK) {
vp               1018 kern/vfs_subr.c 		vp->v_flag |= VXWANT;
vp               1019 kern/vfs_subr.c 		ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, NULL);
vp               1026 kern/vfs_subr.c 	vclean(vp, DOCLOSE, p);
vp               1030 kern/vfs_subr.c 	if (vp->v_mount != NULL)
vp               1031 kern/vfs_subr.c 		insmntque(vp, (struct mount *)0);
vp               1036 kern/vfs_subr.c 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
vp               1037 kern/vfs_subr.c 		if (*vp->v_hashchain == vp) {
vp               1038 kern/vfs_subr.c 			*vp->v_hashchain = vp->v_specnext;
vp               1040 kern/vfs_subr.c 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
vp               1041 kern/vfs_subr.c 				if (vq->v_specnext != vp)
vp               1043 kern/vfs_subr.c 				vq->v_specnext = vp->v_specnext;
vp               1049 kern/vfs_subr.c 		if (vp->v_flag & VALIASED) {
vp               1051 kern/vfs_subr.c 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
vp               1052 kern/vfs_subr.c 				if (vq->v_rdev != vp->v_rdev ||
vp               1053 kern/vfs_subr.c 				    vq->v_type != vp->v_type)
vp               1063 kern/vfs_subr.c 			vp->v_flag &= ~VALIASED;
vp               1071 kern/vfs_subr.c 		mp = vp->v_specmountpoint;
vp               1079 kern/vfs_subr.c 		FREE(vp->v_specinfo, M_VNODE);
vp               1080 kern/vfs_subr.c 		vp->v_specinfo = NULL;
vp               1086 kern/vfs_subr.c 	vp->v_type = VBAD;
vp               1092 kern/vfs_subr.c 	if (vp->v_usecount == 0 &&
vp               1093 kern/vfs_subr.c 	    (vp->v_bioflag & VBIOONFREELIST)) {
vp               1098 kern/vfs_subr.c 		if (vp->v_holdcnt > 0)
vp               1101 kern/vfs_subr.c 		if (TAILQ_FIRST(&vnode_free_list) != vp) {
vp               1102 kern/vfs_subr.c 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
vp               1103 kern/vfs_subr.c 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
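
The vclean(), vrecycle(), vgone() and vgonel() hits above form the reclaim path: vgonel() waits out any VXLOCK holder, runs vclean() (with DOCLOSE when asked) to flush buffers and call VOP_INACTIVE()/VOP_RECLAIM(), detaches the vnode from its mount and any special-device alias chain, and finally marks it VBAD. A sketch of the gentle variant mirroring vrecycle(); recycle_if_idle() is a hypothetical name:

int
recycle_if_idle(struct vnode *vp, struct proc *p)
{
	/* Only an unreferenced vnode may be torn down; a busy one is
	 * left for vput()/vrele() to retire later. */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);		/* vclean(vp, DOCLOSE, p) + mark VBAD */
		return (1);
	}
	return (0);
}
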
vp               1115 kern/vfs_subr.c 	struct vnode *vp;
vp               1118 kern/vfs_subr.c 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
vp               1119 kern/vfs_subr.c 		if (dev != vp->v_rdev || type != vp->v_type)
vp               1121 kern/vfs_subr.c 		*vpp = vp;
vp               1135 kern/vfs_subr.c 	struct vnode *vp;
vp               1139 kern/vfs_subr.c 		if (vfinddev(makedev(maj, mn), type, &vp))
vp               1140 kern/vfs_subr.c 			VOP_REVOKE(vp, REVOKEALL);
vp               1147 kern/vfs_subr.c vcount(struct vnode *vp)
vp               1153 kern/vfs_subr.c 	if ((vp->v_flag & VALIASED) == 0)
vp               1154 kern/vfs_subr.c 		return (vp->v_usecount);
vp               1155 kern/vfs_subr.c 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
vp               1157 kern/vfs_subr.c 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
vp               1162 kern/vfs_subr.c 		if (vq->v_usecount == 0 && vq != vp) {
vp               1179 kern/vfs_subr.c vprint(char *label, struct vnode *vp)
vp               1186 kern/vfs_subr.c 		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
vp               1187 kern/vfs_subr.c 		vp->v_holdcnt);
vp               1189 kern/vfs_subr.c 	if (vp->v_flag & VROOT)
vp               1191 kern/vfs_subr.c 	if (vp->v_flag & VTEXT)
vp               1193 kern/vfs_subr.c 	if (vp->v_flag & VSYSTEM)
vp               1195 kern/vfs_subr.c 	if (vp->v_flag & VXLOCK)
vp               1197 kern/vfs_subr.c 	if (vp->v_flag & VXWANT)
vp               1199 kern/vfs_subr.c 	if (vp->v_bioflag & VBIOWAIT)
vp               1201 kern/vfs_subr.c 	if (vp->v_bioflag & VBIOONFREELIST)
vp               1203 kern/vfs_subr.c 	if (vp->v_bioflag & VBIOONSYNCLIST)
vp               1205 kern/vfs_subr.c 	if (vp->v_flag & VALIASED)
vp               1209 kern/vfs_subr.c 	if (vp->v_data == NULL) {
vp               1213 kern/vfs_subr.c 		VOP_PRINT(vp);
vp               1227 kern/vfs_subr.c 	struct vnode *vp;
vp               1237 kern/vfs_subr.c 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
vp               1238 kern/vfs_subr.c 			if (VOP_ISLOCKED(vp))
vp               1239 kern/vfs_subr.c 				vprint((char *)0, vp);
vp               1317 kern/vfs_subr.c 	struct vnode *vp, *nvp;
vp               1336 kern/vfs_subr.c 		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
vp               1337 kern/vfs_subr.c 		    vp = nvp) {
vp               1343 kern/vfs_subr.c 			if (vp->v_mount != mp) {
vp               1349 kern/vfs_subr.c 			nvp = LIST_NEXT(vp, v_mntvnodes);
vp               1355 kern/vfs_subr.c 			if ((error = copyout(&vp,
vp               1358 kern/vfs_subr.c 			   (error = copyout(vp,
vp               1380 kern/vfs_subr.c vfs_mountedon(struct vnode *vp)
vp               1385 kern/vfs_subr.c  	if (vp->v_specmountpoint != NULL)
vp               1387 kern/vfs_subr.c 	if (vp->v_flag & VALIASED) {
vp               1388 kern/vfs_subr.c 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
vp               1389 kern/vfs_subr.c 			if (vq->v_rdev != vp->v_rdev ||
vp               1390 kern/vfs_subr.c 			    vq->v_type != vp->v_type)
vp               1778 kern/vfs_subr.c vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
vp               1784 kern/vfs_subr.c 	while (vp->v_numoutput) {
vp               1785 kern/vfs_subr.c 		vp->v_bioflag |= VBIOWAIT;
vp               1786 kern/vfs_subr.c 		error = tsleep(&vp->v_numoutput,
vp               1801 kern/vfs_subr.c vwakeup(struct vnode *vp)
vp               1805 kern/vfs_subr.c 	if (vp != NULL) {
vp               1806 kern/vfs_subr.c 		if (vp->v_numoutput-- == 0)
vp               1808 kern/vfs_subr.c 		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
vp               1809 kern/vfs_subr.c 			vp->v_bioflag &= ~VBIOWAIT;
vp               1810 kern/vfs_subr.c 			wakeup(&vp->v_numoutput);
vp               1820 kern/vfs_subr.c vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
vp               1828 kern/vfs_subr.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp               1834 kern/vfs_subr.c 		vwaitforio(vp, 0, "vinvalbuf", 0);
vp               1835 kern/vfs_subr.c 		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
vp               1837 kern/vfs_subr.c 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
vp               1840 kern/vfs_subr.c 			if (vp->v_numoutput > 0 ||
vp               1841 kern/vfs_subr.c 			    !LIST_EMPTY(&vp->v_dirtyblkhd))
vp               1849 kern/vfs_subr.c 		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
vp               1854 kern/vfs_subr.c 		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
vp               1892 kern/vfs_subr.c 	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
vp               1899 kern/vfs_subr.c vflushbuf(struct vnode *vp, int sync)
vp               1906 kern/vfs_subr.c 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
vp               1907 kern/vfs_subr.c 	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
vp               1920 kern/vfs_subr.c 		if (bp->b_vp == vp || sync == 0)
vp               1930 kern/vfs_subr.c 	vwaitforio(vp, 0, "vflushbuf", 0);
vp               1931 kern/vfs_subr.c 	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
vp               1934 kern/vfs_subr.c 		vprint("vflushbuf: dirty", vp);
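
vwaitforio(), vwakeup(), vinvalbuf() and vflushbuf() above track a vnode's in-flight writes and buffer lists; vinvalbuf() with V_SAVE first pushes dirty buffers out through VOP_FSYNC() and then frees what remains, which is how the mount, revoke and spec_close paths later in this index call it. A one-call usage sketch; drop_cached_buffers() is a hypothetical wrapper:

int
drop_cached_buffers(struct vnode *vp, struct ucred *cred, struct proc *p)
{
	/* V_SAVE: write dirty buffers back before invalidating them. */
	return (vinvalbuf(vp, V_SAVE, cred, p, 0, 0));
}
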
vp               1947 kern/vfs_subr.c bgetvp(struct vnode *vp, struct buf *bp)
vp               1954 kern/vfs_subr.c 	vhold(vp);
vp               1955 kern/vfs_subr.c 	bp->b_vp = vp;
vp               1956 kern/vfs_subr.c 	if (vp->v_type == VBLK || vp->v_type == VCHR)
vp               1957 kern/vfs_subr.c 		bp->b_dev = vp->v_rdev;
vp               1963 kern/vfs_subr.c 	bufinsvn(bp, &vp->v_cleanblkhd);
vp               1974 kern/vfs_subr.c 	struct vnode *vp;
vp               1978 kern/vfs_subr.c 	if ((vp = bp->b_vp) == (struct vnode *) 0)
vp               1985 kern/vfs_subr.c 	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
vp               1986 kern/vfs_subr.c 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
vp               1987 kern/vfs_subr.c 		vp->v_bioflag &= ~VBIOONSYNCLIST;
vp               1988 kern/vfs_subr.c 		LIST_REMOVE(vp, v_synclist);
vp               1993 kern/vfs_subr.c 	if (vp->v_holdcnt == 0)
vp               1996 kern/vfs_subr.c 	vp->v_holdcnt--;
vp               2002 kern/vfs_subr.c 	if ((vp->v_bioflag & VBIOONFREELIST) &&
vp               2003 kern/vfs_subr.c 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
vp               2004 kern/vfs_subr.c 		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
vp               2005 kern/vfs_subr.c 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp               2049 kern/vfs_subr.c 	struct vnode *vp = bp->b_vp;
vp               2064 kern/vfs_subr.c 		listheadp = &vp->v_cleanblkhd;
vp               2065 kern/vfs_subr.c 		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
vp               2066 kern/vfs_subr.c 		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
vp               2067 kern/vfs_subr.c 			vp->v_bioflag &= ~VBIOONSYNCLIST;
vp               2068 kern/vfs_subr.c 			LIST_REMOVE(vp, v_synclist);
vp               2071 kern/vfs_subr.c 		listheadp = &vp->v_dirtyblkhd;
vp               2072 kern/vfs_subr.c 		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
vp               2073 kern/vfs_subr.c 			switch (vp->v_type) {
vp               2078 kern/vfs_subr.c 				if (vp->v_specmountpoint != NULL) {
vp               2086 kern/vfs_subr.c 			vn_syncer_add_to_worklist(vp, delay);
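
bgetvp() and the release/reassign code that follows it tie buffers to their vnode: bgetvp() takes a hold reference (vhold()) and queues the buffer on v_cleanblkhd, the reassign path (reassignbuf() in the stock tree) moves a dirtied buffer to v_dirtyblkhd and schedules the vnode on the syncer worklist, and the release path (brelvp() in the stock tree) drops the hold and pulls the vnode off the sync list once its dirty list empties. A sketch of the round trip, assuming the usual buffer-cache locking; buffer_roundtrip() is a hypothetical name:

void
buffer_roundtrip(struct vnode *vp, struct buf *bp)
{
	/* Associate: vhold(vp), bp->b_vp = vp, buffer onto v_cleanblkhd. */
	bgetvp(vp, bp);

	/* A later write would mark the buffer delayed-write and have the
	 * reassign path move it to v_dirtyblkhd and call
	 * vn_syncer_add_to_worklist(). */

	/* Release: drop the hold; with no other references the vnode
	 * migrates from the hold list back to the free list. */
	brelvp(bp);
}
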
vp               2162 kern/vfs_subr.c vn_isdisk(struct vnode *vp, int *errp)
vp               2164 kern/vfs_subr.c 	if (vp->v_type != VBLK && vp->v_type != VCHR)
vp               2202 kern/vfs_subr.c vfs_vnode_print(struct vnode *vp, int full, int (*pr)(const char *, ...))
vp               2207 kern/vfs_subr.c 	      vp->v_tag > NENTS(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
vp               2208 kern/vfs_subr.c 	      vp->v_type > NENTS(vtypes)? "<unk>":vtypes[vp->v_type],
vp               2209 kern/vfs_subr.c 	      vp->v_type, vp->v_mount, vp->v_mountedhere);
vp               2212 kern/vfs_subr.c 	      vp->v_data, vp->v_usecount, vp->v_writecount,
vp               2213 kern/vfs_subr.c 	      vp->v_holdcnt, vp->v_numoutput);
vp               2221 kern/vfs_subr.c 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
vp               2227 kern/vfs_subr.c 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
vp               2238 kern/vfs_subr.c 	struct vnode *vp;
vp               2269 kern/vfs_subr.c 	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
vp               2270 kern/vfs_subr.c 		if (VOP_ISLOCKED(vp)) {
vp               2271 kern/vfs_subr.c 			if (!LIST_NEXT(vp, v_mntvnodes))
vp               2272 kern/vfs_subr.c 				(*pr)(" %p", vp);
vp               2274 kern/vfs_subr.c 				(*pr)("\n\t%p", vp);
vp               2276 kern/vfs_subr.c 				(*pr)(", %p", vp);
vp               2283 kern/vfs_subr.c 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
vp               2284 kern/vfs_subr.c 			if (!LIST_NEXT(vp, v_mntvnodes))
vp               2285 kern/vfs_subr.c 				(*pr)(" %p", vp);
vp               2287 kern/vfs_subr.c 				(*pr)(" %p,\n\t", vp);
vp               2289 kern/vfs_subr.c 				(*pr)(" %p,", vp);
vp               2290 kern/vfs_subr.c 		(*pr)("\n", vp);
vp                115 kern/vfs_sync.c vn_syncer_add_to_worklist(struct vnode *vp, int delay)
vp                124 kern/vfs_sync.c 	if (vp->v_bioflag & VBIOONSYNCLIST)
vp                125 kern/vfs_sync.c 		LIST_REMOVE(vp, v_synclist);
vp                127 kern/vfs_sync.c 	vp->v_bioflag |= VBIOONSYNCLIST;
vp                128 kern/vfs_sync.c 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
vp                139 kern/vfs_sync.c 	struct vnode *vp;
vp                158 kern/vfs_sync.c 		while ((vp = LIST_FIRST(slp)) != NULL) {
vp                159 kern/vfs_sync.c 			if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT, p)) {
vp                165 kern/vfs_sync.c 				vn_syncer_add_to_worklist(vp, 1);
vp                169 kern/vfs_sync.c 			(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
vp                170 kern/vfs_sync.c 			vput(vp);
vp                172 kern/vfs_sync.c 			if (LIST_FIRST(slp) == vp) {
vp                180 kern/vfs_sync.c 				if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
vp                181 kern/vfs_sync.c 				    vp->v_type != VBLK) {
vp                182 kern/vfs_sync.c 					vprint("fsync failed", vp);
vp                183 kern/vfs_sync.c 					if (vp->v_mount != NULL)
vp                185 kern/vfs_sync.c 						    vp->v_mount->mnt_stat.f_mntonname);
vp                195 kern/vfs_sync.c 				vn_syncer_add_to_worklist(vp, syncdelay);
vp                292 kern/vfs_sync.c 	struct vnode *vp;
vp                297 kern/vfs_sync.c 	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
vp                301 kern/vfs_sync.c 	vp->v_writecount = 1;
vp                302 kern/vfs_sync.c 	vp->v_type = VNON;
vp                319 kern/vfs_sync.c 	vn_syncer_add_to_worklist(vp, next);
vp                320 kern/vfs_sync.c 	mp->mnt_syncer = vp;
vp                370 kern/vfs_sync.c 	struct vnode *vp = ap->a_vp;
vp                373 kern/vfs_sync.c 	if (vp->v_usecount == 0) {
vp                374 kern/vfs_sync.c 		VOP_UNLOCK(vp, 0, ap->a_p);
vp                378 kern/vfs_sync.c 	vp->v_mount->mnt_syncer = NULL;
vp                382 kern/vfs_sync.c 	LIST_REMOVE(vp, v_synclist);
vp                383 kern/vfs_sync.c 	vp->v_bioflag &= ~VBIOONSYNCLIST;
vp                387 kern/vfs_sync.c 	vp->v_writecount = 0;
vp                388 kern/vfs_sync.c 	vput(vp);
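
The kern/vfs_sync.c hits implement the lazy syncer: vn_syncer_add_to_worklist() files a vnode into one of the syncer_workitem_pending[] slots, the syncer thread walks the pending slots roughly once per second with vget() + VOP_FSYNC(MNT_LAZY) + vput(), and vfs_allocate_syncvnode() gives each mount a placeholder sync vnode so the whole filesystem gets periodic attention. A scheduling-side sketch mirroring the guard used on the buffer reassign path; schedule_writeback() is a hypothetical name and syncdelay is assumed to be the tree's default delay:

void
schedule_writeback(struct vnode *vp)
{
	/* vn_syncer_add_to_worklist() re-files a vnode that is already
	 * listed, so the flag test only avoids redundant work. */
	if ((vp->v_bioflag & VBIOONSYNCLIST) == 0)
		vn_syncer_add_to_worklist(vp, syncdelay);
}
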
vp                 88 kern/vfs_syscalls.c 	struct vnode *vp;
vp                116 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp                118 kern/vfs_syscalls.c 		if ((vp->v_flag & VROOT) == 0) {
vp                119 kern/vfs_syscalls.c 			vput(vp);
vp                122 kern/vfs_syscalls.c 		mp = vp->v_mount;
vp                130 kern/vfs_syscalls.c 			vput(vp);
vp                140 kern/vfs_syscalls.c 			vput(vp);
vp                150 kern/vfs_syscalls.c 				vput(vp);
vp                158 kern/vfs_syscalls.c 			vput(vp);
vp                161 kern/vfs_syscalls.c 		VOP_UNLOCK(vp, 0, p);
vp                169 kern/vfs_syscalls.c 	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) ||
vp                172 kern/vfs_syscalls.c 		vput(vp);
vp                182 kern/vfs_syscalls.c 			vput(vp);
vp                186 kern/vfs_syscalls.c 		if (vp->v_mount->mnt_flag & MNT_NOEXEC)
vp                189 kern/vfs_syscalls.c 	if ((error = vinvalbuf(vp, V_SAVE, p->p_ucred, p, 0, 0)) != 0) {
vp                190 kern/vfs_syscalls.c 		vput(vp);
vp                193 kern/vfs_syscalls.c 	if (vp->v_type != VDIR) {
vp                194 kern/vfs_syscalls.c 		vput(vp);
vp                212 kern/vfs_syscalls.c 			vput(vp);
vp                218 kern/vfs_syscalls.c 		vput(vp);
vp                228 kern/vfs_syscalls.c 		vput(vp);
vp                232 kern/vfs_syscalls.c 	if (vp->v_mountedhere != NULL) {
vp                233 kern/vfs_syscalls.c 		vput(vp);
vp                248 kern/vfs_syscalls.c 	mp->mnt_vnodecovered = vp;
vp                272 kern/vfs_syscalls.c 		vrele(vp);
vp                293 kern/vfs_syscalls.c 	vp->v_mountedhere = mp;
vp                298 kern/vfs_syscalls.c 	cache_purge(vp);
vp                302 kern/vfs_syscalls.c 		checkdirs(vp);
vp                303 kern/vfs_syscalls.c 		VOP_UNLOCK(vp, 0, p);
vp                309 kern/vfs_syscalls.c 			vrele(vp);
vp                314 kern/vfs_syscalls.c 		vput(vp);
vp                370 kern/vfs_syscalls.c 	struct vnode *vp;
vp                379 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp                380 kern/vfs_syscalls.c 	mp = vp->v_mount;
vp                388 kern/vfs_syscalls.c 		vput(vp);
vp                396 kern/vfs_syscalls.c 		vput(vp);
vp                403 kern/vfs_syscalls.c 	if ((vp->v_flag & VROOT) == 0) {
vp                404 kern/vfs_syscalls.c 		vput(vp);
vp                407 kern/vfs_syscalls.c 	vput(vp);
vp                412 kern/vfs_syscalls.c 	return (dounmount(mp, SCARG(uap, flags), p, vp));
vp                700 kern/vfs_syscalls.c 	struct vnode *vp, *tdp;
vp                707 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp                708 kern/vfs_syscalls.c 	VREF(vp);
vp                710 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                711 kern/vfs_syscalls.c 	if (vp->v_type != VDIR)
vp                714 kern/vfs_syscalls.c 		error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
vp                716 kern/vfs_syscalls.c 	while (!error && (mp = vp->v_mountedhere) != NULL) {
vp                723 kern/vfs_syscalls.c 		vput(vp);
vp                724 kern/vfs_syscalls.c 		vp = tdp;
vp                727 kern/vfs_syscalls.c 		vput(vp);
vp                730 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp                732 kern/vfs_syscalls.c 	fdp->fd_cdir = vp;
vp                799 kern/vfs_syscalls.c 	struct vnode *vp;
vp                804 kern/vfs_syscalls.c 	vp = ndp->ni_vp;
vp                805 kern/vfs_syscalls.c 	if (vp->v_type != VDIR)
vp                808 kern/vfs_syscalls.c 		error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
vp                810 kern/vfs_syscalls.c 		vput(vp);
vp                812 kern/vfs_syscalls.c 		VOP_UNLOCK(vp, 0, p);
vp                830 kern/vfs_syscalls.c 	struct vnode *vp;
vp                866 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp                870 kern/vfs_syscalls.c 	fp->f_data = vp;
vp                882 kern/vfs_syscalls.c 		VOP_UNLOCK(vp, 0, p);
vp                883 kern/vfs_syscalls.c 		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
vp                890 kern/vfs_syscalls.c 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                896 kern/vfs_syscalls.c 		else if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                898 kern/vfs_syscalls.c 		else if (vp->v_type == VDIR)
vp                900 kern/vfs_syscalls.c 		else if ((error = vn_writechk(vp)) == 0) {
vp                903 kern/vfs_syscalls.c 			error = VOP_SETATTR(vp, &vattr, fp->f_cred, p);
vp                906 kern/vfs_syscalls.c 			VOP_UNLOCK(vp, 0, p);
vp                913 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp                931 kern/vfs_syscalls.c 	struct vnode *vp;
vp                947 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp                949 kern/vfs_syscalls.c 	fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
vp                950 kern/vfs_syscalls.c 	error = VFS_VPTOFH(vp, &fh.fh_fid);
vp                951 kern/vfs_syscalls.c 	vput(vp);
vp                973 kern/vfs_syscalls.c 	struct vnode *vp = NULL;
vp               1008 kern/vfs_syscalls.c 	if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)) != 0) {
vp               1009 kern/vfs_syscalls.c 		vp = NULL;	/* most likely unnecessary sanity for bad: */
vp               1015 kern/vfs_syscalls.c 	if (vp->v_type == VSOCK) {
vp               1020 kern/vfs_syscalls.c 		if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
vp               1024 kern/vfs_syscalls.c 		if (vp->v_type == VDIR) {
vp               1028 kern/vfs_syscalls.c 		if ((error = vn_writechk(vp)) != 0 ||
vp               1029 kern/vfs_syscalls.c 		    (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
vp               1035 kern/vfs_syscalls.c 		if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
vp               1038 kern/vfs_syscalls.c 	if ((error = VOP_OPEN(vp, flags, cred, p)) != 0)
vp               1041 kern/vfs_syscalls.c 		vp->v_writecount++;
vp               1048 kern/vfs_syscalls.c 	fp->f_data = vp;
vp               1060 kern/vfs_syscalls.c 		VOP_UNLOCK(vp, 0, p);
vp               1061 kern/vfs_syscalls.c 		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
vp               1064 kern/vfs_syscalls.c 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               1067 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp               1078 kern/vfs_syscalls.c 		if (vp != NULL)
vp               1079 kern/vfs_syscalls.c 			vput(vp);
vp               1097 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1110 kern/vfs_syscalls.c 	if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
vp               1112 kern/vfs_syscalls.c 	error = vn_stat(vp, &sb, p);
vp               1113 kern/vfs_syscalls.c 	vput(vp);
vp               1131 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1145 kern/vfs_syscalls.c 	if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
vp               1147 kern/vfs_syscalls.c 	mp = vp->v_mount;
vp               1149 kern/vfs_syscalls.c 	vput(vp);
vp               1168 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1180 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1181 kern/vfs_syscalls.c 	if (vp != NULL)
vp               1207 kern/vfs_syscalls.c 		if (nd.ni_dvp == vp)
vp               1211 kern/vfs_syscalls.c 		if (vp)
vp               1212 kern/vfs_syscalls.c 			vrele(vp);
vp               1265 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1273 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1276 kern/vfs_syscalls.c 	if (vp->v_type == VDIR) {
vp               1293 kern/vfs_syscalls.c 	error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
vp               1295 kern/vfs_syscalls.c 	vrele(vp);
vp               1350 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1358 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1363 kern/vfs_syscalls.c 	if (vp->v_flag & VROOT) {
vp               1365 kern/vfs_syscalls.c 		if (nd.ni_dvp == vp)
vp               1369 kern/vfs_syscalls.c 		vput(vp);
vp               1374 kern/vfs_syscalls.c 	(void)uvm_vnp_uncache(vp);
vp               1397 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1405 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               1406 kern/vfs_syscalls.c 	if (vp->v_type == VFIFO)
vp               1408 kern/vfs_syscalls.c 	if (vp->v_type == VCHR)
vp               1463 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1477 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1488 kern/vfs_syscalls.c 		if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
vp               1489 kern/vfs_syscalls.c 			error = VOP_ACCESS(vp, flags, cred, p);
vp               1491 kern/vfs_syscalls.c 	vput(vp);
vp               1593 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1603 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1604 kern/vfs_syscalls.c 	if (vp->v_type != VLNK)
vp               1616 kern/vfs_syscalls.c 		error = VOP_READLINK(vp, &auio, p->p_ucred);
vp               1618 kern/vfs_syscalls.c 	vput(vp);
vp               1634 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1642 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1643 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               1644 kern/vfs_syscalls.c 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp               1650 kern/vfs_syscalls.c 			if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) != 0)
vp               1659 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               1662 kern/vfs_syscalls.c 	vput(vp);
vp               1678 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1684 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               1685 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               1686 kern/vfs_syscalls.c 	if (vp->v_mount && vp->v_mount->mnt_flag & MNT_RDONLY)
vp               1692 kern/vfs_syscalls.c 			if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))
vp               1702 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               1705 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp               1721 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1732 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1733 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               1734 kern/vfs_syscalls.c 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp               1739 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               1741 kern/vfs_syscalls.c 	vput(vp);
vp               1757 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1766 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               1767 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               1768 kern/vfs_syscalls.c 	if (vp->v_mount && vp->v_mount->mnt_flag & MNT_RDONLY)
vp               1773 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               1775 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp               1792 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1801 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1802 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               1803 kern/vfs_syscalls.c 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp               1808 kern/vfs_syscalls.c 			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
vp               1821 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               1824 kern/vfs_syscalls.c 	vput(vp);
vp               1840 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1849 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1850 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               1851 kern/vfs_syscalls.c 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp               1856 kern/vfs_syscalls.c 			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
vp               1869 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               1872 kern/vfs_syscalls.c 	vput(vp);
vp               1888 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1896 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               1897 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               1898 kern/vfs_syscalls.c 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp               1903 kern/vfs_syscalls.c 			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
vp               1915 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               1918 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp               1934 kern/vfs_syscalls.c 	struct vnode *vp;
vp               1959 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               1960 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               1961 kern/vfs_syscalls.c 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp               1968 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               1970 kern/vfs_syscalls.c 	vput(vp);
vp               1985 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2009 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               2010 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               2011 kern/vfs_syscalls.c 	if (vp->v_mount && vp->v_mount->mnt_flag & MNT_RDONLY)
vp               2018 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               2020 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp               2037 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2045 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               2046 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               2047 kern/vfs_syscalls.c 	if (vp->v_type == VDIR)
vp               2049 kern/vfs_syscalls.c 	else if ((error = vn_writechk(vp)) == 0 &&
vp               2050 kern/vfs_syscalls.c 	    (error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) == 0) {
vp               2053 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
vp               2055 kern/vfs_syscalls.c 	vput(vp);
vp               2072 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2084 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               2085 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               2086 kern/vfs_syscalls.c 	if (vp->v_type == VDIR)
vp               2088 kern/vfs_syscalls.c 	else if ((error = vn_writechk(vp)) == 0) {
vp               2091 kern/vfs_syscalls.c 		error = VOP_SETATTR(vp, &vattr, fp->f_cred, p);
vp               2093 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp               2109 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2115 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               2116 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               2117 kern/vfs_syscalls.c 	error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
vp               2119 kern/vfs_syscalls.c 	if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
vp               2120 kern/vfs_syscalls.c 		error = softdep_fsync(vp);
vp               2123 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp               2226 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2235 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               2236 kern/vfs_syscalls.c 	if (vp != NULL) {
vp               2238 kern/vfs_syscalls.c 		if (nd.ni_dvp == vp)
vp               2242 kern/vfs_syscalls.c 		vrele(vp);
vp               2264 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2272 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               2273 kern/vfs_syscalls.c 	if (vp->v_type != VDIR) {
vp               2280 kern/vfs_syscalls.c 	if (nd.ni_dvp == vp) {
vp               2287 kern/vfs_syscalls.c 	if (vp->v_flag & VROOT)
vp               2294 kern/vfs_syscalls.c 		if (nd.ni_dvp == vp)
vp               2298 kern/vfs_syscalls.c 		vput(vp);
vp               2315 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2330 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               2331 kern/vfs_syscalls.c 	if (vp->v_type != VDIR) {
vp               2343 kern/vfs_syscalls.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               2345 kern/vfs_syscalls.c 	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, 0, 0);
vp               2347 kern/vfs_syscalls.c 	VOP_UNLOCK(vp, 0, p);
vp               2386 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2394 kern/vfs_syscalls.c 	vp = nd.ni_vp;
vp               2395 kern/vfs_syscalls.c 	if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) != 0)
vp               2400 kern/vfs_syscalls.c 	if (vp->v_usecount > 1 || (vp->v_flag & (VALIASED)))
vp               2401 kern/vfs_syscalls.c 		VOP_REVOKE(vp, REVOKEALL);
vp               2403 kern/vfs_syscalls.c 	vrele(vp);
vp               2416 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2424 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               2425 kern/vfs_syscalls.c 	if (vp->v_type == VBAD)
vp               2449 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2458 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               2459 kern/vfs_syscalls.c 	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
vp               2487 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2496 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               2497 kern/vfs_syscalls.c 	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
vp               2525 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2534 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               2535 kern/vfs_syscalls.c 	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
vp               2563 kern/vfs_syscalls.c 	struct vnode *vp;
vp               2572 kern/vfs_syscalls.c 	vp = (struct vnode *)fp->f_data;
vp               2573 kern/vfs_syscalls.c 	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
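
Most of the kern/vfs_syscalls.c hits above share one shape: namei() resolves the path to a referenced vnode, the caller locks it, rejects read-only mounts, performs a VOP_ACCESS()/VOP_GETATTR()/VOP_SETATTR() step, and finishes with vput(). A sketch of that pattern for a chmod-like operation, assuming vp came back from namei() referenced but unlocked; set_mode() is a hypothetical name:

int
set_mode(struct vnode *vp, mode_t mode, struct proc *p)
{
	struct vattr vattr;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		error = EROFS;
	else {
		VATTR_NULL(&vattr);
		vattr.va_mode = mode & ALLPERMS;
		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	}
	vput(vp);			/* unlock and drop the reference */
	return (error);
}
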
vp                 76 kern/vfs_vnops.c 	struct vnode *vp;
vp                103 kern/vfs_vnops.c 			vp = ndp->ni_vp;
vp                111 kern/vfs_vnops.c 			vp = ndp->ni_vp;
vp                124 kern/vfs_vnops.c 		vp = ndp->ni_vp;
vp                126 kern/vfs_vnops.c 	if (vp->v_type == VSOCK) {
vp                130 kern/vfs_vnops.c 	if (vp->v_type == VLNK) {
vp                136 kern/vfs_vnops.c 			if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
vp                140 kern/vfs_vnops.c 			if (vp->v_type == VDIR) {
vp                144 kern/vfs_vnops.c 			if ((error = vn_writechk(vp)) != 0 ||
vp                145 kern/vfs_vnops.c 			    (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
vp                149 kern/vfs_vnops.c 	if ((fmode & O_TRUNC) && vp->v_type == VREG) {
vp                152 kern/vfs_vnops.c 		if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
vp                155 kern/vfs_vnops.c 	if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
vp                158 kern/vfs_vnops.c 	if (vp->v_flag & VCLONED) {
vp                159 kern/vfs_vnops.c 		struct cloneinfo *cip = (struct cloneinfo *) vp->v_data;
vp                161 kern/vfs_vnops.c 		vp->v_flag &= ~VCLONED;
vp                163 kern/vfs_vnops.c 		vp->v_data = cip->ci_data; /* restore v_data */
vp                164 kern/vfs_vnops.c 		VOP_UNLOCK(vp, 0, p); /* keep a reference */
vp                165 kern/vfs_vnops.c 		vp = ndp->ni_vp; /* for the increment below */
vp                171 kern/vfs_vnops.c 		vp->v_writecount++;
vp                174 kern/vfs_vnops.c 	vput(vp);
vp                183 kern/vfs_vnops.c vn_writechk(struct vnode *vp)
vp                190 kern/vfs_vnops.c 	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
vp                191 kern/vfs_vnops.c 		switch (vp->v_type) {
vp                210 kern/vfs_vnops.c 	if ((vp->v_flag & VTEXT) && !uvm_vnp_uncache(vp))
vp                220 kern/vfs_vnops.c vn_marktext(struct vnode *vp)
vp                222 kern/vfs_vnops.c 	vp->v_flag |= VTEXT;
vp                229 kern/vfs_vnops.c vn_close(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
vp                234 kern/vfs_vnops.c 		vp->v_writecount--;
vp                235 kern/vfs_vnops.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                236 kern/vfs_vnops.c 	error = VOP_CLOSE(vp, flags, cred, p);
vp                237 kern/vfs_vnops.c 	vput(vp);
vp                245 kern/vfs_vnops.c vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
vp                254 kern/vfs_vnops.c 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                265 kern/vfs_vnops.c 		error = VOP_READ(vp, &auio, ioflg, cred);
vp                267 kern/vfs_vnops.c 		error = VOP_WRITE(vp, &auio, ioflg, cred);
vp                275 kern/vfs_vnops.c 		VOP_UNLOCK(vp, 0, p);
vp                285 kern/vfs_vnops.c 	struct vnode *vp = (struct vnode *)fp->f_data;
vp                290 kern/vfs_vnops.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                293 kern/vfs_vnops.c 	if (vp->v_type != VDIR)
vp                294 kern/vfs_vnops.c 		error = VOP_READ(vp, uio,
vp                297 kern/vfs_vnops.c 	VOP_UNLOCK(vp, 0, p);
vp                307 kern/vfs_vnops.c 	struct vnode *vp = (struct vnode *)fp->f_data;
vp                312 kern/vfs_vnops.c 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
vp                317 kern/vfs_vnops.c 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
vp                319 kern/vfs_vnops.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                322 kern/vfs_vnops.c 	error = VOP_WRITE(vp, uio, ioflag, cred);
vp                327 kern/vfs_vnops.c 	VOP_UNLOCK(vp, 0, p);
vp                337 kern/vfs_vnops.c 	struct vnode *vp = (struct vnode *)fp->f_data;
vp                338 kern/vfs_vnops.c 	return vn_stat(vp, sb, p);
vp                345 kern/vfs_vnops.c vn_stat(struct vnode *vp, struct stat *sb, struct proc *p)
vp                351 kern/vfs_vnops.c 	error = VOP_GETATTR(vp, &va, p->p_ucred, p);
vp                360 kern/vfs_vnops.c 	switch (vp->v_type) {
vp                407 kern/vfs_vnops.c 	struct vnode *vp = ((struct vnode *)fp->f_data);
vp                411 kern/vfs_vnops.c 	switch (vp->v_type) {
vp                416 kern/vfs_vnops.c 			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
vp                431 kern/vfs_vnops.c 		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
vp                435 kern/vfs_vnops.c 			p->p_session->s_ttyvp = vp;
vp                436 kern/vfs_vnops.c 			VREF(vp);
vp                456 kern/vfs_vnops.c vn_lock(struct vnode *vp, int flags, struct proc *p)
vp                464 kern/vfs_vnops.c 		if (vp->v_flag & VXLOCK) {
vp                465 kern/vfs_vnops.c 			vp->v_flag |= VXWANT;
vp                466 kern/vfs_vnops.c 			tsleep(vp, PINOD, "vn_lock", 0);
vp                469 kern/vfs_vnops.c 			error = VOP_LOCK(vp, flags, p);
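
vn_rdwr() in the kern/vfs_vnops.c hits wraps the lock / uio setup / VOP_READ()-or-VOP_WRITE() / unlock sequence for in-kernel file I/O. A read-side sketch; read_at() is a hypothetical name and the residual-count type is assumed to match the vn_rdwr() prototype in this tree's sys/vnode.h:

int
read_at(struct vnode *vp, caddr_t buf, int len, off_t off,
    struct ucred *cred, struct proc *p)
{
	size_t resid;

	/* ioflg 0: vn_rdwr() takes and releases the vnode lock itself. */
	return (vn_rdwr(UIO_READ, vp, buf, len, off, UIO_SYSSPACE, 0,
	    cred, &resid, p));
}
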
vp                 73 kern/vnode_if.c int VOP_ISLOCKED(struct vnode *vp)
vp                 77 kern/vnode_if.c 	a.a_vp = vp;
vp                 78 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_islocked), &a));
vp                188 kern/vnode_if.c int VOP_OPEN(struct vnode *vp, int mode, struct ucred *cred, struct proc *p)
vp                192 kern/vnode_if.c 	a.a_vp = vp;
vp                196 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_open), &a));
vp                215 kern/vnode_if.c int VOP_CLOSE(struct vnode *vp, int fflag, struct ucred *cred, struct proc *p)
vp                219 kern/vnode_if.c 	a.a_vp = vp;
vp                221 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                227 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_close), &a));
vp                246 kern/vnode_if.c int VOP_ACCESS(struct vnode *vp, int mode, struct ucred *cred, struct proc *p)
vp                250 kern/vnode_if.c 	a.a_vp = vp;
vp                252 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                258 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_access), &a));
vp                277 kern/vnode_if.c int VOP_GETATTR(struct vnode *vp, struct vattr *vap, struct ucred *cred, 
vp                282 kern/vnode_if.c 	a.a_vp = vp;
vp                286 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_getattr), &a));
vp                305 kern/vnode_if.c int VOP_SETATTR(struct vnode *vp, struct vattr *vap, struct ucred *cred, 
vp                310 kern/vnode_if.c 	a.a_vp = vp;
vp                312 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                318 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_setattr), &a));
vp                337 kern/vnode_if.c int VOP_READ(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
vp                341 kern/vnode_if.c 	a.a_vp = vp;
vp                343 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                349 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_read), &a));
vp                368 kern/vnode_if.c int VOP_WRITE(struct vnode *vp, struct uio *uio, int ioflag, 
vp                373 kern/vnode_if.c 	a.a_vp = vp;
vp                375 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                381 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_write), &a));
vp                400 kern/vnode_if.c int VOP_IOCTL(struct vnode *vp, u_long command, void *data, int fflag, 
vp                405 kern/vnode_if.c 	a.a_vp = vp;
vp                411 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_ioctl), &a));
vp                430 kern/vnode_if.c int VOP_POLL(struct vnode *vp, int events, struct proc *p)
vp                434 kern/vnode_if.c 	a.a_vp = vp;
vp                437 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_poll), &a));
vp                456 kern/vnode_if.c int VOP_KQFILTER(struct vnode *vp, struct knote *kn)
vp                460 kern/vnode_if.c 	a.a_vp = vp;
vp                462 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_kqfilter), &a));
vp                481 kern/vnode_if.c int VOP_REVOKE(struct vnode *vp, int flags)
vp                485 kern/vnode_if.c 	a.a_vp = vp;
vp                487 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_revoke), &a));
vp                506 kern/vnode_if.c int VOP_FSYNC(struct vnode *vp, struct ucred *cred, int waitfor, 
vp                511 kern/vnode_if.c 	a.a_vp = vp;
vp                513 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                519 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_fsync), &a));
vp                539 kern/vnode_if.c int VOP_REMOVE(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
vp                548 kern/vnode_if.c 	a.a_vp = vp;
vp                550 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                574 kern/vnode_if.c int VOP_LINK(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
vp                583 kern/vnode_if.c 	a.a_vp = vp;
vp                675 kern/vnode_if.c int VOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
vp                684 kern/vnode_if.c 	a.a_vp = vp;
vp                686 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                742 kern/vnode_if.c int VOP_READDIR(struct vnode *vp, struct uio *uio, struct ucred *cred, 
vp                747 kern/vnode_if.c 	a.a_vp = vp;
vp                749 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                757 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_readdir), &a));
vp                776 kern/vnode_if.c int VOP_READLINK(struct vnode *vp, struct uio *uio, struct ucred *cred)
vp                780 kern/vnode_if.c 	a.a_vp = vp;
vp                782 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                787 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_readlink), &a));
vp                831 kern/vnode_if.c int VOP_INACTIVE(struct vnode *vp, struct proc *p)
vp                835 kern/vnode_if.c 	a.a_vp = vp;
vp                837 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                841 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_inactive), &a));
vp                860 kern/vnode_if.c int VOP_RECLAIM(struct vnode *vp, struct proc *p)
vp                864 kern/vnode_if.c 	a.a_vp = vp;
vp                866 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_reclaim), &a));
vp                885 kern/vnode_if.c int VOP_LOCK(struct vnode *vp, int flags, struct proc *p)
vp                889 kern/vnode_if.c 	a.a_vp = vp;
vp                892 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_lock), &a));
vp                911 kern/vnode_if.c int VOP_UNLOCK(struct vnode *vp, int flags, struct proc *p)
vp                915 kern/vnode_if.c 	a.a_vp = vp;
vp                918 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_unlock), &a));
vp                937 kern/vnode_if.c int VOP_BMAP(struct vnode *vp, daddr64_t bn, struct vnode **vpp, 
vp                942 kern/vnode_if.c 	a.a_vp = vp;
vp                944 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp                951 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_bmap), &a));
vp                970 kern/vnode_if.c int VOP_PRINT(struct vnode *vp)
vp                974 kern/vnode_if.c 	a.a_vp = vp;
vp                975 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_print), &a));
vp                994 kern/vnode_if.c int VOP_PATHCONF(struct vnode *vp, int name, register_t *retval)
vp                998 kern/vnode_if.c 	a.a_vp = vp;
vp               1000 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp               1005 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_pathconf), &a));
vp               1024 kern/vnode_if.c int VOP_ADVLOCK(struct vnode *vp, void *id, int op, struct flock *fl, int flags)
vp               1028 kern/vnode_if.c 	a.a_vp = vp;
vp               1033 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_advlock), &a));
vp               1052 kern/vnode_if.c int VOP_REALLOCBLKS(struct vnode *vp, struct cluster_save *buflist)
vp               1056 kern/vnode_if.c 	a.a_vp = vp;
vp               1058 kern/vnode_if.c 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
vp               1062 kern/vnode_if.c 	return (VCALL(vp, VOFFSET(vop_reallocblks), &a));
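
kern/vnode_if.c is generated from the vnode operation descriptions: each VOP_*() wrapper packs its arguments into a vop_*_args structure, asserts the vnode is locked when it uses real locks (VLOCKSWORK), and dispatches through VCALL() into the filesystem's vnodeop vector. Callers treat the wrappers as ordinary functions; a sketch of a locked access check, with check_read() a hypothetical name:

int
check_read(struct vnode *vp, struct ucred *cred, struct proc *p)
{
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_ACCESS(vp, VREAD, cred, p);	/* VCALL into the FS */
	VOP_UNLOCK(vp, 0, p);
	return (error);
}
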
vp                266 lib/libsa/bootp.c 	struct cmu_vend *vp;
vp                272 lib/libsa/bootp.c 	vp = (struct cmu_vend *)cp;
vp                274 lib/libsa/bootp.c 	if (vp->v_smask.s_addr != 0)
vp                275 lib/libsa/bootp.c 		smask = vp->v_smask.s_addr;
vp                276 lib/libsa/bootp.c 	if (vp->v_dgate.s_addr != 0)
vp                277 lib/libsa/bootp.c 		gateip = vp->v_dgate;
vp                239 miscfs/deadfs/dead_vnops.c 	struct vnode *vp = ap->a_vp;
vp                241 miscfs/deadfs/dead_vnops.c 	if (ap->a_flags & LK_DRAIN || !chkvnlock(vp))
vp                244 miscfs/deadfs/dead_vnops.c 	return (VCALL(vp, VOFFSET(vop_lock), ap));
vp                297 miscfs/deadfs/dead_vnops.c chkvnlock(struct vnode *vp)
vp                301 miscfs/deadfs/dead_vnops.c 	while (vp->v_flag & VXLOCK) {
vp                302 miscfs/deadfs/dead_vnops.c 		vp->v_flag |= VXWANT;
vp                303 miscfs/deadfs/dead_vnops.c 		tsleep(vp, PINOD, "chkvnlock", 0);
vp                148 miscfs/fifofs/fifo_vnops.c 	struct vnode *vp = ap->a_vp;
vp                154 miscfs/fifofs/fifo_vnops.c 	if ((fip = vp->v_fifoinfo) == NULL) {
vp                156 miscfs/fifofs/fifo_vnops.c 		vp->v_fifoinfo = fip;
vp                159 miscfs/fifofs/fifo_vnops.c 			vp->v_fifoinfo = NULL;
vp                166 miscfs/fifofs/fifo_vnops.c 			vp->v_fifoinfo = NULL;
vp                174 miscfs/fifofs/fifo_vnops.c 			vp->v_fifoinfo = NULL;
vp                203 miscfs/fifofs/fifo_vnops.c 			VOP_UNLOCK(vp, 0, p);
vp                206 miscfs/fifofs/fifo_vnops.c 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                211 miscfs/fifofs/fifo_vnops.c 			VOP_UNLOCK(vp, 0, p);
vp                214 miscfs/fifofs/fifo_vnops.c 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                221 miscfs/fifofs/fifo_vnops.c 	VOP_CLOSE(vp, ap->a_mode, ap->a_cred, ap->a_p);
vp                377 miscfs/fifofs/fifo_vnops.c 	struct vnode *vp = ap->a_vp;
vp                378 miscfs/fifofs/fifo_vnops.c 	struct fifoinfo *fip = vp->v_fifoinfo;
vp                396 miscfs/fifofs/fifo_vnops.c 		vp->v_fifoinfo = NULL;
vp                405 miscfs/fifofs/fifo_vnops.c 	struct vnode *vp = ap->a_vp;
vp                406 miscfs/fifofs/fifo_vnops.c 	struct fifoinfo *fip = vp->v_fifoinfo;
vp                414 miscfs/fifofs/fifo_vnops.c 	vp->v_fifoinfo = NULL;
vp                437 miscfs/fifofs/fifo_vnops.c fifo_printinfo(struct vnode *vp)
vp                439 miscfs/fifofs/fifo_vnops.c 	struct fifoinfo *fip = vp->v_fifoinfo;
vp                 65 miscfs/portal/portal.h #define	VTOPORTAL(vp) ((struct portalnode *)(vp)->v_data)
vp                200 miscfs/portal/portal_vfsops.c 	struct vnode *vp;
vp                206 miscfs/portal/portal_vfsops.c 	vp = VFSTOPORTAL(mp)->pm_root;
vp                207 miscfs/portal/portal_vfsops.c 	VREF(vp);
vp                208 miscfs/portal/portal_vfsops.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                209 miscfs/portal/portal_vfsops.c 	*vpp = vp;
vp                276 miscfs/portal/portal_vnops.c 	struct vnode *vp = ap->a_vp;
vp                295 miscfs/portal/portal_vnops.c 	if (vp->v_flag & VROOT)
vp                306 miscfs/portal/portal_vnops.c 	pt = VTOPORTAL(vp);
vp                307 miscfs/portal/portal_vnops.c 	fmp = VFSTOPORTAL(vp->v_mount);
vp                509 miscfs/portal/portal_vnops.c 	struct vnode *vp = ap->a_vp;
vp                516 miscfs/portal/portal_vnops.c 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
vp                528 miscfs/portal/portal_vnops.c 	if (vp->v_flag & VROOT) {
vp                541 miscfs/portal/portal_vnops.c 		vap->va_fileid = VTOPORTAL(vp)->pt_fileid;
vp                101 miscfs/procfs/procfs.h #define VTOPFS(vp)	((struct pfsnode *)(vp)->v_data)
vp                 94 miscfs/procfs/procfs_subr.c 	struct vnode *vp;
vp                105 miscfs/procfs/procfs_subr.c 		vp = PFSTOV(pfs);
vp                108 miscfs/procfs/procfs_subr.c 		    vp->v_mount == mp) {
vp                109 miscfs/procfs/procfs_subr.c 			if (vget(vp, 0, p))
vp                111 miscfs/procfs/procfs_subr.c 			*vpp = vp;
vp                118 miscfs/procfs/procfs_subr.c 	vp = *vpp;
vp                121 miscfs/procfs/procfs_subr.c 	vp->v_data = pfs;
vp                125 miscfs/procfs/procfs_subr.c 	pfs->pfs_vnode = vp;
vp                132 miscfs/procfs/procfs_subr.c 		vp->v_type = VDIR;
vp                133 miscfs/procfs/procfs_subr.c 		vp->v_flag = VROOT;
vp                139 miscfs/procfs/procfs_subr.c 		vp->v_type = VLNK;
vp                144 miscfs/procfs/procfs_subr.c 		vp->v_type = VDIR;
vp                152 miscfs/procfs/procfs_subr.c 		vp->v_type = VREG;
vp                159 miscfs/procfs/procfs_subr.c 		vp->v_type = VREG;
vp                167 miscfs/procfs/procfs_subr.c 		vp->v_type = VREG;
vp                176 miscfs/procfs/procfs_subr.c 	uvm_vnp_setsize(vp, 0);
vp                184 miscfs/procfs/procfs_subr.c procfs_freevp(struct vnode *vp)
vp                186 miscfs/procfs/procfs_subr.c 	struct pfsnode *pfs = VTOPFS(vp);
vp                189 miscfs/procfs/procfs_subr.c 	FREE(vp->v_data, M_TEMP);
vp                190 miscfs/procfs/procfs_subr.c 	vp->v_data = 0;
vp                198 miscfs/procfs/procfs_subr.c 	struct vnode *vp = ap->a_vp;
vp                201 miscfs/procfs/procfs_subr.c 	struct pfsnode *pfs = VTOPFS(vp);
vp                331 miscfs/procfs/procfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                332 miscfs/procfs/procfs_vnops.c 	struct pfsnode *pfs = VTOPFS(vp);
vp                334 miscfs/procfs/procfs_vnops.c 	if (pfind(pfs->pfs_pid) == NULL && !(vp->v_flag & VXLOCK))
vp                335 miscfs/procfs/procfs_vnops.c 		vgone(vp);
vp                850 miscfs/procfs/procfs_vnops.c 	struct vnode *vp;
vp                854 miscfs/procfs/procfs_vnops.c 	vp = ap->a_vp;
vp                855 miscfs/procfs/procfs_vnops.c 	pfs = VTOPFS(vp);
vp                884 miscfs/procfs/procfs_vnops.c 			    (*pt->pt_valid)(p, vp->v_mount) == 0)
vp                946 miscfs/procfs/procfs_vnops.c 				if (VFSTOPROC(vp->v_mount)->pmnt_flags &
vp                957 miscfs/procfs/procfs_vnops.c 				if (VFSTOPROC(vp->v_mount)->pmnt_flags &
vp                 31 miscfs/specfs/spec_subr.c 	struct vnode *cvp, *vp = ap->a_vp;
vp                 35 miscfs/specfs/spec_subr.c 	for (i = 1; i < sizeof(vp->v_specbitmap) * NBBY; i++)
vp                 36 miscfs/specfs/spec_subr.c 		if (isclr(vp->v_specbitmap, i)) {
vp                 37 miscfs/specfs/spec_subr.c 			setbit(vp->v_specbitmap, i);
vp                 41 miscfs/specfs/spec_subr.c 	if (i == sizeof(vp->v_specbitmap) * NBBY)
vp                 45 miscfs/specfs/spec_subr.c 	    major(vp->v_rdev), minor(vp->v_rdev), curproc->p_pid);
vp                 47 miscfs/specfs/spec_subr.c 	error = cdevvp(makedev(major(vp->v_rdev), i), &cvp);
vp                 51 miscfs/specfs/spec_subr.c 	VOP_UNLOCK(vp, 0, ap->a_p);
vp                 53 miscfs/specfs/spec_subr.c 	error = cdevsw[major(vp->v_rdev)].d_open(cvp->v_rdev, ap->a_mode,
vp                 56 miscfs/specfs/spec_subr.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
vp                 59 miscfs/specfs/spec_subr.c 		 clrbit(vp->v_specbitmap, i);
vp                 64 miscfs/specfs/spec_subr.c 	cip->ci_data = vp->v_data;
vp                 67 miscfs/specfs/spec_subr.c 	cvp->v_specparent = vp;
vp                 68 miscfs/specfs/spec_subr.c 	vp->v_flag |= VCLONED;
vp                 69 miscfs/specfs/spec_subr.c 	vp->v_data = cip;
vp                 80 miscfs/specfs/spec_subr.c 	struct vnode *pvp, *vp = ap->a_vp;
vp                 83 miscfs/specfs/spec_subr.c 	error = cdevsw[major(vp->v_rdev)].d_close(vp->v_rdev, ap->a_fflag,
vp                 88 miscfs/specfs/spec_subr.c 	pvp = vp->v_specparent; /* get parent device */
vp                 89 miscfs/specfs/spec_subr.c 	clrbit(pvp->v_specbitmap, minor(vp->v_rdev));
vp                 92 miscfs/specfs/spec_subr.c 	    " pid %u\n", minor(vp->v_rdev), major(vp->v_rdev), curproc->p_pid);
vp                130 miscfs/specfs/spec_vnops.c 	struct vnode *vp = ap->a_vp;
vp                133 miscfs/specfs/spec_vnops.c 	dev_t dev = (dev_t)vp->v_rdev;
vp                140 miscfs/specfs/spec_vnops.c 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
vp                143 miscfs/specfs/spec_vnops.c 	switch (vp->v_type) {
vp                172 miscfs/specfs/spec_vnops.c 			vp->v_flag |= VISTTY;
vp                175 miscfs/specfs/spec_vnops.c 		VOP_UNLOCK(vp, 0, p);
vp                177 miscfs/specfs/spec_vnops.c 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                194 miscfs/specfs/spec_vnops.c 		if ((error = vfs_mountedon(vp)) != 0)
vp                217 miscfs/specfs/spec_vnops.c 	struct vnode *vp = ap->a_vp;
vp                237 miscfs/specfs/spec_vnops.c 	switch (vp->v_type) {
vp                240 miscfs/specfs/spec_vnops.c 		VOP_UNLOCK(vp, 0, p);
vp                241 miscfs/specfs/spec_vnops.c 		error = (*cdevsw[major(vp->v_rdev)].d_read)
vp                242 miscfs/specfs/spec_vnops.c 			(vp->v_rdev, uio, ap->a_ioflag);
vp                243 miscfs/specfs/spec_vnops.c 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                250 miscfs/specfs/spec_vnops.c 		if ((majordev = major(vp->v_rdev)) < nblkdev &&
vp                252 miscfs/specfs/spec_vnops.c 		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
vp                266 miscfs/specfs/spec_vnops.c 			if (vp->v_lastr + bscale == bn) {
vp                268 miscfs/specfs/spec_vnops.c 				error = breadn(vp, bn, bsize, &nextbn, &bsize,
vp                271 miscfs/specfs/spec_vnops.c 				error = bread(vp, bn, bsize, NOCRED, &bp);
vp                272 miscfs/specfs/spec_vnops.c 			vp->v_lastr = bn;
vp                306 miscfs/specfs/spec_vnops.c 	struct vnode *vp = ap->a_vp;
vp                324 miscfs/specfs/spec_vnops.c 	switch (vp->v_type) {
vp                327 miscfs/specfs/spec_vnops.c 		VOP_UNLOCK(vp, 0, p);
vp                328 miscfs/specfs/spec_vnops.c 		error = (*cdevsw[major(vp->v_rdev)].d_write)
vp                329 miscfs/specfs/spec_vnops.c 			(vp->v_rdev, uio, ap->a_ioflag);
vp                330 miscfs/specfs/spec_vnops.c 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp                339 miscfs/specfs/spec_vnops.c 		if ((majordev = major(vp->v_rdev)) < nblkdev &&
vp                341 miscfs/specfs/spec_vnops.c 		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
vp                355 miscfs/specfs/spec_vnops.c 			error = bread(vp, bn, bsize, NOCRED, &bp);
vp                442 miscfs/specfs/spec_vnops.c 	struct vnode *vp = ap->a_vp;
vp                447 miscfs/specfs/spec_vnops.c 	if (vp->v_type == VCHR)
vp                454 miscfs/specfs/spec_vnops.c 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
vp                455 miscfs/specfs/spec_vnops.c 	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
vp                468 miscfs/specfs/spec_vnops.c 		vwaitforio (vp, 0, "spec_fsync", 0);
vp                471 miscfs/specfs/spec_vnops.c 		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
vp                473 miscfs/specfs/spec_vnops.c 			vprint("spec_fsync: dirty", vp);
vp                522 miscfs/specfs/spec_vnops.c 	struct vnode *vp = ap->a_vp;
vp                523 miscfs/specfs/spec_vnops.c 	dev_t dev = vp->v_rdev;
vp                527 miscfs/specfs/spec_vnops.c 	switch (vp->v_type) {
vp                539 miscfs/specfs/spec_vnops.c 		if (vcount(vp) == 2 && ap->a_p &&
vp                540 miscfs/specfs/spec_vnops.c 		    vp == ap->a_p->p_session->s_ttyvp) {
vp                541 miscfs/specfs/spec_vnops.c 			vrele(vp);
vp                549 miscfs/specfs/spec_vnops.c 		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
vp                565 miscfs/specfs/spec_vnops.c 		if (!(vp->v_flag & VXLOCK))
vp                566 miscfs/specfs/spec_vnops.c 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
vp                567 miscfs/specfs/spec_vnops.c 		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
vp                568 miscfs/specfs/spec_vnops.c 		if (!(vp->v_flag & VXLOCK))
vp                569 miscfs/specfs/spec_vnops.c 			VOP_UNLOCK(vp, 0, ap->a_p);
vp                581 miscfs/specfs/spec_vnops.c 		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
vp                648 miscfs/specfs/spec_vnops.c 	struct vnode *vp = ap->a_vp;
vp                650 miscfs/specfs/spec_vnops.c 	return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id,
vp                225 msdosfs/denode.h #define	VTODE(vp)	((struct denode *)(vp)->v_data)
vp                106 msdosfs/msdosfs_denode.c 				struct vnode *vp = DETOV(dep);
vp                108 msdosfs/msdosfs_denode.c 				if (!vget(vp, LK_EXCLUSIVE, p))
vp                591 msdosfs/msdosfs_denode.c 	struct vnode *vp = ap->a_vp;
vp                592 msdosfs/msdosfs_denode.c 	struct denode *dep = VTODE(vp);
vp                596 msdosfs/msdosfs_denode.c 	if (prtactive && vp->v_usecount != 0)
vp                597 msdosfs/msdosfs_denode.c 		vprint("msdosfs_reclaim(): pushing active", vp);
vp                612 msdosfs/msdosfs_denode.c 	cache_purge(vp);
vp                621 msdosfs/msdosfs_denode.c 	vp->v_data = NULL;
vp                630 msdosfs/msdosfs_denode.c 	struct vnode *vp = ap->a_vp;
vp                631 msdosfs/msdosfs_denode.c 	struct denode *dep = VTODE(vp);
vp                637 msdosfs/msdosfs_denode.c 	if (prtactive && vp->v_usecount != 0)
vp                638 msdosfs/msdosfs_denode.c 		vprint("msdosfs_inactive(): pushing active", vp);
vp                660 msdosfs/msdosfs_denode.c 	       dep, dep->de_refcnt, vp->v_mount->mnt_flag, MNT_RDONLY);
vp                662 msdosfs/msdosfs_denode.c 	if (dep->de_refcnt <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
vp                669 msdosfs/msdosfs_denode.c 	VOP_UNLOCK(vp, 0, p);
vp                675 msdosfs/msdosfs_denode.c 	printf("msdosfs_inactive(): v_usecount %d, de_Name[0] %x\n", vp->v_usecount,
vp                679 msdosfs/msdosfs_denode.c 		vrecycle(vp, p);
vp                589 msdosfs/msdosfs_vfsops.c 	struct vnode *vp;
vp                600 msdosfs/msdosfs_vfsops.c 	vp = pmp->pm_devvp;
vp                602 msdosfs/msdosfs_vfsops.c 	vprint("msdosfs_umount(): just before calling VOP_CLOSE()\n", vp);
vp                604 msdosfs/msdosfs_vfsops.c 	error = VOP_CLOSE(vp,
vp                606 msdosfs/msdosfs_vfsops.c 	vrele(vp);
vp                670 msdosfs/msdosfs_vfsops.c msdosfs_sync_vnode(struct vnode *vp, void *arg)
vp                676 msdosfs/msdosfs_vfsops.c 	dep = VTODE(vp);
vp                677 msdosfs/msdosfs_vfsops.c 	if (vp->v_type == VNON || 
vp                679 msdosfs/msdosfs_vfsops.c 	      && LIST_EMPTY(&vp->v_dirtyblkhd)) ||
vp                684 msdosfs/msdosfs_vfsops.c 	if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT, msa->p))
vp                687 msdosfs/msdosfs_vfsops.c 	if ((error = VOP_FSYNC(vp, msa->cred, msa->waitfor, msa->p)) != 0)
vp                689 msdosfs/msdosfs_vfsops.c 	VOP_UNLOCK(vp, 0, msa->p);
vp                690 msdosfs/msdosfs_vfsops.c 	vrele(vp);
vp                762 msdosfs/msdosfs_vfsops.c msdosfs_vptofh(vp, fhp)
vp                763 msdosfs/msdosfs_vfsops.c 	struct vnode *vp;
vp                769 msdosfs/msdosfs_vfsops.c 	dep = VTODE(vp);
vp                202 msdosfs/msdosfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                203 msdosfs/msdosfs_vnops.c 	struct denode *dep = VTODE(vp);
vp                206 msdosfs/msdosfs_vnops.c 	if (vp->v_usecount > 1 && !VOP_ISLOCKED(vp)) {
vp                439 msdosfs/msdosfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                440 msdosfs/msdosfs_vnops.c 	struct denode *dep = VTODE(vp);
vp                488 msdosfs/msdosfs_vnops.c 				error = breadn(vp, de_cn2bn(pmp, lbn),
vp                492 msdosfs/msdosfs_vnops.c 				error = bread(vp, de_cn2bn(pmp, lbn),
vp                504 msdosfs/msdosfs_vnops.c 	if (!isadir && !(vp->v_mount->mnt_flag & MNT_NOATIME))
vp                528 msdosfs/msdosfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                530 msdosfs/msdosfs_vnops.c 	struct denode *dep = VTODE(vp);
vp                536 msdosfs/msdosfs_vnops.c 	    vp, uio, ioflag, cred);
vp                541 msdosfs/msdosfs_vnops.c 	switch (vp->v_type) {
vp                545 msdosfs/msdosfs_vnops.c 		thisvp = vp;
vp                654 msdosfs/msdosfs_vnops.c 			uvm_vnp_setsize(vp, dep->de_FileSize);
vp                656 msdosfs/msdosfs_vnops.c 		uvm_vnp_uncache(vp);
vp                741 msdosfs/msdosfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                743 msdosfs/msdosfs_vnops.c 	vflushbuf(vp, ap->a_waitfor == MNT_WAIT);
vp                744 msdosfs/msdosfs_vnops.c 	return (deupdat(VTODE(vp), ap->a_waitfor == MNT_WAIT));
vp               1299 msdosfs/msdosfs_vnops.c 	register struct vnode *vp = ap->a_vp;
vp               1305 msdosfs/msdosfs_vnops.c 	ip = VTODE(vp);
vp               1312 msdosfs/msdosfs_vnops.c 		vput(vp);
vp               1349 msdosfs/msdosfs_vnops.c 	cache_purge(vp);
vp               1353 msdosfs/msdosfs_vnops.c 	vput(vp);
vp               1665 msdosfs/msdosfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1667 msdosfs/msdosfs_vnops.c 	return (lockmgr(&VTODE(vp)->de_lock, ap->a_flags, NULL));
vp               1675 msdosfs/msdosfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1677 msdosfs/msdosfs_vnops.c 	return (lockmgr(&VTODE(vp)->de_lock, ap->a_flags | LK_RELEASE, NULL));
vp               1724 msdosfs/msdosfs_vnops.c 	struct vnode *vp;
vp               1756 msdosfs/msdosfs_vnops.c 	vp = dep->de_devvp;
vp               1757 msdosfs/msdosfs_vnops.c 	bp->b_dev = vp->v_rdev;
vp               1758 msdosfs/msdosfs_vnops.c 	VOCALL(vp->v_op, VOFFSET(vop_strategy), ap);
vp                 68 nfs/nfs_bio.c  nfs_bioread(vp, uio, ioflag, cred)
vp                 69 nfs/nfs_bio.c  	struct vnode *vp;
vp                 74 nfs/nfs_bio.c  	struct nfsnode *np = VTONFS(vp);
vp                 79 nfs/nfs_bio.c  	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp                 95 nfs/nfs_bio.c  		(void)nfs_fsinfo(nmp, vp, cred, p);
vp                115 nfs/nfs_bio.c  		error = VOP_GETATTR(vp, &vattr, cred, p);
vp                120 nfs/nfs_bio.c  		error = VOP_GETATTR(vp, &vattr, cred, p);
vp                124 nfs/nfs_bio.c  			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
vp                140 nfs/nfs_bio.c  	    if ((vp->v_flag & VROOT) && vp->v_type == VLNK) {
vp                141 nfs/nfs_bio.c  		    return (nfs_readlinkrpc(vp, uio, cred));
vp                144 nfs/nfs_bio.c  	    switch (vp->v_type) {
vp                159 nfs/nfs_bio.c  			if (!incore(vp, rabn)) {
vp                160 nfs/nfs_bio.c  			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
vp                181 nfs/nfs_bio.c  		if ((bp = incore(vp, bn)) &&
vp                187 nfs/nfs_bio.c  			bp = nfs_getcacheblk(vp, bn, biosize, p);
vp                208 nfs/nfs_bio.c  				    bp = nfs_getcacheblk(vp, bn, biosize, p);
vp                230 nfs/nfs_bio.c  		bp = nfs_getcacheblk(vp, 0, NFS_MAXPATHLEN, p);
vp                246 nfs/nfs_bio.c  		printf(" nfsbioread: type %x unexpected\n",vp->v_type);
vp                255 nfs/nfs_bio.c  	    switch (vp->v_type) {
vp                262 nfs/nfs_bio.c  		printf(" nfsbioread: type %x unexpected\n",vp->v_type);
vp                281 nfs/nfs_bio.c  	struct vnode *vp = ap->a_vp;
vp                282 nfs/nfs_bio.c  	struct nfsnode *np = VTONFS(vp);
vp                287 nfs/nfs_bio.c  	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp                297 nfs/nfs_bio.c  	if (vp->v_type != VREG)
vp                304 nfs/nfs_bio.c  		(void)nfs_fsinfo(nmp, vp, cred, p);
vp                308 nfs/nfs_bio.c  			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
vp                314 nfs/nfs_bio.c  			error = VOP_GETATTR(vp, &vattr, cred, p);
vp                353 nfs/nfs_bio.c  		uvm_vnp_uncache(vp);
vp                361 nfs/nfs_bio.c  		bp = nfs_getcacheblk(vp, bn, biosize, p);
vp                367 nfs/nfs_bio.c  			uvm_vnp_setsize(vp, (u_long)np->n_size);
vp                414 nfs/nfs_bio.c  		if (NFS_ISV3(vp)) {
vp                418 nfs/nfs_bio.c  				nfs_del_tobecommitted_range(vp, bp);
vp                420 nfs/nfs_bio.c  			nfs_del_committed_range(vp, bp);
vp                443 nfs/nfs_bio.c  		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0) |
vp                457 nfs/nfs_bio.c  nfs_getcacheblk(vp, bn, size, p)
vp                458 nfs/nfs_bio.c  	struct vnode *vp;
vp                464 nfs/nfs_bio.c  	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp                467 nfs/nfs_bio.c  		bp = getblk(vp, bn, size, PCATCH, 0);
vp                471 nfs/nfs_bio.c  			bp = getblk(vp, bn, size, 0, 2 * hz);
vp                474 nfs/nfs_bio.c  		bp = getblk(vp, bn, size, 0, 0);
vp                483 nfs/nfs_bio.c  nfs_vinvalbuf(vp, flags, cred, p, intrflg)
vp                484 nfs/nfs_bio.c  	struct vnode *vp;
vp                490 nfs/nfs_bio.c  	struct nfsnode *np = VTONFS(vp);
vp                491 nfs/nfs_bio.c  	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp                518 nfs/nfs_bio.c  	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
vp                528 nfs/nfs_bio.c  		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
vp                594 nfs/nfs_bio.c  	struct vnode *vp;
vp                601 nfs/nfs_bio.c  	vp = bp->b_vp;
vp                602 nfs/nfs_bio.c  	np = VTONFS(vp);
vp                603 nfs/nfs_bio.c  	nmp = VFSTONFS(vp->v_mount);
vp                624 nfs/nfs_bio.c  		error = nfs_readrpc(vp, uiop);
vp                629 nfs/nfs_bio.c  		error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
vp                639 nfs/nfs_bio.c  	    switch (vp->v_type) {
vp                643 nfs/nfs_bio.c  		error = nfs_readrpc(vp, uiop);
vp                665 nfs/nfs_bio.c  		if (p && (vp->v_flag & VTEXT) &&
vp                674 nfs/nfs_bio.c  		error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
vp                677 nfs/nfs_bio.c  		printf("nfs_doio:  type %x unexpected\n", vp->v_type);
vp                697 nfs/nfs_bio.c  	    error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
vp                702 nfs/nfs_bio.c  		nfs_add_tobecommitted_range(vp, bp);
vp                705 nfs/nfs_bio.c  		nfs_del_committed_range(vp, bp);
vp                741 nfs/nfs_bio.c  		nfs_clearcommit(vp->v_mount);
vp                 71 nfs/nfs_kq.c   	struct vnode		*vp;
vp                113 nfs/nfs_kq.c   			struct nfsnode *np = VTONFS(ke->vp);
vp                117 nfs/nfs_kq.c   			VOP_PRINT(ke->vp);
vp                120 nfs/nfs_kq.c   			if (nfs_getattrcache(ke->vp, &attr) != ENOENT)
vp                133 nfs/nfs_kq.c   			error = VOP_GETATTR(ke->vp, &attr, p->p_ucred, p);
vp                136 nfs/nfs_kq.c   				VN_KNOTE(ke->vp, NOTE_DELETE);
vp                144 nfs/nfs_kq.c   				VN_KNOTE(ke->vp, NOTE_WRITE
vp                149 nfs/nfs_kq.c   				VN_KNOTE(ke->vp, NOTE_WRITE);
vp                155 nfs/nfs_kq.c   				VN_KNOTE(ke->vp, NOTE_ATTRIB);
vp                160 nfs/nfs_kq.c   				VN_KNOTE(ke->vp, NOTE_LINK);
vp                190 nfs/nfs_kq.c   	struct vnode *vp = (struct vnode *)kn->kn_hook;
vp                193 nfs/nfs_kq.c   	SLIST_REMOVE(&vp->v_selectinfo.si_note, kn, knote, kn_selnext);
vp                198 nfs/nfs_kq.c   		if (ke->vp == vp) {
vp                223 nfs/nfs_kq.c   	struct vnode *vp = (struct vnode *)kn->kn_hook;
vp                224 nfs/nfs_kq.c   	struct nfsnode *np = VTONFS(vp);
vp                263 nfs/nfs_kq.c   	struct vnode *vp;
vp                270 nfs/nfs_kq.c   	vp = ap->a_vp;
vp                275 nfs/nfs_kq.c   	VOP_PRINT(vp);
vp                289 nfs/nfs_kq.c   	kn->kn_hook = vp;
vp                301 nfs/nfs_kq.c   	(void) VOP_GETATTR(vp, &attr, p->p_ucred, p);
vp                314 nfs/nfs_kq.c   		if (ke->vp == vp)
vp                324 nfs/nfs_kq.c   		ke->vp = vp;
vp                336 nfs/nfs_kq.c   	SLIST_INSERT_HEAD(&vp->v_selectinfo.si_note, kn, kn_selnext);
vp                 99 nfs/nfs_node.c 	struct vnode *vp;
vp                110 nfs/nfs_node.c 		vp = NFSTOV(np);
vp                111 nfs/nfs_node.c 		if (vget(vp, LK_EXCLUSIVE, p))
vp                124 nfs/nfs_node.c 	vp = nvp;
vp                127 nfs/nfs_node.c 	vp->v_data = np;
vp                128 nfs/nfs_node.c 	np->n_vnode = vp;
vp                140 nfs/nfs_node.c 			if (vp->v_type == VNON)
vp                141 nfs/nfs_node.c 				vp->v_type = VDIR;
vp                142 nfs/nfs_node.c 			vp->v_flag |= VROOT;
vp                203 nfs/nfs_node.c 	struct vnode *vp = ap->a_vp;
vp                204 nfs/nfs_node.c 	struct nfsnode *np = VTONFS(vp);
vp                208 nfs/nfs_node.c 	if (prtactive && vp->v_usecount != 0)
vp                209 nfs/nfs_node.c 		vprint("nfs_reclaim: pushing active", vp);
vp                220 nfs/nfs_node.c 	if (vp->v_type == VDIR) {
vp                236 nfs/nfs_node.c 	cache_purge(vp);
vp                237 nfs/nfs_node.c 	pool_put(&nfs_node_pool, vp->v_data);
vp                238 nfs/nfs_node.c 	vp->v_data = NULL;
vp                106 nfs/nfs_serv.c 	struct vnode *vp;
vp                122 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, &rdonly,
vp                131 nfs/nfs_serv.c 		nfsrv_access(vp, VREAD, cred, rdonly, procp, 0))
vp                133 nfs/nfs_serv.c 	if (vp->v_type == VDIR)
vp                139 nfs/nfs_serv.c 		nfsrv_access(vp, VWRITE, cred, rdonly, procp, 0))
vp                141 nfs/nfs_serv.c 	if (vp->v_type == VDIR)
vp                146 nfs/nfs_serv.c 		nfsrv_access(vp, VEXEC, cred, rdonly, procp, 0))
vp                148 nfs/nfs_serv.c 	getret = VOP_GETATTR(vp, &va, cred, procp);
vp                149 nfs/nfs_serv.c 	vput(vp);
vp                173 nfs/nfs_serv.c 	struct vnode *vp;
vp                186 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, &rdonly,
vp                192 nfs/nfs_serv.c 	error = VOP_GETATTR(vp, &va, cred, procp);
vp                193 nfs/nfs_serv.c 	vput(vp);
vp                219 nfs/nfs_serv.c 	struct vnode *vp;
vp                278 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, &rdonly,
vp                286 nfs/nfs_serv.c 		error = preat_ret = VOP_GETATTR(vp, &preat, cred, procp);
vp                292 nfs/nfs_serv.c 			vput(vp);
vp                304 nfs/nfs_serv.c 		if (rdonly || (vp->v_mount->mnt_flag & MNT_RDONLY)) {
vp                309 nfs/nfs_serv.c 		if (vp->v_type == VDIR) {
vp                312 nfs/nfs_serv.c 		} else if ((error = nfsrv_access(vp, VWRITE, cred, rdonly,
vp                316 nfs/nfs_serv.c 	error = VOP_SETATTR(vp, &va, cred, procp);
vp                317 nfs/nfs_serv.c 	postat_ret = VOP_GETATTR(vp, &va, cred, procp);
vp                321 nfs/nfs_serv.c 	vput(vp);
vp                349 nfs/nfs_serv.c 	struct vnode *vp, *dirp;
vp                384 nfs/nfs_serv.c 	vp = nd.ni_vp;
vp                386 nfs/nfs_serv.c 	fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid;
vp                387 nfs/nfs_serv.c 	error = VFS_VPTOFH(vp, &fhp->fh_fid);
vp                389 nfs/nfs_serv.c 		error = VOP_GETATTR(vp, &va, cred, procp);
vp                390 nfs/nfs_serv.c 	vput(vp);
vp                431 nfs/nfs_serv.c 	struct vnode *vp;
vp                469 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
vp                477 nfs/nfs_serv.c 	if (vp->v_type != VLNK) {
vp                484 nfs/nfs_serv.c 	error = VOP_READLINK(vp, uiop, cred);
vp                486 nfs/nfs_serv.c 	getret = VOP_GETATTR(vp, &attr, cred, procp);
vp                487 nfs/nfs_serv.c 	vput(vp);
vp                534 nfs/nfs_serv.c 	struct vnode *vp;
vp                552 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
vp                559 nfs/nfs_serv.c 	if (vp->v_type != VREG) {
vp                563 nfs/nfs_serv.c 			error = (vp->v_type == VDIR) ? EISDIR : EACCES;
vp                566 nfs/nfs_serv.c 	    if ((error = nfsrv_access(vp, VREAD, cred, rdonly, procp, 1)) != 0)
vp                567 nfs/nfs_serv.c 		error = nfsrv_access(vp, VEXEC, cred, rdonly, procp, 1);
vp                569 nfs/nfs_serv.c 	getret = VOP_GETATTR(vp, &va, cred, procp);
vp                573 nfs/nfs_serv.c 		vput(vp);
vp                641 nfs/nfs_serv.c 		error = VOP_READ(vp, uiop, IO_NODELOCKED, cred);
vp                644 nfs/nfs_serv.c 		if (error || (getret = VOP_GETATTR(vp, &va, cred, procp)) != 0){
vp                648 nfs/nfs_serv.c 			vput(vp);
vp                655 nfs/nfs_serv.c 	vput(vp);
vp                702 nfs/nfs_serv.c 	struct vnode *vp;
vp                764 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
vp                772 nfs/nfs_serv.c 		forat_ret = VOP_GETATTR(vp, &forat, cred, procp);
vp                773 nfs/nfs_serv.c 	if (vp->v_type != VREG) {
vp                777 nfs/nfs_serv.c 			error = (vp->v_type == VDIR) ? EISDIR : EACCES;
vp                780 nfs/nfs_serv.c 		error = nfsrv_access(vp, VWRITE, cred, rdonly, procp, 1);
vp                783 nfs/nfs_serv.c 		vput(vp);
vp                822 nfs/nfs_serv.c 	    error = VOP_WRITE(vp, uiop, ioflags, cred);
vp                826 nfs/nfs_serv.c 	aftat_ret = VOP_GETATTR(vp, &va, cred, procp);
vp                827 nfs/nfs_serv.c 	vput(vp);
vp                886 nfs/nfs_serv.c 	struct vnode *vp;
vp               1033 nfs/nfs_serv.c 		error = nfsrv_fhtovp(&nfsd->nd_fh, 1, &vp, cred, slp, 
vp               1037 nfs/nfs_serv.c 			forat_ret = VOP_GETATTR(vp, &forat, cred, procp);
vp               1038 nfs/nfs_serv.c 		    if (vp->v_type != VREG) {
vp               1042 nfs/nfs_serv.c 			    error = (vp->v_type == VDIR) ? EISDIR : EACCES;
vp               1045 nfs/nfs_serv.c 		    vp = NULL;
vp               1047 nfs/nfs_serv.c 		    error = nfsrv_access(vp, VWRITE, cred, rdonly, procp, 1);
vp               1083 nfs/nfs_serv.c 			error = VOP_WRITE(vp, uiop, ioflags, cred);
vp               1089 nfs/nfs_serv.c 		if (vp) {
vp               1090 nfs/nfs_serv.c 		    aftat_ret = VOP_GETATTR(vp, &va, cred, procp);
vp               1091 nfs/nfs_serv.c 		    vput(vp);
vp               1251 nfs/nfs_serv.c 	struct vnode *vp = NULL, *dirp = NULL;
vp               1394 nfs/nfs_serv.c 		vp = nd.ni_vp;
vp               1398 nfs/nfs_serv.c 		vp = nd.ni_vp;
vp               1399 nfs/nfs_serv.c 		if (nd.ni_dvp == vp)
vp               1405 nfs/nfs_serv.c 			error = nfsrv_access(vp, VWRITE, cred,
vp               1411 nfs/nfs_serv.c 				error = VOP_SETATTR(vp, &va, cred,
vp               1415 nfs/nfs_serv.c 				vput(vp);
vp               1420 nfs/nfs_serv.c 		fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid;
vp               1421 nfs/nfs_serv.c 		error = VFS_VPTOFH(vp, &fhp->fh_fid);
vp               1423 nfs/nfs_serv.c 			error = VOP_GETATTR(vp, &va, cred, procp);
vp               1424 nfs/nfs_serv.c 		vput(vp);
vp               1487 nfs/nfs_serv.c 	struct vnode *vp, *dirp = (struct vnode *)0;
vp               1576 nfs/nfs_serv.c 	vp = nd.ni_vp;
vp               1579 nfs/nfs_serv.c 		fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid;
vp               1580 nfs/nfs_serv.c 		error = VFS_VPTOFH(vp, &fhp->fh_fid);
vp               1582 nfs/nfs_serv.c 			error = VOP_GETATTR(vp, &va, cred, procp);
vp               1583 nfs/nfs_serv.c 		vput(vp);
vp               1633 nfs/nfs_serv.c 	struct vnode *vp, *dirp;
vp               1640 nfs/nfs_serv.c 	vp = (struct vnode *)0;
vp               1658 nfs/nfs_serv.c 		vp = nd.ni_vp;
vp               1659 nfs/nfs_serv.c 		if (vp->v_type == VDIR &&
vp               1665 nfs/nfs_serv.c 		if (vp->v_flag & VROOT) {
vp               1669 nfs/nfs_serv.c 		if (vp->v_flag & VTEXT)
vp               1670 nfs/nfs_serv.c 			uvm_vnp_uncache(vp);
vp               1676 nfs/nfs_serv.c 			if (nd.ni_dvp == vp)
vp               1680 nfs/nfs_serv.c 			vput(vp);
vp               1897 nfs/nfs_serv.c 	struct vnode *vp, *xp, *dirp = (struct vnode *)0;
vp               1908 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, FALSE, &vp, cred, slp, nam,
vp               1916 nfs/nfs_serv.c 	if (vp->v_type == VDIR && (error = suser_ucred(cred)) != 0)
vp               1940 nfs/nfs_serv.c 	if (vp->v_mount != xp->v_mount)
vp               1944 nfs/nfs_serv.c 		error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
vp               1956 nfs/nfs_serv.c 		getret = VOP_GETATTR(vp, &at, cred, procp);
vp               1961 nfs/nfs_serv.c 	vrele(vp);
vp               2137 nfs/nfs_serv.c 	struct vnode *vp, *dirp = (struct vnode *)0;
vp               2174 nfs/nfs_serv.c 	vp = nd.ni_vp;
vp               2175 nfs/nfs_serv.c 	if (vp != NULL) {
vp               2177 nfs/nfs_serv.c 		if (nd.ni_dvp == vp)
vp               2181 nfs/nfs_serv.c 		vrele(vp);
vp               2187 nfs/nfs_serv.c 		vp = nd.ni_vp;
vp               2189 nfs/nfs_serv.c 		fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid;
vp               2190 nfs/nfs_serv.c 		error = VFS_VPTOFH(vp, &fhp->fh_fid);
vp               2192 nfs/nfs_serv.c 			error = VOP_GETATTR(vp, &va, cred, procp);
vp               2193 nfs/nfs_serv.c 		vput(vp);
vp               2247 nfs/nfs_serv.c 	struct vnode *vp, *dirp = (struct vnode *)0;
vp               2278 nfs/nfs_serv.c 	vp = nd.ni_vp;
vp               2279 nfs/nfs_serv.c 	if (vp->v_type != VDIR) {
vp               2286 nfs/nfs_serv.c 	if (nd.ni_dvp == vp) {
vp               2293 nfs/nfs_serv.c 	if (vp->v_flag & VROOT)
vp               2304 nfs/nfs_serv.c 		vput(vp);
vp               2376 nfs/nfs_serv.c 	struct vnode *vp;
vp               2409 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
vp               2417 nfs/nfs_serv.c 		error = getret = VOP_GETATTR(vp, &at, cred, procp);
vp               2427 nfs/nfs_serv.c 		error = nfsrv_access(vp, VEXEC, cred, rdonly, procp, 0);
vp               2429 nfs/nfs_serv.c 		vput(vp);
vp               2434 nfs/nfs_serv.c 	VOP_UNLOCK(vp, 0, procp);
vp               2453 nfs/nfs_serv.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, procp);
vp               2454 nfs/nfs_serv.c 	error = VOP_READDIR(vp, &io, cred, &eofflag, &ncookies, &cookies);
vp               2460 nfs/nfs_serv.c 		getret = VOP_GETATTR(vp, &at, cred, procp);
vp               2465 nfs/nfs_serv.c 	VOP_UNLOCK(vp, 0, procp);
vp               2467 nfs/nfs_serv.c 		vrele(vp);
vp               2483 nfs/nfs_serv.c 			vrele(vp);
vp               2598 nfs/nfs_serv.c 	vrele(vp);
vp               2638 nfs/nfs_serv.c 	struct vnode *vp, *nvp;
vp               2668 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
vp               2675 nfs/nfs_serv.c 	error = getret = VOP_GETATTR(vp, &at, cred, procp);
vp               2684 nfs/nfs_serv.c 		error = nfsrv_access(vp, VEXEC, cred, rdonly, procp, 0);
vp               2687 nfs/nfs_serv.c 		vput(vp);
vp               2692 nfs/nfs_serv.c 	VOP_UNLOCK(vp, 0, procp);
vp               2712 nfs/nfs_serv.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, procp);
vp               2713 nfs/nfs_serv.c 	error = VOP_READDIR(vp, &io, cred, &eofflag, &ncookies, &cookies);
vp               2716 nfs/nfs_serv.c 	getret = VOP_GETATTR(vp, &at, cred, procp);
vp               2718 nfs/nfs_serv.c 	VOP_UNLOCK(vp, 0, procp);
vp               2725 nfs/nfs_serv.c 		vrele(vp);
vp               2741 nfs/nfs_serv.c 			vrele(vp);
vp               2808 nfs/nfs_serv.c 			if (VFS_VGET(vp->v_mount, dp->d_fileno, &nvp))
vp               2916 nfs/nfs_serv.c 	vrele(vp);
vp               2951 nfs/nfs_serv.c 	struct vnode *vp;
vp               2973 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
vp               2980 nfs/nfs_serv.c 	for_ret = VOP_GETATTR(vp, &bfor, cred, procp);
vp               2981 nfs/nfs_serv.c 	error = VOP_FSYNC(vp, cred, MNT_WAIT, procp);
vp               2982 nfs/nfs_serv.c 	aft_ret = VOP_GETATTR(vp, &aft, cred, procp);
vp               2983 nfs/nfs_serv.c 	vput(vp);
vp               3018 nfs/nfs_serv.c 	struct vnode *vp;
vp               3027 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
vp               3035 nfs/nfs_serv.c 	error = VFS_STATFS(vp->v_mount, sf, procp);
vp               3036 nfs/nfs_serv.c 	getret = VOP_GETATTR(vp, &at, cred, procp);
vp               3037 nfs/nfs_serv.c 	vput(vp);
vp               3092 nfs/nfs_serv.c 	struct vnode *vp;
vp               3100 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
vp               3107 nfs/nfs_serv.c 	getret = VOP_GETATTR(vp, &at, cred, procp);
vp               3108 nfs/nfs_serv.c 	vput(vp);
vp               3161 nfs/nfs_serv.c 	struct vnode *vp;
vp               3169 nfs/nfs_serv.c 	error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
vp               3176 nfs/nfs_serv.c 	error = VOP_PATHCONF(vp, _PC_LINK_MAX, &linkmax);
vp               3178 nfs/nfs_serv.c 		error = VOP_PATHCONF(vp, _PC_NAME_MAX, &namemax);
vp               3180 nfs/nfs_serv.c 		error = VOP_PATHCONF(vp, _PC_CHOWN_RESTRICTED, &chownres);
vp               3182 nfs/nfs_serv.c 		error = VOP_PATHCONF(vp, _PC_NO_TRUNC, &notrunc);
vp               3183 nfs/nfs_serv.c 	getret = VOP_GETATTR(vp, &at, cred, procp);
vp               3184 nfs/nfs_serv.c 	vput(vp);
vp               3264 nfs/nfs_serv.c nfsrv_access(vp, flags, cred, rdonly, p, override)
vp               3265 nfs/nfs_serv.c 	struct vnode *vp;
vp               3282 nfs/nfs_serv.c 		if (rdonly || (vp->v_mount->mnt_flag & MNT_RDONLY)) {
vp               3283 nfs/nfs_serv.c 			switch (vp->v_type) {
vp               3297 nfs/nfs_serv.c 		if ((vp->v_flag & VTEXT) && !uvm_vnp_uncache(vp))
vp               3300 nfs/nfs_serv.c 	error = VOP_ACCESS(vp, flags, cred, p);
vp               3306 nfs/nfs_serv.c 	    VOP_GETATTR(vp, &vattr, cred, p) == 0 &&
vp                823 nfs/nfs_socket.c nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp)
vp                824 nfs/nfs_socket.c 	struct vnode *vp;
vp                851 nfs/nfs_socket.c 	nmp = VFSTONFS(vp->v_mount);
vp                854 nfs/nfs_socket.c 	rep->r_vp = vp;
vp               1057 nfs/nfs_socket.c 				cache_purge(vp);
vp                539 nfs/nfs_subs.c nfsm_reqh(vp, procid, hsiz, bposp)
vp                540 nfs/nfs_subs.c 	struct vnode *vp;
vp               1112 nfs/nfs_subs.c 	struct vnode *vp = *vpp;
vp               1126 nfs/nfs_subs.c 	int v3 = NFS_ISV3(vp);
vp               1162 nfs/nfs_subs.c 	np = VTONFS(vp);
vp               1163 nfs/nfs_subs.c 	if (vp->v_type != vtyp) {
vp               1164 nfs/nfs_subs.c 		vp->v_type = vtyp;
vp               1165 nfs/nfs_subs.c 		if (vp->v_type == VFIFO) {
vp               1170 nfs/nfs_subs.c 			vp->v_op = fifo_nfsv2nodeop_p;
vp               1173 nfs/nfs_subs.c 		if (vp->v_type == VCHR || vp->v_type == VBLK) {
vp               1174 nfs/nfs_subs.c 			vp->v_op = spec_nfsv2nodeop_p;
vp               1175 nfs/nfs_subs.c 			nvp = checkalias(vp, (dev_t)rdev, vp->v_mount);
vp               1183 nfs/nfs_subs.c 				nvp->v_data = vp->v_data;
vp               1184 nfs/nfs_subs.c 				vp->v_data = NULL;
vp               1185 nfs/nfs_subs.c 				vp->v_op = spec_vnodeop_p;
vp               1186 nfs/nfs_subs.c 				vrele(vp);
vp               1187 nfs/nfs_subs.c 				vgone(vp);
vp               1192 nfs/nfs_subs.c 				*vpp = vp = nvp;
vp               1202 nfs/nfs_subs.c 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
vp               1243 nfs/nfs_subs.c 			uvm_vnp_setsize(vp, np->n_size);
vp               1264 nfs/nfs_subs.c 	struct vnode *vp = np->n_vnode;
vp               1265 nfs/nfs_subs.c 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp               1269 nfs/nfs_subs.c 	if (vp->v_type == VDIR) {
vp               1292 nfs/nfs_subs.c nfs_getattrcache(vp, vaper)
vp               1293 nfs/nfs_subs.c 	struct vnode *vp;
vp               1296 nfs/nfs_subs.c 	struct nfsnode *np = VTONFS(vp);
vp               1315 nfs/nfs_subs.c 			uvm_vnp_setsize(vp, np->n_size);
vp               1721 nfs/nfs_subs.c 	struct vnode *vp, *nvp;
vp               1727 nfs/nfs_subs.c 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
vp               1728 nfs/nfs_subs.c 		if (vp->v_mount != mp)	/* Paranoia */
vp               1730 nfs/nfs_subs.c 		nvp = LIST_NEXT(vp, v_mntvnodes);
vp               1731 nfs/nfs_subs.c 		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
vp               1742 nfs/nfs_subs.c nfs_merge_commit_ranges(vp)
vp               1743 nfs/nfs_subs.c 	struct vnode *vp;
vp               1745 nfs/nfs_subs.c 	struct nfsnode *np = VTONFS(vp);
vp               1763 nfs/nfs_subs.c nfs_in_committed_range(vp, bp)
vp               1764 nfs/nfs_subs.c 	struct vnode *vp;
vp               1767 nfs/nfs_subs.c 	struct nfsnode *np = VTONFS(vp);
vp               1779 nfs/nfs_subs.c nfs_in_tobecommitted_range(vp, bp)
vp               1780 nfs/nfs_subs.c 	struct vnode *vp;
vp               1783 nfs/nfs_subs.c 	struct nfsnode *np = VTONFS(vp);
vp               1795 nfs/nfs_subs.c nfs_add_committed_range(vp, bp)
vp               1796 nfs/nfs_subs.c 	struct vnode *vp;
vp               1799 nfs/nfs_subs.c 	struct nfsnode *np = VTONFS(vp);
vp               1818 nfs/nfs_subs.c nfs_del_committed_range(vp, bp)
vp               1819 nfs/nfs_subs.c 	struct vnode *vp;
vp               1822 nfs/nfs_subs.c 	struct nfsnode *np = VTONFS(vp);
vp               1851 nfs/nfs_subs.c nfs_add_tobecommitted_range(vp, bp)
vp               1852 nfs/nfs_subs.c 	struct vnode *vp;
vp               1855 nfs/nfs_subs.c 	struct nfsnode *np = VTONFS(vp);
vp               1874 nfs/nfs_subs.c nfs_del_tobecommitted_range(vp, bp)
vp               1875 nfs/nfs_subs.c 	struct vnode *vp;
vp               1878 nfs/nfs_subs.c 	struct nfsnode *np = VTONFS(vp);
vp                699 nfs/nfs_syscalls.c 	struct vnode *vp;
vp                736 nfs/nfs_syscalls.c 		    vp = bp->b_vp;
vp                738 nfs/nfs_syscalls.c 		    for (nbp = LIST_FIRST(&vp->v_dirtyblkhd); nbp != NULL;
vp                110 nfs/nfs_vfsops.c 	struct vnode *vp;
vp                126 nfs/nfs_vfsops.c 	vp = NFSTOV(np);
vp                130 nfs/nfs_vfsops.c 		(void)nfs_fsinfo(nmp, vp, cred, p);
vp                132 nfs/nfs_vfsops.c 	nfsm_reqhead(vp, NFSPROC_FSSTAT, NFSX_FH(v3));
vp                133 nfs/nfs_vfsops.c 	nfsm_fhtom(vp, v3);
vp                134 nfs/nfs_vfsops.c 	nfsm_request(vp, NFSPROC_FSSTAT, p, cred);
vp                136 nfs/nfs_vfsops.c 		nfsm_postop_attr(vp, retattr);
vp                173 nfs/nfs_vfsops.c 	vrele(vp);
vp                182 nfs/nfs_vfsops.c nfs_fsinfo(nmp, vp, cred, p)
vp                184 nfs/nfs_vfsops.c 	struct vnode *vp;
vp                197 nfs/nfs_vfsops.c 	nfsm_reqhead(vp, NFSPROC_FSINFO, NFSX_FH(1));
vp                198 nfs/nfs_vfsops.c 	nfsm_fhtom(vp, 1);
vp                199 nfs/nfs_vfsops.c 	nfsm_request(vp, NFSPROC_FSINFO, p, cred);
vp                200 nfs/nfs_vfsops.c 	nfsm_postop_attr(vp, retattr);
vp                252 nfs/nfs_vfsops.c 	struct vnode *vp;
vp                324 nfs/nfs_vfsops.c 		nfs_root(mp, &vp);
vp                331 nfs/nfs_vfsops.c 		vp->v_type = VREG;
vp                332 nfs/nfs_vfsops.c 		vp->v_flag = 0;
vp                340 nfs/nfs_vfsops.c 		swdevt[0].sw_vp = vp;
vp                345 nfs/nfs_vfsops.c 		error = VOP_GETATTR(vp, &attr, procp->p_ucred, procp);
vp                786 nfs/nfs_vfsops.c 	struct vnode *vp;
vp                799 nfs/nfs_vfsops.c 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
vp                800 nfs/nfs_vfsops.c 	     vp = LIST_NEXT(vp, v_mntvnodes)) {
vp                805 nfs/nfs_vfsops.c 		if (vp->v_mount != mp)
vp                807 nfs/nfs_vfsops.c 		if (VOP_ISLOCKED(vp) || LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
vp                809 nfs/nfs_vfsops.c 		if (vget(vp, LK_EXCLUSIVE, p))
vp                811 nfs/nfs_vfsops.c 		error = VOP_FSYNC(vp, cred, waitfor, p);
vp                814 nfs/nfs_vfsops.c 		vput(vp);
vp                907 nfs/nfs_vfsops.c nfs_vptofh(vp, fhp)
vp                908 nfs/nfs_vfsops.c 	struct vnode *vp;
vp                193 nfs/nfs_vnops.c nfs_null(vp, cred, procp)
vp                194 nfs/nfs_vnops.c 	struct vnode *vp;
vp                202 nfs/nfs_vnops.c 	nfsm_reqhead(vp, NFSPROC_NULL, 0);
vp                203 nfs/nfs_vnops.c 	nfsm_request(vp, NFSPROC_NULL, procp, cred);
vp                219 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                227 nfs/nfs_vnops.c 	int v3 = NFS_ISV3(vp);
vp                234 nfs/nfs_vnops.c 	if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
vp                235 nfs/nfs_vnops.c 		switch (vp->v_type) {
vp                254 nfs/nfs_vnops.c 		nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
vp                255 nfs/nfs_vnops.c 		nfsm_fhtom(vp, v3);
vp                261 nfs/nfs_vnops.c 		if (vp->v_type == VDIR) {
vp                274 nfs/nfs_vnops.c 		nfsm_request(vp, NFSPROC_ACCESS, ap->a_p, ap->a_cred);
vp                275 nfs/nfs_vnops.c 		nfsm_postop_attr(vp, attrflag);
vp                306 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                307 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp                311 nfs/nfs_vnops.c 	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
vp                313 nfs/nfs_vnops.c 		printf("open eacces vtyp=%d\n",vp->v_type);
vp                339 nfs/nfs_vnops.c 		if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
vp                342 nfs/nfs_vnops.c 		uvm_vnp_uncache(vp);
vp                344 nfs/nfs_vnops.c 		if (vp->v_type == VDIR)
vp                346 nfs/nfs_vnops.c 		error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
vp                351 nfs/nfs_vnops.c 		error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
vp                355 nfs/nfs_vnops.c 			if (vp->v_type == VDIR)
vp                357 nfs/nfs_vnops.c 			if ((error = nfs_vinvalbuf(vp, V_SAVE,
vp                360 nfs/nfs_vnops.c 			uvm_vnp_uncache(vp);
vp                401 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                402 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp                405 nfs/nfs_vnops.c 	if (vp->v_type == VREG) {
vp                407 nfs/nfs_vnops.c 		if (NFS_ISV3(vp)) {
vp                408 nfs/nfs_vnops.c 		    error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0);
vp                411 nfs/nfs_vnops.c 		    error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
vp                430 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                431 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp                438 nfs/nfs_vnops.c 	int v3 = NFS_ISV3(vp);
vp                448 nfs/nfs_vnops.c 	if (nfs_getattrcache(vp, ap->a_vap) == 0)
vp                451 nfs/nfs_vnops.c 	nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
vp                452 nfs/nfs_vnops.c 	nfsm_fhtom(vp, v3);
vp                453 nfs/nfs_vnops.c 	nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred);
vp                455 nfs/nfs_vnops.c 		nfsm_loadattr(vp, ap->a_vap);
vp                468 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                469 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp                486 nfs/nfs_vnops.c 	    (vp->v_mount->mnt_flag & MNT_RDONLY))
vp                489 nfs/nfs_vnops.c  		switch (vp->v_type) {
vp                509 nfs/nfs_vnops.c 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                512 nfs/nfs_vnops.c  				error = nfs_vinvalbuf(vp, 0,
vp                515 nfs/nfs_vnops.c 				error = nfs_vinvalbuf(vp, V_SAVE,
vp                521 nfs/nfs_vnops.c 			uvm_vnp_setsize(vp, np->n_size);
vp                525 nfs/nfs_vnops.c 		vp->v_type == VREG &&
vp                526 nfs/nfs_vnops.c   		(error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
vp                529 nfs/nfs_vnops.c 	error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p);
vp                532 nfs/nfs_vnops.c 		uvm_vnp_setsize(vp, np->n_size);
vp                535 nfs/nfs_vnops.c 	VN_KNOTE(vp, NOTE_ATTRIB); /* XXX setattrrpc? */
vp                544 nfs/nfs_vnops.c nfs_setattrrpc(vp, vap, cred, procp)
vp                545 nfs/nfs_vnops.c 	struct vnode *vp;
vp                557 nfs/nfs_vnops.c 	int v3 = NFS_ISV3(vp);
vp                560 nfs/nfs_vnops.c 	nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
vp                561 nfs/nfs_vnops.c 	nfsm_fhtom(vp, v3);
vp                571 nfs/nfs_vnops.c 			sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
vp                584 nfs/nfs_vnops.c 	nfsm_request(vp, NFSPROC_SETATTR, procp, cred);
vp                586 nfs/nfs_vnops.c 		nfsm_wcc_data(vp, wccflag);
vp                588 nfs/nfs_vnops.c 		nfsm_loadattr(vp, (struct vattr *)0);
vp                860 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                862 nfs/nfs_vnops.c 	if (vp->v_type != VREG)
vp                864 nfs/nfs_vnops.c 	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
vp                875 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                877 nfs/nfs_vnops.c 	if (vp->v_type != VLNK)
vp                879 nfs/nfs_vnops.c 	return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
vp                887 nfs/nfs_vnops.c nfs_readlinkrpc(vp, uiop, cred)
vp                888 nfs/nfs_vnops.c 	struct vnode *vp;
vp                898 nfs/nfs_vnops.c 	int v3 = NFS_ISV3(vp);
vp                901 nfs/nfs_vnops.c 	nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
vp                902 nfs/nfs_vnops.c 	nfsm_fhtom(vp, v3);
vp                903 nfs/nfs_vnops.c 	nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred);
vp                905 nfs/nfs_vnops.c 		nfsm_postop_attr(vp, attrflag);
vp                919 nfs/nfs_vnops.c nfs_readrpc(vp, uiop)
vp                920 nfs/nfs_vnops.c 	struct vnode *vp;
vp                930 nfs/nfs_vnops.c 	int v3 = NFS_ISV3(vp);
vp                935 nfs/nfs_vnops.c 	nmp = VFSTONFS(vp->v_mount);
vp                942 nfs/nfs_vnops.c 		nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
vp                943 nfs/nfs_vnops.c 		nfsm_fhtom(vp, v3);
vp                953 nfs/nfs_vnops.c 		nfsm_request(vp, NFSPROC_READ, uiop->uio_procp,
vp                954 nfs/nfs_vnops.c 		    VTONFS(vp)->n_rcred);
vp                956 nfs/nfs_vnops.c 			nfsm_postop_attr(vp, attrflag);
vp                964 nfs/nfs_vnops.c 			nfsm_loadattr(vp, (struct vattr *)0);
vp                983 nfs/nfs_vnops.c nfs_writerpc(vp, uiop, iomode, must_commit)
vp                984 nfs/nfs_vnops.c 	struct vnode *vp;
vp                993 nfs/nfs_vnops.c 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp                995 nfs/nfs_vnops.c 	int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
vp               1008 nfs/nfs_vnops.c 		nfsm_reqhead(vp, NFSPROC_WRITE,
vp               1010 nfs/nfs_vnops.c 		nfsm_fhtom(vp, v3);
vp               1032 nfs/nfs_vnops.c 		nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp,
vp               1033 nfs/nfs_vnops.c 		    VTONFS(vp)->n_wcred);
vp               1036 nfs/nfs_vnops.c 			nfsm_wcc_data(vp, wccflag);
vp               1075 nfs/nfs_vnops.c 		    nfsm_loadattr(vp, (struct vattr *)0);
vp               1077 nfs/nfs_vnops.c 		    VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
vp               1318 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1321 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp               1328 nfs/nfs_vnops.c 	if (vp->v_usecount < 1)
vp               1331 nfs/nfs_vnops.c 	if (vp->v_type == VDIR)
vp               1333 nfs/nfs_vnops.c 	else if (vp->v_usecount == 1 || (np->n_sillyrename &&
vp               1334 nfs/nfs_vnops.c 	    VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
vp               1343 nfs/nfs_vnops.c 		cache_purge(vp);
vp               1348 nfs/nfs_vnops.c 		error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1);
vp               1362 nfs/nfs_vnops.c 		error = nfs_sillyrename(dvp, vp, cnp);
vp               1366 nfs/nfs_vnops.c 	vrele(vp);
vp               1368 nfs/nfs_vnops.c 	VN_KNOTE(vp, NOTE_DELETE);
vp               1554 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1565 nfs/nfs_vnops.c 	if (dvp->v_mount != vp->v_mount) {
vp               1567 nfs/nfs_vnops.c 		if (vp == dvp)
vp               1579 nfs/nfs_vnops.c 	VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);
vp               1581 nfs/nfs_vnops.c 	v3 = NFS_ISV3(vp);
vp               1583 nfs/nfs_vnops.c 	nfsm_reqhead(vp, NFSPROC_LINK,
vp               1585 nfs/nfs_vnops.c 	nfsm_fhtom(vp, v3);
vp               1588 nfs/nfs_vnops.c 	nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred);
vp               1590 nfs/nfs_vnops.c 		nfsm_postop_attr(vp, attrflag);
vp               1597 nfs/nfs_vnops.c 		VTONFS(vp)->n_attrstamp = 0;
vp               1601 nfs/nfs_vnops.c 	VN_KNOTE(vp, NOTE_LINK);
vp               1761 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1772 nfs/nfs_vnops.c 	if (dvp == vp) {
vp               1793 nfs/nfs_vnops.c 	VN_KNOTE(vp, NOTE_DELETE);
vp               1796 nfs/nfs_vnops.c 	cache_purge(vp);
vp               1797 nfs/nfs_vnops.c 	vrele(vp);
vp               1836 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1837 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp               1844 nfs/nfs_vnops.c 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp               1852 nfs/nfs_vnops.c 	if (vp->v_type != VDIR)
vp               1859 nfs/nfs_vnops.c 		if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
vp               1885 nfs/nfs_vnops.c 		(void)nfs_fsinfo(nmp, vp, cred, p);
vp               1906 nfs/nfs_vnops.c 			error = nfs_readdirplusrpc(vp, &readdir_uio, cred, 
vp               1912 nfs/nfs_vnops.c 			error = nfs_readdirrpc(vp, &readdir_uio, cred, &eof);
vp               1982 nfs/nfs_vnops.c nfs_readdirrpc(struct vnode *vp, 
vp               1996 nfs/nfs_vnops.c 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp               1997 nfs/nfs_vnops.c 	struct nfsnode *dnp = VTONFS(vp);
vp               2001 nfs/nfs_vnops.c 	int v3 = NFS_ISV3(vp);
vp               2018 nfs/nfs_vnops.c 		nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
vp               2020 nfs/nfs_vnops.c 		nfsm_fhtom(vp, v3);
vp               2038 nfs/nfs_vnops.c 		nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred);
vp               2040 nfs/nfs_vnops.c 			nfsm_postop_attr(vp, attrflag);
vp               2168 nfs/nfs_vnops.c nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 
vp               2183 nfs/nfs_vnops.c 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp               2184 nfs/nfs_vnops.c 	struct nfsnode *dnp = VTONFS(vp), *np;
vp               2195 nfs/nfs_vnops.c 	ndp->ni_dvp = vp;
vp               2207 nfs/nfs_vnops.c 		nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
vp               2209 nfs/nfs_vnops.c 		nfsm_fhtom(vp, 1);
vp               2223 nfs/nfs_vnops.c 		nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred);
vp               2224 nfs/nfs_vnops.c 		nfsm_postop_attr(vp, attrflag);
vp               2302 nfs/nfs_vnops.c 				    VREF(vp);
vp               2303 nfs/nfs_vnops.c 				    newvp = vp;
vp               2306 nfs/nfs_vnops.c 				    error = nfs_nget(vp->v_mount, fhp,
vp               2394 nfs/nfs_vnops.c nfs_sillyrename(dvp, vp, cnp)
vp               2395 nfs/nfs_vnops.c 	struct vnode *dvp, *vp;
vp               2403 nfs/nfs_vnops.c 	np = VTONFS(vp);
vp               2410 nfs/nfs_vnops.c 	if (vp->v_type == VDIR) {
vp               2529 nfs/nfs_vnops.c nfs_commit(vp, offset, cnt, procp)
vp               2530 nfs/nfs_vnops.c 	struct vnode *vp;
vp               2538 nfs/nfs_vnops.c 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp               2546 nfs/nfs_vnops.c 	nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
vp               2547 nfs/nfs_vnops.c 	nfsm_fhtom(vp, 1);
vp               2552 nfs/nfs_vnops.c 	nfsm_request(vp, NFSPROC_COMMIT, procp, VTONFS(vp)->n_wcred);
vp               2553 nfs/nfs_vnops.c 	nfsm_wcc_data(vp, wccflag);
vp               2581 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               2584 nfs/nfs_vnops.c 		*ap->a_vpp = vp;
vp               2586 nfs/nfs_vnops.c 		*ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
vp               2640 nfs/nfs_vnops.c nfs_flush(vp, cred, waitfor, p, commit)
vp               2641 nfs/nfs_vnops.c 	struct vnode *vp;
vp               2647 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp               2651 nfs/nfs_vnops.c 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
vp               2673 nfs/nfs_vnops.c 	if (NFS_ISV3(vp) && commit) {
vp               2675 nfs/nfs_vnops.c 		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
vp               2706 nfs/nfs_vnops.c 		retv = nfs_commit(vp, off, (int)(endoff - off), p);
vp               2708 nfs/nfs_vnops.c 			nfs_clearcommit(vp->v_mount);
vp               2722 nfs/nfs_vnops.c 			    vp->v_numoutput++;
vp               2737 nfs/nfs_vnops.c 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
vp               2777 nfs/nfs_vnops.c 		error = vwaitforio(vp, slpflag, "nfs_fsync", slptimeo);
vp               2789 nfs/nfs_vnops.c 		if (LIST_FIRST(&vp->v_dirtyblkhd) && commit) {
vp               2791 nfs/nfs_vnops.c 			vprint("nfs_fsync: dirty", vp);
vp               2843 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               2844 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp               2849 nfs/nfs_vnops.c 	if (vp->v_type == VFIFO)
vp               2850 nfs/nfs_vnops.c 		fifo_printinfo(vp);
vp               2882 nfs/nfs_vnops.c 	struct vnode *vp;
vp               2888 nfs/nfs_vnops.c 	vp = bp->b_vp;
vp               2889 nfs/nfs_vnops.c 	np = VTONFS(vp);
vp               2921 nfs/nfs_vnops.c 		if (!nfs_in_committed_range(vp, bp)) {
vp               2928 nfs/nfs_vnops.c 			if (nfs_in_tobecommitted_range(vp, bp)) {
vp               2940 nfs/nfs_vnops.c 					nfs_merge_commit_ranges(vp);
vp               2942 nfs/nfs_vnops.c 					nfs_add_committed_range(vp, bp);
vp               2986 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               2994 nfs/nfs_vnops.c 	if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
vp               2995 nfs/nfs_vnops.c 		switch (vp->v_type) {
vp               3005 nfs/nfs_vnops.c 	error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_p);
vp               3072 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               3073 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp               3078 nfs/nfs_vnops.c 		if (vp->v_usecount == 1 &&
vp               3079 nfs/nfs_vnops.c 		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
vp               3085 nfs/nfs_vnops.c 			(void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
vp               3140 nfs/nfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               3141 nfs/nfs_vnops.c 	struct nfsnode *np = VTONFS(vp);
vp               3153 nfs/nfs_vnops.c 		if (vp->v_usecount == 1 &&
vp               3154 nfs/nfs_vnops.c 		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
vp               3160 nfs/nfs_vnops.c 			(void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
vp                113 nfs/nfsmount.h int	nfs_vptofh(struct vnode *vp, struct fid *fhp);
vp                155 nfs/nfsnode.h  #define VTONFS(vp)	((struct nfsnode *)(vp)->v_data)
vp                325 ntfs/ntfs_subr.c 		struct vnode   *vp;
vp                327 ntfs/ntfs_subr.c 		vp = ntmp->ntm_sysvn[NTFS_MFTINO];
vp                328 ntfs/ntfs_subr.c 		error = ntfs_readattr(ntmp, VTONT(vp), NTFS_A_DATA, NULL,
vp                900 ntfs/ntfs_subr.c 	      struct vnode * vp,
vp                909 ntfs/ntfs_subr.c 	struct fnode   *fp = VTOF(vp);
vp               1033 ntfs/ntfs_subr.c 				VREF(vp);
vp               1034 ntfs/ntfs_subr.c 				*vpp = vp;
vp               2089 ntfs/ntfs_subr.c 	struct vnode *vp;
vp               2107 ntfs/ntfs_subr.c 	if ((error = VFS_VGET(mp, NTFS_UPCASEINO, &vp)))
vp               2109 ntfs/ntfs_subr.c 	error = ntfs_readattr(ntmp, VTONT(vp), NTFS_A_DATA, NULL,
vp               2112 ntfs/ntfs_subr.c 	vput(vp);
vp                518 ntfs/ntfs_vfsops.c 	struct vnode *vp;
vp                635 ntfs/ntfs_vfsops.c 		error = VFS_VGET(mp, NTFS_ATTRDEFINO, &vp );
vp                641 ntfs/ntfs_vfsops.c 			error = ntfs_readattr(ntmp, VTONT(vp),
vp                660 ntfs/ntfs_vfsops.c 			error = ntfs_readattr(ntmp, VTONT(vp),
vp                674 ntfs/ntfs_vfsops.c 		vput(vp);
vp                842 ntfs/ntfs_vfsops.c 	struct vnode *vp;
vp                848 ntfs/ntfs_vfsops.c 	vp = ntmp->ntm_sysvn[NTFS_BITMAPINO];
vp                850 ntfs/ntfs_vfsops.c 	bmsize = VTOF(vp)->f_size;
vp                854 ntfs/ntfs_vfsops.c 	error = ntfs_readattr(ntmp, VTONT(vp), NTFS_A_DATA, NULL,
vp                965 ntfs/ntfs_vfsops.c 	struct vnode *vp,
vp                972 ntfs/ntfs_vfsops.c 	ddprintf(("ntfs_fhtovp(): %s: %p\n", vp->v_mount->mnt_stat.f_mntonname,
vp                973 ntfs/ntfs_vfsops.c 		vp));
vp                975 ntfs/ntfs_vfsops.c 	fn = VTOF(vp);
vp                976 ntfs/ntfs_vfsops.c 	ntp = VTONT(vp);
vp               1002 ntfs/ntfs_vfsops.c 	struct vnode *vp;
vp               1095 ntfs/ntfs_vfsops.c 	error = getnewvnode(VT_NTFS, ntmp->ntm_mountp, ntfs_vnodeop_p, &vp);
vp               1105 ntfs/ntfs_vfsops.c 	dprintf(("ntfs_vget: vnode: %p for ntnode: %d\n", vp,ino));
vp               1110 ntfs/ntfs_vfsops.c 	fp->f_vp = vp;
vp               1111 ntfs/ntfs_vfsops.c 	vp->v_data = fp;
vp               1112 ntfs/ntfs_vfsops.c 	vp->v_type = f_type;
vp               1115 ntfs/ntfs_vfsops.c 		vp->v_flag |= VROOT;
vp               1118 ntfs/ntfs_vfsops.c 		error = VN_LOCK(vp, lkflags, p);
vp               1120 ntfs/ntfs_vfsops.c 			vput(vp);
vp               1126 ntfs/ntfs_vfsops.c 	genfs_node_init(vp, &ntfs_genfsops);
vp               1128 ntfs/ntfs_vfsops.c 	*vpp = vp;
vp                151 ntfs/ntfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                152 ntfs/ntfs_vnops.c 	struct fnode *fp = VTOF(vp);
vp                198 ntfs/ntfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                199 ntfs/ntfs_vnops.c 	struct fnode *fp = VTOF(vp);
vp                224 ntfs/ntfs_vnops.c 	vap->va_type = vp->v_type;
vp                237 ntfs/ntfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                242 ntfs/ntfs_vnops.c 	struct ntnode *ip = VTONT(vp);
vp                245 ntfs/ntfs_vnops.c 	dprintf(("ntfs_inactive: vnode: %p, ntnode: %d\n", vp, ip->i_number));
vp                248 ntfs/ntfs_vnops.c 	if (ntfs_prtactive && vp->v_usecount != 0)
vp                249 ntfs/ntfs_vnops.c 		vprint("ntfs_inactive: pushing active", vp);
vp                252 ntfs/ntfs_vnops.c 	VOP__UNLOCK(vp, 0, p);
vp                267 ntfs/ntfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                268 ntfs/ntfs_vnops.c 	struct fnode *fp = VTOF(vp);
vp                275 ntfs/ntfs_vnops.c 	dprintf(("ntfs_reclaim: vnode: %p, ntnode: %d\n", vp, ip->i_number));
vp                278 ntfs/ntfs_vnops.c 	if (ntfs_prtactive && vp->v_usecount != 0)
vp                279 ntfs/ntfs_vnops.c 		vprint("ntfs_reclaim: pushing active", vp);
vp                290 ntfs/ntfs_vnops.c 	cache_purge(vp);
vp                298 ntfs/ntfs_vnops.c 	vp->v_data = NULL;
vp                324 ntfs/ntfs_vnops.c 	struct vnode *vp = bp->b_vp;
vp                325 ntfs/ntfs_vnops.c 	struct fnode *fp = VTOF(vp);
vp                400 ntfs/ntfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                401 ntfs/ntfs_vnops.c 	struct fnode *fp = VTOF(vp);
vp                435 ntfs/ntfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                436 ntfs/ntfs_vnops.c 	struct ntnode *ip = VTONT(vp);
vp                450 ntfs/ntfs_vnops.c 		switch ((int)vp->v_type) {
vp                454 ntfs/ntfs_vnops.c 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                510 ntfs/ntfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                511 ntfs/ntfs_vnops.c 	struct ntnode *ip = VTONT(vp);
vp                534 ntfs/ntfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                535 ntfs/ntfs_vnops.c 	struct ntnode *ip = VTONT(vp);
vp                547 ntfs/ntfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                548 ntfs/ntfs_vnops.c 	struct fnode *fp = VTOF(vp);
vp                189 sys/exec.h     		    u_long len, u_long addr, struct vnode *vp, u_long offset,
vp                191 sys/exec.h     #define	NEW_VMCMD(evsp,proc,len,addr,vp,offset,prot) \
vp                192 sys/exec.h     	new_vmcmd(evsp,proc,len,addr,vp,offset,prot, 0);
vp                193 sys/exec.h     #define NEW_VMCMD2(evsp,proc,len,addr,vp,offset,prot,flags) \
vp                194 sys/exec.h     	new_vmcmd(evsp,proc,len,addr,vp,offset,prot,flags)
vp                196 sys/exec.h     #define NEW_VMCMD(evsp,proc,len,addr,vp,offset,prot) \
vp                197 sys/exec.h     	NEW_VMCMD2(evsp,proc,len,addr,vp,offset,prot,0)
vp                198 sys/exec.h     #define	NEW_VMCMD2(evsp,proc,len,addr,vp,offset,prot,flags) do { \
vp                206 sys/exec.h     	if ((vcp->ev_vp = (vp)) != NULLVP) \
vp                207 sys/exec.h     		VREF(vp); \
vp                488 sys/mount.h    	int	(*vfs_vptofh)(struct vnode *vp, struct fid *fhp);
vp                232 sys/vnode.h    #define	VREF(vp)	vref(vp)		/* increase reference */
vp                234 sys/vnode.h    #define	VN_KNOTE(vp, b)					\
vp                235 sys/vnode.h    	KNOTE(&vp->v_selectinfo.si_note, (b))
vp                 65 ufs/ext2fs/ext2fs_balloc.c 	struct vnode *vp = ITOV(ip);
vp                 85 ufs/ext2fs/ext2fs_balloc.c 			error = bread(vp, bn, fs->e2fs_bsize, NOCRED, &bp);
vp                106 ufs/ext2fs/ext2fs_balloc.c 		bp = getblk(vp, bn, fs->e2fs_bsize, 0, 0);
vp                117 ufs/ext2fs/ext2fs_balloc.c 	if ((error = ufs_getlbns(vp, bn, indirs, &num)) != 0)
vp                138 ufs/ext2fs/ext2fs_balloc.c 		bp = getblk(vp, indirs[1].in_lbn, fs->e2fs_bsize, 0, 0);
vp                156 ufs/ext2fs/ext2fs_balloc.c 		error = bread(vp,
vp                180 ufs/ext2fs/ext2fs_balloc.c 		nbp = getblk(vp, indirs[i].in_lbn, fs->e2fs_bsize, 0, 0);
vp                228 ufs/ext2fs/ext2fs_balloc.c 		nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0);
vp                237 ufs/ext2fs/ext2fs_balloc.c 		error = bread(vp, lbn, (int)fs->e2fs_bsize, NOCRED, &nbp);
vp                243 ufs/ext2fs/ext2fs_balloc.c 		nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0);
vp                264 ufs/ext2fs/ext2fs_balloc.c 			r = bread(vp, indirs[unwindidx].in_lbn, 
vp                279 ufs/ext2fs/ext2fs_balloc.c 			bp = getblk(vp, indirs[i].in_lbn, (int)fs->e2fs_bsize,
vp                 99 ufs/ext2fs/ext2fs_bmap.c ext2fs_bmaparray(struct vnode *vp, int32_t bn, daddr64_t *bnp,
vp                112 ufs/ext2fs/ext2fs_bmap.c 	ip = VTOI(vp);
vp                113 ufs/ext2fs/ext2fs_bmap.c 	mp = vp->v_mount;
vp                134 ufs/ext2fs/ext2fs_bmap.c 	if ((error = ufs_getlbns(vp, bn, xap, nump)) != 0)
vp                154 ufs/ext2fs/ext2fs_bmap.c 	devvp = VFSTOUFS(vp->v_mount)->um_devvp;
vp                170 ufs/ext2fs/ext2fs_bmap.c 		if ((daddr == 0 && !incore(vp, metalbn)) || metalbn == bn)
vp                180 ufs/ext2fs/ext2fs_bmap.c 		bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0);
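
The ext2fs_balloc.c and ext2fs_bmap.c entries above walk indirect blocks through the buffer cache with bread() and getblk(). A minimal sketch of the read side, assuming a referenced vnode and the filesystem block size; example_read_meta() is an illustrative name, not a routine in the tree.

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/vnode.h>

/* Read one metadata block of a file vnode and release it again. */
int
example_read_meta(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);	/* same error handling as the entries above */
		return (error);
	}
	/* ... interpret bp->b_data, e.g. as an array of block pointers ... */
	brelse(bp);		/* release unmodified */
	return (0);
}
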
vp                152 ufs/ext2fs/ext2fs_extern.h #define IS_EXT2_VNODE(vp)   (vp->v_tag == VT_EXT2FS)
vp                111 ufs/ext2fs/ext2fs_inode.c 	struct vnode *vp = ap->a_vp;
vp                112 ufs/ext2fs/ext2fs_inode.c 	struct inode *ip = VTOI(vp);
vp                119 ufs/ext2fs/ext2fs_inode.c 	if (prtactive && vp->v_usecount != 0)
vp                120 ufs/ext2fs/ext2fs_inode.c 		vprint("ext2fs_inactive: pushing active", vp);
vp                128 ufs/ext2fs/ext2fs_inode.c 	if (ip->i_e2fs_nlink == 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
vp                141 ufs/ext2fs/ext2fs_inode.c 	VOP_UNLOCK(vp, 0, p);
vp                147 ufs/ext2fs/ext2fs_inode.c 		vrecycle(vp, p);
vp                439 ufs/ext2fs/ext2fs_inode.c 	struct vnode *vp;
vp                465 ufs/ext2fs/ext2fs_inode.c 	vp = ITOV(ip);
vp                466 ufs/ext2fs/ext2fs_inode.c 	bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0);
vp                133 ufs/ext2fs/ext2fs_lookup.c 	struct vnode *vp = ap->a_vp;
vp                134 ufs/ext2fs/ext2fs_lookup.c 	struct m_ext2fs *fs = VTOI(vp)->i_e2fs;
vp                146 ufs/ext2fs/ext2fs_lookup.c 	if (vp->v_type != VDIR)
vp               1015 ufs/ext2fs/ext2fs_lookup.c 	struct vnode *vp;
vp               1020 ufs/ext2fs/ext2fs_lookup.c 	vp = ITOV(target);
vp               1031 ufs/ext2fs/ext2fs_lookup.c 		if (vp->v_type != VDIR) {
vp               1035 ufs/ext2fs/ext2fs_lookup.c 		error = vn_rdwr(UIO_READ, vp, (caddr_t)&dirbuf,
vp               1055 ufs/ext2fs/ext2fs_lookup.c 		vput(vp);
vp               1056 ufs/ext2fs/ext2fs_lookup.c 		error = VFS_VGET(vp->v_mount, ino, &vp);
vp               1058 ufs/ext2fs/ext2fs_lookup.c 			vp = NULL;
vp               1068 ufs/ext2fs/ext2fs_lookup.c 	if (vp != NULL)
vp               1069 ufs/ext2fs/ext2fs_lookup.c 		vput(vp);
vp                 68 ufs/ext2fs/ext2fs_readwrite.c 	struct vnode *vp;
vp                 78 ufs/ext2fs/ext2fs_readwrite.c 	vp = ap->a_vp;
vp                 79 ufs/ext2fs/ext2fs_readwrite.c 	ip = VTOI(vp);
vp                 86 ufs/ext2fs/ext2fs_readwrite.c 	if (vp->v_type == VLNK) {
vp                 87 ufs/ext2fs/ext2fs_readwrite.c 		if ((int)ext2fs_size(ip) < vp->v_mount->mnt_maxsymlinklen ||
vp                 88 ufs/ext2fs/ext2fs_readwrite.c 			(vp->v_mount->mnt_maxsymlinklen == 0 &&
vp                 91 ufs/ext2fs/ext2fs_readwrite.c 	} else if (vp->v_type != VREG && vp->v_type != VDIR)
vp                 92 ufs/ext2fs/ext2fs_readwrite.c 		panic("%s: type %d", "ext2fs_read", vp->v_type);
vp                115 ufs/ext2fs/ext2fs_readwrite.c 			error = bread(vp, lbn, size, NOCRED, &bp);
vp                118 ufs/ext2fs/ext2fs_readwrite.c 			error = breadn(vp, lbn,
vp                121 ufs/ext2fs/ext2fs_readwrite.c 			error = bread(vp, lbn, size, NOCRED, &bp);
vp                147 ufs/ext2fs/ext2fs_readwrite.c 	if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
vp                160 ufs/ext2fs/ext2fs_readwrite.c 	struct vnode *vp;
vp                172 ufs/ext2fs/ext2fs_readwrite.c 	vp = ap->a_vp;
vp                173 ufs/ext2fs/ext2fs_readwrite.c 	ip = VTOI(vp);
vp                187 ufs/ext2fs/ext2fs_readwrite.c 	switch (vp->v_type) {
vp                215 ufs/ext2fs/ext2fs_readwrite.c 	if (vp->v_type == VREG && p &&
vp                245 ufs/ext2fs/ext2fs_readwrite.c 			uvm_vnp_setsize(vp, ip->i_e2fs_size);
vp                247 ufs/ext2fs/ext2fs_readwrite.c 		uvm_vnp_uncache(vp);
vp                 82 ufs/ext2fs/ext2fs_subr.c 	struct vnode *vp;
vp                 88 ufs/ext2fs/ext2fs_subr.c 	vp = ITOV(ip);
vp                 93 ufs/ext2fs/ext2fs_subr.c 	if ((error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp)) != 0) {
vp                110 ufs/ext2fs/ext2fs_subr.c 	struct vnode *vp;
vp                118 ufs/ext2fs/ext2fs_subr.c 		if (VOP_BMAP(ep->b_vp, (daddr_t)0, &vp, (daddr_t)0, NULL))
vp                120 ufs/ext2fs/ext2fs_subr.c 		if (vp != ip->i_devvp)
vp                126 ufs/ext2fs/ext2fs_subr.c 		vprint("Disk overlap", vp);
vp                143 ufs/ext2fs/ext2fs_subr.c 	struct vnode *vp, *nvp;
vp                146 ufs/ext2fs/ext2fs_subr.c 	vp = *vpp;
vp                147 ufs/ext2fs/ext2fs_subr.c 	ip = VTOI(vp);
vp                148 ufs/ext2fs/ext2fs_subr.c 	vp->v_type = IFTOVT(ip->i_e2fs_mode);
vp                150 ufs/ext2fs/ext2fs_subr.c 	switch(vp->v_type) {
vp                153 ufs/ext2fs/ext2fs_subr.c 		vp->v_op = specops;
vp                155 ufs/ext2fs/ext2fs_subr.c 		nvp = checkalias(vp, fs2h32(ip->i_e2din->e2di_rdev), mp);
vp                162 ufs/ext2fs/ext2fs_subr.c 			nvp->v_data = vp->v_data;
vp                163 ufs/ext2fs/ext2fs_subr.c 			vp->v_data = NULL;
vp                164 ufs/ext2fs/ext2fs_subr.c 			vp->v_op = spec_vnodeop_p;
vp                166 ufs/ext2fs/ext2fs_subr.c 			vp->v_flag &= ~VLOCKSWORK;
vp                168 ufs/ext2fs/ext2fs_subr.c 			vrele(vp);
vp                169 ufs/ext2fs/ext2fs_subr.c 			vgone(vp);
vp                171 ufs/ext2fs/ext2fs_subr.c 			vp = nvp;
vp                172 ufs/ext2fs/ext2fs_subr.c 			ip->i_vnode = vp;
vp                179 ufs/ext2fs/ext2fs_subr.c 		vp->v_op = fifoops;
vp                191 ufs/ext2fs/ext2fs_subr.c 		vp->v_flag |= VROOT;
vp                198 ufs/ext2fs/ext2fs_subr.c 	*vpp = vp;
vp                330 ufs/ext2fs/ext2fs_vfsops.c ext2fs_reload_vnode(struct vnode *vp, void *args)
vp                341 ufs/ext2fs/ext2fs_vfsops.c 	if (vp->v_usecount == 0) {
vp                342 ufs/ext2fs/ext2fs_vfsops.c 		vgonel(vp, era->p);
vp                349 ufs/ext2fs/ext2fs_vfsops.c 	if (vget(vp, LK_EXCLUSIVE, era->p))
vp                352 ufs/ext2fs/ext2fs_vfsops.c 	if (vinvalbuf(vp, 0, era->cred, era->p, 0, 0))
vp                357 ufs/ext2fs/ext2fs_vfsops.c 	ip = VTOI(vp);
vp                362 ufs/ext2fs/ext2fs_vfsops.c 		vput(vp);
vp                369 ufs/ext2fs/ext2fs_vfsops.c 	vput(vp);
vp                715 ufs/ext2fs/ext2fs_vfsops.c int ext2fs_sync_vnode(struct vnode *vp, void *);
vp                725 ufs/ext2fs/ext2fs_vfsops.c ext2fs_sync_vnode(struct vnode *vp, void *args) 
vp                731 ufs/ext2fs/ext2fs_vfsops.c 	ip = VTOI(vp);
vp                732 ufs/ext2fs/ext2fs_vfsops.c 	if (vp->v_type == VNON || 
vp                734 ufs/ext2fs/ext2fs_vfsops.c 		LIST_EMPTY(&vp->v_dirtyblkhd)) ||
vp                739 ufs/ext2fs/ext2fs_vfsops.c 	if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT, esa->p))
vp                742 ufs/ext2fs/ext2fs_vfsops.c 	if ((error = VOP_FSYNC(vp, esa->cred, esa->waitfor, esa->p)) != 0)
vp                744 ufs/ext2fs/ext2fs_vfsops.c 	vput(vp);
vp                815 ufs/ext2fs/ext2fs_vfsops.c 	struct vnode *vp;
vp                827 ufs/ext2fs/ext2fs_vfsops.c 	if ((error = getnewvnode(VT_EXT2FS, mp, ext2fs_vnodeop_p, &vp)) != 0) {
vp                835 ufs/ext2fs/ext2fs_vfsops.c 	vp->v_data = ip;
vp                836 ufs/ext2fs/ext2fs_vfsops.c 	ip->i_vnode = vp;
vp                853 ufs/ext2fs/ext2fs_vfsops.c 		vrele(vp);
vp                871 ufs/ext2fs/ext2fs_vfsops.c 		vput(vp);
vp                905 ufs/ext2fs/ext2fs_vfsops.c 	error = ext2fs_vinit(mp, ext2fs_specop_p, EXT2FS_FIFOOPS, &vp);
vp                907 ufs/ext2fs/ext2fs_vfsops.c 		vput(vp);
vp                924 ufs/ext2fs/ext2fs_vfsops.c 		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
vp                928 ufs/ext2fs/ext2fs_vfsops.c 	*vpp = vp;
vp                977 ufs/ext2fs/ext2fs_vfsops.c ext2fs_vptofh(struct vnode *vp, struct fid *fhp)
vp                982 ufs/ext2fs/ext2fs_vfsops.c 	ip = VTOI(vp);
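
ext2fs_sync_vnode() above (and ffs_sync_vnode() further down) show the shape of the per-vnode callback used while syncing a mount: skip vnodes with nothing to write, try to grab the vnode without sleeping, flush it, release it. A condensed sketch of that shape; example_sync_vnode() is an illustrative name, and the real callbacks test more conditions (the inode's dirty flags) than the listing shows.

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/* Flush one vnode during a filesystem sync pass (condensed). */
int
example_sync_vnode(struct vnode *vp, struct ucred *cred, int waitfor,
    struct proc *p)
{
	int error;

	if (vp->v_type == VNON || LIST_EMPTY(&vp->v_dirtyblkhd))
		return (0);			/* nothing to push out */
	if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT, p))
		return (0);			/* busy or being recycled; skip */
	error = VOP_FSYNC(vp, cred, waitfor, p);
	vput(vp);				/* unlock and drop the reference */
	return (error);
}

The ffs variant releases with VOP_UNLOCK() plus vrele() instead of vput(); the two are equivalent here.
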
vp                150 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                151 ufs/ext2fs/ext2fs_vnops.c 	struct inode *ip = VTOI(vp);
vp                167 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                168 ufs/ext2fs/ext2fs_vnops.c 	struct inode *ip = VTOI(vp);
vp                200 ufs/ext2fs/ext2fs_vnops.c 	if (vp->v_type == VBLK)
vp                202 ufs/ext2fs/ext2fs_vnops.c 	else if (vp->v_type == VCHR)
vp                205 ufs/ext2fs/ext2fs_vnops.c 		vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
vp                207 ufs/ext2fs/ext2fs_vnops.c 	vap->va_type = vp->v_type;
vp                220 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                221 ufs/ext2fs/ext2fs_vnops.c 	struct inode *ip = VTOI(vp);
vp                236 ufs/ext2fs/ext2fs_vnops.c 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                269 ufs/ext2fs/ext2fs_vnops.c 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                271 ufs/ext2fs/ext2fs_vnops.c 		error = ext2fs_chown(vp, vap->va_uid, vap->va_gid, cred, p);
vp                281 ufs/ext2fs/ext2fs_vnops.c 		switch (vp->v_type) {
vp                286 ufs/ext2fs/ext2fs_vnops.c 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                296 ufs/ext2fs/ext2fs_vnops.c 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                301 ufs/ext2fs/ext2fs_vnops.c 			(error = VOP_ACCESS(vp, VWRITE, cred, p))))
vp                304 ufs/ext2fs/ext2fs_vnops.c 			if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
vp                314 ufs/ext2fs/ext2fs_vnops.c 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                316 ufs/ext2fs/ext2fs_vnops.c 		error = ext2fs_chmod(vp, (int)vap->va_mode, cred, p);
vp                326 ufs/ext2fs/ext2fs_vnops.c ext2fs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct proc *p)
vp                328 ufs/ext2fs/ext2fs_vnops.c 	struct inode *ip = VTOI(vp);
vp                334 ufs/ext2fs/ext2fs_vnops.c 		if (vp->v_type != VDIR && (mode & S_ISTXT))
vp                342 ufs/ext2fs/ext2fs_vnops.c 	if ((vp->v_flag & VTEXT) && (ip->i_e2fs_mode & S_ISTXT) == 0)
vp                343 ufs/ext2fs/ext2fs_vnops.c 		(void) uvm_vnp_uncache(vp);
vp                352 ufs/ext2fs/ext2fs_vnops.c ext2fs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred, struct proc *p)
vp                354 ufs/ext2fs/ext2fs_vnops.c 	struct inode *ip = VTOI(vp);
vp                391 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                395 ufs/ext2fs/ext2fs_vnops.c 	ip = VTOI(vp);
vp                396 ufs/ext2fs/ext2fs_vnops.c 	if (vp->v_type == VDIR ||
vp                408 ufs/ext2fs/ext2fs_vnops.c 	if (dvp == vp)
vp                409 ufs/ext2fs/ext2fs_vnops.c 		vrele(vp);
vp                411 ufs/ext2fs/ext2fs_vnops.c 		vput(vp);
vp                424 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                434 ufs/ext2fs/ext2fs_vnops.c 	if (vp->v_type == VDIR) {
vp                439 ufs/ext2fs/ext2fs_vnops.c 	if (dvp->v_mount != vp->v_mount) {
vp                444 ufs/ext2fs/ext2fs_vnops.c 	if (dvp != vp && (error = vn_lock(vp, LK_EXCLUSIVE, p))) {
vp                448 ufs/ext2fs/ext2fs_vnops.c 	ip = VTOI(vp);
vp                470 ufs/ext2fs/ext2fs_vnops.c 	if (dvp != vp)
vp                471 ufs/ext2fs/ext2fs_vnops.c 		VOP_UNLOCK(vp, 0, p);
vp               1001 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1007 ufs/ext2fs/ext2fs_vnops.c 	ip = VTOI(vp);
vp               1014 ufs/ext2fs/ext2fs_vnops.c 		vput(vp);
vp               1065 ufs/ext2fs/ext2fs_vnops.c 	vput(vp);
vp               1076 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp, **vpp = ap->a_vpp;
vp               1084 ufs/ext2fs/ext2fs_vnops.c 	vp = *vpp;
vp               1086 ufs/ext2fs/ext2fs_vnops.c 	if (len < vp->v_mount->mnt_maxsymlinklen) {
vp               1087 ufs/ext2fs/ext2fs_vnops.c 		ip = VTOI(vp);
vp               1094 ufs/ext2fs/ext2fs_vnops.c 		error = vn_rdwr(UIO_WRITE, vp, ap->a_target, len, (off_t)0,
vp               1098 ufs/ext2fs/ext2fs_vnops.c 	vput(vp);
vp               1109 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1110 ufs/ext2fs/ext2fs_vnops.c 	struct inode *ip = VTOI(vp);
vp               1114 ufs/ext2fs/ext2fs_vnops.c 	if (isize < vp->v_mount->mnt_maxsymlinklen ||
vp               1115 ufs/ext2fs/ext2fs_vnops.c 	    (vp->v_mount->mnt_maxsymlinklen == 0 && ip->i_e2fs_nblock == 0)) {
vp               1119 ufs/ext2fs/ext2fs_vnops.c 	return (VOP_READ(vp, ap->a_uio, 0, ap->a_cred));
vp               1209 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1211 ufs/ext2fs/ext2fs_vnops.c 	vflushbuf(vp, ap->a_waitfor == MNT_WAIT);
vp               1223 ufs/ext2fs/ext2fs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1228 ufs/ext2fs/ext2fs_vnops.c 	if (prtactive && vp->v_usecount != 0) 
vp               1229 ufs/ext2fs/ext2fs_vnops.c 		vprint("ext2fs_reclaim: pushing active", vp);
vp               1235 ufs/ext2fs/ext2fs_vnops.c 	ip = VTOI(vp);
vp               1241 ufs/ext2fs/ext2fs_vnops.c 	cache_purge(vp);
vp               1250 ufs/ext2fs/ext2fs_vnops.c 	vp->v_data = NULL;
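
The remove/rmdir entries above end with a small but easy-to-miss idiom: when the target vnode is the directory itself (dvp == vp), it carries only one lock, so the second release must drop a reference without unlocking. A sketch of just that cleanup step; example_release_target() is an illustrative name, and the directory vnode is released separately by the caller.

#include <sys/param.h>
#include <sys/vnode.h>

/* Release the target vnode of a remove; dvp itself is released elsewhere. */
void
example_release_target(struct vnode *dvp, struct vnode *vp)
{
	if (dvp == vp)
		vrele(vp);	/* same vnode as dvp: drop the reference only */
	else
		vput(vp);	/* separately locked: unlock and release */
}
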
vp                372 ufs/ffs/ffs_alloc.c 	struct vnode *vp;
vp                380 ufs/ffs/ffs_alloc.c 	vp = ap->a_vp;
vp                381 ufs/ffs/ffs_alloc.c 	ip = VTOI(vp);
vp                414 ufs/ffs/ffs_alloc.c 	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
vp                415 ufs/ffs/ffs_alloc.c 	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
vp                425 ufs/ffs/ffs_alloc.c 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
vp                448 ufs/ffs/ffs_alloc.c 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
vp                487 ufs/ffs/ffs_alloc.c 		if (DOINGSOFTDEP(vp)) {
vp                539 ufs/ffs/ffs_alloc.c 		if (!DOINGSOFTDEP(vp))
vp                575 ufs/ffs/ffs_alloc.c 	struct vnode *vp;
vp                585 ufs/ffs/ffs_alloc.c 	vp = ap->a_vp;
vp                586 ufs/ffs/ffs_alloc.c 	ip = VTOI(vp);
vp                624 ufs/ffs/ffs_alloc.c 	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
vp                625 ufs/ffs/ffs_alloc.c 	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
vp                636 ufs/ffs/ffs_alloc.c 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
vp                655 ufs/ffs/ffs_alloc.c 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
vp                703 ufs/ffs/ffs_alloc.c 		if (DOINGSOFTDEP(vp)) {
vp                756 ufs/ffs/ffs_alloc.c 		if (!DOINGSOFTDEP(vp))
vp                 80 ufs/ffs/ffs_balloc.c 	struct vnode *vp;
vp                 88 ufs/ffs/ffs_balloc.c 	vp = ITOV(ip);
vp                114 ufs/ffs/ffs_balloc.c 			if (DOINGSOFTDEP(vp))
vp                120 ufs/ffs/ffs_balloc.c 			uvm_vnp_setsize(vp, ip->i_ffs1_size);
vp                145 ufs/ffs/ffs_balloc.c 				error = bread(vp, lbn, fs->fs_bsize, NOCRED,
vp                167 ufs/ffs/ffs_balloc.c 					error = bread(vp, lbn, fs->fs_bsize,
vp                187 ufs/ffs/ffs_balloc.c 				if (DOINGSOFTDEP(vp))
vp                208 ufs/ffs/ffs_balloc.c 				*bpp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
vp                215 ufs/ffs/ffs_balloc.c 			if (DOINGSOFTDEP(vp))
vp                228 ufs/ffs/ffs_balloc.c 	if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
vp                251 ufs/ffs/ffs_balloc.c 		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
vp                255 ufs/ffs/ffs_balloc.c 		if (DOINGSOFTDEP(vp)) {
vp                276 ufs/ffs/ffs_balloc.c 		error = bread(vp,
vp                301 ufs/ffs/ffs_balloc.c 		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
vp                305 ufs/ffs/ffs_balloc.c 		if (DOINGSOFTDEP(vp)) {
vp                346 ufs/ffs/ffs_balloc.c 			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
vp                352 ufs/ffs/ffs_balloc.c 		if (DOINGSOFTDEP(vp))
vp                370 ufs/ffs/ffs_balloc.c 			error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
vp                376 ufs/ffs/ffs_balloc.c 			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
vp                400 ufs/ffs/ffs_balloc.c 	VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
vp                410 ufs/ffs/ffs_balloc.c 		r = bread(vp, indirs[unwindidx].in_lbn, 
vp                431 ufs/ffs/ffs_balloc.c 	VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
vp                447 ufs/ffs/ffs_balloc.c 	struct vnode *vp;
vp                450 ufs/ffs/ffs_balloc.c 	vp = ITOV(ip);
vp                483 ufs/ffs/ffs_balloc.c 			if (DOINGSOFTDEP(vp))
vp                489 ufs/ffs/ffs_balloc.c 			uvm_vnp_setsize(vp, ip->i_ffs2_size);
vp                516 ufs/ffs/ffs_balloc.c 				error = bread(vp, lbn, fs->fs_bsize, NOCRED,
vp                540 ufs/ffs/ffs_balloc.c 					error = bread(vp, lbn, fs->fs_bsize,
vp                562 ufs/ffs/ffs_balloc.c 				if (DOINGSOFTDEP(vp))
vp                583 ufs/ffs/ffs_balloc.c 				bp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
vp                592 ufs/ffs/ffs_balloc.c 			if (DOINGSOFTDEP(vp))
vp                607 ufs/ffs/ffs_balloc.c 	error = ufs_getlbns(vp, lbn, indirs, &num);
vp                633 ufs/ffs/ffs_balloc.c 		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
vp                637 ufs/ffs/ffs_balloc.c 		if (DOINGSOFTDEP(vp)) {
vp                661 ufs/ffs/ffs_balloc.c 		error = bread(vp, indirs[i].in_lbn, (int) fs->fs_bsize,
vp                693 ufs/ffs/ffs_balloc.c 		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
vp                697 ufs/ffs/ffs_balloc.c 		if (DOINGSOFTDEP(vp)) {
vp                745 ufs/ffs/ffs_balloc.c 			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
vp                752 ufs/ffs/ffs_balloc.c 		if (DOINGSOFTDEP(vp))
vp                777 ufs/ffs/ffs_balloc.c 			error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
vp                783 ufs/ffs/ffs_balloc.c 			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
vp                810 ufs/ffs/ffs_balloc.c 	VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
vp                822 ufs/ffs/ffs_balloc.c 			bp = getblk(vp, indirs[i].in_lbn, (int) fs->fs_bsize,
vp                842 ufs/ffs/ffs_balloc.c 		if (DOINGSOFTDEP(vp) && unwindidx == 0) {
vp                854 ufs/ffs/ffs_balloc.c 			if (DOINGSOFTDEP(vp))
vp                857 ufs/ffs/ffs_balloc.c 			r = bread(vp, indirs[unwindidx].in_lbn,
vp                868 ufs/ffs/ffs_balloc.c 			bp = getblk(vp, indirs[i].in_lbn, (int)fs->fs_bsize, 0,
vp                889 ufs/ffs/ffs_balloc.c 	VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
vp                 71 ufs/ffs/ffs_inode.c 	struct vnode *vp;
vp                 77 ufs/ffs/ffs_inode.c 	vp = ITOV(ip);
vp                 78 ufs/ffs/ffs_inode.c 	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
vp                 84 ufs/ffs/ffs_inode.c 	if ((vp->v_mount->mnt_flag & MNT_NOATIME) &&
vp                131 ufs/ffs/ffs_inode.c 	if (DOINGSOFTDEP(vp))
vp                145 ufs/ffs/ffs_inode.c 	if (waitfor && !DOINGASYNC(vp)) {
vp                474 ufs/ffs/ffs_inode.c 	struct vnode *vp;
vp                505 ufs/ffs/ffs_inode.c 	vp = ITOV(ip);
vp                506 ufs/ffs/ffs_inode.c 	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
vp                536 ufs/ffs/ffs_inode.c 		if (!DOINGASYNC(vp)) {
vp                739 ufs/ffs/ffs_softdep.c 	struct vnode *vp;
vp                753 ufs/ffs/ffs_softdep.c 		vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
vp                755 ufs/ffs/ffs_softdep.c 		if (vp == NULL || !VOP_ISLOCKED(vp))
vp               1923 ufs/ffs/ffs_softdep.c 	struct vnode *vp;
vp               2012 ufs/ffs/ffs_softdep.c 	vp = ITOV(ip);
vp               2014 ufs/ffs/ffs_softdep.c 	drain_output(vp, 1);
vp               2015 ufs/ffs/ffs_softdep.c 	while ((bp = LIST_FIRST(&vp->v_dirtyblkhd))) {
vp               3141 ufs/ffs/ffs_softdep.c 	struct vnode *vp;
vp               3146 ufs/ffs/ffs_softdep.c 	if ((error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, &vp)) != 0) {
vp               3150 ufs/ffs/ffs_softdep.c 	ip = VTOI(vp);
vp               3169 ufs/ffs/ffs_softdep.c 		vput(vp);
vp               3195 ufs/ffs/ffs_softdep.c 		vput(vp);
vp               3213 ufs/ffs/ffs_softdep.c 		vput(vp);
vp               3220 ufs/ffs/ffs_softdep.c 	UFS_UPDATE(VTOI(vp), 0);
vp               3221 ufs/ffs/ffs_softdep.c 	vput(vp);
vp               3243 ufs/ffs/ffs_softdep.c 	struct vnode vp;
vp               3261 ufs/ffs/ffs_softdep.c 	tip.i_vnode = &vp;
vp               3262 ufs/ffs/ffs_softdep.c 	vp.v_data = &tip;
vp               4546 ufs/ffs/ffs_softdep.c softdep_fsync(vp)
vp               4547 ufs/ffs/ffs_softdep.c 	struct vnode *vp;	/* the "in_core" copy of the inode */
vp               4564 ufs/ffs/ffs_softdep.c 	ip = VTOI(vp);
vp               4615 ufs/ffs/ffs_softdep.c 		if (vp->v_flag & VXLOCK)
vp               4625 ufs/ffs/ffs_softdep.c 		VOP_UNLOCK(vp, 0, p);
vp               4627 ufs/ffs/ffs_softdep.c 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
vp               4679 ufs/ffs/ffs_softdep.c softdep_fsync_mountdev(vp, waitfor)
vp               4680 ufs/ffs/ffs_softdep.c 	struct vnode *vp;
vp               4686 ufs/ffs/ffs_softdep.c 	if (!vn_isdisk(vp, NULL))
vp               4689 ufs/ffs/ffs_softdep.c 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
vp               4719 ufs/ffs/ffs_softdep.c 		nbp = LIST_FIRST(&vp->v_dirtyblkhd);
vp               4722 ufs/ffs/ffs_softdep.c 		drain_output(vp, 1);
vp               4741 ufs/ffs/ffs_softdep.c 	struct vnode *vp = ap->a_vp;
vp               4753 ufs/ffs/ffs_softdep.c 	if (!vn_isdisk(vp, NULL)) {
vp               4754 ufs/ffs/ffs_softdep.c 		if (!DOINGSOFTDEP(vp))
vp               4757 ufs/ffs/ffs_softdep.c 		if (vp->v_specmountpoint == NULL ||
vp               4758 ufs/ffs/ffs_softdep.c 		    (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP) == 0)
vp               4764 ufs/ffs/ffs_softdep.c 	if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) {
vp               4791 ufs/ffs/ffs_softdep.c 	drain_output(vp, 1);
vp               4792 ufs/ffs/ffs_softdep.c 	bp = LIST_FIRST(&vp->v_dirtyblkhd);
vp               4880 ufs/ffs/ffs_softdep.c 				    flush_pagedep_deps(vp, pagedep->pd_mnt,
vp               4966 ufs/ffs/ffs_softdep.c 	drain_output(vp, 1);
vp               4967 ufs/ffs/ffs_softdep.c 	if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
vp               4980 ufs/ffs/ffs_softdep.c 	if (vn_isdisk(vp, NULL) &&
vp               4981 ufs/ffs/ffs_softdep.c 	    vp->v_specmountpoint && !VOP_ISLOCKED(vp) &&
vp               4982 ufs/ffs/ffs_softdep.c 	    (error = VFS_SYNC(vp->v_specmountpoint, MNT_WAIT, ap->a_cred,
vp               5094 ufs/ffs/ffs_softdep.c 	struct vnode *vp;
vp               5137 ufs/ffs/ffs_softdep.c 			if ((error = VFS_VGET(mp, inum, &vp)) != 0)
vp               5139 ufs/ffs/ffs_softdep.c 			if ((error=VOP_FSYNC(vp, p->p_ucred, MNT_NOWAIT, p)) ||
vp               5140 ufs/ffs/ffs_softdep.c 			    (error=VOP_FSYNC(vp, p->p_ucred, MNT_NOWAIT, p))) {
vp               5141 ufs/ffs/ffs_softdep.c 				vput(vp);
vp               5144 ufs/ffs/ffs_softdep.c 			drain_output(vp, 0);
vp               5152 ufs/ffs/ffs_softdep.c 				bp = incore(vp, 0);
vp               5169 ufs/ffs/ffs_softdep.c 			vput(vp);
vp               5248 ufs/ffs/ffs_softdep.c softdep_slowdown(vp)
vp               5249 ufs/ffs/ffs_softdep.c 	struct vnode *vp;
vp               5379 ufs/ffs/ffs_softdep.c 	struct vnode *vp;
vp               5398 ufs/ffs/ffs_softdep.c 			if ((error = VFS_VGET(mp, ino, &vp)) != 0) {
vp               5405 ufs/ffs/ffs_softdep.c 			if ((error = VOP_FSYNC(vp, p->p_ucred, MNT_NOWAIT, p)))
vp               5407 ufs/ffs/ffs_softdep.c 			drain_output(vp, 0);
vp               5408 ufs/ffs/ffs_softdep.c 			vput(vp);
vp               5430 ufs/ffs/ffs_softdep.c 	struct vnode *vp;
vp               5479 ufs/ffs/ffs_softdep.c 		if ((error = VFS_VGET(mp, ino, &vp)) != 0) {
vp               5487 ufs/ffs/ffs_softdep.c 			if ((error = VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p)))
vp               5490 ufs/ffs/ffs_softdep.c 			if ((error = VOP_FSYNC(vp, p->p_ucred, MNT_NOWAIT, p)))
vp               5492 ufs/ffs/ffs_softdep.c 			drain_output(vp, 0);
vp               5494 ufs/ffs/ffs_softdep.c 		vput(vp);
vp               5628 ufs/ffs/ffs_softdep.c drain_output(vp, islocked)
vp               5629 ufs/ffs/ffs_softdep.c 	struct vnode *vp;
vp               5639 ufs/ffs/ffs_softdep.c 	while (vp->v_numoutput) {
vp               5640 ufs/ffs/ffs_softdep.c 		vp->v_bioflag |= VBIOWAIT;
vp               5642 ufs/ffs/ffs_softdep.c 		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "drain_output", 0);
vp                165 ufs/ffs/ffs_softdep_stub.c softdep_fsync_mountdev(struct vnode *vp, int waitfor)
vp                186 ufs/ffs/ffs_softdep_stub.c softdep_slowdown(struct vnode *vp)
vp                 60 ufs/ffs/ffs_subr.c 	struct vnode *vp;
vp                 65 ufs/ffs/ffs_subr.c 	vp = ITOV(ip);
vp                 71 ufs/ffs/ffs_subr.c 	if ((error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp)) != 0) {
vp                127 ufs/ffs/ffs_subr.c 	struct vnode *vp;
vp                136 ufs/ffs/ffs_subr.c 		if (VOP_BMAP(ep->b_vp, (daddr_t)0, &vp, (daddr_t)0, NULL))
vp                138 ufs/ffs/ffs_subr.c 		if (vp != ip->i_devvp)
vp                144 ufs/ffs/ffs_subr.c 		vprint("Disk overlap", vp);
vp                465 ufs/ffs/ffs_vfsops.c ffs_reload_vnode(struct vnode *vp, void *args) 
vp                475 ufs/ffs/ffs_vfsops.c 	if (vp->v_usecount == 0) {
vp                476 ufs/ffs/ffs_vfsops.c 		vgonel(vp, fra->p);
vp                483 ufs/ffs/ffs_vfsops.c 	if (vget(vp, LK_EXCLUSIVE, fra->p))
vp                486 ufs/ffs/ffs_vfsops.c 	if (vinvalbuf(vp, 0, fra->cred, fra->p, 0, 0))
vp                492 ufs/ffs/ffs_vfsops.c 	ip = VTOI(vp);
vp                499 ufs/ffs/ffs_vfsops.c 		vput(vp);
vp                507 ufs/ffs/ffs_vfsops.c 	vput(vp);
vp               1134 ufs/ffs/ffs_vfsops.c ffs_sync_vnode(struct vnode *vp, void *arg) {
vp               1139 ufs/ffs/ffs_vfsops.c 	ip = VTOI(vp);
vp               1140 ufs/ffs/ffs_vfsops.c 	if (vp->v_type == VNON || 
vp               1143 ufs/ffs/ffs_vfsops.c 		LIST_EMPTY(&vp->v_dirtyblkhd)) ) {
vp               1147 ufs/ffs/ffs_vfsops.c 	if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT, fsa->p))
vp               1150 ufs/ffs/ffs_vfsops.c 	if ((error = VOP_FSYNC(vp, fsa->cred, fsa->waitfor, fsa->p)))
vp               1152 ufs/ffs/ffs_vfsops.c 	VOP_UNLOCK(vp, 0, fsa->p);
vp               1153 ufs/ffs/ffs_vfsops.c 	vrele(vp);
vp               1244 ufs/ffs/ffs_vfsops.c 	struct vnode *vp;
vp               1255 ufs/ffs/ffs_vfsops.c 	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
vp               1260 ufs/ffs/ffs_vfsops.c 	vp->v_flag |= VLOCKSWORK;
vp               1268 ufs/ffs/ffs_vfsops.c 	vp->v_data = ip;
vp               1269 ufs/ffs/ffs_vfsops.c 	ip->i_vnode = vp;
vp               1288 ufs/ffs/ffs_vfsops.c 		vrele(vp);
vp               1307 ufs/ffs/ffs_vfsops.c 		vput(vp);
vp               1328 ufs/ffs/ffs_vfsops.c 	if (DOINGSOFTDEP(vp))
vp               1337 ufs/ffs/ffs_vfsops.c 	error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp);
vp               1339 ufs/ffs/ffs_vfsops.c 		vput(vp);
vp               1352 ufs/ffs/ffs_vfsops.c 		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
vp               1365 ufs/ffs/ffs_vfsops.c 	*vpp = vp;
vp               1397 ufs/ffs/ffs_vfsops.c ffs_vptofh(struct vnode *vp, struct fid *fhp)
vp               1402 ufs/ffs/ffs_vfsops.c 	ip = VTOI(vp);
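
The ffs_vget() entries above show the vnode side of bringing an inode in core: getnewvnode() supplies a fresh vnode, v_data and i_vnode are cross-linked, and the error paths release the vnode with vrele() or vput() depending on whether it has been locked yet. A stripped-down sketch of that wiring with the inode allocation and disk read left out; example_attach_inode() is an illustrative name and the include set is approximate.

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <ufs/ufs/inode.h>
#include <ufs/ffs/ffs_extern.h>

/* Wire a caller-supplied in-core inode to a new vnode (heavily condensed). */
int
example_attach_inode(struct mount *mp, struct inode *ip, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);		/* nothing allocated yet, nothing to undo */
	}
	vp->v_data = ip;		/* vnode -> inode */
	ip->i_vnode = vp;		/* inode -> vnode */

	/*
	 * The real code then hashes the inode, reads it from disk and calls
	 * ufs_vinit(); failures release the vnode with vrele() or vput()
	 * depending on whether it is locked at that point (both appear above).
	 */
	*vpp = vp;
	return (0);
}
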
vp                172 ufs/ffs/ffs_vnops.c 	struct vnode *vp;
vp                183 ufs/ffs/ffs_vnops.c 	vp = ap->a_vp;
vp                184 ufs/ffs/ffs_vnops.c 	ip = VTOI(vp);
vp                192 ufs/ffs/ffs_vnops.c 	if (vp->v_type == VLNK) {
vp                193 ufs/ffs/ffs_vnops.c 		if ((int)DIP(ip, size) < vp->v_mount->mnt_maxsymlinklen ||
vp                194 ufs/ffs/ffs_vnops.c 		    (vp->v_mount->mnt_maxsymlinklen == 0 &&
vp                197 ufs/ffs/ffs_vnops.c 	} else if (vp->v_type != VREG && vp->v_type != VDIR)
vp                198 ufs/ffs/ffs_vnops.c 		panic("ffs_read: type %d", vp->v_type);
vp                221 ufs/ffs/ffs_vnops.c 			error = bread(vp, lbn, size, NOCRED, &bp);
vp                223 ufs/ffs/ffs_vnops.c 			error = bread_cluster(vp, lbn, size, &bp);
vp                225 ufs/ffs/ffs_vnops.c 			error = bread(vp, lbn, size, NOCRED, &bp);
vp                263 ufs/ffs/ffs_vnops.c 	struct vnode *vp;
vp                276 ufs/ffs/ffs_vnops.c 	vp = ap->a_vp;
vp                277 ufs/ffs/ffs_vnops.c 	ip = VTOI(vp);
vp                291 ufs/ffs/ffs_vnops.c 	switch (vp->v_type) {
vp                317 ufs/ffs/ffs_vnops.c 	if (vp->v_type == VREG && p && !(ioflag & IO_NOLIMIT) &&
vp                344 ufs/ffs/ffs_vnops.c 			uvm_vnp_setsize(vp, DIP(ip, size));
vp                347 ufs/ffs/ffs_vnops.c 		(void)uvm_vnp_uncache(vp);
vp                381 ufs/ffs/ffs_vnops.c 		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
vp                403 ufs/ffs/ffs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                407 ufs/ffs/ffs_vnops.c 	if (vp->v_type == VBLK &&
vp                408 ufs/ffs/ffs_vnops.c 	    vp->v_specmountpoint != NULL &&
vp                409 ufs/ffs/ffs_vnops.c 	    (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP))
vp                410 ufs/ffs/ffs_vnops.c 		softdep_fsync_mountdev(vp, ap->a_waitfor);
vp                421 ufs/ffs/ffs_vnops.c 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp;
vp                424 ufs/ffs/ffs_vnops.c 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
vp                465 ufs/ffs/ffs_vnops.c 		nbp = LIST_FIRST(&vp->v_dirtyblkhd);
vp                472 ufs/ffs/ffs_vnops.c 		vwaitforio(vp, 0, "ffs_fsync", 0);
vp                482 ufs/ffs/ffs_vnops.c 		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
vp                496 ufs/ffs/ffs_vnops.c 			if (vp->v_type != VBLK)
vp                497 ufs/ffs/ffs_vnops.c 				vprint("ffs_fsync: dirty", vp);
vp                502 ufs/ffs/ffs_vnops.c 	return (UFS_UPDATE(VTOI(vp), ap->a_waitfor == MNT_WAIT));
vp                512 ufs/ffs/ffs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                513 ufs/ffs/ffs_vnops.c 	struct inode *ip = VTOI(vp);
vp                516 ufs/ffs/ffs_vnops.c 	if ((error = ufs_reclaim(vp, ap->a_p)) != 0)
vp                530 ufs/ffs/ffs_vnops.c 	vp->v_data = NULL;
vp                246 ufs/mfs/mfs_vfsops.c 	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
vp                247 ufs/mfs/mfs_vfsops.c 	struct mfsnode *mfsp = VTOMFS(vp);
vp                272 ufs/mfs/mfs_vfsops.c 		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
vp                141 ufs/mfs/mfs_vnops.c 	struct vnode *vp;
vp                145 ufs/mfs/mfs_vnops.c 	if (!vfinddev(bp->b_dev, VBLK, &vp) || vp->v_usecount == 0)
vp                148 ufs/mfs/mfs_vnops.c 	mfsp = VTOMFS(vp);
vp                166 ufs/mfs/mfs_vnops.c 		wakeup((caddr_t)vp);
vp                221 ufs/mfs/mfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                222 ufs/mfs/mfs_vnops.c 	struct mfsnode *mfsp = VTOMFS(vp);
vp                239 ufs/mfs/mfs_vnops.c 	if ((error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0)) != 0)
vp                246 ufs/mfs/mfs_vnops.c 	if (vp->v_usecount > 1)
vp                247 ufs/mfs/mfs_vnops.c 		printf("mfs_close: ref count %d > 1\n", vp->v_usecount);
vp                250 ufs/mfs/mfs_vnops.c 	if (vp->v_usecount > 1 || mfsp->mfs_buflist)
vp                257 ufs/mfs/mfs_vnops.c 	wakeup((caddr_t)vp);
vp                287 ufs/mfs/mfs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                289 ufs/mfs/mfs_vnops.c 	free(vp->v_data, M_MFSNODE);
vp                290 ufs/mfs/mfs_vnops.c 	vp->v_data = NULL;
vp                 51 ufs/mfs/mfsnode.h #define VTOMFS(vp)	((struct mfsnode *)(vp)->v_data)
vp                299 ufs/ufs/inode.h #define	VTOI(vp)	((struct inode *)(vp)->v_data)
vp                342 ufs/ufs/inode.h #define DOINGSOFTDEP(vp)      ((vp)->v_mount->mnt_flag & MNT_SOFTDEP)
vp                344 ufs/ufs/inode.h #define DOINGSOFTDEP(vp)      (0)
vp                346 ufs/ufs/inode.h #define DOINGASYNC(vp)        ((vp)->v_mount->mnt_flag & MNT_ASYNC)
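
The inode.h entries above define the three macros most of the ufs/ffs code in this listing leans on: VTOI() maps a vnode to its in-core inode, while DOINGSOFTDEP() and DOINGASYNC() test the mount flags that decide how eagerly metadata reaches disk. A one-function sketch of how they typically combine, following the UFS_UPDATE(ip, !DOINGSOFTDEP(vp)) call visible later in the listing; example_update_inode() is an illustrative name and the include set is approximate.

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

/* Push an inode update; wait only when soft dependencies do not cover the ordering. */
int
example_update_inode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	/* with MNT_SOFTDEP set, the update may remain asynchronous */
	return (UFS_UPDATE(ip, !DOINGSOFTDEP(vp)));
}
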
vp                 92 ufs/ufs/ufs_bmap.c ufs_bmaparray(struct vnode *vp, daddr_t bn, daddr64_t *bnp, struct indir *ap,
vp                105 ufs/ufs/ufs_bmap.c 	ip = VTOI(vp);
vp                106 ufs/ufs/ufs_bmap.c 	mp = vp->v_mount;
vp                127 ufs/ufs/ufs_bmap.c 	if ((error = ufs_getlbns(vp, bn, xap, nump)) != 0)
vp                147 ufs/ufs/ufs_bmap.c 	devvp = VFSTOUFS(vp->v_mount)->um_devvp;
vp                156 ufs/ufs/ufs_bmap.c 		if ((daddr == 0 && !incore(vp, metalbn)) || metalbn == bn)
vp                166 ufs/ufs/ufs_bmap.c 		bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0);
vp                228 ufs/ufs/ufs_bmap.c ufs_getlbns(struct vnode *vp, daddr_t bn, struct indir *ap, int *nump)
vp                235 ufs/ufs/ufs_bmap.c 	ump = VFSTOUFS(vp->v_mount);
vp                 58 ufs/ufs/ufs_dirhash.c #define OFSFMT(vp)		((vp)->v_mount->mnt_maxsymlinklen <= 0)
vp                112 ufs/ufs/ufs_dirhash.c 	struct vnode *vp;
vp                138 ufs/ufs/ufs_dirhash.c 	vp = ip->i_vnode;
vp                206 ufs/ufs/ufs_dirhash.c 	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
vp                317 ufs/ufs/ufs_dirhash.c 	struct vnode *vp;
vp                364 ufs/ufs/ufs_dirhash.c 	vp = ip->i_vnode;
vp                365 ufs/ufs/ufs_dirhash.c 	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
vp                 93 ufs/ufs/ufs_ihash.c 	struct vnode *vp;
vp                 98 ufs/ufs/ufs_ihash.c 			vp = ITOV(ip);
vp                100 ufs/ufs/ufs_ihash.c 			if (vget(vp, LK_EXCLUSIVE, p))
vp                102 ufs/ufs/ufs_ihash.c 			return (vp);
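
The ufs_ihash.c entries above show the other half of bringing an inode in core: if it is already hashed, its vnode is taken with vget(), and a failed vget() (the vnode was being recycled) sends the lookup back to the start. A sketch of that retry loop; example_hash_find() is a hypothetical stand-in for the real hash-chain walk and always misses in this sketch.

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <ufs/ufs/inode.h>

/* Hypothetical stand-in for the hash-chain walk; always misses here. */
static struct inode *
example_hash_find(dev_t dev, ino_t inum)
{
	return (NULL);
}

/* Look up an in-core inode and return its vnode, locked and referenced. */
struct vnode *
example_ihashget(dev_t dev, ino_t inum, struct proc *p)
{
	struct inode *ip;
	struct vnode *vp;

loop:
	if ((ip = example_hash_find(dev, inum)) == NULL)
		return (NULL);			/* not in core */
	vp = ITOV(ip);
	if (vget(vp, LK_EXCLUSIVE, p))
		goto loop;			/* lost a race with recycling; retry */
	return (vp);
}
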
vp                 65 ufs/ufs/ufs_inode.c 	struct vnode *vp = ap->a_vp;
vp                 66 ufs/ufs/ufs_inode.c 	struct inode *ip = VTOI(vp);
vp                 73 ufs/ufs/ufs_inode.c 	if (prtactive && vp->v_usecount != 0)
vp                 74 ufs/ufs/ufs_inode.c 		vprint("ffs_inactive: pushing active", vp);
vp                 83 ufs/ufs/ufs_inode.c 	if (DIP(ip, nlink) <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
vp                106 ufs/ufs/ufs_inode.c 		if (DOINGSOFTDEP(vp))
vp                116 ufs/ufs/ufs_inode.c 	VOP_UNLOCK(vp, 0, p);
vp                123 ufs/ufs/ufs_inode.c 		vrecycle(vp, p);
vp                132 ufs/ufs/ufs_inode.c ufs_reclaim(struct vnode *vp, struct proc *p)
vp                138 ufs/ufs/ufs_inode.c 	if (prtactive && vp->v_usecount != 0)
vp                139 ufs/ufs/ufs_inode.c 		vprint("ufs_reclaim: pushing active", vp);
vp                145 ufs/ufs/ufs_inode.c 	ip = VTOI(vp);
vp                150 ufs/ufs/ufs_inode.c 	cache_purge(vp);
vp                 69 ufs/ufs/ufs_lookup.c #define FSFMT(vp)	((vp)->v_mount->mnt_maxsymlinklen <= 0)
vp               1126 ufs/ufs/ufs_lookup.c 	struct vnode *vp;
vp               1130 ufs/ufs/ufs_lookup.c 	vp = ITOV(target);
vp               1141 ufs/ufs/ufs_lookup.c 		if (vp->v_type != VDIR) {
vp               1145 ufs/ufs/ufs_lookup.c 		error = vn_rdwr(UIO_READ, vp, (caddr_t)&dirbuf,
vp               1151 ufs/ufs/ufs_lookup.c 			if (vp->v_mount->mnt_maxsymlinklen > 0)
vp               1170 ufs/ufs/ufs_lookup.c 		vput(vp);
vp               1171 ufs/ufs/ufs_lookup.c 		error = VFS_VGET(vp->v_mount, dirbuf.dotdot_ino, &vp);
vp               1173 ufs/ufs/ufs_lookup.c 			vp = NULL;
vp               1181 ufs/ufs/ufs_lookup.c 	if (vp != NULL)
vp               1182 ufs/ufs/ufs_lookup.c 		vput(vp);
vp                151 ufs/ufs/ufs_quota.c 	struct vnode *vp = ITOV(ip);
vp                161 ufs/ufs/ufs_quota.c 		dqget(vp, DIP(ip, uid), ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
vp                170 ufs/ufs/ufs_quota.c 		dqget(vp, DIP(ip, gid), ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
vp                437 ufs/ufs/ufs_quota.c 	struct vnode *vp = ITOV(ip);
vp                439 ufs/ufs/ufs_quota.c 	if (!VOP_ISLOCKED(vp)) 
vp                459 ufs/ufs/ufs_quota.c quotaon_vnode(struct vnode *vp, void *arg) 
vp                464 ufs/ufs/ufs_quota.c 	if (vp->v_type == VNON || vp->v_writecount == 0)
vp                467 ufs/ufs/ufs_quota.c 	if (vget(vp, LK_EXCLUSIVE, p)) {
vp                471 ufs/ufs/ufs_quota.c 	error = getinoquota(VTOI(vp));
vp                472 ufs/ufs/ufs_quota.c 	vput(vp);
vp                484 ufs/ufs/ufs_quota.c 	struct vnode *vp, **vpp;
vp                498 ufs/ufs/ufs_quota.c 	vp = nd.ni_vp;
vp                499 ufs/ufs/ufs_quota.c 	VOP_UNLOCK(vp, 0, p);
vp                500 ufs/ufs/ufs_quota.c 	if (vp->v_type != VREG) {
vp                501 ufs/ufs/ufs_quota.c 		(void) vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
vp                504 ufs/ufs/ufs_quota.c 	if (*vpp != vp)
vp                508 ufs/ufs/ufs_quota.c 	vp->v_flag |= VSYSTEM;
vp                509 ufs/ufs/ufs_quota.c 	*vpp = vp;
vp                544 ufs/ufs/ufs_quota.c quotaoff_vnode(struct vnode *vp, void *arg) 
vp                550 ufs/ufs/ufs_quota.c 	if (vp->v_type == VNON)
vp                554 ufs/ufs/ufs_quota.c 	if (vget(vp, LK_EXCLUSIVE, qa->p))
vp                556 ufs/ufs/ufs_quota.c 	ip = VTOI(vp);
vp                559 ufs/ufs/ufs_quota.c 	dqrele(vp, dq);
vp                560 ufs/ufs/ufs_quota.c 	vput(vp);
vp                719 ufs/ufs/ufs_quota.c qsync_vnode(struct vnode *vp, void *arg)
vp                725 ufs/ufs/ufs_quota.c 	if (vp->v_type == VNON)
vp                728 ufs/ufs/ufs_quota.c 	if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT, p))
vp                732 ufs/ufs/ufs_quota.c 		dq = VTOI(vp)->i_dquot[i];
vp                734 ufs/ufs/ufs_quota.c 			dqsync(vp, dq);
vp                736 ufs/ufs/ufs_quota.c 	vput(vp);
vp                796 ufs/ufs/ufs_quota.c dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
vp                856 ufs/ufs/ufs_quota.c 	if (vp != dqvp)
vp                878 ufs/ufs/ufs_quota.c 	if (vp != dqvp)
vp                889 ufs/ufs/ufs_quota.c 		dqrele(vp, dq);
vp                914 ufs/ufs/ufs_quota.c dqrele(struct vnode *vp, struct dquot *dq)
vp                924 ufs/ufs/ufs_quota.c 		(void) dqsync(vp, dq);
vp                934 ufs/ufs/ufs_quota.c dqsync(struct vnode *vp, struct dquot *dq)
vp                949 ufs/ufs/ufs_quota.c 	if (vp != dqvp)
vp                955 ufs/ufs/ufs_quota.c 			if (vp != dqvp)
vp                976 ufs/ufs/ufs_quota.c 	if (vp != dqvp)
vp                984 ufs/ufs/ufs_quota.c 	struct vnode *vp = ITOV(ip);
vp                988 ufs/ufs/ufs_quota.c 			dqrele(vp, ip->i_dquot[i]);
vp                203 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                204 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp                206 ufs/ufs/ufs_vnops.c 	if (vp->v_usecount > 1) {
vp                219 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                220 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp                229 ufs/ufs/ufs_vnops.c 		switch (vp->v_type) {
vp                234 ufs/ufs/ufs_vnops.c 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                264 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                265 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp                291 ufs/ufs/ufs_vnops.c 	if (vp->v_type == VBLK)
vp                293 ufs/ufs/ufs_vnops.c 	else if (vp->v_type == VCHR)
vp                296 ufs/ufs/ufs_vnops.c 		vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
vp                298 ufs/ufs/ufs_vnops.c 	vap->va_type = vp->v_type;
vp                311 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                312 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp                329 ufs/ufs/ufs_vnops.c 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                356 ufs/ufs/ufs_vnops.c 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                358 ufs/ufs/ufs_vnops.c 		error = ufs_chown(vp, vap->va_uid, vap->va_gid, cred, p);
vp                369 ufs/ufs/ufs_vnops.c 		switch (vp->v_type) {
vp                374 ufs/ufs/ufs_vnops.c 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                386 ufs/ufs/ufs_vnops.c 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                391 ufs/ufs/ufs_vnops.c 		    (error = VOP_ACCESS(vp, VWRITE, cred, p))))
vp                403 ufs/ufs/ufs_vnops.c 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
vp                405 ufs/ufs/ufs_vnops.c 		error = ufs_chmod(vp, (int)vap->va_mode, cred, p);
vp                407 ufs/ufs/ufs_vnops.c 	VN_KNOTE(vp, hint);
vp                416 ufs/ufs/ufs_vnops.c ufs_chmod(struct vnode *vp, int mode, struct ucred *cred, struct proc *p)
vp                418 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp                425 ufs/ufs/ufs_vnops.c 		if (vp->v_type != VDIR && (mode & S_ISTXT))
vp                433 ufs/ufs/ufs_vnops.c 	if ((vp->v_flag & VTEXT) && (DIP(ip, mode) & S_ISTXT) == 0)
vp                434 ufs/ufs/ufs_vnops.c 		(void) uvm_vnp_uncache(vp);
vp                443 ufs/ufs/ufs_vnops.c ufs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
vp                446 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp                554 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                558 ufs/ufs/ufs_vnops.c 	ip = VTOI(vp);
vp                559 ufs/ufs/ufs_vnops.c 	if (vp->v_type == VDIR || (DIP(ip, flags) & (IMMUTABLE | APPEND)) ||
vp                565 ufs/ufs/ufs_vnops.c 	VN_KNOTE(vp, NOTE_DELETE);
vp                568 ufs/ufs/ufs_vnops.c 	if (dvp == vp)
vp                569 ufs/ufs/ufs_vnops.c 		vrele(vp);
vp                571 ufs/ufs/ufs_vnops.c 		vput(vp);
vp                584 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp                595 ufs/ufs/ufs_vnops.c 	if (vp->v_type == VDIR) {
vp                600 ufs/ufs/ufs_vnops.c 	if (dvp->v_mount != vp->v_mount) {
vp                605 ufs/ufs/ufs_vnops.c 	if (dvp != vp && (error = vn_lock(vp, LK_EXCLUSIVE, p))) {
vp                609 ufs/ufs/ufs_vnops.c 	ip = VTOI(vp);
vp                623 ufs/ufs/ufs_vnops.c 	if (DOINGSOFTDEP(vp))
vp                625 ufs/ufs/ufs_vnops.c 	if ((error = UFS_UPDATE(ip, !DOINGSOFTDEP(vp))) == 0) {
vp                627 ufs/ufs/ufs_vnops.c 		error = ufs_direnter(dvp, vp, &newdir, cnp, NULL);
vp                633 ufs/ufs/ufs_vnops.c 		if (DOINGSOFTDEP(vp))
vp                637 ufs/ufs/ufs_vnops.c 	VN_KNOTE(vp, NOTE_LINK);
vp                640 ufs/ufs/ufs_vnops.c 	if (dvp != vp)
vp                641 ufs/ufs/ufs_vnops.c 		VOP_UNLOCK(vp, 0, p);
vp               1242 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1248 ufs/ufs/ufs_vnops.c 	ip = VTOI(vp);
vp               1253 ufs/ufs/ufs_vnops.c 	if (dp == ip || vp->v_mountedhere != 0) {
vp               1258 ufs/ufs/ufs_vnops.c 		vput(vp);
vp               1289 ufs/ufs/ufs_vnops.c 	if (DOINGSOFTDEP(vp)) {
vp               1296 ufs/ufs/ufs_vnops.c 		if (DOINGSOFTDEP(vp)) {
vp               1312 ufs/ufs/ufs_vnops.c 	if (!DOINGSOFTDEP(vp)) {
vp               1319 ufs/ufs/ufs_vnops.c 		ioflag = DOINGASYNC(vp) ? 0 : IO_SYNC;
vp               1322 ufs/ufs/ufs_vnops.c 	cache_purge(vp);
vp               1330 ufs/ufs/ufs_vnops.c 	VN_KNOTE(vp, NOTE_DELETE);
vp               1332 ufs/ufs/ufs_vnops.c 	vput(vp);
vp               1343 ufs/ufs/ufs_vnops.c 	struct vnode *vp, **vpp = ap->a_vpp;
vp               1352 ufs/ufs/ufs_vnops.c 	vp = *vpp;
vp               1354 ufs/ufs/ufs_vnops.c 	if (len < vp->v_mount->mnt_maxsymlinklen) {
vp               1355 ufs/ufs/ufs_vnops.c 		ip = VTOI(vp);
vp               1360 ufs/ufs/ufs_vnops.c 		error = vn_rdwr(UIO_WRITE, vp, ap->a_target, len, (off_t)0,
vp               1363 ufs/ufs/ufs_vnops.c 	vput(vp);
vp               1490 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1491 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp               1495 ufs/ufs/ufs_vnops.c 	if (isize < vp->v_mount->mnt_maxsymlinklen ||
vp               1496 ufs/ufs/ufs_vnops.c 	    (vp->v_mount->mnt_maxsymlinklen == 0 && DIP(ip, blocks) == 0)) {
vp               1500 ufs/ufs/ufs_vnops.c 	return (VOP_READ(vp, ap->a_uio, 0, ap->a_cred));
vp               1510 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1512 ufs/ufs/ufs_vnops.c 	return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags, NULL));
vp               1522 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1524 ufs/ufs/ufs_vnops.c 	return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags | LK_RELEASE, NULL));
vp               1547 ufs/ufs/ufs_vnops.c 	struct vnode *vp = bp->b_vp;
vp               1552 ufs/ufs/ufs_vnops.c 	ip = VTOI(vp);
vp               1553 ufs/ufs/ufs_vnops.c 	if (vp->v_type == VBLK || vp->v_type == VCHR)
vp               1556 ufs/ufs/ufs_vnops.c 		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
vp               1575 ufs/ufs/ufs_vnops.c 	vp = ip->i_devvp;
vp               1576 ufs/ufs/ufs_vnops.c 	bp->b_dev = vp->v_rdev;
vp               1577 ufs/ufs/ufs_vnops.c 	VOCALL (vp->v_op, VOFFSET(vop_strategy), ap);
vp               1590 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1591 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp               1601 ufs/ufs/ufs_vnops.c 	if (vp->v_type == VFIFO)
vp               1602 ufs/ufs/ufs_vnops.c 		fifo_printinfo(vp);
vp               1651 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1652 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp               1706 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1707 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
vp               1774 ufs/ufs/ufs_vnops.c 	struct vnode *vp, *nvp;
vp               1777 ufs/ufs/ufs_vnops.c 	vp = *vpp;
vp               1778 ufs/ufs/ufs_vnops.c 	ip = VTOI(vp);
vp               1779 ufs/ufs/ufs_vnops.c 	switch(vp->v_type = IFTOVT(DIP(ip, mode))) {
vp               1782 ufs/ufs/ufs_vnops.c 		vp->v_op = specops;
vp               1783 ufs/ufs/ufs_vnops.c 		if ((nvp = checkalias(vp, DIP(ip, rdev), mntp)) != NULL) {
vp               1789 ufs/ufs/ufs_vnops.c 			nvp->v_data = vp->v_data;
vp               1790 ufs/ufs/ufs_vnops.c 			vp->v_data = NULL;
vp               1791 ufs/ufs/ufs_vnops.c 			vp->v_op = spec_vnodeop_p;
vp               1793 ufs/ufs/ufs_vnops.c 			vp->v_flag &= ~VLOCKSWORK;
vp               1795 ufs/ufs/ufs_vnops.c 			vrele(vp);
vp               1796 ufs/ufs/ufs_vnops.c 			vgone(vp);
vp               1800 ufs/ufs/ufs_vnops.c 			vp = nvp;
vp               1801 ufs/ufs/ufs_vnops.c 			ip->i_vnode = vp;
vp               1806 ufs/ufs/ufs_vnops.c 		vp->v_op = fifoops;
vp               1820 ufs/ufs/ufs_vnops.c                 vp->v_flag |= VROOT;
vp               1827 ufs/ufs/ufs_vnops.c 	*vpp = vp;
vp               1929 ufs/ufs/ufs_vnops.c 	struct vnode *vp = ap->a_vp;
vp               1946 ufs/ufs/ufs_vnops.c 	kn->kn_hook = (caddr_t)vp;
vp               1948 ufs/ufs/ufs_vnops.c 	SLIST_INSERT_HEAD(&vp->v_selectinfo.si_note, kn, kn_selnext);
vp               1956 ufs/ufs/ufs_vnops.c 	struct vnode *vp = (struct vnode *)kn->kn_hook;
vp               1958 ufs/ufs/ufs_vnops.c 	SLIST_REMOVE(&vp->v_selectinfo.si_note, kn, knote, kn_selnext);
vp               1965 ufs/ufs/ufs_vnops.c 	struct vnode *vp = (struct vnode *)kn->kn_hook;
vp               1966 ufs/ufs/ufs_vnops.c 	struct inode *ip = VTOI(vp);
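
The ufs_strategy() entries near the end of the ufs_vnops.c block show what happens to a buffer queued on a file vnode: if its physical block number is still unset (b_blkno == b_lblkno), VOP_BMAP() translates the logical block, and the buffer is then redirected to the device vnode (i_devvp). A sketch of just the translation step; example_map_buf() is an illustrative name, and hole handling plus the hand-off to the device driver are left as comments.

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/vnode.h>

/* Translate a file-relative buffer to a device block before issuing I/O. */
int
example_map_buf(struct vnode *vp, struct buf *bp)
{
	int error;

	if (bp->b_blkno == bp->b_lblkno) {	/* not mapped yet */
		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
		if (error)
			return (error);
		/* a hole maps to -1 and is handled by the real code */
	}
	/* the caller then points bp at ip->i_devvp and calls its strategy op */
	return (0);
}
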
vp                402 uvm/uvm_mmap.c 	struct vnode *vp;
vp                481 uvm/uvm_mmap.c 		vp = (struct vnode *)fp->f_data;	/* convert to vnode */
vp                483 uvm/uvm_mmap.c 		if (vp->v_type != VREG && vp->v_type != VCHR &&
vp                484 uvm/uvm_mmap.c 		    vp->v_type != VBLK) {
vp                489 uvm/uvm_mmap.c 		if (vp->v_type == VREG && (pos + size) < pos) {
vp                495 uvm/uvm_mmap.c 		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
vp                511 uvm/uvm_mmap.c 			   "%s (pid %d comm %s)\n", vp->v_type == VCHR ?
vp                515 uvm/uvm_mmap.c 			if (vp->v_type == VCHR)
vp                526 uvm/uvm_mmap.c 		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
vp                554 uvm/uvm_mmap.c 				    VOP_GETATTR(vp, &va, p->p_ucred, p)))
vp                575 uvm/uvm_mmap.c 		handle = (caddr_t)vp;
vp               1101 uvm/uvm_mmap.c 	struct vnode *vp;
vp               1151 uvm/uvm_mmap.c 		vp = (struct vnode *) handle;	/* get vnode */
vp               1152 uvm/uvm_mmap.c 		if (vp->v_type != VCHR) {
vp               1153 uvm/uvm_mmap.c 			uobj = uvn_attach((void *) vp, (flags & MAP_SHARED) ?
vp               1183 uvm/uvm_mmap.c 					uvm_vnp_uncache(vp);
vp               1188 uvm/uvm_mmap.c 			VREF(vp);
vp               1191 uvm/uvm_mmap.c 			uobj = udv_attach((void *) &vp->v_rdev,
vp               1201 uvm/uvm_mmap.c 				uobj = udv_attach((void *) &vp->v_rdev,
vp               1209 uvm/uvm_mmap.c 			return((vp->v_type == VREG) ? ENOMEM : EINVAL);
vp                512 uvm/uvm_swap.c swaplist_find(vp, remove)
vp                513 uvm/uvm_swap.c 	struct vnode *vp;
vp                527 uvm/uvm_swap.c 			if (sdp->swd_vp == vp) {
vp                624 uvm/uvm_swap.c 	struct vnode *vp;
vp                726 uvm/uvm_swap.c 		vp = rootvp;		/* miniroot */
vp                727 uvm/uvm_swap.c 		if (vget(vp, LK_EXCLUSIVE, p)) {
vp                751 uvm/uvm_swap.c 		vp = nd.ni_vp;
vp                759 uvm/uvm_swap.c 		if (vp->v_type != VBLK) {
vp                763 uvm/uvm_swap.c 		dumpdev = vp->v_rdev;
vp                775 uvm/uvm_swap.c 		if ((sdp = swaplist_find(vp, 1)) == NULL) {
vp                797 uvm/uvm_swap.c 		if ((sdp = swaplist_find(vp, 0)) != NULL) {
vp                806 uvm/uvm_swap.c 		sdp->swd_vp = vp;
vp                807 uvm/uvm_swap.c 		sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
vp                812 uvm/uvm_swap.c 		if (vp->v_type == VREG) {
vp                833 uvm/uvm_swap.c 			(void) swaplist_find(vp, 1);  /* kill fake entry */
vp                836 uvm/uvm_swap.c 			if (vp->v_type == VREG) {
vp                847 uvm/uvm_swap.c 		if ((sdp = swaplist_find(vp, 0)) == NULL) {
vp                876 uvm/uvm_swap.c 	vput(vp);
vp                901 uvm/uvm_swap.c 	struct vnode *vp;
vp                917 uvm/uvm_swap.c 	vp = sdp->swd_vp;
vp                927 uvm/uvm_swap.c 	if (vp != rootvp) {
vp                928 uvm/uvm_swap.c 		if ((error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)))
vp                944 uvm/uvm_swap.c 	switch (vp->v_type) {
vp                954 uvm/uvm_swap.c 		if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)))
vp                958 uvm/uvm_swap.c 		     VFS_STATFS(vp->v_mount, &vp->v_mount->mnt_stat, p)) != 0)
vp                961 uvm/uvm_swap.c 		sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
vp                967 uvm/uvm_swap.c 		if (vp->v_op == nfsv2_vnodeop_p)
vp                993 uvm/uvm_swap.c 	if (vp->v_type == VBLK) {
vp               1038 uvm/uvm_swap.c 	if (vp == rootvp) {
vp               1062 uvm/uvm_swap.c 	vref(vp);
vp               1084 uvm/uvm_swap.c 	if (vp != rootvp)
vp               1085 uvm/uvm_swap.c 		(void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
vp               1286 uvm/uvm_swap.c 	struct vnode	*vp;
vp               1326 uvm/uvm_swap.c 				 	&vp, &nbn, &nra);
vp               1366 uvm/uvm_swap.c 			    sdp->swd_vp, vp, (u_long)byteoff, nbn);
vp               1426 uvm/uvm_swap.c 		bgetvp(vp, &nbp->vb_buf);
vp               2076 uvm/uvm_swap.c 	struct vnode *vp;
vp               2089 uvm/uvm_swap.c 	if (bdevvp(swap_dev, &vp)) {
vp               2100 uvm/uvm_swap.c 	sdp->swd_vp = vp;
vp               2108 uvm/uvm_swap.c 		swaplist_find(vp, 1);
vp               2116 uvm/uvm_swap.c 	VOP_UNLOCK(vp, 0, curproc);
vp                180 uvm/uvm_unix.c uvm_coredump(p, vp, cred, chdr)
vp                182 uvm/uvm_unix.c 	struct vnode *vp;
vp                247 uvm/uvm_unix.c 		error = vn_rdwr(UIO_WRITE, vp,
vp                259 uvm/uvm_unix.c 		error = vn_rdwr(UIO_WRITE, vp,
vp                160 uvm/uvm_vnode.c 	struct vnode *vp = arg;
vp                161 uvm/uvm_vnode.c 	struct uvm_vnode *uvn = &vp->v_uvm;
vp                190 uvm/uvm_vnode.c 	if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
vp                206 uvm/uvm_vnode.c 			VREF(vp);
vp                239 uvm/uvm_vnode.c 	if (vp->v_type == VBLK) {
vp                248 uvm/uvm_vnode.c 		result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
vp                256 uvm/uvm_vnode.c 		result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc);
vp                278 uvm/uvm_vnode.c 	if (vp->v_type == VBLK)
vp                305 uvm/uvm_vnode.c 	VREF(vp);
vp                364 uvm/uvm_vnode.c 	struct vnode *vp;
vp                383 uvm/uvm_vnode.c 	vp = (struct vnode *) uobj;
vp                389 uvm/uvm_vnode.c 	vp->v_flag &= ~VTEXT;
vp                400 uvm/uvm_vnode.c 		vrele(vp);			/* drop vnode reference */
vp                478 uvm/uvm_vnode.c 	vrele(vp);
vp                513 uvm/uvm_vnode.c uvm_vnp_terminate(vp)
vp                514 uvm/uvm_vnode.c 	struct vnode *vp;
vp                516 uvm/uvm_vnode.c 	struct uvm_vnode *uvn = &vp->v_uvm;
vp                524 uvm/uvm_vnode.c 	UVMHIST_LOG(maphist, "  vp=%p, ref=%ld, flag=0x%lx", vp,
vp               1769 uvm/uvm_vnode.c uvm_vnp_uncache(vp)
vp               1770 uvm/uvm_vnode.c 	struct vnode *vp;
vp               1772 uvm/uvm_vnode.c 	struct uvm_vnode *uvn = &vp->v_uvm;
vp               1801 uvm/uvm_vnode.c 	VREF(vp);			/* seems ok, even with VOP_LOCK */
vp               1811 uvm/uvm_vnode.c 	if (!VOP_ISLOCKED(vp)) {
vp               1821 uvm/uvm_vnode.c 		if (vp->v_op == nfsv2_vnodeop_p ||
vp               1822 uvm/uvm_vnode.c 		    vp->v_op == spec_nfsv2nodeop_p) {
vp               1826 uvm/uvm_vnode.c 		if (vp->v_op == fifo_nfsv2nodeop_p) {
vp               1846 uvm/uvm_vnode.c 	VOP_UNLOCK(vp, 0, curproc);
vp               1848 uvm/uvm_vnode.c 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
vp               1876 uvm/uvm_vnode.c uvm_vnp_setsize(vp, newsize)
vp               1877 uvm/uvm_vnode.c 	struct vnode *vp;
vp               1880 uvm/uvm_vnode.c 	struct uvm_vnode *uvn = &vp->v_uvm;
vp               1923 uvm/uvm_vnode.c 	struct vnode *vp;
vp               1939 uvm/uvm_vnode.c 		vp = (struct vnode *) uvn;
vp               1940 uvm/uvm_vnode.c 		if (mp && vp->v_mount != mp)
vp               1974 uvm/uvm_vnode.c 			VREF(vp);
vp                 72 xfs/xfs_fs.h   #define NNPFS_FROM_VNODE(vp) VFS_TO_NNPFS((vp)->v_mount)
vp                301 xfs/xfs_locl.h #define xfs_pushdirty(vp, cred, p)
vp                370 xfs/xfs_locl.h #define xfs_vfs_object_create(vp,proc,ucred) vfs_object_create(vp,proc,ucred,TRUE)
vp                372 xfs/xfs_locl.h #define xfs_vfs_object_create(vp,proc,ucred) vfs_object_create(vp,proc,ucred)
vp                376 xfs/xfs_locl.h #define xfs_set_vp_size(vp, sz) uvm_vnp_setsize(vp, sz)
vp                378 xfs/xfs_locl.h #define xfs_set_vp_size(vp, sz) vnode_pager_setsize(vp, sz)
vp                380 xfs/xfs_locl.h #define xfs_set_vp_size(vp, sz) ubc_setsize(vp, sz)
vp                382 xfs/xfs_locl.h #define xfs_set_vp_size(vp, sz)
vp                393 xfs/xfs_locl.h void    vgonel (struct vnode *vp, d_thread_t *p);
vp                186 xfs/xfs_message.c 	struct vnode *vp;
vp                195 xfs/xfs_message.c 	    error = xfs_fhlookup (p, fh, &vp);
vp                214 xfs/xfs_message.c 	    vp = ndp->ni_vp;
vp                219 xfs/xfs_message.c 	    xfs_vfs_unlock(vp, p);
vp                223 xfs/xfs_message.c 	    DATA_FROM_XNODE(t) = vp;
vp                285 xfs/xfs_message.c 	struct vnode *vp = XNODE_TO_VNODE(t);
vp                288 xfs/xfs_message.c 	if (vp->v_usecount >= 0 && vp->xfs_writecount >= 1)
vp                293 xfs/xfs_message.c 	    vm_object_t obj = vp->v_object;
vp                307 xfs/xfs_message.c 	if (vp->v_usecount > 0 && vp->v_type != VDIR) {
vp                309 xfs/xfs_message.c 	    if (vget(vp, 0, p))
vp                312 xfs/xfs_message.c 	    if (UBCISVALID(vp) && !ubc_isinuse(vp, 1)) {
vp                313 xfs/xfs_message.c 		ubc_setsize(vp, 0);
vp                314 xfs/xfs_message.c 		vrele(vp);
vp                316 xfs/xfs_message.c 		vrele(vp);
vp                334 xfs/xfs_message.c 	if (vp->v_type == VDIR)
vp                335 xfs/xfs_message.c 	    xfs_dnlc_purge(vp);
vp                336 xfs/xfs_message.c 	if (vp->v_usecount == 0) {
vp                339 xfs/xfs_message.c 	    vrecycle(vp, p);
vp                386 xfs/xfs_message.c gc_vnode (struct vnode *vp,
vp                390 xfs/xfs_message.c     if (vp->v_usecount <= 0) {
vp                393 xfs/xfs_message.c 	if (vp->v_usecount < 0) {
vp                394 xfs/xfs_message.c 		    vprint("vrele: bad ref count", vp);
vp                400 xfs/xfs_message.c 	vgone(vp, VX_NOSLEEP, NULL);
vp                415 xfs/xfs_message.c gc_vnode (struct vnode *vp,
vp                419 xfs/xfs_message.c     mtx_lock(&vp->v_interlock);
vp                421 xfs/xfs_message.c     simple_lock(&vp->v_interlock);
vp                425 xfs/xfs_message.c     if (vp->v_usecount <= 0) {
vp                429 xfs/xfs_message.c 	obj = vp->v_object;
vp                438 xfs/xfs_message.c 	    mtx_unlock(&vp->v_interlock);
vp                440 xfs/xfs_message.c 	    simple_unlock (&vp->v_interlock);
vp                447 xfs/xfs_message.c 	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
vp                448 xfs/xfs_message.c 		    vprint("vrele: bad ref count", vp);
vp                456 xfs/xfs_message.c 	vgonel (vp, p);
vp                459 xfs/xfs_message.c 	mtx_unlock(&vp->v_interlock);
vp                461 xfs/xfs_message.c 	simple_unlock(&vp->v_interlock); 
vp                463 xfs/xfs_message.c 	vgone (vp);
vp                468 xfs/xfs_message.c 	mtx_unlock(&vp->v_interlock);
vp                470 xfs/xfs_message.c 	simple_unlock(&vp->v_interlock);
vp                408 xfs/xfs_node-bsd.c     struct vnode *dvp, *vp;
vp                423 xfs/xfs_node-bsd.c     tbl.dvp = tbl.vp = NULL;
vp                434 xfs/xfs_node-bsd.c tbl_enter (size_t len, const char *name, struct vnode *dvp, struct vnode *vp)
vp                439 xfs/xfs_node-bsd.c     tbl.vp = vp;
vp                441 xfs/xfs_node-bsd.c     tbl.vpid = vp->v_id;
vp                458 xfs/xfs_node-bsd.c 	&& tbl.vpid == tbl.vp->v_id) {
vp                460 xfs/xfs_node-bsd.c 	*res = tbl.vp;
vp                473 xfs/xfs_node-bsd.c 	       struct vnode *vp)
vp                478 xfs/xfs_node-bsd.c 		      (unsigned long)vp));
vp                485 xfs/xfs_node-bsd.c 		      (unsigned long)vp,
vp                526 xfs/xfs_node-bsd.c 	cache_enter(dvp, vp, cnp);
vp                529 xfs/xfs_node-bsd.c     if (vp != NULL)
vp                530 xfs/xfs_node-bsd.c 	tbl_enter (cnp->cn_namelen, cnp->cn_nameptr, dvp, vp);
vp                586 xfs/xfs_node-bsd.c 		    struct vnode *vp)
vp                593 xfs/xfs_node-bsd.c 		      (unsigned long)vp));
vp                596 xfs/xfs_node-bsd.c     return xfs_dnlc_enter (dvp, &cn, vp);
vp                746 xfs/xfs_node-bsd.c xfs_dnlc_purge (struct vnode *vp)
vp                750 xfs/xfs_node-bsd.c     if (tbl.dvp == vp || tbl.vp == vp)
vp                753 xfs/xfs_node-bsd.c     cache_purge(vp);
vp                107 xfs/xfs_node.h #define DATA_FROM_VNODE(vp) DATA_FROM_XNODE(VNODE_TO_XNODE(vp))
vp                112 xfs/xfs_node.h #define VNODE_TO_XNODE(vp) ((struct xfs_node *) (vp)->v_data)
vp                115 xfs/xfs_node.h #define xfs_do_vget(vp, lockflag, proc) vget((vp))
vp                117 xfs/xfs_node.h #define xfs_do_vget(vp, lockflag, proc) vget((vp), (lockflag))
vp                119 xfs/xfs_node.h #define xfs_do_vget(vp, lockflag, proc) vget((vp), (lockflag), (proc))
vp                314 xfs/xfs_syscalls-common.c     struct vnode *vp;
vp                338 xfs/xfs_syscalls-common.c     vp = ndp->ni_vp;
vp                340 xfs/xfs_syscalls-common.c     *res = vp;
vp                352 xfs/xfs_syscalls-common.c 	      struct vnode *vp)
vp                359 xfs/xfs_syscalls-common.c     fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
vp                361 xfs/xfs_syscalls-common.c     VFS_VPTOFH(vp, &fh.fh_fid, error);
vp                363 xfs/xfs_syscalls-common.c     error = VFS_VPTOFH(vp, &fh.fh_fid);
vp                382 xfs/xfs_syscalls-common.c 	    struct vnode *vp)
vp                392 xfs/xfs_syscalls-common.c     xfs_vop_getattr(vp, &vattr, xfs_thread_to_cred(p), p, error);
vp                394 xfs/xfs_syscalls-common.c     xfs_vop_getattr(vp, &vattr, xfs_proc_to_cred(p), p, error);
vp                399 xfs/xfs_syscalls-common.c     mnt = vp->v_mount;
vp                428 xfs/xfs_syscalls-common.c 	    struct vnode *vp)
vp                434 xfs/xfs_syscalls-common.c     if (vp == NULL)
vp                447 xfs/xfs_syscalls-common.c     error = getfh_compat (p, vice_ioctl, vp);
vp                449 xfs/xfs_syscalls-common.c     error = trad_fhget (p, vice_ioctl, vp);
vp                452 xfs/xfs_syscalls-common.c     vrele(vp);
vp                463 xfs/xfs_syscalls-common.c 	     struct vnode *vp,
vp                470 xfs/xfs_syscalls-common.c     if (vp != NULL) {
vp                471 xfs/xfs_syscalls-common.c 	vrele (vp);
vp                493 xfs/xfs_syscalls-common.c 	       struct vnode *vp)
vp                506 xfs/xfs_syscalls-common.c     if (vp != NULL) {
vp                509 xfs/xfs_syscalls-common.c 	if (vp->v_tag != VT_XFS) {
vp                511 xfs/xfs_syscalls-common.c 	    vrele(vp);
vp                516 xfs/xfs_syscalls-common.c 	xn = VNODE_TO_XNODE(vp);
vp                519 xfs/xfs_syscalls-common.c 	vrele(vp);
vp                632 xfs/xfs_syscalls-common.c     struct vnode *vp = NULL;
vp                653 xfs/xfs_syscalls-common.c 	error = lookup_node (pathptr, SCARG(arg, a_followSymlinks), &vp,
vp                661 xfs/xfs_syscalls-common.c 	return fhget_call (proc, &vice_ioctl, vp);
vp                663 xfs/xfs_syscalls-common.c 	return fhopen_call (proc, &vice_ioctl, vp,
vp                666 xfs/xfs_syscalls-common.c 	if (vp != NULL)
vp                667 xfs/xfs_syscalls-common.c 	    vrele (vp);
vp                671 xfs/xfs_syscalls-common.c 	return remote_pioctl (proc, arg, &vice_ioctl, vp);
vp                143 xfs/xfs_vfsops-bsd.c     struct vnode *vp;
vp                164 xfs/xfs_vfsops-bsd.c         error = xfs_getnewvnode(xfs[0].mp, &vp, &handle);
vp                168 xfs/xfs_vfsops-bsd.c 	xfs_do_vget(vp, 0, curproc);
vp                172 xfs/xfs_vfsops-bsd.c 	vp = XNODE_TO_VNODE(xn);
vp                175 xfs/xfs_vfsops-bsd.c         if (vp->v_usecount <= 0) 
vp                176 xfs/xfs_vfsops-bsd.c 	    xfs_do_vget(vp, 0, curproc);
vp                178 xfs/xfs_vfsops-bsd.c 	    VREF(vp);
vp                182 xfs/xfs_vfsops-bsd.c     *vpp = vp;
vp                270 xfs/xfs_vfsops-bsd.c xfs_vptofh(struct vnode * vp,
vp                280 xfs/xfs_vfsops-bsd.c     xn = VNODE_TO_XNODE(vp);
vp                425 xfs/xfs_vfsops-bsd.c     struct vnode *vp;
vp                443 xfs/xfs_vfsops-bsd.c     error = xfs_fhlookup (proc, &fh, &vp);
vp                448 xfs/xfs_vfsops-bsd.c     switch (vp->v_type) {
vp                462 xfs/xfs_vfsops-bsd.c 	switch (vp->v_type) {
vp                473 xfs/xfs_vfsops-bsd.c 	error = vn_writechk (vp);
vp                483 xfs/xfs_vfsops-bsd.c 	error = VOP_ACCESS(vp, mode, cred, proc);
vp                488 xfs/xfs_vfsops-bsd.c     error = VOP_OPEN(vp, flags, cred, proc);
vp                497 xfs/xfs_vfsops-bsd.c         vp->v_writecount++;
vp                500 xfs/xfs_vfsops-bsd.c     if (vp->v_type == VREG) {
vp                502 xfs/xfs_vfsops-bsd.c 	error = xfs_vfs_object_create(vp, proc, proc->td_proc->p_ucred);
vp                504 xfs/xfs_vfsops-bsd.c 	error = xfs_vfs_object_create(vp, proc, proc->p_ucred);
vp                514 xfs/xfs_vfsops-bsd.c     fp->f_data = (caddr_t)vp;
vp                515 xfs/xfs_vfsops-bsd.c     xfs_vfs_unlock(vp, proc);
vp                526 xfs/xfs_vfsops-bsd.c     vput(vp);
vp                 97 xfs/xfs_vfsops-bsd.h xfs_vptofh(struct vnode * vp,
vp                405 xfs/xfs_vnodeops-bsd.c     struct vnode *vp  = ap->a_vp;
vp                408 xfs/xfs_vnodeops-bsd.c     int error = xfs_remove_common(dvp, vp, cnp->cn_nameptr, 
vp                411 xfs/xfs_vnodeops-bsd.c     int error = xfs_remove_common(dvp, vp, cnp->cn_nameptr, 
vp                418 xfs/xfs_vnodeops-bsd.c     if (dvp == vp)
vp                419 xfs/xfs_vnodeops-bsd.c 	vrele(vp);
vp                421 xfs/xfs_vnodeops-bsd.c 	vput(vp);
vp                427 xfs/xfs_vnodeops-bsd.c 	if (UBCINFOEXISTS(vp)) {
vp                428 xfs/xfs_vnodeops-bsd.c 	    ubc_setsize(vp, 0);
vp                429 xfs/xfs_vnodeops-bsd.c 	    ubc_release(vp);
vp                430 xfs/xfs_vnodeops-bsd.c 	    ubc_uncache(vp);
vp                526 xfs/xfs_vnodeops-bsd.c     struct vnode *vp  = ap->a_vp;
vp                538 xfs/xfs_vnodeops-bsd.c     if (dvp == vp)
vp                539 xfs/xfs_vnodeops-bsd.c 	vrele(vp);
vp                541 xfs/xfs_vnodeops-bsd.c 	vput(vp);
vp                625 xfs/xfs_vnodeops-bsd.c     struct vnode *vp = ap->a_vp;
vp                642 xfs/xfs_vnodeops-bsd.c     if (vp->v_type == VDIR) {
vp                649 xfs/xfs_vnodeops-bsd.c     if (dvp->v_mount != vp->v_mount) {
vp                659 xfs/xfs_vnodeops-bsd.c     if (dvp != vp && (error = xfs_vfs_writelock(vp, p))) {
vp                669 xfs/xfs_vnodeops-bsd.c 			   vp,
vp                680 xfs/xfs_vnodeops-bsd.c     if (dvp != vp)
vp                681 xfs/xfs_vnodeops-bsd.c 	xfs_vfs_unlock(vp, p);
vp                760 xfs/xfs_vnodeops-bsd.c     struct vnode *vp = ap->a_vp;
vp                763 xfs/xfs_vnodeops-bsd.c     ret = xfs_reclaim_common(vp);
vp                764 xfs/xfs_vnodeops-bsd.c     vp->v_data = NULL;
vp                779 xfs/xfs_vnodeops-bsd.c     struct vnode *vp    = ap->a_vp;
vp                780 xfs/xfs_vnodeops-bsd.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                786 xfs/xfs_vnodeops-bsd.c 		       (unsigned long)vp, flags));
vp                801 xfs/xfs_vnodeops-bsd.c     ret = lockmgr(l, flags, &vp->v_interlock, ap->a_td);
vp                803 xfs/xfs_vnodeops-bsd.c     ret = lockmgr(l, flags, &vp->v_interlock, ap->a_p);
vp                810 xfs/xfs_vnodeops-bsd.c     ret = debuglockmgr(l, flags, &vp->v_interlock, ap->a_td,
vp                813 xfs/xfs_vnodeops-bsd.c     ret = debuglockmgr(l, flags, &vp->v_interlock, ap->a_p,
vp                831 xfs/xfs_vnodeops-bsd.c     struct vnode *vp    = ap->a_vp;
vp                832 xfs/xfs_vnodeops-bsd.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                842 xfs/xfs_vnodeops-bsd.c 	    (unsigned long)vp, flags,
vp                855 xfs/xfs_vnodeops-bsd.c     ret = lockmgr (l, flags | LK_RELEASE, &vp->v_interlock, ap->a_td);
vp                857 xfs/xfs_vnodeops-bsd.c     ret = lockmgr (l, flags | LK_RELEASE, &vp->v_interlock, ap->a_p);
vp                864 xfs/xfs_vnodeops-bsd.c     ret = debuglockmgr (l, flags | LK_RELEASE, &vp->v_interlock, ap->a_td,
vp                867 xfs/xfs_vnodeops-bsd.c     ret = debuglockmgr (l, flags | LK_RELEASE, &vp->v_interlock, ap->a_p,
vp                880 xfs/xfs_vnodeops-bsd.c     struct vnode *vp    = ap->a_vp;
vp                881 xfs/xfs_vnodeops-bsd.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                885 xfs/xfs_vnodeops-bsd.c 		       (unsigned long)vp));
vp                907 xfs/xfs_vnodeops-bsd.c     struct vnode *vp    = ap->a_vp;
vp                908 xfs/xfs_vnodeops-bsd.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                911 xfs/xfs_vnodeops-bsd.c 		       (unsigned long)vp, xn->vnlocks));
vp                913 xfs/xfs_vnodeops-bsd.c     while (vp->v_flag & VXLOCK) {
vp                914 xfs/xfs_vnodeops-bsd.c 	vp->v_flag |= VXWANT;
vp                915 xfs/xfs_vnodeops-bsd.c 	(void) tsleep((caddr_t)vp, PINOD, "xfs_vnlock", 0);
vp                917 xfs/xfs_vnodeops-bsd.c     if (vp->v_tag == VT_NON)
vp                928 xfs/xfs_vnodeops-bsd.c     struct vnode *vp    = ap->a_vp;
vp                929 xfs/xfs_vnodeops-bsd.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                931 xfs/xfs_vnodeops-bsd.c 		       (unsigned long)vp, xn->vnlocks));
vp                948 xfs/xfs_vnodeops-bsd.c     struct vnode *vp    = ap->a_vp;
vp                949 xfs/xfs_vnodeops-bsd.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                952 xfs/xfs_vnodeops-bsd.c 		       (unsigned long)vp, xn->vnlocks));
vp               1096 xfs/xfs_vnodeops-bsd.c     struct vnode *vp    = ap->a_vp;
vp               1097 xfs/xfs_vnodeops-bsd.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp               1114 xfs/xfs_vnodeops-bsd.c     if (vp->v_type == VREG && ap->a_flags & PGO_CLEANIT) {
vp               1150 xfs/xfs_vnodeops-bsd.c xfs_realvp(struct vnode * vp,
vp               1160 xfs/xfs_vnodeops-bsd.c xfs_cntl(struct vnode * vp,
vp               1387 xfs/xfs_vnodeops-bsd.c xfs_pushdirty(struct vnode *vp, struct ucred *cred, d_thread_t *p)
vp               1390 xfs/xfs_vnodeops-bsd.c     VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES|PGO_SYNCIO|PGO_CLEANIT);
vp               1392 xfs/xfs_vnodeops-bsd.c     ubc_pushdirty(vp);
vp                 53 xfs/xfs_vnodeops-common.c     struct vnode *vp = XNODE_TO_VNODE(xn);
vp                 60 xfs/xfs_vnodeops-common.c     if (UBCISVALID(vp) && !ubc_isinuse(vp, 1)) {
vp                 62 xfs/xfs_vnodeops-common.c 	ubc_setsize(vp, 0);
vp                 71 xfs/xfs_vnodeops-common.c xfs_open_valid(struct vnode *vp, struct ucred *cred, d_thread_t *p,
vp                 74 xfs/xfs_vnodeops-common.c     struct xfs *xfsp = NNPFS_FROM_VNODE(vp);
vp                 75 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                108 xfs/xfs_vnodeops-common.c xfs_attr_valid(struct vnode *vp, struct ucred *cred, d_thread_t *p,
vp                111 xfs/xfs_vnodeops-common.c     struct xfs *xfsp = NNPFS_FROM_VNODE(vp);
vp                112 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                137 xfs/xfs_vnodeops-common.c xfs_data_valid(struct vnode *vp, struct ucred *cred, d_thread_t *p,
vp                140 xfs/xfs_vnodeops-common.c     struct xfs *xfsp = NNPFS_FROM_VNODE(vp);
vp                141 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                182 xfs/xfs_vnodeops-common.c xfs_open_common(struct vnode *vp,
vp                187 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                193 xfs/xfs_vnodeops-common.c 	ret = xfs_open_valid(vp, cred, p, NNPFS_OPEN_NW);
vp                195 xfs/xfs_vnodeops-common.c 	ret = xfs_open_valid(vp, cred, p, NNPFS_OPEN_NR);
vp                241 xfs/xfs_vnodeops-common.c xfs_fsync_common(struct vnode *vp, struct ucred *cred,
vp                244 xfs/xfs_vnodeops-common.c     struct xfs *xfsp = NNPFS_FROM_VNODE(vp);
vp                245 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                248 xfs/xfs_vnodeops-common.c     NNPFSDEB(XDEBVNOPS, ("xfs_fsync: %lx\n", (unsigned long)vp));
vp                260 xfs/xfs_vnodeops-common.c     xfs_pushdirty(vp, cred, proc);
vp                278 xfs/xfs_vnodeops-common.c xfs_close_common(struct vnode *vp, int fflag,
vp                281 xfs/xfs_vnodeops-common.c     struct xfs *xfsp = NNPFS_FROM_VNODE(vp);
vp                282 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                289 xfs/xfs_vnodeops-common.c     if (vp->v_type == VREG)
vp                290 xfs/xfs_vnodeops-common.c 	xfs_pushdirty(vp, cred, proc);
vp                315 xfs/xfs_vnodeops-common.c xfs_read_common(struct vnode *vp, struct uio *uio, int ioflag,
vp                323 xfs/xfs_vnodeops-common.c     xfs_update_read_cred(VNODE_TO_XNODE(vp), cred);
vp                326 xfs/xfs_vnodeops-common.c     error = xfs_data_valid(vp, cred, xfs_uio_to_thread(uio), NNPFS_DATA_R,
vp                329 xfs/xfs_vnodeops-common.c     error = xfs_data_valid(vp, cred, xfs_uio_to_proc(uio), NNPFS_DATA_R,
vp                340 xfs/xfs_vnodeops-common.c 	struct vnode *t = DATA_FROM_VNODE(vp);
vp                362 xfs/xfs_vnodeops-common.c xfs_write_common(struct vnode *vp, struct uio *uiop, int ioflag,
vp                365 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                373 xfs/xfs_vnodeops-common.c     error = xfs_data_valid(vp, cred, xfs_uio_to_thread(uiop), NNPFS_DATA_W,
vp                374 xfs/xfs_vnodeops-common.c 			   VNODE_TO_XNODE(vp)->attr.va_size);
vp                376 xfs/xfs_vnodeops-common.c     error = xfs_data_valid(vp, cred, xfs_uio_to_proc(uiop), NNPFS_DATA_W,
vp                377 xfs/xfs_vnodeops-common.c 			   VNODE_TO_XNODE(vp)->attr.va_size);
vp                388 xfs/xfs_vnodeops-common.c 	VNODE_TO_XNODE(vp)->flags |= NNPFS_DATA_DIRTY;
vp                393 xfs/xfs_vnodeops-common.c 	VNODE_TO_XNODE(vp)->flags |= NNPFS_DATA_DIRTY;
vp                401 xfs/xfs_vnodeops-common.c 	    xfs_set_vp_size(vp, sub_attr.va_size);
vp                415 xfs/xfs_vnodeops-common.c xfs_getattr_common(struct vnode *vp, struct vattr *vap,
vp                420 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                424 xfs/xfs_vnodeops-common.c     error = xfs_attr_valid(vp, cred, p, NNPFS_ATTR_R);
vp                431 xfs/xfs_vnodeops-common.c xfs_setattr_common(struct vnode *vp, struct vattr *vap,
vp                434 xfs/xfs_vnodeops-common.c     struct xfs *xfsp = NNPFS_FROM_VNODE(vp);
vp                435 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                454 xfs/xfs_vnodeops-common.c 	VNODE_TO_XNODE(vp)->flags |= NNPFS_ATTR_DIRTY;
vp                471 xfs/xfs_vnodeops-common.c 	    if (vp->v_type == VREG) {
vp                478 xfs/xfs_vnodeops-common.c 		if (UBCINFOEXISTS(vp))
vp                479 xfs/xfs_vnodeops-common.c 		    ubc_setsize(vp, msg.attr.xa_size);
vp                516 xfs/xfs_vnodeops-common.c xfs_access_common(struct vnode *vp, int mode, struct ucred *cred,
vp                524 xfs/xfs_vnodeops-common.c     error = xfs_attr_valid(vp, cred, p, NNPFS_ATTR_R);
vp                526 xfs/xfs_vnodeops-common.c 	struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                691 xfs/xfs_vnodeops-common.c 		  struct vnode *vp,
vp                716 xfs/xfs_vnodeops-common.c 	xfs_dnlc_purge (vp);
vp                801 xfs/xfs_vnodeops-common.c 		 struct vnode *vp,
vp                825 xfs/xfs_vnodeops-common.c 	xfs_dnlc_purge (vp);
vp                833 xfs/xfs_vnodeops-common.c xfs_readdir_common(struct vnode *vp, 
vp                846 xfs/xfs_vnodeops-common.c     error = xfs_data_valid(vp, cred, xfs_uio_to_thread(uiop), NNPFS_DATA_R,
vp                849 xfs/xfs_vnodeops-common.c     error = xfs_data_valid(vp, cred, xfs_uio_to_proc(uiop), NNPFS_DATA_R,
vp                853 xfs/xfs_vnodeops-common.c 	struct vnode *t = DATA_FROM_VNODE(vp);
vp                884 xfs/xfs_vnodeops-common.c 		struct vnode *vp, 
vp                891 xfs/xfs_vnodeops-common.c     struct xfs_node *xn2 = VNODE_TO_XNODE(vp);
vp                964 xfs/xfs_vnodeops-common.c xfs_readlink_common(struct vnode *vp, struct uio *uiop, struct ucred *cred)
vp                971 xfs/xfs_vnodeops-common.c     error = xfs_data_valid(vp, cred, xfs_uio_to_thread(uiop), NNPFS_DATA_R,
vp                974 xfs/xfs_vnodeops-common.c     error = xfs_data_valid(vp, cred, xfs_uio_to_proc(uiop), NNPFS_DATA_R,
vp                978 xfs/xfs_vnodeops-common.c 	struct vnode *t = DATA_FROM_VNODE(vp);
vp                994 xfs/xfs_vnodeops-common.c xfs_inactive_common(struct vnode *vp, d_thread_t *p)
vp                997 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp               1000 xfs/xfs_vnodeops-common.c 		       (unsigned long)vp));
vp               1014 xfs/xfs_vnodeops-common.c     if (vp->v_type == VREG)
vp               1015 xfs/xfs_vnodeops-common.c 	xfs_pushdirty(vp, xn->wr_cred, p);
vp               1017 xfs/xfs_vnodeops-common.c     error = xfs_fsync_common(vp, xn->wr_cred, /* XXX */ 0, p);
vp               1028 xfs/xfs_vnodeops-common.c 	xfs_vfs_unlock(vp, p);
vp               1030 xfs/xfs_vnodeops-common.c         vrecycle(vp, p);
vp               1033 xfs/xfs_vnodeops-common.c 			     (unsigned long)vp, vp?vp->v_usecount:0));
vp               1037 xfs/xfs_vnodeops-common.c 	xfs_vfs_unlock(vp, p);
vp               1048 xfs/xfs_vnodeops-common.c xfs_reclaim_common(struct vnode *vp)
vp               1051 xfs/xfs_vnodeops-common.c     struct xfs *xfsp = NNPFS_FROM_VNODE(vp);
vp               1052 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp               1055 xfs/xfs_vnodeops-common.c 		       (unsigned long)vp));
vp               1074 xfs/xfs_vnodeops-common.c     xfs_dnlc_purge(vp);
vp               1135 xfs/xfs_vnodeops-common.c xfs_printnode_common (struct vnode *vp)
vp               1137 xfs/xfs_vnodeops-common.c     struct xfs_node *xn = VNODE_TO_XNODE(vp);
vp                 47 xfs/xfs_vnodeops.h #define xfs_vfs_readlock(vp, proc) VREF((vp))
vp                 48 xfs/xfs_vnodeops.h #define xfs_vfs_writelock(vp, proc) VREF((vp))
vp                 49 xfs/xfs_vnodeops.h #define xfs_vfs_unlock(vp, proc) vrele((vp))
vp                 53 xfs/xfs_vnodeops.h #define xfs_vfs_vn_lock(vp, flags, proc) (0)
vp                 57 xfs/xfs_vnodeops.h #define xfs_vfs_readlock(vp, proc) vn_lock((vp), LK_SHARED | LK_RETRY)
vp                 58 xfs/xfs_vnodeops.h #define xfs_vfs_writelock(vp, proc) vn_lock((vp), LK_EXCLUSIVE | LK_RETRY)
vp                 59 xfs/xfs_vnodeops.h #define xfs_vfs_unlock(vp, proc) VOP_UNLOCK((vp), 0)
vp                 60 xfs/xfs_vnodeops.h #define xfs_vfs_vn_lock(vp, flags, proc) vn_lock((vp), (flags))
vp                 64 xfs/xfs_vnodeops.h #define xfs_vfs_readlock(vp, proc) vn_lock((vp), LK_SHARED | LK_RETRY, (proc))
vp                 65 xfs/xfs_vnodeops.h #define xfs_vfs_writelock(vp, proc) vn_lock((vp), LK_EXCLUSIVE | LK_RETRY, (proc))
vp                 66 xfs/xfs_vnodeops.h #define xfs_vfs_unlock(vp, proc) VOP_UNLOCK((vp), 0, (proc))
vp                 67 xfs/xfs_vnodeops.h #define xfs_vfs_vn_lock(vp, flags, proc) vn_lock((vp), (flags), (proc))
vp                 71 xfs/xfs_vnodeops.h #define xfs_vfs_readlock(vp, proc) VOP_LOCK((vp))
vp                 72 xfs/xfs_vnodeops.h #define xfs_vfs_writelock(vp, proc) VOP_LOCK((vp))
vp                 73 xfs/xfs_vnodeops.h #define xfs_vfs_unlock(vp, proc) VOP_UNLOCK((vp))
vp                 77 xfs/xfs_vnodeops.h #define xfs_vfs_vn_lock(vp, flags, proc) (0)
vp                 86 xfs/xfs_vnodeops.h xfs_open_valid(struct vnode *vp, struct ucred *cred, d_thread_t *p,
vp                 90 xfs/xfs_vnodeops.h xfs_attr_valid(struct vnode *vp, struct ucred *cred, d_thread_t *p,
vp                 94 xfs/xfs_vnodeops.h xfs_fetch_rights(struct vnode *vp, struct ucred *cred, d_thread_t *p);
vp                 97 xfs/xfs_vnodeops.h xfs_data_valid(struct vnode *vp, struct ucred *cred, d_thread_t *p,
vp                101 xfs/xfs_vnodeops.h xfs_open_common(struct vnode *vp,
vp                107 xfs/xfs_vnodeops.h xfs_fsync_common(struct vnode *vp, struct ucred *cred,
vp                111 xfs/xfs_vnodeops.h xfs_close_common(struct vnode *vp, int fflag,
vp                115 xfs/xfs_vnodeops.h xfs_read_common(struct vnode *vp, struct uio *uio, int ioflag,
vp                119 xfs/xfs_vnodeops.h xfs_write_common(struct vnode *vp, struct uio *uiop, int ioflag,
vp                123 xfs/xfs_vnodeops.h xfs_getattr_common(struct vnode *vp, struct vattr *vap,
vp                127 xfs/xfs_vnodeops.h xfs_setattr_common(struct vnode *vp, struct vattr *vap,
vp                131 xfs/xfs_vnodeops.h xfs_access_common(struct vnode *vp, int mode,
vp                148 xfs/xfs_vnodeops.h 		  struct vnode *vp, 
vp                172 xfs/xfs_vnodeops.h 		 struct vnode *vp,
vp                178 xfs/xfs_vnodeops.h xfs_readdir_common(struct vnode *vp, 
vp                186 xfs/xfs_vnodeops.h 		struct vnode *vp, 
vp                199 xfs/xfs_vnodeops.h xfs_readlink_common(struct vnode *vp, struct uio *uiop, struct ucred *cred);
vp                202 xfs/xfs_vnodeops.h xfs_inactive_common(struct vnode *vp, d_thread_t *p);
vp                205 xfs/xfs_vnodeops.h xfs_reclaim_common(struct vnode *vp);
vp                214 xfs/xfs_vnodeops.h xfs_printnode_common (struct vnode *vp);
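As orientation for the xfs_vnodeops.h compatibility macros listed above (xfs_vfs_writelock, xfs_vfs_unlock and friends), here is a minimal hypothetical sketch of how a vnode operation would use them, assuming the OpenBSD-style variant where vn_lock and VOP_UNLOCK take a struct proc * (the same pattern as the dvp != vp branch in xfs_vnodeops-bsd.c:659-681 above). The function example_locked_op and the exact include list are assumptions for illustration, not code from the tree.

	#include <xfs/xfs_locl.h>	/* assumed to pull in the platform glue */
	#include <xfs/xfs_node.h>	/* VNODE_TO_XNODE() */
	#include <xfs/xfs_vnodeops.h>	/* xfs_vfs_writelock(), xfs_vfs_unlock() */

	/*
	 * Hypothetical vnode-op body: grab the exclusive lock through the
	 * compatibility macro, operate on the node, and drop the lock.
	 * With the three-argument variant, xfs_vfs_writelock() expands to
	 * vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p) and xfs_vfs_unlock() to
	 * VOP_UNLOCK(vp, 0, p), per the macro definitions above.
	 */
	static int
	example_locked_op(struct vnode *vp, d_thread_t *p)
	{
		int error;

		error = xfs_vfs_writelock(vp, p);
		if (error)
			return (error);

		/* ... operate on VNODE_TO_XNODE(vp) here ... */

		xfs_vfs_unlock(vp, p);
		return (0);
	}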