1/*-
2 * Copyright (c) 2000-2004
3 *	Poul-Henning Kamp.  All rights reserved.
4 * Copyright (c) 1989, 1992-1993, 1995
5 *	The Regents of the University of California.  All rights reserved.
6 *
7 * This code is derived from software donated to Berkeley by
8 * Jan-Simon Pendry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Neither the name of the University nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
32 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
33 *
34 * $FreeBSD: stable/10/sys/fs/devfs/devfs_vnops.c 257121 2013-10-25 16:31:28Z kib $
35 */
36
37/*
38 * TODO:
39 *	mkdir: do we want it?
40 */
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/conf.h>
45#include <sys/dirent.h>
46#include <sys/fcntl.h>
47#include <sys/file.h>
48#include <sys/filedesc.h>
49#include <sys/filio.h>
50#include <sys/jail.h>
51#include <sys/kernel.h>
52#include <sys/lock.h>
53#include <sys/malloc.h>
54#include <sys/mount.h>
55#include <sys/namei.h>
56#include <sys/priv.h>
57#include <sys/proc.h>
58#include <sys/stat.h>
59#include <sys/sx.h>
60#include <sys/time.h>
61#include <sys/ttycom.h>
62#include <sys/unistd.h>
63#include <sys/vnode.h>
64
65static struct vop_vector devfs_vnodeops;
66static struct vop_vector devfs_specops;
67static struct fileops devfs_ops_f;
68
69#include <fs/devfs/devfs.h>
70#include <fs/devfs/devfs_int.h>
71
72#include <security/mac/mac_framework.h>
73
74static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");
75
76struct mtx	devfs_de_interlock;
77MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
78struct sx	clone_drain_lock;
79SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
80struct mtx	cdevpriv_mtx;
81MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
82
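/*
 * Resolve the cdev and cdevsw behind a devfs file and take a thread
 * reference on the device.  On success, td_fpop is pointed at the file
 * so that the d_*() methods can reach the per-descriptor data; the
 * caller must restore td_fpop and call dev_relthread() when done.
 */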
83static int
84devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp,
85    int *ref)
86{
87
88	*dswp = devvn_refthread(fp->f_vnode, devp, ref);
89	if (*devp != fp->f_data) {
90		if (*dswp != NULL)
91			dev_relthread(*devp, *ref);
92		return (ENXIO);
93	}
94	KASSERT((*devp)->si_refcount > 0,
95	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
96	if (*dswp == NULL)
97		return (ENXIO);
98	curthread->td_fpop = fp;
99	return (0);
100}
101
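/*
 * Return the per-file-descriptor private data installed by
 * devfs_set_cdevpriv().  Intended to be called from within a driver's
 * cdevsw methods, where td_fpop identifies the file being operated on.
 */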
102int
103devfs_get_cdevpriv(void **datap)
104{
105	struct file *fp;
106	struct cdev_privdata *p;
107	int error;
108
109	fp = curthread->td_fpop;
110	if (fp == NULL)
111		return (EBADF);
112	p = fp->f_cdevpriv;
113	if (p != NULL) {
114		error = 0;
115		*datap = p->cdpd_data;
116	} else
117		error = ENOENT;
118	return (error);
119}
120
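/*
 * Associate driver private data and a destructor with the file
 * descriptor currently being operated on.  Fails with ENOENT if no
 * file is associated with the calling thread and with EBUSY if
 * private data has already been set on the descriptor.
 */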
121int
122devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr)
123{
124	struct file *fp;
125	struct cdev_priv *cdp;
126	struct cdev_privdata *p;
127	int error;
128
129	fp = curthread->td_fpop;
130	if (fp == NULL)
131		return (ENOENT);
132	cdp = cdev2priv((struct cdev *)fp->f_data);
133	p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
134	p->cdpd_data = priv;
135	p->cdpd_dtr = priv_dtr;
136	p->cdpd_fp = fp;
137	mtx_lock(&cdevpriv_mtx);
138	if (fp->f_cdevpriv == NULL) {
139		LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
140		fp->f_cdevpriv = p;
141		mtx_unlock(&cdevpriv_mtx);
142		error = 0;
143	} else {
144		mtx_unlock(&cdevpriv_mtx);
145		free(p, M_CDEVPDATA);
146		error = EBUSY;
147	}
148	return (error);
149}
150
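/*
 * Tear down a cdevpriv association.  Called with cdevpriv_mtx held;
 * drops the mutex, runs the destructor and frees the record.
 */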
151void
152devfs_destroy_cdevpriv(struct cdev_privdata *p)
153{
154
155	mtx_assert(&cdevpriv_mtx, MA_OWNED);
156	p->cdpd_fp->f_cdevpriv = NULL;
157	LIST_REMOVE(p, cdpd_list);
158	mtx_unlock(&cdevpriv_mtx);
159	(p->cdpd_dtr)(p->cdpd_data);
160	free(p, M_CDEVPDATA);
161}
162
163void
164devfs_fpdrop(struct file *fp)
165{
166	struct cdev_privdata *p;
167
168	mtx_lock(&cdevpriv_mtx);
169	if ((p = fp->f_cdevpriv) == NULL) {
170		mtx_unlock(&cdevpriv_mtx);
171		return;
172	}
173	devfs_destroy_cdevpriv(p);
174}
175
176void
177devfs_clear_cdevpriv(void)
178{
179	struct file *fp;
180
181	fp = curthread->td_fpop;
182	if (fp == NULL)
183		return;
184	devfs_fpdrop(fp);
185}
186
187/*
188 * On success devfs_populate_vp() returns with dmp->dm_lock held; on failure the lock is dropped.
189 */
190static int
191devfs_populate_vp(struct vnode *vp)
192{
193	struct devfs_dirent *de;
194	struct devfs_mount *dmp;
195	int locked;
196
197	ASSERT_VOP_LOCKED(vp, "devfs_populate_vp");
198
199	dmp = VFSTODEVFS(vp->v_mount);
200	locked = VOP_ISLOCKED(vp);
201
202	sx_xlock(&dmp->dm_lock);
203	DEVFS_DMP_HOLD(dmp);
204
205	/* Can't call devfs_populate() with the vnode lock held. */
206	VOP_UNLOCK(vp, 0);
207	devfs_populate(dmp);
208
209	sx_xunlock(&dmp->dm_lock);
210	vn_lock(vp, locked | LK_RETRY);
211	sx_xlock(&dmp->dm_lock);
212	if (DEVFS_DMP_DROP(dmp)) {
213		sx_xunlock(&dmp->dm_lock);
214		devfs_unmount_final(dmp);
215		return (EBADF);
216	}
217	if ((vp->v_iflag & VI_DOOMED) != 0) {
218		sx_xunlock(&dmp->dm_lock);
219		return (EBADF);
220	}
221	de = vp->v_data;
222	KASSERT(de != NULL,
223	    ("devfs_populate_vp: vp->v_data == NULL but vnode not doomed"));
224	if ((de->de_flags & DE_DOOMED) != 0) {
225		sx_xunlock(&dmp->dm_lock);
226		return (EBADF);
227	}
228
229	return (0);
230}
231
232static int
233devfs_vptocnp(struct vop_vptocnp_args *ap)
234{
235	struct vnode *vp = ap->a_vp;
236	struct vnode **dvp = ap->a_vpp;
237	struct devfs_mount *dmp;
238	char *buf = ap->a_buf;
239	int *buflen = ap->a_buflen;
240	struct devfs_dirent *dd, *de;
241	int i, error;
242
243	dmp = VFSTODEVFS(vp->v_mount);
244
245	error = devfs_populate_vp(vp);
246	if (error != 0)
247		return (error);
248
249	i = *buflen;
250	dd = vp->v_data;
251
252	if (vp->v_type == VCHR) {
253		i -= strlen(dd->de_cdp->cdp_c.si_name);
254		if (i < 0) {
255			error = ENOMEM;
256			goto finished;
257		}
258		bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
259		    strlen(dd->de_cdp->cdp_c.si_name));
260		de = dd->de_dir;
261	} else if (vp->v_type == VDIR) {
262		if (dd == dmp->dm_rootdir) {
263			*dvp = vp;
264			vref(*dvp);
265			goto finished;
266		}
267		i -= dd->de_dirent->d_namlen;
268		if (i < 0) {
269			error = ENOMEM;
270			goto finished;
271		}
272		bcopy(dd->de_dirent->d_name, buf + i,
273		    dd->de_dirent->d_namlen);
274		de = dd;
275	} else {
276		error = ENOENT;
277		goto finished;
278	}
279	*buflen = i;
280	de = devfs_parent_dirent(de);
281	if (de == NULL) {
282		error = ENOENT;
283		goto finished;
284	}
285	mtx_lock(&devfs_de_interlock);
286	*dvp = de->de_vnode;
287	if (*dvp != NULL) {
288		VI_LOCK(*dvp);
289		mtx_unlock(&devfs_de_interlock);
290		vholdl(*dvp);
291		VI_UNLOCK(*dvp);
292		vref(*dvp);
293		vdrop(*dvp);
294	} else {
295		mtx_unlock(&devfs_de_interlock);
296		error = ENOENT;
297	}
298finished:
299	sx_xunlock(&dmp->dm_lock);
300	return (error);
301}
302
303/*
304 * Construct the fully qualified path name relative to the mountpoint.
305 * If a NULL cnp is provided, no '/' is appended to the resulting path.
306 */
307char *
308devfs_fqpn(char *buf, struct devfs_mount *dmp, struct devfs_dirent *dd,
309    struct componentname *cnp)
310{
311	int i;
312	struct devfs_dirent *de;
313
314	sx_assert(&dmp->dm_lock, SA_LOCKED);
315
316	i = SPECNAMELEN;
317	buf[i] = '\0';
318	if (cnp != NULL)
319		i -= cnp->cn_namelen;
320	if (i < 0)
321		 return (NULL);
322	if (cnp != NULL)
323		bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
324	de = dd;
325	while (de != dmp->dm_rootdir) {
326		if (cnp != NULL || i < SPECNAMELEN) {
327			i--;
328			if (i < 0)
329				 return (NULL);
330			buf[i] = '/';
331		}
332		i -= de->de_dirent->d_namlen;
333		if (i < 0)
334			 return (NULL);
335		bcopy(de->de_dirent->d_name, buf + i,
336		    de->de_dirent->d_namlen);
337		de = devfs_parent_dirent(de);
338		if (de == NULL)
339			return (NULL);
340	}
341	return (buf + i);
342}
343
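/*
 * Release the dirent and mount references taken in devfs_allocv().
 * Returns 0 if the dirent is still valid, 1 if it was doomed and 2 if
 * the mount was torn down as well.  dm_lock is still held on return
 * only when the dirent is valid and drop_dm_lock was not requested.
 */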
344static int
345devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
346	struct devfs_dirent *de)
347{
348	int not_found;
349
350	not_found = 0;
351	if (de->de_flags & DE_DOOMED)
352		not_found = 1;
353	if (DEVFS_DE_DROP(de)) {
354		KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
355		devfs_dirent_free(de);
356	}
357	if (DEVFS_DMP_DROP(dmp)) {
358		KASSERT(not_found == 1,
359			("DEVFS mount struct freed before dirent"));
360		not_found = 2;
361		sx_xunlock(&dmp->dm_lock);
362		devfs_unmount_final(dmp);
363	}
364	if (not_found == 1 || (drop_dm_lock && not_found != 2))
365		sx_unlock(&dmp->dm_lock);
366	return (not_found);
367}
368
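/*
 * Destructor handed to insmntque1(): if the vnode cannot be attached
 * to the mount, detach it from its dirent and dispose of it.
 */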
369static void
370devfs_insmntque_dtr(struct vnode *vp, void *arg)
371{
372	struct devfs_dirent *de;
373
374	de = (struct devfs_dirent *)arg;
375	mtx_lock(&devfs_de_interlock);
376	vp->v_data = NULL;
377	de->de_vnode = NULL;
378	mtx_unlock(&devfs_de_interlock);
379	vgone(vp);
380	vput(vp);
381}
382
383/*
384 * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
385 * it on return.
386 */
387int
388devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
389    struct vnode **vpp)
390{
391	int error;
392	struct vnode *vp;
393	struct cdev *dev;
394	struct devfs_mount *dmp;
395	struct cdevsw *dsw;
396
397	dmp = VFSTODEVFS(mp);
398	if (de->de_flags & DE_DOOMED) {
399		sx_xunlock(&dmp->dm_lock);
400		return (ENOENT);
401	}
402loop:
403	DEVFS_DE_HOLD(de);
404	DEVFS_DMP_HOLD(dmp);
405	mtx_lock(&devfs_de_interlock);
406	vp = de->de_vnode;
407	if (vp != NULL) {
408		VI_LOCK(vp);
409		mtx_unlock(&devfs_de_interlock);
410		sx_xunlock(&dmp->dm_lock);
411		vget(vp, lockmode | LK_INTERLOCK | LK_RETRY, curthread);
412		sx_xlock(&dmp->dm_lock);
413		if (devfs_allocv_drop_refs(0, dmp, de)) {
414			vput(vp);
415			return (ENOENT);
416		}
417		else if ((vp->v_iflag & VI_DOOMED) != 0) {
418			mtx_lock(&devfs_de_interlock);
419			if (de->de_vnode == vp) {
420				de->de_vnode = NULL;
421				vp->v_data = NULL;
422			}
423			mtx_unlock(&devfs_de_interlock);
424			vput(vp);
425			goto loop;
426		}
427		sx_xunlock(&dmp->dm_lock);
428		*vpp = vp;
429		return (0);
430	}
431	mtx_unlock(&devfs_de_interlock);
432	if (de->de_dirent->d_type == DT_CHR) {
433		if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
434			devfs_allocv_drop_refs(1, dmp, de);
435			return (ENOENT);
436		}
437		dev = &de->de_cdp->cdp_c;
438	} else {
439		dev = NULL;
440	}
441	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
442	if (error != 0) {
443		devfs_allocv_drop_refs(1, dmp, de);
444		printf("devfs_allocv: failed to allocate new vnode\n");
445		return (error);
446	}
447
448	if (de->de_dirent->d_type == DT_CHR) {
449		vp->v_type = VCHR;
450		VI_LOCK(vp);
451		dev_lock();
452		dev_refl(dev);
453		/* XXX: v_rdev should be protected by the vnode lock. */
454		vp->v_rdev = dev;
455		KASSERT(vp->v_usecount == 1,
456		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
457		dev->si_usecount += vp->v_usecount;
458		/* Special casing of ttys for deadfs.  Probably redundant. */
459		dsw = dev->si_devsw;
460		if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
461			vp->v_vflag |= VV_ISTTY;
462		dev_unlock();
463		VI_UNLOCK(vp);
464		if ((dev->si_flags & SI_ETERNAL) != 0)
465			vp->v_vflag |= VV_ETERNALDEV;
466		vp->v_op = &devfs_specops;
467	} else if (de->de_dirent->d_type == DT_DIR) {
468		vp->v_type = VDIR;
469	} else if (de->de_dirent->d_type == DT_LNK) {
470		vp->v_type = VLNK;
471	} else {
472		vp->v_type = VBAD;
473	}
474	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
475	VN_LOCK_ASHARE(vp);
476	mtx_lock(&devfs_de_interlock);
477	vp->v_data = de;
478	de->de_vnode = vp;
479	mtx_unlock(&devfs_de_interlock);
480	error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
481	if (error != 0) {
482		(void) devfs_allocv_drop_refs(1, dmp, de);
483		return (error);
484	}
485	if (devfs_allocv_drop_refs(0, dmp, de)) {
486		vput(vp);
487		return (ENOENT);
488	}
489#ifdef MAC
490	mac_devfs_vnode_associate(mp, de, vp);
491#endif
492	sx_xunlock(&dmp->dm_lock);
493	*vpp = vp;
494	return (0);
495}
496
497static int
498devfs_access(struct vop_access_args *ap)
499{
500	struct vnode *vp = ap->a_vp;
501	struct devfs_dirent *de;
502	int error;
503
504	de = vp->v_data;
505	if (vp->v_type == VDIR)
506		de = de->de_dir;
507
508	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
509	    ap->a_accmode, ap->a_cred, NULL);
510	if (error == 0)
511		return (0);
512	if (error != EACCES)
513		return (error);
514	/* We do, however, allow access to the controlling terminal */
515	if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
516		return (error);
517	if (ap->a_td->td_proc->p_session->s_ttydp == de->de_cdp)
518		return (0);
519	return (error);
520}
521
522/* ARGSUSED */
523static int
524devfs_close(struct vop_close_args *ap)
525{
526	struct vnode *vp = ap->a_vp, *oldvp;
527	struct thread *td = ap->a_td;
528	struct cdev *dev = vp->v_rdev;
529	struct cdevsw *dsw;
530	int vp_locked, error, ref;
531
532	/*
533	 * XXX: Don't call d_close() if we were called because of
534	 * XXX: insmntque1() failure.
535	 */
536	if (vp->v_data == NULL)
537		return (0);
538
539	/*
540	 * Hack: a tty device that is a controlling terminal
541	 * has a reference from the session structure.
542	 * We cannot easily tell that a character device is
543	 * a controlling terminal, unless it is the closing
544	 * process' controlling terminal.  In that case,
545	 * if the reference count is 2 (this last descriptor
546	 * plus the session), release the reference from the session.
547	 */
548	oldvp = NULL;
549	sx_xlock(&proctree_lock);
550	if (td && vp == td->td_proc->p_session->s_ttyvp) {
551		SESS_LOCK(td->td_proc->p_session);
552		VI_LOCK(vp);
553		if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) {
554			td->td_proc->p_session->s_ttyvp = NULL;
555			td->td_proc->p_session->s_ttydp = NULL;
556			oldvp = vp;
557		}
558		VI_UNLOCK(vp);
559		SESS_UNLOCK(td->td_proc->p_session);
560	}
561	sx_xunlock(&proctree_lock);
562	if (oldvp != NULL)
563		vrele(oldvp);
564	/*
565	 * We do not want to really close the device if it
566	 * is still in use unless we are trying to close it
567	 * forcibly. Since every use (buffer, vnode, swap, cmap)
568	 * holds a reference to the vnode, and because we mark
569	 * any other vnodes that alias this device, when the
570	 * sum of the reference counts on all the aliased
571	 * vnodes descends to one, we are on last close.
572	 */
573	dsw = dev_refthread(dev, &ref);
574	if (dsw == NULL)
575		return (ENXIO);
576	VI_LOCK(vp);
577	if (vp->v_iflag & VI_DOOMED) {
578		/* Forced close. */
579	} else if (dsw->d_flags & D_TRACKCLOSE) {
580		/* Keep device updated on status. */
581	} else if (count_dev(dev) > 1) {
582		VI_UNLOCK(vp);
583		dev_relthread(dev, ref);
584		return (0);
585	}
586	vholdl(vp);
587	VI_UNLOCK(vp);
588	vp_locked = VOP_ISLOCKED(vp);
589	VOP_UNLOCK(vp, 0);
590	KASSERT(dev->si_refcount > 0,
591	    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
592	error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
593	dev_relthread(dev, ref);
594	vn_lock(vp, vp_locked | LK_RETRY);
595	vdrop(vp);
596	return (error);
597}
598
599static int
600devfs_close_f(struct file *fp, struct thread *td)
601{
602	int error;
603	struct file *fpop;
604
605	/*
606	 * NB: td may be NULL if this descriptor is closed due to
607	 * garbage collection from a closed UNIX domain socket.
608	 */
609	fpop = curthread->td_fpop;
610	curthread->td_fpop = fp;
611	error = vnops.fo_close(fp, td);
612	curthread->td_fpop = fpop;
613
614	/*
615	 * The f_cdevpriv cannot be assigned non-NULL value while we
616	 * are destroying the file.
617	 */
618	if (fp->f_cdevpriv != NULL)
619		devfs_fpdrop(fp);
620	return (error);
621}
622
623static int
624devfs_fsync(struct vop_fsync_args *ap)
625{
626	int error;
627	struct bufobj *bo;
628	struct devfs_dirent *de;
629
630	if (!vn_isdisk(ap->a_vp, &error)) {
631		bo = &ap->a_vp->v_bufobj;
632		de = ap->a_vp->v_data;
633		if (error == ENXIO && bo->bo_dirty.bv_cnt > 0) {
634			printf("Device %s went missing before all of the data "
635			    "could be written to it; expect data loss.\n",
636			    de->de_dirent->d_name);
637
638			error = vop_stdfsync(ap);
639			if (bo->bo_dirty.bv_cnt != 0 || error != 0)
640				panic("devfs_fsync: vop_stdfsync failed.");
641		}
642
643		return (0);
644	}
645
646	return (vop_stdfsync(ap));
647}
648
649static int
650devfs_getattr(struct vop_getattr_args *ap)
651{
652	struct vnode *vp = ap->a_vp;
653	struct vattr *vap = ap->a_vap;
654	int error;
655	struct devfs_dirent *de;
656	struct devfs_mount *dmp;
657	struct cdev *dev;
658
659	error = devfs_populate_vp(vp);
660	if (error != 0)
661		return (error);
662
663	dmp = VFSTODEVFS(vp->v_mount);
664	sx_xunlock(&dmp->dm_lock);
665
666	de = vp->v_data;
667	KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
668	if (vp->v_type == VDIR) {
669		de = de->de_dir;
670		KASSERT(de != NULL,
671		    ("Null dir dirent in devfs_getattr vp=%p", vp));
672	}
673	vap->va_uid = de->de_uid;
674	vap->va_gid = de->de_gid;
675	vap->va_mode = de->de_mode;
676	if (vp->v_type == VLNK)
677		vap->va_size = strlen(de->de_symlink);
678	else if (vp->v_type == VDIR)
679		vap->va_size = vap->va_bytes = DEV_BSIZE;
680	else
681		vap->va_size = 0;
682	if (vp->v_type != VDIR)
683		vap->va_bytes = 0;
684	vap->va_blocksize = DEV_BSIZE;
685	vap->va_type = vp->v_type;
686
687#define fix(aa)							\
688	do {							\
689		if ((aa).tv_sec <= 3600) {			\
690			(aa).tv_sec = boottime.tv_sec;		\
691			(aa).tv_nsec = boottime.tv_usec * 1000; \
692		}						\
693	} while (0)
694
695	if (vp->v_type != VCHR)  {
696		fix(de->de_atime);
697		vap->va_atime = de->de_atime;
698		fix(de->de_mtime);
699		vap->va_mtime = de->de_mtime;
700		fix(de->de_ctime);
701		vap->va_ctime = de->de_ctime;
702	} else {
703		dev = vp->v_rdev;
704		fix(dev->si_atime);
705		vap->va_atime = dev->si_atime;
706		fix(dev->si_mtime);
707		vap->va_mtime = dev->si_mtime;
708		fix(dev->si_ctime);
709		vap->va_ctime = dev->si_ctime;
710
711		vap->va_rdev = cdev2priv(dev)->cdp_inode;
712	}
713	vap->va_gen = 0;
714	vap->va_flags = 0;
715	vap->va_filerev = 0;
716	vap->va_nlink = de->de_links;
717	vap->va_fileid = de->de_inode;
718
719	return (error);
720}
721
722/* ARGSUSED */
723static int
724devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
725{
726	struct cdev *dev;
727	struct cdevsw *dsw;
728	struct vnode *vp;
729	struct vnode *vpold;
730	int error, i, ref;
731	const char *p;
732	struct fiodgname_arg *fgn;
733	struct file *fpop;
734
735	fpop = td->td_fpop;
736	error = devfs_fp_check(fp, &dev, &dsw, &ref);
737	if (error)
738		return (error);
739
740	if (com == FIODTYPE) {
741		*(int *)data = dsw->d_flags & D_TYPEMASK;
742		td->td_fpop = fpop;
743		dev_relthread(dev, ref);
744		return (0);
745	} else if (com == FIODGNAME) {
746		fgn = data;
747		p = devtoname(dev);
748		i = strlen(p) + 1;
749		if (i > fgn->len)
750			error = EINVAL;
751		else
752			error = copyout(p, fgn->buf, i);
753		td->td_fpop = fpop;
754		dev_relthread(dev, ref);
755		return (error);
756	}
757	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
758	td->td_fpop = NULL;
759	dev_relthread(dev, ref);
760	if (error == ENOIOCTL)
761		error = ENOTTY;
762	if (error == 0 && com == TIOCSCTTY) {
763		vp = fp->f_vnode;
764
765		/* Do nothing if reassigning same control tty */
766		sx_slock(&proctree_lock);
767		if (td->td_proc->p_session->s_ttyvp == vp) {
768			sx_sunlock(&proctree_lock);
769			return (0);
770		}
771
772		vpold = td->td_proc->p_session->s_ttyvp;
773		VREF(vp);
774		SESS_LOCK(td->td_proc->p_session);
775		td->td_proc->p_session->s_ttyvp = vp;
776		td->td_proc->p_session->s_ttydp = cdev2priv(dev);
777		SESS_UNLOCK(td->td_proc->p_session);
778
779		sx_sunlock(&proctree_lock);
780
781		/* Get rid of reference to old control tty */
782		if (vpold)
783			vrele(vpold);
784	}
785	return (error);
786}
787
788/* ARGSUSED */
789static int
790devfs_kqfilter_f(struct file *fp, struct knote *kn)
791{
792	struct cdev *dev;
793	struct cdevsw *dsw;
794	int error, ref;
795	struct file *fpop;
796	struct thread *td;
797
798	td = curthread;
799	fpop = td->td_fpop;
800	error = devfs_fp_check(fp, &dev, &dsw, &ref);
801	if (error)
802		return (error);
803	error = dsw->d_kqfilter(dev, kn);
804	td->td_fpop = fpop;
805	dev_relthread(dev, ref);
806	return (error);
807}
808
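/*
 * Verify that the calling thread's prison may see the device behind
 * this dirent; the process' controlling terminal is always visible.
 */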
809static inline int
810devfs_prison_check(struct devfs_dirent *de, struct thread *td)
811{
812	struct cdev_priv *cdp;
813	struct ucred *dcr;
814	int error;
815
816	cdp = de->de_cdp;
817	if (cdp == NULL)
818		return (0);
819	dcr = cdp->cdp_c.si_cred;
820	if (dcr == NULL)
821		return (0);
822
823	error = prison_check(td->td_ucred, dcr);
824	if (error == 0)
825		return (0);
826	/* We do, however, allow access to the controlling terminal */
827	if (!(td->td_proc->p_flag & P_CONTROLT))
828		return (error);
829	if (td->td_proc->p_session->s_ttydp == cdp)
830		return (0);
831	return (error);
832}
833
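/*
 * Lookup worker for devfs_lookup().  Entered with dmp->dm_lock held;
 * *dm_unlock is cleared once the lock has been dropped on behalf of
 * the caller (e.g. by devfs_allocv()).
 */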
834static int
835devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
836{
837	struct componentname *cnp;
838	struct vnode *dvp, **vpp;
839	struct thread *td;
840	struct devfs_dirent *de, *dd;
841	struct devfs_dirent **dde;
842	struct devfs_mount *dmp;
843	struct cdev *cdev;
844	int error, flags, nameiop, dvplocked;
845	char specname[SPECNAMELEN + 1], *pname;
846
847	cnp = ap->a_cnp;
848	vpp = ap->a_vpp;
849	dvp = ap->a_dvp;
850	pname = cnp->cn_nameptr;
851	td = cnp->cn_thread;
852	flags = cnp->cn_flags;
853	nameiop = cnp->cn_nameiop;
854	dmp = VFSTODEVFS(dvp->v_mount);
855	dd = dvp->v_data;
856	*vpp = NULLVP;
857
858	if ((flags & ISLASTCN) && nameiop == RENAME)
859		return (EOPNOTSUPP);
860
861	if (dvp->v_type != VDIR)
862		return (ENOTDIR);
863
864	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
865		return (EIO);
866
867	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
868	if (error)
869		return (error);
870
871	if (cnp->cn_namelen == 1 && *pname == '.') {
872		if ((flags & ISLASTCN) && nameiop != LOOKUP)
873			return (EINVAL);
874		*vpp = dvp;
875		VREF(dvp);
876		return (0);
877	}
878
879	if (flags & ISDOTDOT) {
880		if ((flags & ISLASTCN) && nameiop != LOOKUP)
881			return (EINVAL);
882		de = devfs_parent_dirent(dd);
883		if (de == NULL)
884			return (ENOENT);
885		dvplocked = VOP_ISLOCKED(dvp);
886		VOP_UNLOCK(dvp, 0);
887		error = devfs_allocv(de, dvp->v_mount,
888		    cnp->cn_lkflags & LK_TYPE_MASK, vpp);
889		*dm_unlock = 0;
890		vn_lock(dvp, dvplocked | LK_RETRY);
891		return (error);
892	}
893
894	dd = dvp->v_data;
895	de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen, 0);
896	while (de == NULL) {	/* While(...) so we can use break */
897
898		if (nameiop == DELETE)
899			return (ENOENT);
900
901		/*
902		 * OK, we didn't have an entry for the name we were asked for
903		 * so we try to see if anybody can create it on demand.
904		 */
905		pname = devfs_fqpn(specname, dmp, dd, cnp);
906		if (pname == NULL)
907			break;
908
909		cdev = NULL;
910		DEVFS_DMP_HOLD(dmp);
911		sx_xunlock(&dmp->dm_lock);
912		sx_slock(&clone_drain_lock);
913		EVENTHANDLER_INVOKE(dev_clone,
914		    td->td_ucred, pname, strlen(pname), &cdev);
915		sx_sunlock(&clone_drain_lock);
916
917		if (cdev == NULL)
918			sx_xlock(&dmp->dm_lock);
919		else if (devfs_populate_vp(dvp) != 0) {
920			*dm_unlock = 0;
921			sx_xlock(&dmp->dm_lock);
922			if (DEVFS_DMP_DROP(dmp)) {
923				sx_xunlock(&dmp->dm_lock);
924				devfs_unmount_final(dmp);
925			} else
926				sx_xunlock(&dmp->dm_lock);
927			dev_rel(cdev);
928			return (ENOENT);
929		}
930		if (DEVFS_DMP_DROP(dmp)) {
931			*dm_unlock = 0;
932			sx_xunlock(&dmp->dm_lock);
933			devfs_unmount_final(dmp);
934			if (cdev != NULL)
935				dev_rel(cdev);
936			return (ENOENT);
937		}
938
939		if (cdev == NULL)
940			break;
941
942		dev_lock();
943		dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
944		if (dde != NULL && *dde != NULL)
945			de = *dde;
946		dev_unlock();
947		dev_rel(cdev);
948		break;
949	}
950
951	if (de == NULL || de->de_flags & DE_WHITEOUT) {
952		if ((nameiop == CREATE || nameiop == RENAME) &&
953		    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
954			cnp->cn_flags |= SAVENAME;
955			return (EJUSTRETURN);
956		}
957		return (ENOENT);
958	}
959
960	if (devfs_prison_check(de, td))
961		return (ENOENT);
962
963	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
964		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
965		if (error)
966			return (error);
967		if (*vpp == dvp) {
968			VREF(dvp);
969			*vpp = dvp;
970			return (0);
971		}
972	}
973	error = devfs_allocv(de, dvp->v_mount, cnp->cn_lkflags & LK_TYPE_MASK,
974	    vpp);
975	*dm_unlock = 0;
976	return (error);
977}
978
979static int
980devfs_lookup(struct vop_lookup_args *ap)
981{
982	int j;
983	struct devfs_mount *dmp;
984	int dm_unlock;
985
986	if (devfs_populate_vp(ap->a_dvp) != 0)
987		return (ENOTDIR);
988
989	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
990	dm_unlock = 1;
991	j = devfs_lookupx(ap, &dm_unlock);
992	if (dm_unlock == 1)
993		sx_xunlock(&dmp->dm_lock);
994	return (j);
995}
996
997static int
998devfs_mknod(struct vop_mknod_args *ap)
999{
1000	struct componentname *cnp;
1001	struct vnode *dvp, **vpp;
1002	struct devfs_dirent *dd, *de;
1003	struct devfs_mount *dmp;
1004	int error;
1005
1006	/*
1007	 * The only type of node we should be creating here is a
1008	 * character device; for anything else return EOPNOTSUPP.
1009	 */
1010	if (ap->a_vap->va_type != VCHR)
1011		return (EOPNOTSUPP);
1012	dvp = ap->a_dvp;
1013	dmp = VFSTODEVFS(dvp->v_mount);
1014
1015	cnp = ap->a_cnp;
1016	vpp = ap->a_vpp;
1017	dd = dvp->v_data;
1018
1019	error = ENOENT;
1020	sx_xlock(&dmp->dm_lock);
1021	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
1022		if (cnp->cn_namelen != de->de_dirent->d_namlen)
1023			continue;
1024		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
1025		    de->de_dirent->d_namlen) != 0)
1026			continue;
1027		if (de->de_flags & DE_WHITEOUT)
1028			break;
1029		goto notfound;
1030	}
1031	if (de == NULL)
1032		goto notfound;
1033	de->de_flags &= ~DE_WHITEOUT;
1034	error = devfs_allocv(de, dvp->v_mount, LK_EXCLUSIVE, vpp);
1035	return (error);
1036notfound:
1037	sx_xunlock(&dmp->dm_lock);
1038	return (error);
1039}
1040
1041/* ARGSUSED */
1042static int
1043devfs_open(struct vop_open_args *ap)
1044{
1045	struct thread *td = ap->a_td;
1046	struct vnode *vp = ap->a_vp;
1047	struct cdev *dev = vp->v_rdev;
1048	struct file *fp = ap->a_fp;
1049	int error, ref, vlocked;
1050	struct cdevsw *dsw;
1051	struct file *fpop;
1052	struct mtx *mtxp;
1053
1054	if (vp->v_type == VBLK)
1055		return (ENXIO);
1056
1057	if (dev == NULL)
1058		return (ENXIO);
1059
1060	/* Make this field valid before any I/O in d_open. */
1061	if (dev->si_iosize_max == 0)
1062		dev->si_iosize_max = DFLTPHYS;
1063
1064	dsw = dev_refthread(dev, &ref);
1065	if (dsw == NULL)
1066		return (ENXIO);
1067	if (fp == NULL && dsw->d_fdopen != NULL) {
1068		dev_relthread(dev, ref);
1069		return (ENXIO);
1070	}
1071
1072	vlocked = VOP_ISLOCKED(vp);
1073	VOP_UNLOCK(vp, 0);
1074
1075	fpop = td->td_fpop;
1076	td->td_fpop = fp;
1077	if (fp != NULL) {
1078		fp->f_data = dev;
1079		fp->f_vnode = vp;
1080	}
1081	if (dsw->d_fdopen != NULL)
1082		error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
1083	else
1084		error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
1085	/* cleanup any cdevpriv upon error */
1086	if (error != 0)
1087		devfs_clear_cdevpriv();
1088	td->td_fpop = fpop;
1089
1090	vn_lock(vp, vlocked | LK_RETRY);
1091	dev_relthread(dev, ref);
1092	if (error != 0) {
1093		if (error == ERESTART)
1094			error = EINTR;
1095		return (error);
1096	}
1097
1098#if 0	/* /dev/console */
1099	KASSERT(fp != NULL, ("Could not vnode bypass device on NULL fp"));
1100#else
1101	if (fp == NULL)
1102		return (error);
1103#endif
1104	if (fp->f_ops == &badfileops)
1105		finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
1106	mtxp = mtx_pool_find(mtxpool_sleep, fp);
1107
1108	/*
1109	 * Hint to dofilewrite() not to force buffer draining on writers
1110	 * to this file.  Most likely, such writes will not need normal
1111	 * buffers.
1112	 */
1113	mtx_lock(mtxp);
1114	fp->f_vnread_flags |= FDEVFS_VNODE;
1115	mtx_unlock(mtxp);
1116	return (error);
1117}
1118
1119static int
1120devfs_pathconf(struct vop_pathconf_args *ap)
1121{
1122
1123	switch (ap->a_name) {
1124	case _PC_MAC_PRESENT:
1125#ifdef MAC
1126		/*
1127		 * If MAC is enabled, devfs automatically supports
1128		 * trivial non-persistent label storage.
1129		 */
1130		*ap->a_retval = 1;
1131#else
1132		*ap->a_retval = 0;
1133#endif
1134		return (0);
1135	default:
1136		return (vop_stdpathconf(ap));
1137	}
1138	/* NOTREACHED */
1139}
1140
1141/* ARGSUSED */
1142static int
1143devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
1144{
1145	struct cdev *dev;
1146	struct cdevsw *dsw;
1147	int error, ref;
1148	struct file *fpop;
1149
1150	fpop = td->td_fpop;
1151	error = devfs_fp_check(fp, &dev, &dsw, &ref);
1152	if (error)
1153		return (poll_no_poll(events));
1154	error = dsw->d_poll(dev, events, td);
1155	td->td_fpop = fpop;
1156	dev_relthread(dev, ref);
1157	return(error);
1158}
1159
1160/*
1161 * Print out the contents of a special device vnode.
1162 */
1163static int
1164devfs_print(struct vop_print_args *ap)
1165{
1166
1167	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
1168	return (0);
1169}
1170
1171static int
1172devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred,
1173    int flags, struct thread *td)
1174{
1175	struct cdev *dev;
1176	int ioflag, error, ref;
1177	ssize_t resid;
1178	struct cdevsw *dsw;
1179	struct file *fpop;
1180
1181	fpop = td->td_fpop;
1182	error = devfs_fp_check(fp, &dev, &dsw, &ref);
1183	if (error)
1184		return (error);
1185	resid = uio->uio_resid;
1186	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
1187	if (ioflag & O_DIRECT)
1188		ioflag |= IO_DIRECT;
1189
1190	foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
1191	error = dsw->d_read(dev, uio, ioflag);
1192	if (uio->uio_resid != resid || (error == 0 && resid != 0))
1193		vfs_timestamp(&dev->si_atime);
1194	td->td_fpop = fpop;
1195	dev_relthread(dev, ref);
1196
1197	foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
1198	return (error);
1199}
1200
1201static int
1202devfs_readdir(struct vop_readdir_args *ap)
1203{
1204	int error;
1205	struct uio *uio;
1206	struct dirent *dp;
1207	struct devfs_dirent *dd;
1208	struct devfs_dirent *de;
1209	struct devfs_mount *dmp;
1210	off_t off;
1211	int *tmp_ncookies = NULL;
1212
1213	if (ap->a_vp->v_type != VDIR)
1214		return (ENOTDIR);
1215
1216	uio = ap->a_uio;
1217	if (uio->uio_offset < 0)
1218		return (EINVAL);
1219
1220	/*
1221	 * XXX: This is a temporary hack to get around this filesystem not
1222	 * supporting cookies. We store the location of the ncookies pointer
1223	 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
1224	 * and set the number of cookies to 0. We then set the pointer to
1225	 * NULL so that vfs_read_dirent doesn't try to call realloc() on
1226	 * ap->a_cookies. Later in this function, we restore the ap->a_ncookies
1227	 * pointer to its original location before returning to the caller.
1228	 */
1229	if (ap->a_ncookies != NULL) {
1230		tmp_ncookies = ap->a_ncookies;
1231		*ap->a_ncookies = 0;
1232		ap->a_ncookies = NULL;
1233	}
1234
1235	dmp = VFSTODEVFS(ap->a_vp->v_mount);
1236	if (devfs_populate_vp(ap->a_vp) != 0) {
1237		if (tmp_ncookies != NULL)
1238			ap->a_ncookies = tmp_ncookies;
1239		return (EIO);
1240	}
1241	error = 0;
1242	de = ap->a_vp->v_data;
1243	off = 0;
1244	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
1245		KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
1246		if (dd->de_flags & (DE_COVERED | DE_WHITEOUT))
1247			continue;
1248		if (devfs_prison_check(dd, uio->uio_td))
1249			continue;
1250		if (dd->de_dirent->d_type == DT_DIR)
1251			de = dd->de_dir;
1252		else
1253			de = dd;
1254		dp = dd->de_dirent;
1255		if (dp->d_reclen > uio->uio_resid)
1256			break;
1257		dp->d_fileno = de->de_inode;
1258		if (off >= uio->uio_offset) {
1259			error = vfs_read_dirent(ap, dp, off);
1260			if (error)
1261				break;
1262		}
1263		off += dp->d_reclen;
1264	}
1265	sx_xunlock(&dmp->dm_lock);
1266	uio->uio_offset = off;
1267
1268	/*
1269	 * Restore ap->a_ncookies if it wasn't originally NULL in the first
1270	 * place.
1271	 */
1272	if (tmp_ncookies != NULL)
1273		ap->a_ncookies = tmp_ncookies;
1274
1275	return (error);
1276}
1277
1278static int
1279devfs_readlink(struct vop_readlink_args *ap)
1280{
1281	struct devfs_dirent *de;
1282
1283	de = ap->a_vp->v_data;
1284	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
1285}
1286
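/*
 * vop_reclaim: disassociate the vnode from its dirent and cdev and
 * release the device reference held by the vnode.
 */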
1287static int
1288devfs_reclaim(struct vop_reclaim_args *ap)
1289{
1290	struct vnode *vp = ap->a_vp;
1291	struct devfs_dirent *de;
1292	struct cdev *dev;
1293
1294	mtx_lock(&devfs_de_interlock);
1295	de = vp->v_data;
1296	if (de != NULL) {
1297		de->de_vnode = NULL;
1298		vp->v_data = NULL;
1299	}
1300	mtx_unlock(&devfs_de_interlock);
1301
1302	vnode_destroy_vobject(vp);
1303
1304	VI_LOCK(vp);
1305	dev_lock();
1306	dev = vp->v_rdev;
1307	vp->v_rdev = NULL;
1308
1309	if (dev == NULL) {
1310		dev_unlock();
1311		VI_UNLOCK(vp);
1312		return (0);
1313	}
1314
1315	dev->si_usecount -= vp->v_usecount;
1316	dev_unlock();
1317	VI_UNLOCK(vp);
1318	dev_rel(dev);
1319	return (0);
1320}
1321
1322static int
1323devfs_remove(struct vop_remove_args *ap)
1324{
1325	struct vnode *dvp = ap->a_dvp;
1326	struct vnode *vp = ap->a_vp;
1327	struct devfs_dirent *dd;
1328	struct devfs_dirent *de, *de_covered;
1329	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);
1330
1331	ASSERT_VOP_ELOCKED(dvp, "devfs_remove");
1332	ASSERT_VOP_ELOCKED(vp, "devfs_remove");
1333
1334	sx_xlock(&dmp->dm_lock);
1335	dd = ap->a_dvp->v_data;
1336	de = vp->v_data;
1337	if (de->de_cdp == NULL) {
1338		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
1339		if (de->de_dirent->d_type == DT_LNK) {
1340			de_covered = devfs_find(dd, de->de_dirent->d_name,
1341			    de->de_dirent->d_namlen, 0);
1342			if (de_covered != NULL)
1343				de_covered->de_flags &= ~DE_COVERED;
1344		}
1345		/* We need to unlock dvp because devfs_delete() may lock it. */
1346		VOP_UNLOCK(vp, 0);
1347		if (dvp != vp)
1348			VOP_UNLOCK(dvp, 0);
1349		devfs_delete(dmp, de, 0);
1350		sx_xunlock(&dmp->dm_lock);
1351		if (dvp != vp)
1352			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1353		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1354	} else {
1355		de->de_flags |= DE_WHITEOUT;
1356		sx_xunlock(&dmp->dm_lock);
1357	}
1358	return (0);
1359}
1360
1361/*
1362 * Revoke is called on a tty when a terminal session ends.  The vnode
1363 * is orphaned by setting v_op to deadfs, so we need to let go of it
1364 * as well so that we create a new one next time around.
1365 *
1366 */
1367static int
1368devfs_revoke(struct vop_revoke_args *ap)
1369{
1370	struct vnode *vp = ap->a_vp, *vp2;
1371	struct cdev *dev;
1372	struct cdev_priv *cdp;
1373	struct devfs_dirent *de;
1374	int i;
1375
1376	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
1377
1378	dev = vp->v_rdev;
1379	cdp = cdev2priv(dev);
1380
1381	dev_lock();
1382	cdp->cdp_inuse++;
1383	dev_unlock();
1384
1385	vhold(vp);
1386	vgone(vp);
1387	vdrop(vp);
1388
1389	VOP_UNLOCK(vp,0);
1390 loop:
1391	for (;;) {
1392		mtx_lock(&devfs_de_interlock);
1393		dev_lock();
1394		vp2 = NULL;
1395		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
1396			de = cdp->cdp_dirents[i];
1397			if (de == NULL)
1398				continue;
1399
1400			vp2 = de->de_vnode;
1401			if (vp2 != NULL) {
1402				dev_unlock();
1403				VI_LOCK(vp2);
1404				mtx_unlock(&devfs_de_interlock);
1405				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
1406				    curthread))
1407					goto loop;
1408				vhold(vp2);
1409				vgone(vp2);
1410				vdrop(vp2);
1411				vput(vp2);
1412				break;
1413			}
1414		}
1415		if (vp2 != NULL) {
1416			continue;
1417		}
1418		dev_unlock();
1419		mtx_unlock(&devfs_de_interlock);
1420		break;
1421	}
1422	dev_lock();
1423	cdp->cdp_inuse--;
1424	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
1425		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
1426		dev_unlock();
1427		dev_rel(&cdp->cdp_c);
1428	} else
1429		dev_unlock();
1430
1431	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1432	return (0);
1433}
1434
1435static int
1436devfs_rioctl(struct vop_ioctl_args *ap)
1437{
1438	struct vnode *vp;
1439	struct devfs_mount *dmp;
1440	int error;
1441
1442	vp = ap->a_vp;
1443	vn_lock(vp, LK_SHARED | LK_RETRY);
1444	if (vp->v_iflag & VI_DOOMED) {
1445		VOP_UNLOCK(vp, 0);
1446		return (EBADF);
1447	}
1448	dmp = VFSTODEVFS(vp->v_mount);
1449	sx_xlock(&dmp->dm_lock);
1450	VOP_UNLOCK(vp, 0);
1451	DEVFS_DMP_HOLD(dmp);
1452	devfs_populate(dmp);
1453	if (DEVFS_DMP_DROP(dmp)) {
1454		sx_xunlock(&dmp->dm_lock);
1455		devfs_unmount_final(dmp);
1456		return (ENOENT);
1457	}
1458	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
1459	sx_xunlock(&dmp->dm_lock);
1460	return (error);
1461}
1462
1463static int
1464devfs_rread(struct vop_read_args *ap)
1465{
1466
1467	if (ap->a_vp->v_type != VDIR)
1468		return (EINVAL);
1469	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
1470}
1471
1472static int
1473devfs_setattr(struct vop_setattr_args *ap)
1474{
1475	struct devfs_dirent *de;
1476	struct vattr *vap;
1477	struct vnode *vp;
1478	struct thread *td;
1479	int c, error;
1480	uid_t uid;
1481	gid_t gid;
1482
1483	vap = ap->a_vap;
1484	vp = ap->a_vp;
1485	td = curthread;
1486	if ((vap->va_type != VNON) ||
1487	    (vap->va_nlink != VNOVAL) ||
1488	    (vap->va_fsid != VNOVAL) ||
1489	    (vap->va_fileid != VNOVAL) ||
1490	    (vap->va_blocksize != VNOVAL) ||
1491	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1492	    (vap->va_rdev != VNOVAL) ||
1493	    ((int)vap->va_bytes != VNOVAL) ||
1494	    (vap->va_gen != VNOVAL)) {
1495		return (EINVAL);
1496	}
1497
1498	de = vp->v_data;
1499	if (vp->v_type == VDIR)
1500		de = de->de_dir;
1501
1502	error = c = 0;
1503	if (vap->va_uid == (uid_t)VNOVAL)
1504		uid = de->de_uid;
1505	else
1506		uid = vap->va_uid;
1507	if (vap->va_gid == (gid_t)VNOVAL)
1508		gid = de->de_gid;
1509	else
1510		gid = vap->va_gid;
1511	if (uid != de->de_uid || gid != de->de_gid) {
1512		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
1513		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
1514			error = priv_check(td, PRIV_VFS_CHOWN);
1515			if (error)
1516				return (error);
1517		}
1518		de->de_uid = uid;
1519		de->de_gid = gid;
1520		c = 1;
1521	}
1522
1523	if (vap->va_mode != (mode_t)VNOVAL) {
1524		if (ap->a_cred->cr_uid != de->de_uid) {
1525			error = priv_check(td, PRIV_VFS_ADMIN);
1526			if (error)
1527				return (error);
1528		}
1529		de->de_mode = vap->va_mode;
1530		c = 1;
1531	}
1532
1533	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1534		/* See the comment in ufs_vnops::ufs_setattr(). */
1535		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
1536		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
1537		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
1538			return (error);
1539		if (vap->va_atime.tv_sec != VNOVAL) {
1540			if (vp->v_type == VCHR)
1541				vp->v_rdev->si_atime = vap->va_atime;
1542			else
1543				de->de_atime = vap->va_atime;
1544		}
1545		if (vap->va_mtime.tv_sec != VNOVAL) {
1546			if (vp->v_type == VCHR)
1547				vp->v_rdev->si_mtime = vap->va_mtime;
1548			else
1549				de->de_mtime = vap->va_mtime;
1550		}
1551		c = 1;
1552	}
1553
1554	if (c) {
1555		if (vp->v_type == VCHR)
1556			vfs_timestamp(&vp->v_rdev->si_ctime);
1557		else
1558			vfs_timestamp(&de->de_mtime);
1559	}
1560	return (0);
1561}
1562
1563#ifdef MAC
1564static int
1565devfs_setlabel(struct vop_setlabel_args *ap)
1566{
1567	struct vnode *vp;
1568	struct devfs_dirent *de;
1569
1570	vp = ap->a_vp;
1571	de = vp->v_data;
1572
1573	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
1574	mac_devfs_update(vp->v_mount, de, vp);
1575
1576	return (0);
1577}
1578#endif
1579
1580static int
1581devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
1582{
1583
1584	return (vnops.fo_stat(fp, sb, cred, td));
1585}
1586
1587static int
1588devfs_symlink(struct vop_symlink_args *ap)
1589{
1590	int i, error;
1591	struct devfs_dirent *dd;
1592	struct devfs_dirent *de, *de_covered, *de_dotdot;
1593	struct devfs_mount *dmp;
1594
1595	error = priv_check(curthread, PRIV_DEVFS_SYMLINK);
1596	if (error)
1597		return(error);
1598	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
1599	if (devfs_populate_vp(ap->a_dvp) != 0)
1600		return (ENOENT);
1601
1602	dd = ap->a_dvp->v_data;
1603	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
1604	de->de_flags = DE_USER;
1605	de->de_uid = 0;
1606	de->de_gid = 0;
1607	de->de_mode = 0755;
1608	de->de_inode = alloc_unr(devfs_inos);
1609	de->de_dir = dd;
1610	de->de_dirent->d_type = DT_LNK;
1611	i = strlen(ap->a_target) + 1;
1612	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
1613	bcopy(ap->a_target, de->de_symlink, i);
1614#ifdef MAC
1615	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
1616#endif
1617	de_covered = devfs_find(dd, de->de_dirent->d_name,
1618	    de->de_dirent->d_namlen, 0);
1619	if (de_covered != NULL) {
1620		if ((de_covered->de_flags & DE_USER) != 0) {
1621			devfs_delete(dmp, de, DEVFS_DEL_NORECURSE);
1622			sx_xunlock(&dmp->dm_lock);
1623			return (EEXIST);
1624		}
1625		KASSERT((de_covered->de_flags & DE_COVERED) == 0,
1626		    ("devfs_symlink: entry %p already covered", de_covered));
1627		de_covered->de_flags |= DE_COVERED;
1628	}
1629
1630	de_dotdot = TAILQ_FIRST(&dd->de_dlist);		/* "." */
1631	de_dotdot = TAILQ_NEXT(de_dotdot, de_list);	/* ".." */
1632	TAILQ_INSERT_AFTER(&dd->de_dlist, de_dotdot, de, de_list);
1633	devfs_dir_ref_de(dmp, dd);
1634	devfs_rules_apply(dmp, de);
1635
1636	return (devfs_allocv(de, ap->a_dvp->v_mount, LK_EXCLUSIVE, ap->a_vpp));
1637}
1638
1639static int
1640devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
1641{
1642
1643	return (vnops.fo_truncate(fp, length, cred, td));
1644}
1645
1646static int
1647devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred,
1648    int flags, struct thread *td)
1649{
1650	struct cdev *dev;
1651	int error, ioflag, ref;
1652	ssize_t resid;
1653	struct cdevsw *dsw;
1654	struct file *fpop;
1655
1656	fpop = td->td_fpop;
1657	error = devfs_fp_check(fp, &dev, &dsw, &ref);
1658	if (error)
1659		return (error);
1660	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
1661	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
1662	if (ioflag & O_DIRECT)
1663		ioflag |= IO_DIRECT;
1664	foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
1665
1666	resid = uio->uio_resid;
1667
1668	error = dsw->d_write(dev, uio, ioflag);
1669	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
1670		vfs_timestamp(&dev->si_ctime);
1671		dev->si_mtime = dev->si_ctime;
1672	}
1673	td->td_fpop = fpop;
1674	dev_relthread(dev, ref);
1675
1676	foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
1677	return (error);
1678}
1679
1680dev_t
1681dev2udev(struct cdev *x)
1682{
1683	if (x == NULL)
1684		return (NODEV);
1685	return (cdev2priv(x)->cdp_inode);
1686}
1687
1688static struct fileops devfs_ops_f = {
1689	.fo_read =	devfs_read_f,
1690	.fo_write =	devfs_write_f,
1691	.fo_truncate =	devfs_truncate_f,
1692	.fo_ioctl =	devfs_ioctl_f,
1693	.fo_poll =	devfs_poll_f,
1694	.fo_kqfilter =	devfs_kqfilter_f,
1695	.fo_stat =	devfs_stat_f,
1696	.fo_close =	devfs_close_f,
1697	.fo_chmod =	vn_chmod,
1698	.fo_chown =	vn_chown,
1699	.fo_sendfile =	vn_sendfile,
1700	.fo_seek =	vn_seek,
1701	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
1702};
1703
1704static struct vop_vector devfs_vnodeops = {
1705	.vop_default =		&default_vnodeops,
1706
1707	.vop_access =		devfs_access,
1708	.vop_getattr =		devfs_getattr,
1709	.vop_ioctl =		devfs_rioctl,
1710	.vop_lookup =		devfs_lookup,
1711	.vop_mknod =		devfs_mknod,
1712	.vop_pathconf =		devfs_pathconf,
1713	.vop_read =		devfs_rread,
1714	.vop_readdir =		devfs_readdir,
1715	.vop_readlink =		devfs_readlink,
1716	.vop_reclaim =		devfs_reclaim,
1717	.vop_remove =		devfs_remove,
1718	.vop_revoke =		devfs_revoke,
1719	.vop_setattr =		devfs_setattr,
1720#ifdef MAC
1721	.vop_setlabel =		devfs_setlabel,
1722#endif
1723	.vop_symlink =		devfs_symlink,
1724	.vop_vptocnp =		devfs_vptocnp,
1725};
1726
1727static struct vop_vector devfs_specops = {
1728	.vop_default =		&default_vnodeops,
1729
1730	.vop_access =		devfs_access,
1731	.vop_bmap =		VOP_PANIC,
1732	.vop_close =		devfs_close,
1733	.vop_create =		VOP_PANIC,
1734	.vop_fsync =		devfs_fsync,
1735	.vop_getattr =		devfs_getattr,
1736	.vop_link =		VOP_PANIC,
1737	.vop_mkdir =		VOP_PANIC,
1738	.vop_mknod =		VOP_PANIC,
1739	.vop_open =		devfs_open,
1740	.vop_pathconf =		devfs_pathconf,
1741	.vop_print =		devfs_print,
1742	.vop_read =		VOP_PANIC,
1743	.vop_readdir =		VOP_PANIC,
1744	.vop_readlink =		VOP_PANIC,
1745	.vop_reallocblks =	VOP_PANIC,
1746	.vop_reclaim =		devfs_reclaim,
1747	.vop_remove =		devfs_remove,
1748	.vop_rename =		VOP_PANIC,
1749	.vop_revoke =		devfs_revoke,
1750	.vop_rmdir =		VOP_PANIC,
1751	.vop_setattr =		devfs_setattr,
1752#ifdef MAC
1753	.vop_setlabel =		devfs_setlabel,
1754#endif
1755	.vop_strategy =		VOP_PANIC,
1756	.vop_symlink =		VOP_PANIC,
1757	.vop_vptocnp =		devfs_vptocnp,
1758	.vop_write =		VOP_PANIC,
1759};
1760
1761/*
1762 * Our calling convention to the device drivers used to be that we passed
1763 * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_
1764 * flags instead, since that is what open(), close() and ioctl() take and
1765 * we don't really want vnode.h in device drivers.
1766 * We solved the source compatibility problem by redefining some vnode
1767 * flags to be the same as the fcntl ones and by sending down the bitwise
1768 * OR of the respective fcntl/vnode flags.  These CTASSERTs make sure
1769 * nobody pulls the rug out from under this.
1770 */
1771CTASSERT(O_NONBLOCK == IO_NDELAY);
1772CTASSERT(O_FSYNC == IO_SYNC);
1773