devfs_vnops.c revision 279685
1/*-
2 * Copyright (c) 2000-2004
3 *	Poul-Henning Kamp.  All rights reserved.
4 * Copyright (c) 1989, 1992-1993, 1995
5 *	The Regents of the University of California.  All rights reserved.
6 *
7 * This code is derived from software donated to Berkeley by
8 * Jan-Simon Pendry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Neither the name of the University nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
32 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
33 *
34 * $FreeBSD: stable/10/sys/fs/devfs/devfs_vnops.c 279685 2015-03-06 09:22:05Z kib $
35 */
36
37/*
38 * TODO:
39 *	mkdir: want it ?
40 */
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/conf.h>
45#include <sys/dirent.h>
46#include <sys/fcntl.h>
47#include <sys/file.h>
48#include <sys/filedesc.h>
49#include <sys/filio.h>
50#include <sys/jail.h>
51#include <sys/kernel.h>
52#include <sys/lock.h>
53#include <sys/malloc.h>
54#include <sys/mount.h>
55#include <sys/namei.h>
56#include <sys/priv.h>
57#include <sys/proc.h>
58#include <sys/stat.h>
59#include <sys/sx.h>
60#include <sys/time.h>
61#include <sys/ttycom.h>
62#include <sys/unistd.h>
63#include <sys/vnode.h>
64
/* Vnode operations vector and fileops used by devfs; defined below. */
static struct vop_vector devfs_vnodeops;
static struct fileops devfs_ops_f;
67
68#include <fs/devfs/devfs.h>
69#include <fs/devfs/devfs_int.h>
70
71#include <security/mac/mac_framework.h>
72
/* Allocation type for struct cdev_privdata (per-open-file private data). */
static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");

/* Protects the de_vnode <-> v_data association of devfs dirents. */
struct mtx	devfs_de_interlock;
MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
/* Drains dev_clone eventhandler invocations during clone teardown. */
struct sx	clone_drain_lock;
SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
/* Protects the f_cdevpriv lists hanging off each cdev_priv. */
struct mtx	cdevpriv_mtx;
MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
81
/*
 * Resolve the cdev and cdevsw behind a devfs file and acquire a
 * thread reference on the device (released by the caller with
 * dev_relthread()).  On success *devp/*dswp/*ref are valid,
 * curthread->td_fpop is set to fp for the benefit of cdevpriv
 * accessors, and 0 is returned.  ENXIO means the vnode no longer
 * matches the file's device (e.g. revoked) or the driver is gone.
 */
static int
devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp,
    int *ref)
{

	*dswp = devvn_refthread(fp->f_vnode, devp, ref);
	if (*devp != fp->f_data) {
		/* Vnode points at a different device than the file opened. */
		if (*dswp != NULL)
			dev_relthread(*devp, *ref);
		return (ENXIO);
	}
	KASSERT((*devp)->si_refcount > 0,
	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
	if (*dswp == NULL)
		return (ENXIO);
	curthread->td_fpop = fp;
	return (0);
}
100
101int
102devfs_get_cdevpriv(void **datap)
103{
104	struct file *fp;
105	struct cdev_privdata *p;
106	int error;
107
108	fp = curthread->td_fpop;
109	if (fp == NULL)
110		return (EBADF);
111	p = fp->f_cdevpriv;
112	if (p != NULL) {
113		error = 0;
114		*datap = p->cdpd_data;
115	} else
116		error = ENOENT;
117	return (error);
118}
119
/*
 * Attach driver private data, with destructor, to the file descriptor
 * the current thread entered the driver through.  Returns ENOENT when
 * not called from a cdevsw entry point, EBUSY if private data is
 * already attached.  On success the data is reachable from both the
 * file (f_cdevpriv) and the device's cdp_fdpriv list so it can be
 * reclaimed from either side.
 */
int
devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr)
{
	struct file *fp;
	struct cdev_priv *cdp;
	struct cdev_privdata *p;
	int error;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return (ENOENT);
	cdp = cdev2priv((struct cdev *)fp->f_data);
	/* Allocate before taking the mutex; M_WAITOK may sleep. */
	p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
	p->cdpd_data = priv;
	p->cdpd_dtr = priv_dtr;
	p->cdpd_fp = fp;
	mtx_lock(&cdevpriv_mtx);
	if (fp->f_cdevpriv == NULL) {
		LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
		fp->f_cdevpriv = p;
		mtx_unlock(&cdevpriv_mtx);
		error = 0;
	} else {
		/* Lost the race; somebody already attached data. */
		mtx_unlock(&cdevpriv_mtx);
		free(p, M_CDEVPDATA);
		error = EBUSY;
	}
	return (error);
}
149
/*
 * Detach and destroy one cdevpriv record.  Entered with cdevpriv_mtx
 * held; the mutex is DROPPED before the destructor runs so the
 * destructor may sleep.  Callers must not touch *p afterwards.
 */
void
devfs_destroy_cdevpriv(struct cdev_privdata *p)
{

	mtx_assert(&cdevpriv_mtx, MA_OWNED);
	p->cdpd_fp->f_cdevpriv = NULL;
	LIST_REMOVE(p, cdpd_list);
	mtx_unlock(&cdevpriv_mtx);
	(p->cdpd_dtr)(p->cdpd_data);
	free(p, M_CDEVPDATA);
}
161
/*
 * Destroy the cdevpriv data of a file, if any.  Takes cdevpriv_mtx;
 * the lock is released either here (no data) or inside
 * devfs_destroy_cdevpriv() (data present).
 */
void
devfs_fpdrop(struct file *fp)
{
	struct cdev_privdata *p;

	mtx_lock(&cdevpriv_mtx);
	if ((p = fp->f_cdevpriv) == NULL) {
		mtx_unlock(&cdevpriv_mtx);
		return;
	}
	devfs_destroy_cdevpriv(p);
}
174
175void
176devfs_clear_cdevpriv(void)
177{
178	struct file *fp;
179
180	fp = curthread->td_fpop;
181	if (fp == NULL)
182		return;
183	devfs_fpdrop(fp);
184}
185
186/*
187 * On success devfs_populate_vp() returns with dmp->dm_lock held.
188 */
189static int
190devfs_populate_vp(struct vnode *vp)
191{
192	struct devfs_dirent *de;
193	struct devfs_mount *dmp;
194	int locked;
195
196	ASSERT_VOP_LOCKED(vp, "devfs_populate_vp");
197
198	dmp = VFSTODEVFS(vp->v_mount);
199	locked = VOP_ISLOCKED(vp);
200
201	sx_xlock(&dmp->dm_lock);
202	DEVFS_DMP_HOLD(dmp);
203
204	/* Can't call devfs_populate() with the vnode lock held. */
205	VOP_UNLOCK(vp, 0);
206	devfs_populate(dmp);
207
208	sx_xunlock(&dmp->dm_lock);
209	vn_lock(vp, locked | LK_RETRY);
210	sx_xlock(&dmp->dm_lock);
211	if (DEVFS_DMP_DROP(dmp)) {
212		sx_xunlock(&dmp->dm_lock);
213		devfs_unmount_final(dmp);
214		return (EBADF);
215	}
216	if ((vp->v_iflag & VI_DOOMED) != 0) {
217		sx_xunlock(&dmp->dm_lock);
218		return (EBADF);
219	}
220	de = vp->v_data;
221	KASSERT(de != NULL,
222	    ("devfs_populate_vp: vp->v_data == NULL but vnode not doomed"));
223	if ((de->de_flags & DE_DOOMED) != 0) {
224		sx_xunlock(&dmp->dm_lock);
225		return (EBADF);
226	}
227
228	return (0);
229}
230
/*
 * Vnode-to-componentname: fill in the tail of *buf with vp's name and
 * return a held reference to its parent directory vnode in *dvp.
 * The name is written backwards from the end of the buffer; *buflen
 * is updated to the new start offset.  ENOMEM means the caller's
 * buffer is too small, ENOENT that no name/parent can be determined.
 */
static int
devfs_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct devfs_mount *dmp;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	struct devfs_dirent *dd, *de;
	int i, error;

	dmp = VFSTODEVFS(vp->v_mount);

	/* On success we hold dm_lock; dropped at "finished". */
	error = devfs_populate_vp(vp);
	if (error != 0)
		return (error);

	i = *buflen;
	dd = vp->v_data;

	if (vp->v_type == VCHR) {
		/* Devices are named after their cdev. */
		i -= strlen(dd->de_cdp->cdp_c.si_name);
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
		    strlen(dd->de_cdp->cdp_c.si_name));
		de = dd->de_dir;
	} else if (vp->v_type == VDIR) {
		if (dd == dmp->dm_rootdir) {
			/* Root of the mount names itself. */
			*dvp = vp;
			vref(*dvp);
			goto finished;
		}
		i -= dd->de_dirent->d_namlen;
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_dirent->d_name, buf + i,
		    dd->de_dirent->d_namlen);
		de = dd;
	} else {
		/* Symlinks etc. have no reverse mapping here. */
		error = ENOENT;
		goto finished;
	}
	*buflen = i;
	de = devfs_parent_dirent(de);
	if (de == NULL) {
		error = ENOENT;
		goto finished;
	}
	/*
	 * Snapshot the parent's vnode under the interlock, then convert
	 * the hold into a proper usecount reference.
	 */
	mtx_lock(&devfs_de_interlock);
	*dvp = de->de_vnode;
	if (*dvp != NULL) {
		VI_LOCK(*dvp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(*dvp);
		VI_UNLOCK(*dvp);
		vref(*dvp);
		vdrop(*dvp);
	} else {
		mtx_unlock(&devfs_de_interlock);
		error = ENOENT;
	}
finished:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}
301
302/*
303 * Construct the fully qualified path name relative to the mountpoint.
304 * If a NULL cnp is provided, no '/' is appended to the resulting path.
305 */
306char *
307devfs_fqpn(char *buf, struct devfs_mount *dmp, struct devfs_dirent *dd,
308    struct componentname *cnp)
309{
310	int i;
311	struct devfs_dirent *de;
312
313	sx_assert(&dmp->dm_lock, SA_LOCKED);
314
315	i = SPECNAMELEN;
316	buf[i] = '\0';
317	if (cnp != NULL)
318		i -= cnp->cn_namelen;
319	if (i < 0)
320		 return (NULL);
321	if (cnp != NULL)
322		bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
323	de = dd;
324	while (de != dmp->dm_rootdir) {
325		if (cnp != NULL || i < SPECNAMELEN) {
326			i--;
327			if (i < 0)
328				 return (NULL);
329			buf[i] = '/';
330		}
331		i -= de->de_dirent->d_namlen;
332		if (i < 0)
333			 return (NULL);
334		bcopy(de->de_dirent->d_name, buf + i,
335		    de->de_dirent->d_namlen);
336		de = devfs_parent_dirent(de);
337		if (de == NULL)
338			return (NULL);
339	}
340	return (buf + i);
341}
342
/*
 * Drop the dirent and mount references taken by devfs_allocv().
 * Returns non-zero when the dirent turned out to be doomed (i.e. the
 * lookup must fail).  dm_lock handling: it is released when the
 * caller asked for it (drop_dm_lock) or when the dirent was doomed,
 * but devfs_unmount_final() has already released it when the mount
 * itself went away (not_found == 2) — hence the careful condition
 * at the bottom to avoid a double unlock.
 */
static int
devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
	struct devfs_dirent *de)
{
	int not_found;

	not_found = 0;
	if (de->de_flags & DE_DOOMED)
		not_found = 1;
	if (DEVFS_DE_DROP(de)) {
		KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
		devfs_dirent_free(de);
	}
	if (DEVFS_DMP_DROP(dmp)) {
		KASSERT(not_found == 1,
			("DEVFS mount struct freed before dirent"));
		not_found = 2;
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
	}
	if (not_found == 1 || (drop_dm_lock && not_found != 2))
		sx_unlock(&dmp->dm_lock);
	return (not_found);
}
367
/*
 * insmntque1() failure callback: undo the vnode <-> dirent
 * association made in devfs_allocv() and dispose of the half-built
 * vnode.  The disassociation must happen under devfs_de_interlock
 * before vgone()/vput() so no one can find the dying vnode through
 * the dirent.
 */
static void
devfs_insmntque_dtr(struct vnode *vp, void *arg)
{
	struct devfs_dirent *de;

	de = (struct devfs_dirent *)arg;
	mtx_lock(&devfs_de_interlock);
	vp->v_data = NULL;
	de->de_vnode = NULL;
	mtx_unlock(&devfs_de_interlock);
	vgone(vp);
	vput(vp);
}
381
382/*
383 * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
384 * it on return.
385 */
386int
387devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
388    struct vnode **vpp)
389{
390	int error;
391	struct vnode *vp;
392	struct cdev *dev;
393	struct devfs_mount *dmp;
394	struct cdevsw *dsw;
395
396	dmp = VFSTODEVFS(mp);
397	if (de->de_flags & DE_DOOMED) {
398		sx_xunlock(&dmp->dm_lock);
399		return (ENOENT);
400	}
401loop:
402	DEVFS_DE_HOLD(de);
403	DEVFS_DMP_HOLD(dmp);
404	mtx_lock(&devfs_de_interlock);
405	vp = de->de_vnode;
406	if (vp != NULL) {
407		VI_LOCK(vp);
408		mtx_unlock(&devfs_de_interlock);
409		sx_xunlock(&dmp->dm_lock);
410		vget(vp, lockmode | LK_INTERLOCK | LK_RETRY, curthread);
411		sx_xlock(&dmp->dm_lock);
412		if (devfs_allocv_drop_refs(0, dmp, de)) {
413			vput(vp);
414			return (ENOENT);
415		}
416		else if ((vp->v_iflag & VI_DOOMED) != 0) {
417			mtx_lock(&devfs_de_interlock);
418			if (de->de_vnode == vp) {
419				de->de_vnode = NULL;
420				vp->v_data = NULL;
421			}
422			mtx_unlock(&devfs_de_interlock);
423			vput(vp);
424			goto loop;
425		}
426		sx_xunlock(&dmp->dm_lock);
427		*vpp = vp;
428		return (0);
429	}
430	mtx_unlock(&devfs_de_interlock);
431	if (de->de_dirent->d_type == DT_CHR) {
432		if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
433			devfs_allocv_drop_refs(1, dmp, de);
434			return (ENOENT);
435		}
436		dev = &de->de_cdp->cdp_c;
437	} else {
438		dev = NULL;
439	}
440	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
441	if (error != 0) {
442		devfs_allocv_drop_refs(1, dmp, de);
443		printf("devfs_allocv: failed to allocate new vnode\n");
444		return (error);
445	}
446
447	if (de->de_dirent->d_type == DT_CHR) {
448		vp->v_type = VCHR;
449		VI_LOCK(vp);
450		dev_lock();
451		dev_refl(dev);
452		/* XXX: v_rdev should be protect by vnode lock */
453		vp->v_rdev = dev;
454		KASSERT(vp->v_usecount == 1,
455		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
456		dev->si_usecount += vp->v_usecount;
457		/* Special casing of ttys for deadfs.  Probably redundant. */
458		dsw = dev->si_devsw;
459		if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
460			vp->v_vflag |= VV_ISTTY;
461		dev_unlock();
462		VI_UNLOCK(vp);
463		if ((dev->si_flags & SI_ETERNAL) != 0)
464			vp->v_vflag |= VV_ETERNALDEV;
465		vp->v_op = &devfs_specops;
466	} else if (de->de_dirent->d_type == DT_DIR) {
467		vp->v_type = VDIR;
468	} else if (de->de_dirent->d_type == DT_LNK) {
469		vp->v_type = VLNK;
470	} else {
471		vp->v_type = VBAD;
472	}
473	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
474	VN_LOCK_ASHARE(vp);
475	mtx_lock(&devfs_de_interlock);
476	vp->v_data = de;
477	de->de_vnode = vp;
478	mtx_unlock(&devfs_de_interlock);
479	error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
480	if (error != 0) {
481		(void) devfs_allocv_drop_refs(1, dmp, de);
482		return (error);
483	}
484	if (devfs_allocv_drop_refs(0, dmp, de)) {
485		vput(vp);
486		return (ENOENT);
487	}
488#ifdef MAC
489	mac_devfs_vnode_associate(mp, de, vp);
490#endif
491	sx_xunlock(&dmp->dm_lock);
492	*vpp = vp;
493	return (0);
494}
495
/*
 * VOP_ACCESS: standard vaccess() check against the dirent's
 * ownership/mode, with one exception — a process may always access
 * its controlling terminal even if the mode bits would deny it.
 */
static int
devfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	int error;

	de = vp->v_data;
	/* Directories keep their attributes on the "." entry. */
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
	    ap->a_accmode, ap->a_cred, NULL);
	if (error == 0)
		return (0);
	if (error != EACCES)
		return (error);
	/* We do, however, allow access to the controlling terminal */
	if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
		return (error);
	if (ap->a_td->td_proc->p_session->s_ttydp == de->de_cdp)
		return (0);
	return (error);
}
520
/*
 * VOP_CLOSE for device vnodes: drop the session's controlling-tty
 * reference when appropriate, and call the driver's d_close() only
 * on last close (unless forced or the driver tracks every close).
 */
/* ARGSUSED */
static int
devfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp, *oldvp;
	struct thread *td = ap->a_td;
	struct cdev *dev = vp->v_rdev;
	struct cdevsw *dsw;
	int vp_locked, error, ref;

	/*
	 * XXX: Don't call d_close() if we were called because of
	 * XXX: insmntque1() failure.
	 */
	if (vp->v_data == NULL)
		return (0);

	/*
	 * Hack: a tty device that is a controlling terminal
	 * has a reference from the session structure.
	 * We cannot easily tell that a character device is
	 * a controlling terminal, unless it is the closing
	 * process' controlling terminal.  In that case,
	 * if the reference count is 2 (this last descriptor
	 * plus the session), release the reference from the session.
	 */
	oldvp = NULL;
	sx_xlock(&proctree_lock);
	if (td && vp == td->td_proc->p_session->s_ttyvp) {
		SESS_LOCK(td->td_proc->p_session);
		VI_LOCK(vp);
		if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) {
			td->td_proc->p_session->s_ttyvp = NULL;
			td->td_proc->p_session->s_ttydp = NULL;
			oldvp = vp;
		}
		VI_UNLOCK(vp);
		SESS_UNLOCK(td->td_proc->p_session);
	}
	sx_xunlock(&proctree_lock);
	/* Release the session's reference outside all the locks. */
	if (oldvp != NULL)
		vrele(oldvp);
	/*
	 * We do not want to really close the device if it
	 * is still in use unless we are trying to close it
	 * forcibly. Since every use (buffer, vnode, swap, cmap)
	 * holds a reference to the vnode, and because we mark
	 * any other vnodes that alias this device, when the
	 * sum of the reference counts on all the aliased
	 * vnodes descends to one, we are on last close.
	 */
	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/* Forced close. */
	} else if (dsw->d_flags & D_TRACKCLOSE) {
		/* Keep device updated on status. */
	} else if (count_dev(dev) > 1) {
		/* Not the last close; nothing to tell the driver. */
		VI_UNLOCK(vp);
		dev_relthread(dev, ref);
		return (0);
	}
	/* Hold the vnode while d_close() runs with it unlocked. */
	vholdl(vp);
	VI_UNLOCK(vp);
	vp_locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	KASSERT(dev->si_refcount > 0,
	    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
	error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
	dev_relthread(dev, ref);
	vn_lock(vp, vp_locked | LK_RETRY);
	vdrop(vp);
	return (error);
}
597
/*
 * fo_close for devfs files: delegate to the vnode fileops with
 * td_fpop pointing at fp so cdevpriv accessors work inside d_close(),
 * then reap any remaining cdevpriv data.
 */
static int
devfs_close_f(struct file *fp, struct thread *td)
{
	int error;
	struct file *fpop;

	/*
	 * NB: td may be NULL if this descriptor is closed due to
	 * garbage collection from a closed UNIX domain socket.
	 */
	fpop = curthread->td_fpop;
	curthread->td_fpop = fp;
	error = vnops.fo_close(fp, td);
	curthread->td_fpop = fpop;

	/*
	 * The f_cdevpriv cannot be assigned non-NULL value while we
	 * are destroying the file.
	 */
	if (fp->f_cdevpriv != NULL)
		devfs_fpdrop(fp);
	return (error);
}
621
/*
 * VOP_FSYNC: for live disk devices simply do the standard sync.
 * A disk that has gone away (vn_isdisk() sets error to ENXIO) with
 * dirty buffers still attached can never be written; warn, let
 * vop_stdfsync() invalidate the buffers, and report success so the
 * caller does not retry forever.
 */
static int
devfs_fsync(struct vop_fsync_args *ap)
{
	int error;
	struct bufobj *bo;
	struct devfs_dirent *de;

	if (!vn_isdisk(ap->a_vp, &error)) {
		bo = &ap->a_vp->v_bufobj;
		de = ap->a_vp->v_data;
		if (error == ENXIO && bo->bo_dirty.bv_cnt > 0) {
			printf("Device %s went missing before all of the data "
			    "could be written to it; expect data loss.\n",
			    de->de_dirent->d_name);

			/* Expected to discard the dirty buffers. */
			error = vop_stdfsync(ap);
			if (bo->bo_dirty.bv_cnt != 0 || error != 0)
				panic("devfs_fsync: vop_stdfsync failed.");
		}

		return (0);
	}

	return (vop_stdfsync(ap));
}
647
/*
 * VOP_GETATTR: fill *vap from the dirent (and, for devices, from the
 * cdev's timestamps).  Timestamps that still hold their pre-clock
 * values (<= 3600 s after the epoch) are replaced by the boot time.
 */
static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	int error;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	struct cdev *dev;

	error = devfs_populate_vp(vp);
	if (error != 0)
		return (error);

	/* devfs_populate_vp() returned with dm_lock held. */
	dmp = VFSTODEVFS(vp->v_mount);
	sx_xunlock(&dmp->dm_lock);

	de = vp->v_data;
	KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
	if (vp->v_type == VDIR) {
		/* Directory attributes live on the "." entry. */
		de = de->de_dir;
		KASSERT(de != NULL,
		    ("Null dir dirent in devfs_getattr vp=%p", vp));
	}
	vap->va_uid = de->de_uid;
	vap->va_gid = de->de_gid;
	vap->va_mode = de->de_mode;
	if (vp->v_type == VLNK)
		vap->va_size = strlen(de->de_symlink);
	else if (vp->v_type == VDIR)
		vap->va_size = vap->va_bytes = DEV_BSIZE;
	else
		vap->va_size = 0;
	if (vp->v_type != VDIR)
		vap->va_bytes = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_type = vp->v_type;

/*
 * Substitute the boot time for timestamps that were set before the
 * real-time clock was initialized (heuristic: within the first hour
 * after the epoch).
 */
#define fix(aa)							\
	do {							\
		if ((aa).tv_sec <= 3600) {			\
			(aa).tv_sec = boottime.tv_sec;		\
			(aa).tv_nsec = boottime.tv_usec * 1000; \
		}						\
	} while (0)

	if (vp->v_type != VCHR)  {
		fix(de->de_atime);
		vap->va_atime = de->de_atime;
		fix(de->de_mtime);
		vap->va_mtime = de->de_mtime;
		fix(de->de_ctime);
		vap->va_ctime = de->de_ctime;
	} else {
		/* Devices keep their timestamps on the cdev itself. */
		dev = vp->v_rdev;
		fix(dev->si_atime);
		vap->va_atime = dev->si_atime;
		fix(dev->si_mtime);
		vap->va_mtime = dev->si_mtime;
		fix(dev->si_ctime);
		vap->va_ctime = dev->si_ctime;

		vap->va_rdev = cdev2priv(dev)->cdp_inode;
	}
	vap->va_gen = 0;
	vap->va_flags = 0;
	vap->va_filerev = 0;
	vap->va_nlink = de->de_links;
	vap->va_fileid = de->de_inode;

	return (error);
}
720
/*
 * fo_ioctl for devfs files.  FIODTYPE and FIODGNAME are handled here
 * without entering the driver; everything else goes to d_ioctl().
 * A successful TIOCSCTTY additionally records this vnode as the
 * session's controlling terminal.  If the device is gone, fall back
 * to the generic vnode fileops (which will return the proper error).
 */
/* ARGSUSED */
static int
devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	struct vnode *vp;
	struct vnode *vpold;
	int error, i, ref;
	const char *p;
	struct fiodgname_arg *fgn;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error != 0) {
		error = vnops.fo_ioctl(fp, com, data, cred, td);
		return (error);
	}

	if (com == FIODTYPE) {
		/* Report the device's type bits. */
		*(int *)data = dsw->d_flags & D_TYPEMASK;
		td->td_fpop = fpop;
		dev_relthread(dev, ref);
		return (0);
	} else if (com == FIODGNAME) {
		/* Copy the device name out to the user's buffer. */
		fgn = data;
		p = devtoname(dev);
		i = strlen(p) + 1;
		if (i > fgn->len)
			error = EINVAL;
		else
			error = copyout(p, fgn->buf, i);
		td->td_fpop = fpop;
		dev_relthread(dev, ref);
		return (error);
	}
	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
	td->td_fpop = NULL;
	dev_relthread(dev, ref);
	if (error == ENOIOCTL)
		error = ENOTTY;
	if (error == 0 && com == TIOCSCTTY) {
		vp = fp->f_vnode;

		/* Do nothing if reassigning same control tty */
		sx_slock(&proctree_lock);
		if (td->td_proc->p_session->s_ttyvp == vp) {
			sx_sunlock(&proctree_lock);
			return (0);
		}

		vpold = td->td_proc->p_session->s_ttyvp;
		VREF(vp);
		SESS_LOCK(td->td_proc->p_session);
		td->td_proc->p_session->s_ttyvp = vp;
		td->td_proc->p_session->s_ttydp = cdev2priv(dev);
		SESS_UNLOCK(td->td_proc->p_session);

		sx_sunlock(&proctree_lock);

		/* Get rid of reference to old control tty */
		if (vpold)
			vrele(vpold);
	}
	return (error);
}
788
/*
 * fo_kqfilter for devfs files: resolve the device and hand the knote
 * to the driver's d_kqfilter(), restoring td_fpop afterwards.
 */
/* ARGSUSED */
static int
devfs_kqfilter_f(struct file *fp, struct knote *kn)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error, ref;
	struct file *fpop;
	struct thread *td;

	td = curthread;
	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error)
		return (error);
	error = dsw->d_kqfilter(dev, kn);
	td->td_fpop = fpop;
	dev_relthread(dev, ref);
	return (error);
}
809
810static inline int
811devfs_prison_check(struct devfs_dirent *de, struct thread *td)
812{
813	struct cdev_priv *cdp;
814	struct ucred *dcr;
815	int error;
816
817	cdp = de->de_cdp;
818	if (cdp == NULL)
819		return (0);
820	dcr = cdp->cdp_c.si_cred;
821	if (dcr == NULL)
822		return (0);
823
824	error = prison_check(td->td_ucred, dcr);
825	if (error == 0)
826		return (0);
827	/* We do, however, allow access to the controlling terminal */
828	if (!(td->td_proc->p_flag & P_CONTROLT))
829		return (error);
830	if (td->td_proc->p_session->s_ttydp == cdp)
831		return (0);
832	return (error);
833}
834
/*
 * The guts of VOP_LOOKUP.  Entered with dmp->dm_lock held; *dm_unlock
 * tells the caller whether the lock is still held on return (it is
 * cleared whenever devfs_allocv() or an error path has released it).
 * Handles ".", "..", existing entries, and on-demand device creation
 * through the dev_clone eventhandler.
 */
static int
devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct thread *td;
	struct devfs_dirent *de, *dd;
	struct devfs_dirent **dde;
	struct devfs_mount *dmp;
	struct cdev *cdev;
	int error, flags, nameiop, dvplocked;
	char specname[SPECNAMELEN + 1], *pname;

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dvp = ap->a_dvp;
	pname = cnp->cn_nameptr;
	td = cnp->cn_thread;
	flags = cnp->cn_flags;
	nameiop = cnp->cn_nameiop;
	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	*vpp = NULLVP;

	/* Renaming devfs entries is not supported. */
	if ((flags & ISLASTCN) && nameiop == RENAME)
		return (EOPNOTSUPP);

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/* ".." out of the mount root makes no sense. */
	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
		return (EIO);

	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
	if (error)
		return (error);

	/* "." resolves to the directory itself. */
	if (cnp->cn_namelen == 1 && *pname == '.') {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	if (flags & ISDOTDOT) {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		de = devfs_parent_dirent(dd);
		if (de == NULL)
			return (ENOENT);
		/* Unlock child before locking parent (lock order). */
		dvplocked = VOP_ISLOCKED(dvp);
		VOP_UNLOCK(dvp, 0);
		error = devfs_allocv(de, dvp->v_mount,
		    cnp->cn_lkflags & LK_TYPE_MASK, vpp);
		/* devfs_allocv() dropped dm_lock. */
		*dm_unlock = 0;
		vn_lock(dvp, dvplocked | LK_RETRY);
		return (error);
	}

	dd = dvp->v_data;
	de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen, 0);
	while (de == NULL) {	/* While(...) so we can use break */

		if (nameiop == DELETE)
			return (ENOENT);

		/*
		 * OK, we didn't have an entry for the name we were asked for
		 * so we try to see if anybody can create it on demand.
		 */
		pname = devfs_fqpn(specname, dmp, dd, cnp);
		if (pname == NULL)
			break;

		cdev = NULL;
		/* Keep the mount alive while dm_lock is dropped. */
		DEVFS_DMP_HOLD(dmp);
		sx_xunlock(&dmp->dm_lock);
		sx_slock(&clone_drain_lock);
		EVENTHANDLER_INVOKE(dev_clone,
		    td->td_ucred, pname, strlen(pname), &cdev);
		sx_sunlock(&clone_drain_lock);

		if (cdev == NULL)
			sx_xlock(&dmp->dm_lock);
		else if (devfs_populate_vp(dvp) != 0) {
			/* dvp went away while unlocked; clean up. */
			*dm_unlock = 0;
			sx_xlock(&dmp->dm_lock);
			if (DEVFS_DMP_DROP(dmp)) {
				sx_xunlock(&dmp->dm_lock);
				devfs_unmount_final(dmp);
			} else
				sx_xunlock(&dmp->dm_lock);
			dev_rel(cdev);
			return (ENOENT);
		}
		if (DEVFS_DMP_DROP(dmp)) {
			/* Mount went away while unlocked. */
			*dm_unlock = 0;
			sx_xunlock(&dmp->dm_lock);
			devfs_unmount_final(dmp);
			if (cdev != NULL)
				dev_rel(cdev);
			return (ENOENT);
		}

		if (cdev == NULL)
			break;

		/* Map the cloned cdev back to this mount's dirent. */
		dev_lock();
		dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
		if (dde != NULL && *dde != NULL)
			de = *dde;
		dev_unlock();
		dev_rel(cdev);
		break;
	}

	if (de == NULL || de->de_flags & DE_WHITEOUT) {
		/* Allow CREATE/RENAME of the last component to proceed. */
		if ((nameiop == CREATE || nameiop == RENAME) &&
		    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
			cnp->cn_flags |= SAVENAME;
			return (EJUSTRETURN);
		}
		return (ENOENT);
	}

	/* Entries outside the caller's jail are invisible. */
	if (devfs_prison_check(de, td))
		return (ENOENT);

	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
		if (error)
			return (error);
		if (*vpp == dvp) {
			VREF(dvp);
			*vpp = dvp;
			return (0);
		}
	}
	error = devfs_allocv(de, dvp->v_mount, cnp->cn_lkflags & LK_TYPE_MASK,
	    vpp);
	/* devfs_allocv() dropped dm_lock. */
	*dm_unlock = 0;
	return (error);
}
979
980static int
981devfs_lookup(struct vop_lookup_args *ap)
982{
983	int j;
984	struct devfs_mount *dmp;
985	int dm_unlock;
986
987	if (devfs_populate_vp(ap->a_dvp) != 0)
988		return (ENOTDIR);
989
990	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
991	dm_unlock = 1;
992	j = devfs_lookupx(ap, &dm_unlock);
993	if (dm_unlock == 1)
994		sx_xunlock(&dmp->dm_lock);
995	return (j);
996}
997
/*
 * VOP_MKNOD: devfs cannot create arbitrary nodes; the only thing
 * mknod(2) can do here is revive a previously whited-out character
 * device entry.  On success devfs_allocv() drops dm_lock for us;
 * on the notfound path we drop it ourselves.
 */
static int
devfs_mknod(struct vop_mknod_args *ap)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct devfs_dirent *dd, *de;
	struct devfs_mount *dmp;
	int error;

	/*
	 * The only type of node we should be creating here is a
	 * character device, for anything else return EOPNOTSUPP.
	 */
	if (ap->a_vap->va_type != VCHR)
		return (EOPNOTSUPP);
	dvp = ap->a_dvp;
	dmp = VFSTODEVFS(dvp->v_mount);

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dd = dvp->v_data;

	error = ENOENT;
	sx_xlock(&dmp->dm_lock);
	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (cnp->cn_namelen != de->de_dirent->d_namlen)
			continue;
		/* Skip entries whose device has been destroyed. */
		if (de->de_dirent->d_type == DT_CHR &&
		    (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;
		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
		    de->de_dirent->d_namlen) != 0)
			continue;
		if (de->de_flags & DE_WHITEOUT)
			break;
		/* Name exists and is not whited out: cannot recreate. */
		goto notfound;
	}
	if (de == NULL)
		goto notfound;
	/* Revive the whited-out entry. */
	de->de_flags &= ~DE_WHITEOUT;
	error = devfs_allocv(de, dvp->v_mount, LK_EXCLUSIVE, vpp);
	return (error);
notfound:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}
1044
/*
 * VOP_OPEN for device vnodes: call the driver's d_fdopen() (preferred
 * when present) or d_open() with the vnode unlocked, then wire the
 * struct file to the devfs fileops so subsequent I/O bypasses the
 * vnode layer.
 */
/* ARGSUSED */
static int
devfs_open(struct vop_open_args *ap)
{
	struct thread *td = ap->a_td;
	struct vnode *vp = ap->a_vp;
	struct cdev *dev = vp->v_rdev;
	struct file *fp = ap->a_fp;
	int error, ref, vlocked;
	struct cdevsw *dsw;
	struct file *fpop;
	struct mtx *mtxp;

	if (vp->v_type == VBLK)
		return (ENXIO);

	if (dev == NULL)
		return (ENXIO);

	/* Make this field valid before any I/O in d_open. */
	if (dev->si_iosize_max == 0)
		dev->si_iosize_max = DFLTPHYS;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	if (fp == NULL && dsw->d_fdopen != NULL) {
		/* d_fdopen() requires a struct file to attach state to. */
		dev_relthread(dev, ref);
		return (ENXIO);
	}

	/* Drivers must not be entered with the vnode locked. */
	vlocked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);

	fpop = td->td_fpop;
	td->td_fpop = fp;
	if (fp != NULL) {
		fp->f_data = dev;
		fp->f_vnode = vp;
	}
	if (dsw->d_fdopen != NULL)
		error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
	else
		error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
	/* cleanup any cdevpriv upon error */
	if (error != 0)
		devfs_clear_cdevpriv();
	td->td_fpop = fpop;

	vn_lock(vp, vlocked | LK_RETRY);
	dev_relthread(dev, ref);
	if (error != 0) {
		if (error == ERESTART)
			error = EINTR;
		return (error);
	}

#if 0	/* /dev/console */
	KASSERT(fp != NULL, ("Could not vnode bypass device on NULL fp"));
#else
	if (fp == NULL)
		return (error);
#endif
	/* Install the bypass fileops unless the driver already did. */
	if (fp->f_ops == &badfileops)
		finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
	mtxp = mtx_pool_find(mtxpool_sleep, fp);

	/*
	 * Hint to the dofilewrite() to not force the buffer draining
	 * on the writer to the file.  Most likely, the write would
	 * not need normal buffers.
	 */
	mtx_lock(mtxp);
	fp->f_vnread_flags |= FDEVFS_VNODE;
	mtx_unlock(mtxp);
	return (error);
}
1122
1123static int
1124devfs_pathconf(struct vop_pathconf_args *ap)
1125{
1126
1127	switch (ap->a_name) {
1128	case _PC_MAC_PRESENT:
1129#ifdef MAC
1130		/*
1131		 * If MAC is enabled, devfs automatically supports
1132		 * trivial non-persistant label storage.
1133		 */
1134		*ap->a_retval = 1;
1135#else
1136		*ap->a_retval = 0;
1137#endif
1138		return (0);
1139	default:
1140		return (vop_stdpathconf(ap));
1141	}
1142	/* NOTREACHED */
1143}
1144
/*
 * fo_poll for devfs files: hand off to the driver's d_poll(); if the
 * device is gone, fall back to the generic vnode fileops.
 */
/* ARGSUSED */
static int
devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error, ref;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error != 0) {
		error = vnops.fo_poll(fp, events, cred, td);
		return (error);
	}
	error = dsw->d_poll(dev, events, td);
	td->td_fpop = fpop;
	dev_relthread(dev, ref);
	return(error);
}
1165
1166/*
1167 * Print out the contents of a special device vnode.
1168 */
1169static int
1170devfs_print(struct vop_print_args *ap)
1171{
1172
1173	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
1174	return (0);
1175}
1176
/*
 * fo_read for devfs files: bypass the vnode layer and call the
 * driver's d_read() directly.  The file offset is managed with the
 * foffset lock helpers; the access time is refreshed whenever data
 * was transferred (or a zero-length read succeeded at EOF).
 */
static int
devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred,
    int flags, struct thread *td)
{
	struct cdev *dev;
	int ioflag, error, ref;
	ssize_t resid;
	struct cdevsw *dsw;
	struct file *fpop;

	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error != 0) {
		/* Device gone; let the generic vnode fileops report it. */
		error = vnops.fo_read(fp, uio, cred, flags, td);
		return (error);
	}
	resid = uio->uio_resid;
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;

	foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
	error = dsw->d_read(dev, uio, ioflag);
	if (uio->uio_resid != resid || (error == 0 && resid != 0))
		vfs_timestamp(&dev->si_atime);
	td->td_fpop = fpop;
	dev_relthread(dev, ref);

	foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
	return (error);
}
1210
/*
 * VOP_READDIR: iterate over the directory's dirent list, skipping
 * covered, whited-out and jail-invisible entries, and copy out as
 * many records as fit in the caller's buffer starting at
 * uio_offset.  Cookies are not supported (see the hack below).
 */
static int
devfs_readdir(struct vop_readdir_args *ap)
{
	int error;
	struct uio *uio;
	struct dirent *dp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	off_t off;
	int *tmp_ncookies = NULL;

	if (ap->a_vp->v_type != VDIR)
		return (ENOTDIR);

	uio = ap->a_uio;
	if (uio->uio_offset < 0)
		return (EINVAL);

	/*
	 * XXX: This is a temporary hack to get around this filesystem not
	 * supporting cookies. We store the location of the ncookies pointer
	 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
	 * and set the number of cookies to 0. We then set the pointer to
	 * NULL so that vfs_read_dirent doesn't try to call realloc() on
	 * ap->a_cookies. Later in this function, we restore the ap->a_ncookies
	 * pointer to its original location before returning to the caller.
	 */
	if (ap->a_ncookies != NULL) {
		tmp_ncookies = ap->a_ncookies;
		*ap->a_ncookies = 0;
		ap->a_ncookies = NULL;
	}

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	/* On success dm_lock is held until after the scan below. */
	if (devfs_populate_vp(ap->a_vp) != 0) {
		if (tmp_ncookies != NULL)
			ap->a_ncookies = tmp_ncookies;
		return (EIO);
	}
	error = 0;
	de = ap->a_vp->v_data;
	off = 0;
	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
		KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
		if (dd->de_flags & (DE_COVERED | DE_WHITEOUT))
			continue;
		/* Hide entries outside the caller's jail. */
		if (devfs_prison_check(dd, uio->uio_td))
			continue;
		/* Subdirectories report the inode of their "." entry. */
		if (dd->de_dirent->d_type == DT_DIR)
			de = dd->de_dir;
		else
			de = dd;
		dp = dd->de_dirent;
		if (dp->d_reclen > uio->uio_resid)
			break;
		dp->d_fileno = de->de_inode;
		/* Only copy out records at or past the requested offset. */
		if (off >= uio->uio_offset) {
			error = vfs_read_dirent(ap, dp, off);
			if (error)
				break;
		}
		off += dp->d_reclen;
	}
	sx_xunlock(&dmp->dm_lock);
	uio->uio_offset = off;

	/*
	 * Restore ap->a_ncookies if it wasn't originally NULL in the first
	 * place.
	 */
	if (tmp_ncookies != NULL)
		ap->a_ncookies = tmp_ncookies;

	return (error);
}
1287
1288static int
1289devfs_readlink(struct vop_readlink_args *ap)
1290{
1291	struct devfs_dirent *de;
1292
1293	de = ap->a_vp->v_data;
1294	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
1295}
1296
/*
 * Reclaim a devfs vnode: detach it from its directory entry and, for
 * device vnodes, from the cdev it referenced.
 */
static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	struct cdev *dev;

	/* Break the dirent<->vnode linkage under the interlock. */
	mtx_lock(&devfs_de_interlock);
	de = vp->v_data;
	if (de != NULL) {
		de->de_vnode = NULL;
		vp->v_data = NULL;
	}
	mtx_unlock(&devfs_de_interlock);

	vnode_destroy_vobject(vp);

	VI_LOCK(vp);
	dev_lock();
	dev = vp->v_rdev;
	vp->v_rdev = NULL;

	if (dev == NULL) {
		/* No cdev was associated (or it was already detached). */
		dev_unlock();
		VI_UNLOCK(vp);
		return (0);
	}

	/* This vnode's users no longer count against the device. */
	dev->si_usecount -= vp->v_usecount;
	dev_unlock();
	VI_UNLOCK(vp);
	/* Drop the reference the vnode held on the cdev. */
	dev_rel(dev);
	return (0);
}
1331
/*
 * Remove a directory entry.  Entries with no associated cdev
 * (de_cdp == NULL, e.g. user-created symlinks) are deleted outright;
 * device-backed entries are only marked DE_WHITEOUT, which hides them
 * from lookup/readdir without destroying the device node.
 */
static int
devfs_remove(struct vop_remove_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de, *de_covered;
	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);

	ASSERT_VOP_ELOCKED(dvp, "devfs_remove");
	ASSERT_VOP_ELOCKED(vp, "devfs_remove");

	sx_xlock(&dmp->dm_lock);
	dd = ap->a_dvp->v_data;
	de = vp->v_data;
	if (de->de_cdp == NULL) {
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_dirent->d_type == DT_LNK) {
			/* Uncover the entry this symlink was shadowing. */
			de_covered = devfs_find(dd, de->de_dirent->d_name,
			    de->de_dirent->d_namlen, 0);
			if (de_covered != NULL)
				de_covered->de_flags &= ~DE_COVERED;
		}
		/* We need to unlock dvp because devfs_delete() may lock it. */
		VOP_UNLOCK(vp, 0);
		if (dvp != vp)
			VOP_UNLOCK(dvp, 0);
		devfs_delete(dmp, de, 0);
		sx_xunlock(&dmp->dm_lock);
		/* Reacquire the vnode locks in dvp-then-vp order. */
		if (dvp != vp)
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		de->de_flags |= DE_WHITEOUT;
		sx_xunlock(&dmp->dm_lock);
	}
	return (0);
}
1370
1371/*
1372 * Revoke is called on a tty when a terminal session ends.  The vnode
1373 * is orphaned by setting v_op to deadfs so we need to let go of it
1374 * as well so that we create a new one next time around.
1375 *
1376 */
1377static int
1378devfs_revoke(struct vop_revoke_args *ap)
1379{
1380	struct vnode *vp = ap->a_vp, *vp2;
1381	struct cdev *dev;
1382	struct cdev_priv *cdp;
1383	struct devfs_dirent *de;
1384	int i;
1385
1386	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
1387
1388	dev = vp->v_rdev;
1389	cdp = cdev2priv(dev);
1390
1391	dev_lock();
1392	cdp->cdp_inuse++;
1393	dev_unlock();
1394
1395	vhold(vp);
1396	vgone(vp);
1397	vdrop(vp);
1398
1399	VOP_UNLOCK(vp,0);
1400 loop:
1401	for (;;) {
1402		mtx_lock(&devfs_de_interlock);
1403		dev_lock();
1404		vp2 = NULL;
1405		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
1406			de = cdp->cdp_dirents[i];
1407			if (de == NULL)
1408				continue;
1409
1410			vp2 = de->de_vnode;
1411			if (vp2 != NULL) {
1412				dev_unlock();
1413				VI_LOCK(vp2);
1414				mtx_unlock(&devfs_de_interlock);
1415				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
1416				    curthread))
1417					goto loop;
1418				vhold(vp2);
1419				vgone(vp2);
1420				vdrop(vp2);
1421				vput(vp2);
1422				break;
1423			}
1424		}
1425		if (vp2 != NULL) {
1426			continue;
1427		}
1428		dev_unlock();
1429		mtx_unlock(&devfs_de_interlock);
1430		break;
1431	}
1432	dev_lock();
1433	cdp->cdp_inuse--;
1434	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
1435		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
1436		dev_unlock();
1437		dev_rel(&cdp->cdp_c);
1438	} else
1439		dev_unlock();
1440
1441	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1442	return (0);
1443}
1444
/*
 * ioctl handler for non-device devfs vnodes; dispatches the devfs
 * rule ioctls after making sure the tree is populated.
 */
static int
devfs_rioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct devfs_mount *dmp;
	int error;

	vp = ap->a_vp;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if (vp->v_iflag & VI_DOOMED) {
		/* The vnode was reclaimed while we waited for the lock. */
		VOP_UNLOCK(vp, 0);
		return (EBADF);
	}
	dmp = VFSTODEVFS(vp->v_mount);
	sx_xlock(&dmp->dm_lock);
	VOP_UNLOCK(vp, 0);
	/*
	 * Hold the mount across devfs_populate(); if the drop reports we
	 * were the last holder of a dying mount, finish the unmount.
	 */
	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (ENOENT);
	}
	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
	sx_xunlock(&dmp->dm_lock);
	return (error);
}
1472
1473static int
1474devfs_rread(struct vop_read_args *ap)
1475{
1476
1477	if (ap->a_vp->v_type != VDIR)
1478		return (EINVAL);
1479	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
1480}
1481
/*
 * Set attributes on a devfs node.  Only ownership, mode and timestamps
 * may be changed; any other attribute arriving as something besides
 * VNOVAL is rejected with EINVAL.
 */
static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_dirent *de;
	struct vattr *vap;
	struct vnode *vp;
	struct thread *td;
	int c, error;
	uid_t uid;
	gid_t gid;

	vap = ap->a_vap;
	vp = ap->a_vp;
	td = curthread;
	if ((vap->va_type != VNON) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) ||
	    (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) ||
	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
	    (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) ||
	    (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	de = vp->v_data;
	/*
	 * For directories, operate on de_dir — presumably the directory's
	 * canonical dirent rather than the entry it was reached through;
	 * confirm against devfs_dirent layout.
	 */
	if (vp->v_type == VDIR)
		de = de->de_dir;

	/* 'c' records whether anything changed, for the ctime update below. */
	error = c = 0;
	if (vap->va_uid == (uid_t)VNOVAL)
		uid = de->de_uid;
	else
		uid = vap->va_uid;
	if (vap->va_gid == (gid_t)VNOVAL)
		gid = de->de_gid;
	else
		gid = vap->va_gid;
	if (uid != de->de_uid || gid != de->de_gid) {
		/*
		 * Without PRIV_VFS_CHOWN, the caller must own the node,
		 * leave the uid unchanged, and only move the gid to a
		 * group it belongs to.
		 */
		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
			error = priv_check(td, PRIV_VFS_CHOWN);
			if (error)
				return (error);
		}
		de->de_uid = uid;
		de->de_gid = gid;
		c = 1;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		/* Non-owners need PRIV_VFS_ADMIN to chmod. */
		if (ap->a_cred->cr_uid != de->de_uid) {
			error = priv_check(td, PRIV_VFS_ADMIN);
			if (error)
				return (error);
		}
		de->de_mode = vap->va_mode;
		c = 1;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		error = vn_utimes_perm(vp, vap, ap->a_cred, td);
		if (error != 0)
			return (error);
		/* Device nodes keep times on the cdev, others on the dirent. */
		if (vap->va_atime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_atime = vap->va_atime;
			else
				de->de_atime = vap->va_atime;
		}
		if (vap->va_mtime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_mtime = vap->va_mtime;
			else
				de->de_mtime = vap->va_mtime;
		}
		c = 1;
	}

	if (c) {
		/*
		 * Something changed; stamp the change time.  NOTE(review):
		 * the non-VCHR branch writes de_mtime — there appears to be
		 * no separate de_ctime field; confirm.
		 */
		if (vp->v_type == VCHR)
			vfs_timestamp(&vp->v_rdev->si_ctime);
		else
			vfs_timestamp(&de->de_mtime);
	}
	return (0);
}
1570
#ifdef MAC
/*
 * Set the MAC label on a devfs vnode: relabel the vnode and propagate
 * the change to the backing devfs_dirent via mac_devfs_update().
 */
static int
devfs_setlabel(struct vop_setlabel_args *ap)
{
	struct vnode *vp;
	struct devfs_dirent *de;

	vp = ap->a_vp;
	de = vp->v_data;

	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
	mac_devfs_update(vp->v_mount, de, vp);

	return (0);
}
#endif
1587
1588static int
1589devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
1590{
1591
1592	return (vnops.fo_stat(fp, sb, cred, td));
1593}
1594
/*
 * Create a symlink in devfs.  This is a privileged operation; the new
 * entry is user-created (DE_USER) and may cover an existing device
 * entry of the same name.
 */
static int
devfs_symlink(struct vop_symlink_args *ap)
{
	int i, error;
	struct devfs_dirent *dd;
	struct devfs_dirent *de, *de_covered, *de_dotdot;
	struct devfs_mount *dmp;

	error = priv_check(curthread, PRIV_DEVFS_SYMLINK);
	if (error)
		return(error);
	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	/* On success, dm_lock is held from here until released below. */
	if (devfs_populate_vp(ap->a_dvp) != 0)
		return (ENOENT);

	dd = ap->a_dvp->v_data;
	/* Build the new link entry and copy in the target string. */
	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
	de->de_flags = DE_USER;
	de->de_uid = 0;
	de->de_gid = 0;
	de->de_mode = 0755;
	de->de_inode = alloc_unr(devfs_inos);
	de->de_dir = dd;
	de->de_dirent->d_type = DT_LNK;
	i = strlen(ap->a_target) + 1;	/* include the terminating NUL */
	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
	bcopy(ap->a_target, de->de_symlink, i);
#ifdef MAC
	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
#endif
	/*
	 * If an entry of the same name already exists: another
	 * user-created entry means EEXIST; a device-provided entry gets
	 * marked DE_COVERED so the symlink shadows it.
	 */
	de_covered = devfs_find(dd, de->de_dirent->d_name,
	    de->de_dirent->d_namlen, 0);
	if (de_covered != NULL) {
		if ((de_covered->de_flags & DE_USER) != 0) {
			devfs_delete(dmp, de, DEVFS_DEL_NORECURSE);
			sx_xunlock(&dmp->dm_lock);
			return (EEXIST);
		}
		KASSERT((de_covered->de_flags & DE_COVERED) == 0,
		    ("devfs_symlink: entry %p already covered", de_covered));
		de_covered->de_flags |= DE_COVERED;
	}

	/* Insert after "." and ".." so those stay first in the list. */
	de_dotdot = TAILQ_FIRST(&dd->de_dlist);		/* "." */
	de_dotdot = TAILQ_NEXT(de_dotdot, de_list);	/* ".." */
	TAILQ_INSERT_AFTER(&dd->de_dlist, de_dotdot, de, de_list);
	devfs_dir_ref_de(dmp, dd);
	devfs_rules_apply(dmp, de);

	/* devfs_allocv() presumably drops dm_lock — confirm. */
	return (devfs_allocv(de, ap->a_dvp->v_mount, LK_EXCLUSIVE, ap->a_vpp));
}
1646
1647static int
1648devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
1649{
1650
1651	return (vnops.fo_truncate(fp, length, cred, td));
1652}
1653
/*
 * fo_write handler for devfs files; mirrors devfs_read_f but updates
 * the modification and change times when data moved.
 */
static int
devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred,
    int flags, struct thread *td)
{
	struct cdev *dev;
	int error, ioflag, ref;
	ssize_t resid;
	struct cdevsw *dsw;
	struct file *fpop;

	/* Refuse transfers too large for the device layer. */
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error != 0) {
		/* Device is gone; use the plain vnode write path instead. */
		error = vnops.fo_write(fp, uio, cred, flags, td);
		return (error);
	}
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	/*
	 * Hand the fcntl.h O_* flags down as IO_* flags; the CTASSERTs at
	 * the bottom of this file guarantee the shared values line up.
	 */
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;
	foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);

	resid = uio->uio_resid;

	error = dsw->d_write(dev, uio, ioflag);
	/* Stamp ctime/mtime if data moved (or a zero write succeeded). */
	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
		vfs_timestamp(&dev->si_ctime);
		dev->si_mtime = dev->si_ctime;
	}
	/* Restore the caller's td_fpop saved above. */
	td->td_fpop = fpop;
	dev_relthread(dev, ref);

	foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
	return (error);
}
1691
1692dev_t
1693dev2udev(struct cdev *x)
1694{
1695	if (x == NULL)
1696		return (NODEV);
1697	return (cdev2priv(x)->cdp_inode);
1698}
1699
/*
 * fileops used for devfs files.  The devfs_*_f wrappers dispatch to
 * the underlying device's cdevsw and fall back to the generic vnode
 * fileops (vnops) when the device has gone away.
 */
static struct fileops devfs_ops_f = {
	.fo_read =	devfs_read_f,
	.fo_write =	devfs_write_f,
	.fo_truncate =	devfs_truncate_f,
	.fo_ioctl =	devfs_ioctl_f,
	.fo_poll =	devfs_poll_f,
	.fo_kqfilter =	devfs_kqfilter_f,
	.fo_stat =	devfs_stat_f,
	.fo_close =	devfs_close_f,
	.fo_chmod =	vn_chmod,
	.fo_chown =	vn_chown,
	.fo_sendfile =	vn_sendfile,
	.fo_seek =	vn_seek,
	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
};
1715
/*
 * Vnode operations for plain devfs vnodes (directories, symlinks and
 * other non-device nodes).  Unlisted operations fall through to
 * default_vnodeops.
 */
static struct vop_vector devfs_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_getattr =		devfs_getattr,
	.vop_ioctl =		devfs_rioctl,
	.vop_lookup =		devfs_lookup,
	.vop_mknod =		devfs_mknod,
	.vop_pathconf =		devfs_pathconf,
	.vop_read =		devfs_rread,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_revoke =		devfs_revoke,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_symlink =		devfs_symlink,
	.vop_vptocnp =		devfs_vptocnp,
};
1738
/*
 * Vnode operations for device special files.  Operations that make no
 * sense on a device node are wired to VOP_PANIC; read/write/poll use
 * the dead_* handlers — NOTE(review): device I/O is normally performed
 * through the fileops table (devfs_ops_f) rather than these vnops.
 */
struct vop_vector devfs_specops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_bmap =		VOP_PANIC,
	.vop_close =		devfs_close,
	.vop_create =		VOP_PANIC,
	.vop_fsync =		devfs_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_link =		VOP_PANIC,
	.vop_mkdir =		VOP_PANIC,
	.vop_mknod =		VOP_PANIC,
	.vop_open =		devfs_open,
	.vop_pathconf =		devfs_pathconf,
	.vop_poll =		dead_poll,
	.vop_print =		devfs_print,
	.vop_read =		dead_read,
	.vop_readdir =		VOP_PANIC,
	.vop_readlink =		VOP_PANIC,
	.vop_reallocblks =	VOP_PANIC,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_rename =		VOP_PANIC,
	.vop_revoke =		devfs_revoke,
	.vop_rmdir =		VOP_PANIC,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_strategy =		VOP_PANIC,
	.vop_symlink =		VOP_PANIC,
	.vop_vptocnp =		devfs_vptocnp,
	.vop_write =		dead_write,
};
1773
1774/*
1775 * Our calling convention to the device drivers used to be that we passed
1776 * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_
1777 * flags instead since that's what open(), close() and ioctl() takes and
1778 * we don't really want vnode.h in device drivers.
1779 * We solved the source compatibility by redefining some vnode flags to
1780 * be the same as the fcntl ones and by sending down the bitwise OR of
1781 * the respective fcntl/vnode flags.  These CTASSERTS make sure nobody
1782 * pulls the rug out under this.
1783 */
1784CTASSERT(O_NONBLOCK == IO_NDELAY);
1785CTASSERT(O_FSYNC == IO_SYNC);
1786