devfs_vnops.c revision 302078
1/*-
2 * Copyright (c) 2000-2004
3 *	Poul-Henning Kamp.  All rights reserved.
4 * Copyright (c) 1989, 1992-1993, 1995
5 *	The Regents of the University of California.  All rights reserved.
6 *
7 * This code is derived from software donated to Berkeley by
8 * Jan-Simon Pendry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Neither the name of the University nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
32 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
33 *
34 * $FreeBSD: stable/10/sys/fs/devfs/devfs_vnops.c 302078 2016-06-22 09:08:18Z kib $
35 */
36
37/*
38 * TODO:
 *	mkdir: want it?
40 */
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/conf.h>
45#include <sys/dirent.h>
46#include <sys/fcntl.h>
47#include <sys/file.h>
48#include <sys/filedesc.h>
49#include <sys/filio.h>
50#include <sys/jail.h>
51#include <sys/kernel.h>
52#include <sys/lock.h>
53#include <sys/malloc.h>
54#include <sys/mount.h>
55#include <sys/namei.h>
56#include <sys/priv.h>
57#include <sys/proc.h>
58#include <sys/stat.h>
59#include <sys/sx.h>
60#include <sys/sysctl.h>
61#include <sys/time.h>
62#include <sys/ttycom.h>
63#include <sys/unistd.h>
64#include <sys/vnode.h>
65
66static struct vop_vector devfs_vnodeops;
67static struct vop_vector devfs_specops;
68static struct fileops devfs_ops_f;
69
70#include <fs/devfs/devfs.h>
71#include <fs/devfs/devfs_int.h>
72
73#include <security/mac/mac_framework.h>
74
75static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");
76
77struct mtx	devfs_de_interlock;
78MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
79struct sx	clone_drain_lock;
80SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
81struct mtx	cdevpriv_mtx;
82MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
83
84SYSCTL_DECL(_vfs_devfs);
85
86static int devfs_dotimes;
87SYSCTL_INT(_vfs_devfs, OID_AUTO, dotimes, CTLFLAG_RW,
88    &devfs_dotimes, 0, "Update timestamps on DEVFS with default precision");
89
90/*
91 * Update devfs node timestamp.  Note that updates are unlocked and
92 * stat(2) could see partially updated times.
93 */
94static void
95devfs_timestamp(struct timespec *tsp)
96{
97	time_t ts;
98
99	if (devfs_dotimes) {
100		vfs_timestamp(tsp);
101	} else {
102		ts = time_second;
103		if (tsp->tv_sec != ts) {
104			tsp->tv_sec = ts;
105			tsp->tv_nsec = 0;
106		}
107	}
108}
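
/*
 * An illustrative sketch (hypothetical helper, not taken from this file) of
 * how the vfs.devfs.dotimes knob above could be flipped from userland with
 * the standard sysctlbyname(3) call; with the knob off, devfs_timestamp()
 * falls back to the cheaper seconds-only update seen above:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	enable_devfs_dotimes(void)
 *	{
 *		int one = 1;
 *
 *		return (sysctlbyname("vfs.devfs.dotimes", NULL, NULL,
 *		    &one, sizeof(one)));
 *	}
 *
 * Either way the update itself is unlocked, so a concurrent stat(2) may
 * still observe a partially updated timespec.
 */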
109
110static int
111devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp,
112    int *ref)
113{
114
115	*dswp = devvn_refthread(fp->f_vnode, devp, ref);
116	if (*devp != fp->f_data) {
117		if (*dswp != NULL)
118			dev_relthread(*devp, *ref);
119		return (ENXIO);
120	}
121	KASSERT((*devp)->si_refcount > 0,
122	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
123	if (*dswp == NULL)
124		return (ENXIO);
125	curthread->td_fpop = fp;
126	return (0);
127}
128
129int
130devfs_get_cdevpriv(void **datap)
131{
132	struct file *fp;
133	struct cdev_privdata *p;
134	int error;
135
136	fp = curthread->td_fpop;
137	if (fp == NULL)
138		return (EBADF);
139	p = fp->f_cdevpriv;
140	if (p != NULL) {
141		error = 0;
142		*datap = p->cdpd_data;
143	} else
144		error = ENOENT;
145	return (error);
146}
147
148int
149devfs_set_cdevpriv(void *priv, d_priv_dtor_t *priv_dtr)
150{
151	struct file *fp;
152	struct cdev_priv *cdp;
153	struct cdev_privdata *p;
154	int error;
155
156	fp = curthread->td_fpop;
157	if (fp == NULL)
158		return (ENOENT);
159	cdp = cdev2priv((struct cdev *)fp->f_data);
160	p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
161	p->cdpd_data = priv;
162	p->cdpd_dtr = priv_dtr;
163	p->cdpd_fp = fp;
164	mtx_lock(&cdevpriv_mtx);
165	if (fp->f_cdevpriv == NULL) {
166		LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
167		fp->f_cdevpriv = p;
168		mtx_unlock(&cdevpriv_mtx);
169		error = 0;
170	} else {
171		mtx_unlock(&cdevpriv_mtx);
172		free(p, M_CDEVPDATA);
173		error = EBUSY;
174	}
175	return (error);
176}
177
178void
179devfs_destroy_cdevpriv(struct cdev_privdata *p)
180{
181
182	mtx_assert(&cdevpriv_mtx, MA_OWNED);
183	p->cdpd_fp->f_cdevpriv = NULL;
184	LIST_REMOVE(p, cdpd_list);
185	mtx_unlock(&cdevpriv_mtx);
186	(p->cdpd_dtr)(p->cdpd_data);
187	free(p, M_CDEVPDATA);
188}
189
190void
191devfs_fpdrop(struct file *fp)
192{
193	struct cdev_privdata *p;
194
195	mtx_lock(&cdevpriv_mtx);
196	if ((p = fp->f_cdevpriv) == NULL) {
197		mtx_unlock(&cdevpriv_mtx);
198		return;
199	}
200	devfs_destroy_cdevpriv(p);
201}
202
203void
204devfs_clear_cdevpriv(void)
205{
206	struct file *fp;
207
208	fp = curthread->td_fpop;
209	if (fp == NULL)
210		return;
211	devfs_fpdrop(fp);
212}
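
/*
 * A sketch of how a driver might use the cdevpriv KPI above to keep
 * per-open-file state; the mydev_* names are hypothetical and M_DEVBUF is
 * just a convenient malloc type for the example.  The calls only work
 * while curthread is executing a devfs-dispatched cdevsw method, because
 * that is when td_fpop is set:
 *
 *	struct mydev_state {
 *		int	opened_by;
 *	};
 *
 *	static void
 *	mydev_priv_dtor(void *data)
 *	{
 *
 *		free(data, M_DEVBUF);
 *	}
 *
 *	static int
 *	mydev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
 *	{
 *		struct mydev_state *st;
 *
 *		st = malloc(sizeof(*st), M_DEVBUF, M_WAITOK | M_ZERO);
 *		st->opened_by = td->td_proc->p_pid;
 *		return (devfs_set_cdevpriv(st, mydev_priv_dtor));
 *	}
 *
 *	static int
 *	mydev_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		struct mydev_state *st;
 *		int error;
 *
 *		error = devfs_get_cdevpriv((void **)&st);
 *		if (error != 0)
 *			return (error);
 *		return (0);
 *	}
 *
 * The destructor runs when the descriptor is closed (see devfs_close_f()
 * and devfs_fpdrop() above) or when the driver calls
 * devfs_clear_cdevpriv() itself.
 */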
213
214/*
215 * On success devfs_populate_vp() returns with dmp->dm_lock held.
216 */
217static int
218devfs_populate_vp(struct vnode *vp)
219{
220	struct devfs_dirent *de;
221	struct devfs_mount *dmp;
222	int locked;
223
224	ASSERT_VOP_LOCKED(vp, "devfs_populate_vp");
225
226	dmp = VFSTODEVFS(vp->v_mount);
227	locked = VOP_ISLOCKED(vp);
228
229	sx_xlock(&dmp->dm_lock);
230	DEVFS_DMP_HOLD(dmp);
231
232	/* Can't call devfs_populate() with the vnode lock held. */
233	VOP_UNLOCK(vp, 0);
234	devfs_populate(dmp);
235
236	sx_xunlock(&dmp->dm_lock);
237	vn_lock(vp, locked | LK_RETRY);
238	sx_xlock(&dmp->dm_lock);
239	if (DEVFS_DMP_DROP(dmp)) {
240		sx_xunlock(&dmp->dm_lock);
241		devfs_unmount_final(dmp);
242		return (ERESTART);
243	}
244	if ((vp->v_iflag & VI_DOOMED) != 0) {
245		sx_xunlock(&dmp->dm_lock);
246		return (ERESTART);
247	}
248	de = vp->v_data;
249	KASSERT(de != NULL,
250	    ("devfs_populate_vp: vp->v_data == NULL but vnode not doomed"));
251	if ((de->de_flags & DE_DOOMED) != 0) {
252		sx_xunlock(&dmp->dm_lock);
253		return (ERESTART);
254	}
255
256	return (0);
257}
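
/*
 * To make the locking contract above concrete, a sketch of the calling
 * pattern used throughout this file (error handling trimmed):
 *
 *	error = devfs_populate_vp(vp);
 *	if (error != 0)
 *		return (error);
 *	dmp = VFSTODEVFS(vp->v_mount);
 *	de = vp->v_data;
 *	(work with the dirent while dm_lock is held)
 *	sx_xunlock(&dmp->dm_lock);
 *
 * On failure (ERESTART) the lock is not held and the vnode may have been
 * doomed, so the caller must not touch vp->v_data.
 */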
258
259static int
260devfs_vptocnp(struct vop_vptocnp_args *ap)
261{
262	struct vnode *vp = ap->a_vp;
263	struct vnode **dvp = ap->a_vpp;
264	struct devfs_mount *dmp;
265	char *buf = ap->a_buf;
266	int *buflen = ap->a_buflen;
267	struct devfs_dirent *dd, *de;
268	int i, error;
269
270	dmp = VFSTODEVFS(vp->v_mount);
271
272	error = devfs_populate_vp(vp);
273	if (error != 0)
274		return (error);
275
276	i = *buflen;
277	dd = vp->v_data;
278
279	if (vp->v_type == VCHR) {
280		i -= strlen(dd->de_cdp->cdp_c.si_name);
281		if (i < 0) {
282			error = ENOMEM;
283			goto finished;
284		}
285		bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
286		    strlen(dd->de_cdp->cdp_c.si_name));
287		de = dd->de_dir;
288	} else if (vp->v_type == VDIR) {
289		if (dd == dmp->dm_rootdir) {
290			*dvp = vp;
291			vref(*dvp);
292			goto finished;
293		}
294		i -= dd->de_dirent->d_namlen;
295		if (i < 0) {
296			error = ENOMEM;
297			goto finished;
298		}
299		bcopy(dd->de_dirent->d_name, buf + i,
300		    dd->de_dirent->d_namlen);
301		de = dd;
302	} else {
303		error = ENOENT;
304		goto finished;
305	}
306	*buflen = i;
307	de = devfs_parent_dirent(de);
308	if (de == NULL) {
309		error = ENOENT;
310		goto finished;
311	}
312	mtx_lock(&devfs_de_interlock);
313	*dvp = de->de_vnode;
314	if (*dvp != NULL) {
315		VI_LOCK(*dvp);
316		mtx_unlock(&devfs_de_interlock);
317		vholdl(*dvp);
318		VI_UNLOCK(*dvp);
319		vref(*dvp);
320		vdrop(*dvp);
321	} else {
322		mtx_unlock(&devfs_de_interlock);
323		error = ENOENT;
324	}
325finished:
326	sx_xunlock(&dmp->dm_lock);
327	return (error);
328}
329
330/*
331 * Construct the fully qualified path name relative to the mountpoint.
332 * If a NULL cnp is provided, no '/' is appended to the resulting path.
333 */
334char *
335devfs_fqpn(char *buf, struct devfs_mount *dmp, struct devfs_dirent *dd,
336    struct componentname *cnp)
337{
338	int i;
339	struct devfs_dirent *de;
340
341	sx_assert(&dmp->dm_lock, SA_LOCKED);
342
343	i = SPECNAMELEN;
344	buf[i] = '\0';
345	if (cnp != NULL)
346		i -= cnp->cn_namelen;
347	if (i < 0)
348		 return (NULL);
349	if (cnp != NULL)
350		bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
351	de = dd;
352	while (de != dmp->dm_rootdir) {
353		if (cnp != NULL || i < SPECNAMELEN) {
354			i--;
355			if (i < 0)
356				 return (NULL);
357			buf[i] = '/';
358		}
359		i -= de->de_dirent->d_namlen;
360		if (i < 0)
361			 return (NULL);
362		bcopy(de->de_dirent->d_name, buf + i,
363		    de->de_dirent->d_namlen);
364		de = devfs_parent_dirent(de);
365		if (de == NULL)
366			return (NULL);
367	}
368	return (buf + i);
369}
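
/*
 * As a concrete (hypothetical) example of the loop above: for a dirent
 * named "cuau0" sitting in a subdirectory "foo" of the devfs root,
 *
 *	char buf[SPECNAMELEN + 1];
 *	char *p = devfs_fqpn(buf, dmp, de, NULL);
 *
 * leaves p pointing at the string "foo/cuau0" near the end of buf.  With
 * a non-NULL cnp the component name is appended after a '/', which is how
 * devfs_lookupx() builds the name it hands to dev_clone handlers.
 */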
370
371static int
372devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
373	struct devfs_dirent *de)
374{
375	int not_found;
376
377	not_found = 0;
378	if (de->de_flags & DE_DOOMED)
379		not_found = 1;
380	if (DEVFS_DE_DROP(de)) {
381		KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
382		devfs_dirent_free(de);
383	}
384	if (DEVFS_DMP_DROP(dmp)) {
385		KASSERT(not_found == 1,
386			("DEVFS mount struct freed before dirent"));
387		not_found = 2;
388		sx_xunlock(&dmp->dm_lock);
389		devfs_unmount_final(dmp);
390	}
391	if (not_found == 1 || (drop_dm_lock && not_found != 2))
392		sx_unlock(&dmp->dm_lock);
393	return (not_found);
394}
395
396static void
397devfs_insmntque_dtr(struct vnode *vp, void *arg)
398{
399	struct devfs_dirent *de;
400
401	de = (struct devfs_dirent *)arg;
402	mtx_lock(&devfs_de_interlock);
403	vp->v_data = NULL;
404	de->de_vnode = NULL;
405	mtx_unlock(&devfs_de_interlock);
406	vgone(vp);
407	vput(vp);
408}
409
410/*
411 * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
412 * it on return.
413 */
414int
415devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
416    struct vnode **vpp)
417{
418	int error;
419	struct vnode *vp;
420	struct cdev *dev;
421	struct devfs_mount *dmp;
422	struct cdevsw *dsw;
423
424	dmp = VFSTODEVFS(mp);
425	if (de->de_flags & DE_DOOMED) {
426		sx_xunlock(&dmp->dm_lock);
427		return (ENOENT);
428	}
429loop:
430	DEVFS_DE_HOLD(de);
431	DEVFS_DMP_HOLD(dmp);
432	mtx_lock(&devfs_de_interlock);
433	vp = de->de_vnode;
434	if (vp != NULL) {
435		VI_LOCK(vp);
436		mtx_unlock(&devfs_de_interlock);
437		sx_xunlock(&dmp->dm_lock);
438		vget(vp, lockmode | LK_INTERLOCK | LK_RETRY, curthread);
439		sx_xlock(&dmp->dm_lock);
440		if (devfs_allocv_drop_refs(0, dmp, de)) {
441			vput(vp);
442			return (ENOENT);
443		}
444		else if ((vp->v_iflag & VI_DOOMED) != 0) {
445			mtx_lock(&devfs_de_interlock);
446			if (de->de_vnode == vp) {
447				de->de_vnode = NULL;
448				vp->v_data = NULL;
449			}
450			mtx_unlock(&devfs_de_interlock);
451			vput(vp);
452			goto loop;
453		}
454		sx_xunlock(&dmp->dm_lock);
455		*vpp = vp;
456		return (0);
457	}
458	mtx_unlock(&devfs_de_interlock);
459	if (de->de_dirent->d_type == DT_CHR) {
460		if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
461			devfs_allocv_drop_refs(1, dmp, de);
462			return (ENOENT);
463		}
464		dev = &de->de_cdp->cdp_c;
465	} else {
466		dev = NULL;
467	}
468	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
469	if (error != 0) {
470		devfs_allocv_drop_refs(1, dmp, de);
471		printf("devfs_allocv: failed to allocate new vnode\n");
472		return (error);
473	}
474
475	if (de->de_dirent->d_type == DT_CHR) {
476		vp->v_type = VCHR;
477		VI_LOCK(vp);
478		dev_lock();
479		dev_refl(dev);
		/* XXX: v_rdev should be protected by the vnode lock. */
481		vp->v_rdev = dev;
482		KASSERT(vp->v_usecount == 1,
483		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
484		dev->si_usecount += vp->v_usecount;
485		/* Special casing of ttys for deadfs.  Probably redundant. */
486		dsw = dev->si_devsw;
487		if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
488			vp->v_vflag |= VV_ISTTY;
489		dev_unlock();
490		VI_UNLOCK(vp);
491		if ((dev->si_flags & SI_ETERNAL) != 0)
492			vp->v_vflag |= VV_ETERNALDEV;
493		vp->v_op = &devfs_specops;
494	} else if (de->de_dirent->d_type == DT_DIR) {
495		vp->v_type = VDIR;
496	} else if (de->de_dirent->d_type == DT_LNK) {
497		vp->v_type = VLNK;
498	} else {
499		vp->v_type = VBAD;
500	}
501	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
502	VN_LOCK_ASHARE(vp);
503	mtx_lock(&devfs_de_interlock);
504	vp->v_data = de;
505	de->de_vnode = vp;
506	mtx_unlock(&devfs_de_interlock);
507	error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
508	if (error != 0) {
509		(void) devfs_allocv_drop_refs(1, dmp, de);
510		return (error);
511	}
512	if (devfs_allocv_drop_refs(0, dmp, de)) {
513		vput(vp);
514		return (ENOENT);
515	}
516#ifdef MAC
517	mac_devfs_vnode_associate(mp, de, vp);
518#endif
519	sx_xunlock(&dmp->dm_lock);
520	*vpp = vp;
521	return (0);
522}
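
/*
 * Since devfs_allocv() consumes dm_lock on every path, a caller acquires
 * the lock (directly or via devfs_populate_vp()), finds the dirent, and
 * then hands the lock over; a sketch, with the NULL check on the lookup
 * omitted:
 *
 *	sx_xlock(&dmp->dm_lock);
 *	de = devfs_find(dd, name, namelen, 0);
 *	error = devfs_allocv(de, mp, LK_EXCLUSIVE, &vp);
 *	(dm_lock is no longer held here, error or not)
 *
 * This is also why devfs_lookupx() clears *dm_unlock right after its
 * calls to devfs_allocv().
 */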
523
524static int
525devfs_access(struct vop_access_args *ap)
526{
527	struct vnode *vp = ap->a_vp;
528	struct devfs_dirent *de;
529	int error;
530
531	de = vp->v_data;
532	if (vp->v_type == VDIR)
533		de = de->de_dir;
534
535	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
536	    ap->a_accmode, ap->a_cred, NULL);
537	if (error == 0)
538		return (0);
539	if (error != EACCES)
540		return (error);
541	/* We do, however, allow access to the controlling terminal */
542	if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
543		return (error);
544	if (ap->a_td->td_proc->p_session->s_ttydp == de->de_cdp)
545		return (0);
546	return (error);
547}
548
549/* ARGSUSED */
550static int
551devfs_close(struct vop_close_args *ap)
552{
553	struct vnode *vp = ap->a_vp, *oldvp;
554	struct thread *td = ap->a_td;
555	struct cdev *dev = vp->v_rdev;
556	struct cdevsw *dsw;
557	int vp_locked, error, ref;
558
559	/*
560	 * XXX: Don't call d_close() if we were called because of
561	 * XXX: insmntque1() failure.
562	 */
563	if (vp->v_data == NULL)
564		return (0);
565
566	/*
567	 * Hack: a tty device that is a controlling terminal
568	 * has a reference from the session structure.
569	 * We cannot easily tell that a character device is
570	 * a controlling terminal, unless it is the closing
571	 * process' controlling terminal.  In that case,
572	 * if the reference count is 2 (this last descriptor
573	 * plus the session), release the reference from the session.
574	 */
575	oldvp = NULL;
576	sx_xlock(&proctree_lock);
577	if (td && vp == td->td_proc->p_session->s_ttyvp) {
578		SESS_LOCK(td->td_proc->p_session);
579		VI_LOCK(vp);
580		if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) {
581			td->td_proc->p_session->s_ttyvp = NULL;
582			td->td_proc->p_session->s_ttydp = NULL;
583			oldvp = vp;
584		}
585		VI_UNLOCK(vp);
586		SESS_UNLOCK(td->td_proc->p_session);
587	}
588	sx_xunlock(&proctree_lock);
589	if (oldvp != NULL)
590		vrele(oldvp);
591	/*
592	 * We do not want to really close the device if it
593	 * is still in use unless we are trying to close it
594	 * forcibly. Since every use (buffer, vnode, swap, cmap)
595	 * holds a reference to the vnode, and because we mark
596	 * any other vnodes that alias this device, when the
597	 * sum of the reference counts on all the aliased
598	 * vnodes descends to one, we are on last close.
599	 */
600	dsw = dev_refthread(dev, &ref);
601	if (dsw == NULL)
602		return (ENXIO);
603	VI_LOCK(vp);
604	if (vp->v_iflag & VI_DOOMED) {
605		/* Forced close. */
606	} else if (dsw->d_flags & D_TRACKCLOSE) {
607		/* Keep device updated on status. */
608	} else if (count_dev(dev) > 1) {
609		VI_UNLOCK(vp);
610		dev_relthread(dev, ref);
611		return (0);
612	}
613	vholdl(vp);
614	VI_UNLOCK(vp);
615	vp_locked = VOP_ISLOCKED(vp);
616	VOP_UNLOCK(vp, 0);
617	KASSERT(dev->si_refcount > 0,
618	    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
619	error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
620	dev_relthread(dev, ref);
621	vn_lock(vp, vp_locked | LK_RETRY);
622	vdrop(vp);
623	return (error);
624}
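
/*
 * The D_TRACKCLOSE test above means that, by default, a driver only sees
 * d_close() on last close.  A hypothetical driver that wants every close
 * reported can set the flag in its cdevsw (mydev_* names are made up):
 *
 *	static struct cdevsw mydev_cdevsw = {
 *		.d_version =	D_VERSION,
 *		.d_flags =	D_TRACKCLOSE,
 *		.d_open =	mydev_open,
 *		.d_close =	mydev_close,
 *		.d_name =	"mydev",
 *	};
 *
 * Even then, a forced close of a doomed vnode can reach d_close() while
 * other descriptors still reference the device, as the checks above show.
 */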
625
626static int
627devfs_close_f(struct file *fp, struct thread *td)
628{
629	int error;
630	struct file *fpop;
631
632	/*
633	 * NB: td may be NULL if this descriptor is closed due to
634	 * garbage collection from a closed UNIX domain socket.
635	 */
636	fpop = curthread->td_fpop;
637	curthread->td_fpop = fp;
638	error = vnops.fo_close(fp, td);
639	curthread->td_fpop = fpop;
640
641	/*
	 * The f_cdevpriv cannot be assigned a non-NULL value while we
	 * are destroying the file.
644	 */
645	if (fp->f_cdevpriv != NULL)
646		devfs_fpdrop(fp);
647	return (error);
648}
649
650static int
651devfs_fsync(struct vop_fsync_args *ap)
652{
653	int error;
654	struct bufobj *bo;
655	struct devfs_dirent *de;
656
657	if (!vn_isdisk(ap->a_vp, &error)) {
658		bo = &ap->a_vp->v_bufobj;
659		de = ap->a_vp->v_data;
660		if (error == ENXIO && bo->bo_dirty.bv_cnt > 0) {
661			printf("Device %s went missing before all of the data "
662			    "could be written to it; expect data loss.\n",
663			    de->de_dirent->d_name);
664
665			error = vop_stdfsync(ap);
666			if (bo->bo_dirty.bv_cnt != 0 || error != 0)
667				panic("devfs_fsync: vop_stdfsync failed.");
668		}
669
670		return (0);
671	}
672
673	return (vop_stdfsync(ap));
674}
675
676static int
677devfs_getattr(struct vop_getattr_args *ap)
678{
679	struct vnode *vp = ap->a_vp;
680	struct vattr *vap = ap->a_vap;
681	int error;
682	struct devfs_dirent *de;
683	struct devfs_mount *dmp;
684	struct cdev *dev;
685
686	error = devfs_populate_vp(vp);
687	if (error != 0)
688		return (error);
689
690	dmp = VFSTODEVFS(vp->v_mount);
691	sx_xunlock(&dmp->dm_lock);
692
693	de = vp->v_data;
694	KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
695	if (vp->v_type == VDIR) {
696		de = de->de_dir;
697		KASSERT(de != NULL,
698		    ("Null dir dirent in devfs_getattr vp=%p", vp));
699	}
700	vap->va_uid = de->de_uid;
701	vap->va_gid = de->de_gid;
702	vap->va_mode = de->de_mode;
703	if (vp->v_type == VLNK)
704		vap->va_size = strlen(de->de_symlink);
705	else if (vp->v_type == VDIR)
706		vap->va_size = vap->va_bytes = DEV_BSIZE;
707	else
708		vap->va_size = 0;
709	if (vp->v_type != VDIR)
710		vap->va_bytes = 0;
711	vap->va_blocksize = DEV_BSIZE;
712	vap->va_type = vp->v_type;
713
714#define fix(aa)							\
715	do {							\
716		if ((aa).tv_sec <= 3600) {			\
717			(aa).tv_sec = boottime.tv_sec;		\
718			(aa).tv_nsec = boottime.tv_usec * 1000; \
719		}						\
720	} while (0)
721
722	if (vp->v_type != VCHR)  {
723		fix(de->de_atime);
724		vap->va_atime = de->de_atime;
725		fix(de->de_mtime);
726		vap->va_mtime = de->de_mtime;
727		fix(de->de_ctime);
728		vap->va_ctime = de->de_ctime;
729	} else {
730		dev = vp->v_rdev;
731		fix(dev->si_atime);
732		vap->va_atime = dev->si_atime;
733		fix(dev->si_mtime);
734		vap->va_mtime = dev->si_mtime;
735		fix(dev->si_ctime);
736		vap->va_ctime = dev->si_ctime;
737
738		vap->va_rdev = cdev2priv(dev)->cdp_inode;
739	}
740	vap->va_gen = 0;
741	vap->va_flags = 0;
742	vap->va_filerev = 0;
743	vap->va_nlink = de->de_links;
744	vap->va_fileid = de->de_inode;
745
746	return (error);
747}
748
749/* ARGSUSED */
750static int
751devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
752{
753	struct cdev *dev;
754	struct cdevsw *dsw;
755	struct vnode *vp;
756	struct vnode *vpold;
757	int error, i, ref;
758	const char *p;
759	struct fiodgname_arg *fgn;
760	struct file *fpop;
761
762	fpop = td->td_fpop;
763	error = devfs_fp_check(fp, &dev, &dsw, &ref);
764	if (error != 0) {
765		error = vnops.fo_ioctl(fp, com, data, cred, td);
766		return (error);
767	}
768
769	if (com == FIODTYPE) {
770		*(int *)data = dsw->d_flags & D_TYPEMASK;
771		td->td_fpop = fpop;
772		dev_relthread(dev, ref);
773		return (0);
774	} else if (com == FIODGNAME) {
775		fgn = data;
776		p = devtoname(dev);
777		i = strlen(p) + 1;
778		if (i > fgn->len)
779			error = EINVAL;
780		else
781			error = copyout(p, fgn->buf, i);
782		td->td_fpop = fpop;
783		dev_relthread(dev, ref);
784		return (error);
785	}
786	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
787	td->td_fpop = NULL;
788	dev_relthread(dev, ref);
789	if (error == ENOIOCTL)
790		error = ENOTTY;
791	if (error == 0 && com == TIOCSCTTY) {
792		vp = fp->f_vnode;
793
794		/* Do nothing if reassigning same control tty */
795		sx_slock(&proctree_lock);
796		if (td->td_proc->p_session->s_ttyvp == vp) {
797			sx_sunlock(&proctree_lock);
798			return (0);
799		}
800
801		vpold = td->td_proc->p_session->s_ttyvp;
802		VREF(vp);
803		SESS_LOCK(td->td_proc->p_session);
804		td->td_proc->p_session->s_ttyvp = vp;
805		td->td_proc->p_session->s_ttydp = cdev2priv(dev);
806		SESS_UNLOCK(td->td_proc->p_session);
807
808		sx_sunlock(&proctree_lock);
809
810		/* Get rid of reference to old control tty */
811		if (vpold)
812			vrele(vpold);
813	}
814	return (error);
815}
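
/*
 * A userland sketch of the FIODGNAME path handled above, assuming only
 * the fiodgname_arg layout used there (a length and a buffer pointer):
 *
 *	#include <sys/param.h>
 *	#include <sys/filio.h>
 *	#include <sys/ioctl.h>
 *	#include <stdio.h>
 *
 *	char name[SPECNAMELEN + 1];
 *	struct fiodgname_arg fgn;
 *
 *	fgn.len = sizeof(name);
 *	fgn.buf = name;
 *	if (ioctl(fd, FIODGNAME, &fgn) == 0)
 *		printf("descriptor refers to /dev/%s\n", name);
 *
 * If the underlying device has been destroyed, devfs_ioctl_f() above
 * instead falls back to the generic vnode fileops.
 */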
816
817/* ARGSUSED */
818static int
819devfs_kqfilter_f(struct file *fp, struct knote *kn)
820{
821	struct cdev *dev;
822	struct cdevsw *dsw;
823	int error, ref;
824	struct file *fpop;
825	struct thread *td;
826
827	td = curthread;
828	fpop = td->td_fpop;
829	error = devfs_fp_check(fp, &dev, &dsw, &ref);
830	if (error)
831		return (error);
832	error = dsw->d_kqfilter(dev, kn);
833	td->td_fpop = fpop;
834	dev_relthread(dev, ref);
835	return (error);
836}
837
838static inline int
839devfs_prison_check(struct devfs_dirent *de, struct thread *td)
840{
841	struct cdev_priv *cdp;
842	struct ucred *dcr;
843	int error;
844
845	cdp = de->de_cdp;
846	if (cdp == NULL)
847		return (0);
848	dcr = cdp->cdp_c.si_cred;
849	if (dcr == NULL)
850		return (0);
851
852	error = prison_check(td->td_ucred, dcr);
853	if (error == 0)
854		return (0);
855	/* We do, however, allow access to the controlling terminal */
856	if (!(td->td_proc->p_flag & P_CONTROLT))
857		return (error);
858	if (td->td_proc->p_session->s_ttydp == cdp)
859		return (0);
860	return (error);
861}
862
863static int
864devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
865{
866	struct componentname *cnp;
867	struct vnode *dvp, **vpp;
868	struct thread *td;
869	struct devfs_dirent *de, *dd;
870	struct devfs_dirent **dde;
871	struct devfs_mount *dmp;
872	struct cdev *cdev;
873	int error, flags, nameiop, dvplocked;
874	char specname[SPECNAMELEN + 1], *pname;
875
876	cnp = ap->a_cnp;
877	vpp = ap->a_vpp;
878	dvp = ap->a_dvp;
879	pname = cnp->cn_nameptr;
880	td = cnp->cn_thread;
881	flags = cnp->cn_flags;
882	nameiop = cnp->cn_nameiop;
883	dmp = VFSTODEVFS(dvp->v_mount);
884	dd = dvp->v_data;
885	*vpp = NULLVP;
886
887	if ((flags & ISLASTCN) && nameiop == RENAME)
888		return (EOPNOTSUPP);
889
890	if (dvp->v_type != VDIR)
891		return (ENOTDIR);
892
893	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
894		return (EIO);
895
896	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
897	if (error)
898		return (error);
899
900	if (cnp->cn_namelen == 1 && *pname == '.') {
901		if ((flags & ISLASTCN) && nameiop != LOOKUP)
902			return (EINVAL);
903		*vpp = dvp;
904		VREF(dvp);
905		return (0);
906	}
907
908	if (flags & ISDOTDOT) {
909		if ((flags & ISLASTCN) && nameiop != LOOKUP)
910			return (EINVAL);
911		de = devfs_parent_dirent(dd);
912		if (de == NULL)
913			return (ENOENT);
914		dvplocked = VOP_ISLOCKED(dvp);
915		VOP_UNLOCK(dvp, 0);
916		error = devfs_allocv(de, dvp->v_mount,
917		    cnp->cn_lkflags & LK_TYPE_MASK, vpp);
918		*dm_unlock = 0;
919		vn_lock(dvp, dvplocked | LK_RETRY);
920		return (error);
921	}
922
923	dd = dvp->v_data;
924	de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen, 0);
925	while (de == NULL) {	/* While(...) so we can use break */
926
927		if (nameiop == DELETE)
928			return (ENOENT);
929
930		/*
931		 * OK, we didn't have an entry for the name we were asked for
932		 * so we try to see if anybody can create it on demand.
933		 */
934		pname = devfs_fqpn(specname, dmp, dd, cnp);
935		if (pname == NULL)
936			break;
937
938		cdev = NULL;
939		DEVFS_DMP_HOLD(dmp);
940		sx_xunlock(&dmp->dm_lock);
941		sx_slock(&clone_drain_lock);
942		EVENTHANDLER_INVOKE(dev_clone,
943		    td->td_ucred, pname, strlen(pname), &cdev);
944		sx_sunlock(&clone_drain_lock);
945
946		if (cdev == NULL)
947			sx_xlock(&dmp->dm_lock);
948		else if (devfs_populate_vp(dvp) != 0) {
949			*dm_unlock = 0;
950			sx_xlock(&dmp->dm_lock);
951			if (DEVFS_DMP_DROP(dmp)) {
952				sx_xunlock(&dmp->dm_lock);
953				devfs_unmount_final(dmp);
954			} else
955				sx_xunlock(&dmp->dm_lock);
956			dev_rel(cdev);
957			return (ENOENT);
958		}
959		if (DEVFS_DMP_DROP(dmp)) {
960			*dm_unlock = 0;
961			sx_xunlock(&dmp->dm_lock);
962			devfs_unmount_final(dmp);
963			if (cdev != NULL)
964				dev_rel(cdev);
965			return (ENOENT);
966		}
967
968		if (cdev == NULL)
969			break;
970
971		dev_lock();
972		dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
973		if (dde != NULL && *dde != NULL)
974			de = *dde;
975		dev_unlock();
976		dev_rel(cdev);
977		break;
978	}
979
980	if (de == NULL || de->de_flags & DE_WHITEOUT) {
981		if ((nameiop == CREATE || nameiop == RENAME) &&
982		    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
983			cnp->cn_flags |= SAVENAME;
984			return (EJUSTRETURN);
985		}
986		return (ENOENT);
987	}
988
989	if (devfs_prison_check(de, td))
990		return (ENOENT);
991
992	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
993		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
994		if (error)
995			return (error);
996		if (*vpp == dvp) {
997			VREF(dvp);
998			*vpp = dvp;
999			return (0);
1000		}
1001	}
1002	error = devfs_allocv(de, dvp->v_mount, cnp->cn_lkflags & LK_TYPE_MASK,
1003	    vpp);
1004	*dm_unlock = 0;
1005	return (error);
1006}
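
/*
 * The dev_clone eventhandler invoked above is the hook that lets drivers
 * create device nodes on first lookup.  A sketch of one common handler
 * shape (mydev_* names are hypothetical; MAKEDEV_REF keeps the extra
 * reference that this lookup code releases with dev_rel()):
 *
 *	static void
 *	mydev_clone(void *arg, struct ucred *cred, char *name, int namelen,
 *	    struct cdev **dev)
 *	{
 *
 *		if (*dev != NULL)
 *			return;
 *		if (strcmp(name, "mydev") != 0)
 *			return;
 *		*dev = make_dev_credf(MAKEDEV_REF, &mydev_cdevsw, 0, cred,
 *		    UID_ROOT, GID_WHEEL, 0600, "mydev");
 *	}
 *
 * registered, e.g. from the driver's module event handler, with
 * EVENTHANDLER_REGISTER(dev_clone, mydev_clone, NULL, EVENTHANDLER_PRI_ANY).
 * After a successful clone the code above repopulates this mount so the
 * new entry becomes visible to the lookup.
 */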
1007
1008static int
1009devfs_lookup(struct vop_lookup_args *ap)
1010{
1011	int j;
1012	struct devfs_mount *dmp;
1013	int dm_unlock;
1014
1015	if (devfs_populate_vp(ap->a_dvp) != 0)
1016		return (ENOTDIR);
1017
1018	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
1019	dm_unlock = 1;
1020	j = devfs_lookupx(ap, &dm_unlock);
1021	if (dm_unlock == 1)
1022		sx_xunlock(&dmp->dm_lock);
1023	return (j);
1024}
1025
1026static int
1027devfs_mknod(struct vop_mknod_args *ap)
1028{
1029	struct componentname *cnp;
1030	struct vnode *dvp, **vpp;
1031	struct devfs_dirent *dd, *de;
1032	struct devfs_mount *dmp;
1033	int error;
1034
1035	/*
	 * The only type of node we should be creating here is a
	 * character device; for anything else return EOPNOTSUPP.
1038	 */
1039	if (ap->a_vap->va_type != VCHR)
1040		return (EOPNOTSUPP);
1041	dvp = ap->a_dvp;
1042	dmp = VFSTODEVFS(dvp->v_mount);
1043
1044	cnp = ap->a_cnp;
1045	vpp = ap->a_vpp;
1046	dd = dvp->v_data;
1047
1048	error = ENOENT;
1049	sx_xlock(&dmp->dm_lock);
1050	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
1051		if (cnp->cn_namelen != de->de_dirent->d_namlen)
1052			continue;
1053		if (de->de_dirent->d_type == DT_CHR &&
1054		    (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
1055			continue;
1056		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
1057		    de->de_dirent->d_namlen) != 0)
1058			continue;
1059		if (de->de_flags & DE_WHITEOUT)
1060			break;
1061		goto notfound;
1062	}
1063	if (de == NULL)
1064		goto notfound;
1065	de->de_flags &= ~DE_WHITEOUT;
1066	error = devfs_allocv(de, dvp->v_mount, LK_EXCLUSIVE, vpp);
1067	return (error);
1068notfound:
1069	sx_xunlock(&dmp->dm_lock);
1070	return (error);
1071}
1072
1073/* ARGSUSED */
1074static int
1075devfs_open(struct vop_open_args *ap)
1076{
1077	struct thread *td = ap->a_td;
1078	struct vnode *vp = ap->a_vp;
1079	struct cdev *dev = vp->v_rdev;
1080	struct file *fp = ap->a_fp;
1081	int error, ref, vlocked;
1082	struct cdevsw *dsw;
1083	struct file *fpop;
1084	struct mtx *mtxp;
1085
1086	if (vp->v_type == VBLK)
1087		return (ENXIO);
1088
1089	if (dev == NULL)
1090		return (ENXIO);
1091
1092	/* Make this field valid before any I/O in d_open. */
1093	if (dev->si_iosize_max == 0)
1094		dev->si_iosize_max = DFLTPHYS;
1095
1096	dsw = dev_refthread(dev, &ref);
1097	if (dsw == NULL)
1098		return (ENXIO);
1099	if (fp == NULL && dsw->d_fdopen != NULL) {
1100		dev_relthread(dev, ref);
1101		return (ENXIO);
1102	}
1103
1104	vlocked = VOP_ISLOCKED(vp);
1105	VOP_UNLOCK(vp, 0);
1106
1107	fpop = td->td_fpop;
1108	td->td_fpop = fp;
1109	if (fp != NULL) {
1110		fp->f_data = dev;
1111		fp->f_vnode = vp;
1112	}
1113	if (dsw->d_fdopen != NULL)
1114		error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
1115	else
1116		error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
1117	/* Clean up any cdevpriv upon error. */
1118	if (error != 0)
1119		devfs_clear_cdevpriv();
1120	td->td_fpop = fpop;
1121
1122	vn_lock(vp, vlocked | LK_RETRY);
1123	dev_relthread(dev, ref);
1124	if (error != 0) {
1125		if (error == ERESTART)
1126			error = EINTR;
1127		return (error);
1128	}
1129
1130#if 0	/* /dev/console */
1131	KASSERT(fp != NULL, ("Could not vnode bypass device on NULL fp"));
1132#else
1133	if (fp == NULL)
1134		return (error);
1135#endif
1136	if (fp->f_ops == &badfileops)
1137		finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
1138	mtxp = mtx_pool_find(mtxpool_sleep, fp);
1139
1140	/*
	 * Hint to dofilewrite() not to force buffer draining on the
	 * writer to this file.  Most likely, the write will not need
	 * normal buffers.
1144	 */
1145	mtx_lock(mtxp);
1146	fp->f_vnread_flags |= FDEVFS_VNODE;
1147	mtx_unlock(mtxp);
1148	return (error);
1149}
1150
1151static int
1152devfs_pathconf(struct vop_pathconf_args *ap)
1153{
1154
1155	switch (ap->a_name) {
1156	case _PC_MAC_PRESENT:
1157#ifdef MAC
1158		/*
1159		 * If MAC is enabled, devfs automatically supports
		 * trivial non-persistent label storage.
1161		 */
1162		*ap->a_retval = 1;
1163#else
1164		*ap->a_retval = 0;
1165#endif
1166		return (0);
1167	default:
1168		return (vop_stdpathconf(ap));
1169	}
1170	/* NOTREACHED */
1171}
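
/*
 * Userland can query the variable above with pathconf(2); a small sketch:
 *
 *	long mac_present;
 *
 *	mac_present = pathconf("/dev/null", _PC_MAC_PRESENT);
 *
 * which yields 1 on a MAC-enabled kernel, 0 otherwise, and -1 with errno
 * set if the call itself fails.
 */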
1172
1173/* ARGSUSED */
1174static int
1175devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
1176{
1177	struct cdev *dev;
1178	struct cdevsw *dsw;
1179	int error, ref;
1180	struct file *fpop;
1181
1182	fpop = td->td_fpop;
1183	error = devfs_fp_check(fp, &dev, &dsw, &ref);
1184	if (error != 0) {
1185		error = vnops.fo_poll(fp, events, cred, td);
1186		return (error);
1187	}
1188	error = dsw->d_poll(dev, events, td);
1189	td->td_fpop = fpop;
1190	dev_relthread(dev, ref);
1191	return(error);
1192}
1193
1194/*
1195 * Print out the contents of a special device vnode.
1196 */
1197static int
1198devfs_print(struct vop_print_args *ap)
1199{
1200
1201	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
1202	return (0);
1203}
1204
1205static int
1206devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred,
1207    int flags, struct thread *td)
1208{
1209	struct cdev *dev;
1210	int ioflag, error, ref;
1211	ssize_t resid;
1212	struct cdevsw *dsw;
1213	struct file *fpop;
1214
1215	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
1216		return (EINVAL);
1217	fpop = td->td_fpop;
1218	error = devfs_fp_check(fp, &dev, &dsw, &ref);
1219	if (error != 0) {
1220		error = vnops.fo_read(fp, uio, cred, flags, td);
1221		return (error);
1222	}
1223	resid = uio->uio_resid;
1224	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
1225	if (ioflag & O_DIRECT)
1226		ioflag |= IO_DIRECT;
1227
1228	foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
1229	error = dsw->d_read(dev, uio, ioflag);
1230	if (uio->uio_resid != resid || (error == 0 && resid != 0))
1231		devfs_timestamp(&dev->si_atime);
1232	td->td_fpop = fpop;
1233	dev_relthread(dev, ref);
1234
1235	foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
1236	return (error);
1237}
1238
1239static int
1240devfs_readdir(struct vop_readdir_args *ap)
1241{
1242	int error;
1243	struct uio *uio;
1244	struct dirent *dp;
1245	struct devfs_dirent *dd;
1246	struct devfs_dirent *de;
1247	struct devfs_mount *dmp;
1248	off_t off;
1249	int *tmp_ncookies = NULL;
1250
1251	if (ap->a_vp->v_type != VDIR)
1252		return (ENOTDIR);
1253
1254	uio = ap->a_uio;
1255	if (uio->uio_offset < 0)
1256		return (EINVAL);
1257
1258	/*
1259	 * XXX: This is a temporary hack to get around this filesystem not
1260	 * supporting cookies. We store the location of the ncookies pointer
1261	 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
1262	 * and set the number of cookies to 0. We then set the pointer to
1263	 * NULL so that vfs_read_dirent doesn't try to call realloc() on
1264	 * ap->a_cookies. Later in this function, we restore the ap->a_ncookies
1265	 * pointer to its original location before returning to the caller.
1266	 */
1267	if (ap->a_ncookies != NULL) {
1268		tmp_ncookies = ap->a_ncookies;
1269		*ap->a_ncookies = 0;
1270		ap->a_ncookies = NULL;
1271	}
1272
1273	dmp = VFSTODEVFS(ap->a_vp->v_mount);
1274	if (devfs_populate_vp(ap->a_vp) != 0) {
1275		if (tmp_ncookies != NULL)
1276			ap->a_ncookies = tmp_ncookies;
1277		return (EIO);
1278	}
1279	error = 0;
1280	de = ap->a_vp->v_data;
1281	off = 0;
1282	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
1283		KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
1284		if (dd->de_flags & (DE_COVERED | DE_WHITEOUT))
1285			continue;
1286		if (devfs_prison_check(dd, uio->uio_td))
1287			continue;
1288		if (dd->de_dirent->d_type == DT_DIR)
1289			de = dd->de_dir;
1290		else
1291			de = dd;
1292		dp = dd->de_dirent;
1293		if (dp->d_reclen > uio->uio_resid)
1294			break;
1295		dp->d_fileno = de->de_inode;
1296		if (off >= uio->uio_offset) {
1297			error = vfs_read_dirent(ap, dp, off);
1298			if (error)
1299				break;
1300		}
1301		off += dp->d_reclen;
1302	}
1303	sx_xunlock(&dmp->dm_lock);
1304	uio->uio_offset = off;
1305
1306	/*
	 * Restore ap->a_ncookies if it wasn't NULL in the first place.
1309	 */
1310	if (tmp_ncookies != NULL)
1311		ap->a_ncookies = tmp_ncookies;
1312
1313	return (error);
1314}
1315
1316static int
1317devfs_readlink(struct vop_readlink_args *ap)
1318{
1319	struct devfs_dirent *de;
1320
1321	de = ap->a_vp->v_data;
1322	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
1323}
1324
1325static int
1326devfs_reclaim(struct vop_reclaim_args *ap)
1327{
1328	struct vnode *vp;
1329	struct devfs_dirent *de;
1330
1331	vp = ap->a_vp;
1332	mtx_lock(&devfs_de_interlock);
1333	de = vp->v_data;
1334	if (de != NULL) {
1335		de->de_vnode = NULL;
1336		vp->v_data = NULL;
1337	}
1338	mtx_unlock(&devfs_de_interlock);
1339	vnode_destroy_vobject(vp);
1340	return (0);
1341}
1342
1343static int
1344devfs_reclaim_vchr(struct vop_reclaim_args *ap)
1345{
1346	struct vnode *vp;
1347	struct cdev *dev;
1348
1349	vp = ap->a_vp;
1350	MPASS(vp->v_type == VCHR);
1351
1352	devfs_reclaim(ap);
1353
1354	VI_LOCK(vp);
1355	dev_lock();
1356	dev = vp->v_rdev;
1357	vp->v_rdev = NULL;
1358	if (dev != NULL)
1359		dev->si_usecount -= vp->v_usecount;
1360	dev_unlock();
1361	VI_UNLOCK(vp);
1362	if (dev != NULL)
1363		dev_rel(dev);
1364	return (0);
1365}
1366
1367static int
1368devfs_remove(struct vop_remove_args *ap)
1369{
1370	struct vnode *dvp = ap->a_dvp;
1371	struct vnode *vp = ap->a_vp;
1372	struct devfs_dirent *dd;
1373	struct devfs_dirent *de, *de_covered;
1374	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);
1375
1376	ASSERT_VOP_ELOCKED(dvp, "devfs_remove");
1377	ASSERT_VOP_ELOCKED(vp, "devfs_remove");
1378
1379	sx_xlock(&dmp->dm_lock);
1380	dd = ap->a_dvp->v_data;
1381	de = vp->v_data;
1382	if (de->de_cdp == NULL) {
1383		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
1384		if (de->de_dirent->d_type == DT_LNK) {
1385			de_covered = devfs_find(dd, de->de_dirent->d_name,
1386			    de->de_dirent->d_namlen, 0);
1387			if (de_covered != NULL)
1388				de_covered->de_flags &= ~DE_COVERED;
1389		}
1390		/* We need to unlock dvp because devfs_delete() may lock it. */
1391		VOP_UNLOCK(vp, 0);
1392		if (dvp != vp)
1393			VOP_UNLOCK(dvp, 0);
1394		devfs_delete(dmp, de, 0);
1395		sx_xunlock(&dmp->dm_lock);
1396		if (dvp != vp)
1397			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1398		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1399	} else {
1400		de->de_flags |= DE_WHITEOUT;
1401		sx_xunlock(&dmp->dm_lock);
1402	}
1403	return (0);
1404}
1405
1406/*
 * Revoke is called on a tty when a terminal session ends.  The vnode
 * is orphaned by setting v_op to deadfs, so we need to let go of it
 * as well in order to create a new one next time around.
 */
1412static int
1413devfs_revoke(struct vop_revoke_args *ap)
1414{
1415	struct vnode *vp = ap->a_vp, *vp2;
1416	struct cdev *dev;
1417	struct cdev_priv *cdp;
1418	struct devfs_dirent *de;
1419	u_int i;
1420
1421	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
1422
1423	dev = vp->v_rdev;
1424	cdp = cdev2priv(dev);
1425
1426	dev_lock();
1427	cdp->cdp_inuse++;
1428	dev_unlock();
1429
1430	vhold(vp);
1431	vgone(vp);
1432	vdrop(vp);
1433
	VOP_UNLOCK(vp, 0);
1435 loop:
1436	for (;;) {
1437		mtx_lock(&devfs_de_interlock);
1438		dev_lock();
1439		vp2 = NULL;
1440		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
1441			de = cdp->cdp_dirents[i];
1442			if (de == NULL)
1443				continue;
1444
1445			vp2 = de->de_vnode;
1446			if (vp2 != NULL) {
1447				dev_unlock();
1448				VI_LOCK(vp2);
1449				mtx_unlock(&devfs_de_interlock);
1450				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
1451				    curthread))
1452					goto loop;
1453				vhold(vp2);
1454				vgone(vp2);
1455				vdrop(vp2);
1456				vput(vp2);
1457				break;
1458			}
1459		}
1460		if (vp2 != NULL) {
1461			continue;
1462		}
1463		dev_unlock();
1464		mtx_unlock(&devfs_de_interlock);
1465		break;
1466	}
1467	dev_lock();
1468	cdp->cdp_inuse--;
1469	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
1470		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
1471		dev_unlock();
1472		dev_rel(&cdp->cdp_c);
1473	} else
1474		dev_unlock();
1475
1476	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1477	return (0);
1478}
1479
1480static int
1481devfs_rioctl(struct vop_ioctl_args *ap)
1482{
1483	struct vnode *vp;
1484	struct devfs_mount *dmp;
1485	int error;
1486
1487	vp = ap->a_vp;
1488	vn_lock(vp, LK_SHARED | LK_RETRY);
1489	if (vp->v_iflag & VI_DOOMED) {
1490		VOP_UNLOCK(vp, 0);
1491		return (EBADF);
1492	}
1493	dmp = VFSTODEVFS(vp->v_mount);
1494	sx_xlock(&dmp->dm_lock);
1495	VOP_UNLOCK(vp, 0);
1496	DEVFS_DMP_HOLD(dmp);
1497	devfs_populate(dmp);
1498	if (DEVFS_DMP_DROP(dmp)) {
1499		sx_xunlock(&dmp->dm_lock);
1500		devfs_unmount_final(dmp);
1501		return (ENOENT);
1502	}
1503	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
1504	sx_xunlock(&dmp->dm_lock);
1505	return (error);
1506}
1507
1508static int
1509devfs_rread(struct vop_read_args *ap)
1510{
1511
1512	if (ap->a_vp->v_type != VDIR)
1513		return (EINVAL);
1514	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
1515}
1516
1517static int
1518devfs_setattr(struct vop_setattr_args *ap)
1519{
1520	struct devfs_dirent *de;
1521	struct vattr *vap;
1522	struct vnode *vp;
1523	struct thread *td;
1524	int c, error;
1525	uid_t uid;
1526	gid_t gid;
1527
1528	vap = ap->a_vap;
1529	vp = ap->a_vp;
1530	td = curthread;
1531	if ((vap->va_type != VNON) ||
1532	    (vap->va_nlink != VNOVAL) ||
1533	    (vap->va_fsid != VNOVAL) ||
1534	    (vap->va_fileid != VNOVAL) ||
1535	    (vap->va_blocksize != VNOVAL) ||
1536	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1537	    (vap->va_rdev != VNOVAL) ||
1538	    ((int)vap->va_bytes != VNOVAL) ||
1539	    (vap->va_gen != VNOVAL)) {
1540		return (EINVAL);
1541	}
1542
1543	error = devfs_populate_vp(vp);
1544	if (error != 0)
1545		return (error);
1546
1547	de = vp->v_data;
1548	if (vp->v_type == VDIR)
1549		de = de->de_dir;
1550
1551	c = 0;
1552	if (vap->va_uid == (uid_t)VNOVAL)
1553		uid = de->de_uid;
1554	else
1555		uid = vap->va_uid;
1556	if (vap->va_gid == (gid_t)VNOVAL)
1557		gid = de->de_gid;
1558	else
1559		gid = vap->va_gid;
1560	if (uid != de->de_uid || gid != de->de_gid) {
1561		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
1562		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
1563			error = priv_check(td, PRIV_VFS_CHOWN);
1564			if (error != 0)
1565				goto ret;
1566		}
1567		de->de_uid = uid;
1568		de->de_gid = gid;
1569		c = 1;
1570	}
1571
1572	if (vap->va_mode != (mode_t)VNOVAL) {
1573		if (ap->a_cred->cr_uid != de->de_uid) {
1574			error = priv_check(td, PRIV_VFS_ADMIN);
1575			if (error != 0)
1576				goto ret;
1577		}
1578		de->de_mode = vap->va_mode;
1579		c = 1;
1580	}
1581
1582	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1583		error = vn_utimes_perm(vp, vap, ap->a_cred, td);
1584		if (error != 0)
1585			goto ret;
1586		if (vap->va_atime.tv_sec != VNOVAL) {
1587			if (vp->v_type == VCHR)
1588				vp->v_rdev->si_atime = vap->va_atime;
1589			else
1590				de->de_atime = vap->va_atime;
1591		}
1592		if (vap->va_mtime.tv_sec != VNOVAL) {
1593			if (vp->v_type == VCHR)
1594				vp->v_rdev->si_mtime = vap->va_mtime;
1595			else
1596				de->de_mtime = vap->va_mtime;
1597		}
1598		c = 1;
1599	}
1600
1601	if (c) {
1602		if (vp->v_type == VCHR)
1603			vfs_timestamp(&vp->v_rdev->si_ctime);
1604		else
1605			vfs_timestamp(&de->de_mtime);
1606	}
1607
1608ret:
1609	sx_xunlock(&VFSTODEVFS(vp->v_mount)->dm_lock);
1610	return (error);
1611}
1612
1613#ifdef MAC
1614static int
1615devfs_setlabel(struct vop_setlabel_args *ap)
1616{
1617	struct vnode *vp;
1618	struct devfs_dirent *de;
1619
1620	vp = ap->a_vp;
1621	de = vp->v_data;
1622
1623	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
1624	mac_devfs_update(vp->v_mount, de, vp);
1625
1626	return (0);
1627}
1628#endif
1629
1630static int
1631devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
1632{
1633
1634	return (vnops.fo_stat(fp, sb, cred, td));
1635}
1636
1637static int
1638devfs_symlink(struct vop_symlink_args *ap)
1639{
1640	int i, error;
1641	struct devfs_dirent *dd;
1642	struct devfs_dirent *de, *de_covered, *de_dotdot;
1643	struct devfs_mount *dmp;
1644
1645	error = priv_check(curthread, PRIV_DEVFS_SYMLINK);
1646	if (error)
1647		return(error);
1648	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
1649	if (devfs_populate_vp(ap->a_dvp) != 0)
1650		return (ENOENT);
1651
1652	dd = ap->a_dvp->v_data;
1653	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
1654	de->de_flags = DE_USER;
1655	de->de_uid = 0;
1656	de->de_gid = 0;
1657	de->de_mode = 0755;
1658	de->de_inode = alloc_unr(devfs_inos);
1659	de->de_dir = dd;
1660	de->de_dirent->d_type = DT_LNK;
1661	i = strlen(ap->a_target) + 1;
1662	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
1663	bcopy(ap->a_target, de->de_symlink, i);
1664#ifdef MAC
1665	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
1666#endif
1667	de_covered = devfs_find(dd, de->de_dirent->d_name,
1668	    de->de_dirent->d_namlen, 0);
1669	if (de_covered != NULL) {
1670		if ((de_covered->de_flags & DE_USER) != 0) {
1671			devfs_delete(dmp, de, DEVFS_DEL_NORECURSE);
1672			sx_xunlock(&dmp->dm_lock);
1673			return (EEXIST);
1674		}
1675		KASSERT((de_covered->de_flags & DE_COVERED) == 0,
1676		    ("devfs_symlink: entry %p already covered", de_covered));
1677		de_covered->de_flags |= DE_COVERED;
1678	}
1679
1680	de_dotdot = TAILQ_FIRST(&dd->de_dlist);		/* "." */
1681	de_dotdot = TAILQ_NEXT(de_dotdot, de_list);	/* ".." */
1682	TAILQ_INSERT_AFTER(&dd->de_dlist, de_dotdot, de, de_list);
1683	devfs_dir_ref_de(dmp, dd);
1684	devfs_rules_apply(dmp, de);
1685
1686	return (devfs_allocv(de, ap->a_dvp->v_mount, LK_EXCLUSIVE, ap->a_vpp));
1687}
1688
1689static int
1690devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
1691{
1692
1693	return (vnops.fo_truncate(fp, length, cred, td));
1694}
1695
1696static int
1697devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred,
1698    int flags, struct thread *td)
1699{
1700	struct cdev *dev;
1701	int error, ioflag, ref;
1702	ssize_t resid;
1703	struct cdevsw *dsw;
1704	struct file *fpop;
1705
1706	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
1707		return (EINVAL);
1708	fpop = td->td_fpop;
1709	error = devfs_fp_check(fp, &dev, &dsw, &ref);
1710	if (error != 0) {
1711		error = vnops.fo_write(fp, uio, cred, flags, td);
1712		return (error);
1713	}
1714	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
1715	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
1716	if (ioflag & O_DIRECT)
1717		ioflag |= IO_DIRECT;
1718	foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
1719
1720	resid = uio->uio_resid;
1721
1722	error = dsw->d_write(dev, uio, ioflag);
1723	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
1724		devfs_timestamp(&dev->si_ctime);
1725		dev->si_mtime = dev->si_ctime;
1726	}
1727	td->td_fpop = fpop;
1728	dev_relthread(dev, ref);
1729
1730	foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
1731	return (error);
1732}
1733
1734dev_t
1735dev2udev(struct cdev *x)
1736{
1737	if (x == NULL)
1738		return (NODEV);
1739	return (cdev2priv(x)->cdp_inode);
1740}
1741
1742static struct fileops devfs_ops_f = {
1743	.fo_read =	devfs_read_f,
1744	.fo_write =	devfs_write_f,
1745	.fo_truncate =	devfs_truncate_f,
1746	.fo_ioctl =	devfs_ioctl_f,
1747	.fo_poll =	devfs_poll_f,
1748	.fo_kqfilter =	devfs_kqfilter_f,
1749	.fo_stat =	devfs_stat_f,
1750	.fo_close =	devfs_close_f,
1751	.fo_chmod =	vn_chmod,
1752	.fo_chown =	vn_chown,
1753	.fo_sendfile =	vn_sendfile,
1754	.fo_seek =	vn_seek,
1755	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
1756};
1757
1758static struct vop_vector devfs_vnodeops = {
1759	.vop_default =		&default_vnodeops,
1760
1761	.vop_access =		devfs_access,
1762	.vop_getattr =		devfs_getattr,
1763	.vop_ioctl =		devfs_rioctl,
1764	.vop_lookup =		devfs_lookup,
1765	.vop_mknod =		devfs_mknod,
1766	.vop_pathconf =		devfs_pathconf,
1767	.vop_read =		devfs_rread,
1768	.vop_readdir =		devfs_readdir,
1769	.vop_readlink =		devfs_readlink,
1770	.vop_reclaim =		devfs_reclaim,
1771	.vop_remove =		devfs_remove,
1772	.vop_revoke =		devfs_revoke,
1773	.vop_setattr =		devfs_setattr,
1774#ifdef MAC
1775	.vop_setlabel =		devfs_setlabel,
1776#endif
1777	.vop_symlink =		devfs_symlink,
1778	.vop_vptocnp =		devfs_vptocnp,
1779};
1780
1781static struct vop_vector devfs_specops = {
1782	.vop_default =		&default_vnodeops,
1783
1784	.vop_access =		devfs_access,
1785	.vop_bmap =		VOP_PANIC,
1786	.vop_close =		devfs_close,
1787	.vop_create =		VOP_PANIC,
1788	.vop_fsync =		devfs_fsync,
1789	.vop_getattr =		devfs_getattr,
1790	.vop_link =		VOP_PANIC,
1791	.vop_mkdir =		VOP_PANIC,
1792	.vop_mknod =		VOP_PANIC,
1793	.vop_open =		devfs_open,
1794	.vop_pathconf =		devfs_pathconf,
1795	.vop_poll =		dead_poll,
1796	.vop_print =		devfs_print,
1797	.vop_read =		dead_read,
1798	.vop_readdir =		VOP_PANIC,
1799	.vop_readlink =		VOP_PANIC,
1800	.vop_reallocblks =	VOP_PANIC,
1801	.vop_reclaim =		devfs_reclaim_vchr,
1802	.vop_remove =		devfs_remove,
1803	.vop_rename =		VOP_PANIC,
1804	.vop_revoke =		devfs_revoke,
1805	.vop_rmdir =		VOP_PANIC,
1806	.vop_setattr =		devfs_setattr,
1807#ifdef MAC
1808	.vop_setlabel =		devfs_setlabel,
1809#endif
1810	.vop_strategy =		VOP_PANIC,
1811	.vop_symlink =		VOP_PANIC,
1812	.vop_vptocnp =		devfs_vptocnp,
1813	.vop_write =		dead_write,
1814};
1815
1816/*
 * Our calling convention to the device drivers used to be that we passed
 * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_*
 * flags instead, since that is what open(), close() and ioctl() take and
 * we don't really want vnode.h in device drivers.
 * We preserved source compatibility by redefining some vnode flags to
 * be the same as the fcntl ones and by sending down the bitwise OR of
 * the respective fcntl/vnode flags.  These CTASSERTs make sure nobody
 * pulls the rug out from under this.
1825 */
1826CTASSERT(O_NONBLOCK == IO_NDELAY);
1827CTASSERT(O_FSYNC == IO_SYNC);
1828