/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: stable/10/sys/fs/nullfs/null_vnops.c 270319 2014-08-22 07:09:54Z kib $
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
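 *
 * For example (the paths below are purely illustrative):
 *
 *	mount_nullfs /usr/local /mnt/local
 *
 * after which the tree under /usr/local is also visible at /mnt/local.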
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimal filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
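 *
 * In outline (null_bypass() below is the actual implementation):
 *
 *	map any null-node arguments to their lower vnodes
 *	error = VCALL(ap)
 *	restore the original null-node arguments
 *	stack a new null-node over any vnode returned in *vpp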
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise, all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables, as shown below.
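 *
 * For instance, a sketch of such a rename (the layer name "newfs" is
 * hypothetical):
 *
 *	sed -e 's/null/newfs/g' <null_vnops.c >newfs_vnops.c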
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr() in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
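 *
 * As an illustrative sketch only (not compiled code), the two
 * techniques look like this inside a hypothetical vop_getattr
 * handler, where "ap" is the usual argument structure:
 *
 *	(1) error = null_bypass((struct vop_generic_args *)ap);
 *
 *	(2) lvp = NULLVPTOLOWERVP(ap->a_vp);
 *	    error = VOP_GETATTR(lvp, ap->a_vap, ap->a_cred);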
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");

/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}

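/*
 * The nullfs vnode keeps its own write count; only the transitions
 * between zero and non-zero writers are propagated to the lower vnode,
 * so that the lower layer knows whether any writers exist at all.
 */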
static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	KASSERT(vp->v_writecount + ap->a_inc >= 0, ("wrong writecount inc"));
	if (vp->v_writecount > 0 && vp->v_writecount + ap->a_inc == 0)
		error = VOP_ADD_WRITECOUNT(lvp, -1);
	else if (vp->v_writecount == 0 && vp->v_writecount + ap->a_inc > 0)
		error = VOP_ADD_WRITECOUNT(lvp, 1);
	else
		error = 0;
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	struct mount *mp;
	int error;

	mp = dvp->v_mount;
	if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we'll do
	 * a direct call to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	KASSERT((ldvp->v_vflag & VV_ROOT) == 0 ||
	    ((dvp->v_vflag & VV_ROOT) != 0 && (flags & ISDOTDOT) == 0),
	    ("ldvp %p fl %#x dvp %p fl %#x flags %#x", ldvp, ldvp->v_vflag,
	     dvp, dvp->v_vflag, flags));

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation, and we need ldvp to move our lock
	 * from ldvp to dvp.
	 */
	vhold(ldvp);

	error = VOP_LOOKUP(ldvp, &lvp, cnp);

	/*
	 * VOP_LOOKUP() on the lower vnode may unlock ldvp, which allows
	 * dvp to be reclaimed due to the shared v_vnlock.  Check for the
	 * doomed state and return error.
	 */
	if ((error == 0 || error == EJUSTRETURN) &&
	    (dvp->v_iflag & VI_DOOMED) != 0) {
		error = ENOENT;
		if (lvp != NULL)
			vput(lvp);

		/*
		 * If vgone() reclaimed dvp before curthread
		 * relocked ldvp, the locks of dvp and ldvp are no
		 * longer shared.  In this case, the relock of ldvp in
		 * the lower fs VOP_LOOKUP() does not restore the locking
		 * state of dvp.  Compensate for this by unlocking
		 * ldvp and locking dvp, which is also correct if the
		 * locks are still shared.
		 */
		VOP_UNLOCK(ldvp, 0);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}
	vdrop(ldvp);

	if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 &&
	    (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(mp, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

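/*
 * Open the lower vnode through the bypass, then share the lower vnode's
 * VM object with our vnode so that paging and mmap(2) on the nullfs
 * vnode operate on the lower layer's pages.
 */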
static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0)
		vp->v_object = ldvp->v_object;
	return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handle access checks so that we can disallow write access if the
 * layer is mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers, unless the file
	 * is a socket, fifo, or a block or character device resident on
	 * the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers, unless the file
	 * is a socket, fifo, or a block or character device resident on
	 * the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Increasing the refcount of the lower vnode is needed at least for the
 * case when the lower FS is NFS, which must do a sillyrename if the file
 * is still in use.  Unfortunately, v_usecount is incremented in many
 * places in the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp;

	if (vrefcnt(ap->a_vp) > 1) {
		lvp = NULLVPTOLOWERVP(ap->a_vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to prohibit moving files from the null FS directly
 * into the lower FS.  It is not clear why this is disallowed; possibly
 * it should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;
	struct null_node *tnn;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	if (tvp != NULL) {
		tnn = VTONULL(tvp);
		tnn->null_flags |= NULLV_DROP;
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) == 0) {
		VI_LOCK(vp);
		ap->a_flags = flags |= LK_INTERLOCK;
	}
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		VI_UNLOCK(vp);
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdl(lvp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode
		 * lock from the one in lowervp to v_lock in our own vnode
		 * structure.  Handle this case by reacquiring the correct
		 * lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp, 0);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else
		error = vop_stdlock(ap);

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	int mtxlkflag = 0;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) != 0)
		mtxlkflag = 1;
	else if (mtx_owned(VI_MTX(vp)) == 0) {
		VI_LOCK(vp);
		mtxlkflag = 2;
	}
	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		flags |= LK_INTERLOCK;
		vholdl(lvp);
		VI_UNLOCK(vp);
		error = VOP_UNLOCK(lvp, flags);
		vdrop(lvp);
		if (mtxlkflag == 0)
			VI_LOCK(vp);
	} else {
		if (mtxlkflag == 2)
			VI_UNLOCK(vp);
		error = vop_stdunlock(ap);
	}

	return (error);
}

/*
 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
 * since the reference count on the lower vnode is not related to
 * ours.
 */
static int
null_inactive(struct vop_inactive_args *ap __unused)
{
	struct vnode *vp, *lvp;
	struct null_node *xp;
	struct mount *mp;
	struct null_mount *xmp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	xmp = MOUNTTONULLMOUNT(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
	    (xp->null_flags & NULLV_DROP) != 0 ||
	    (lvp->v_vflag & VV_NOSYNC) != 0) {
		/*
		 * If this is the last reference and caching of the
		 * nullfs vnodes is not enabled, or the lower vnode is
		 * deleted, then free up the vnode so as not to tie up
		 * the lower vnodes.
		 */
		vp->v_object = NULL;
		vrecycle(vp);
	}
	return (0);
}

/*
 * Now, the nullfs vnode and, due to the sharing lock, the lower
 * vnode, are exclusively locked, and we shall destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;
	VI_UNLOCK(vp);

	/*
	 * If we were opened for write, we leased one write reference
	 * to the lower vnode.  If this is a reclamation due to the
	 * forced unmount, undo the reference now.
	 */
	if (vp->v_writecount > 0)
		VOP_ADD_WRITECOUNT(lowervp, -1);
	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}

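/*
 * Let the lower layer answer queries about which mount a write through
 * this vnode will modify, since the write is ultimately performed there.
 */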
/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
		VI_UNLOCK(vp);
		vholdl(lowervp);
		VI_UNLOCK(lowervp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

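/*
 * File handles are generated by the lower file system, so simply
 * delegate to it.
 */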
static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return VOP_VPTOFH(lvp, ap->a_fhp);
}

static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct ucred *cred = ap->a_cred;
	int error, locked;

	if (vp->v_type == VDIR)
		return (vop_stdvptocnp(ap));

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	vhold(lvp);
	VOP_UNLOCK(vp, 0); /* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	vref(lvp);
	error = vn_vptocnp(&ldvp, cred, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		return (ENOENT);
	}

	/*
	 * Exclusive lock is required by the insmntque1 call in
	 * null_nodeget().
	 */
	error = vn_lock(ldvp, LK_EXCLUSIVE);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		return (ENOENT);
	}
	vref(ldvp);
	error = null_nodeget(vp->v_mount, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp, 0); /* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_getattr =		null_getattr,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
	.vop_add_writecount =	null_add_writecount,
};