/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_vnops.c 262779 2014-03-05 04:23:19Z pfg $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include "opt_directio.h"
#include "opt_ffs.h"

#ifdef DIRECTIO
extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fsync_t	ffs_fsync;
static vop_lock1_t	ffs_lock;
static vop_getpages_t	ffs_getpages;
static vop_read_t	ffs_read;
static vop_write_t	ffs_write;
static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
		    struct ucred *cred);
static vop_strategy_t	ffsext_strategy;
static vop_closeextattr_t	ffs_closeextattr;
static vop_deleteextattr_t	ffs_deleteextattr;
static vop_getextattr_t	ffs_getextattr;
static vop_listextattr_t	ffs_listextattr;
static vop_openextattr_t	ffs_openextattr;
static vop_setextattr_t	ffs_setextattr;
static vop_vptofh_t	ffs_vptofh;

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops1 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_reallocblks =	ffs_reallocblks, /* XXX: really ??? */
	.vop_vptofh =		ffs_vptofh,
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops2 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_lock1 =		ffs_lock,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_strategy =		ffsext_strategy,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};

/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	int error;

	vp = ap->a_vp;
	bo = &vp->v_bufobj;
retry:
	error = ffs_syncvnode(vp, ap->a_waitfor, 0);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
		error = softdep_fsync(vp);
		if (error)
			return (error);

		/*
		 * The softdep_fsync() function may drop vp lock,
		 * allowing for dirty buffers to reappear on the
		 * bo_dirty list. Recheck and resync as needed.
		 */
		BO_LOCK(bo);
		if (vp->v_type == VREG && (bo->bo_numoutput > 0 ||
		    bo->bo_dirty.bv_cnt > 0)) {
			BO_UNLOCK(bo);
			goto retry;
		}
		BO_UNLOCK(bo);
	}
	return (0);
}

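/*
 * Write out all dirty buffers of a vnode and, unless NO_INO_UPDT is
 * requested, its inode.  (Summary of the logic below: an async pass is
 * made first, then sync and async passes alternate so that soft updates
 * dependencies -- one write per indirect level, one for the leaf, and
 * one for the inode -- can be retired a level at a time.)
 */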
int
ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
{
	struct inode *ip;
	struct bufobj *bo;
	struct buf *bp;
	struct buf *nbp;
	ufs_lbn_t lbn;
	int error, wait, passes;

	ip = VTOI(vp);
	ip->i_flag &= ~IN_NEEDSYNC;
	bo = &vp->v_bufobj;

	/*
	 * When doing MNT_WAIT we must first flush all dependencies
	 * on the inode.
	 */
	if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
	    (error = softdep_sync_metadata(vp)) != 0)
		return (error);

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	error = 0;
	passes = 0;
	wait = 0;	/* Always do an async pass first. */
	lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));
	BO_LOCK(bo);
loop:
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		/* Flush indirects in order. */
		if (waitfor == MNT_WAIT && bp->b_lblkno <= -NDADDR &&
		    lbn_level(bp->b_lblkno) >= passes)
			continue;
		if (bp->b_lblkno > lbn)
			panic("ffs_syncvnode: syncing truncated data.");
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * Check for dependencies and potentially complete them.
		 */
		if (!LIST_EMPTY(&bp->b_dep) &&
		    (error = softdep_sync_buf(vp, bp,
		    wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
			/* I/O error. */
			if (error != EBUSY) {
				BUF_UNLOCK(bp);
				return (error);
			}
			/* If we deferred once, don't defer again. */
			if ((bp->b_flags & B_DEFERRED) == 0) {
				bp->b_flags |= B_DEFERRED;
				BUF_UNLOCK(bp);
				goto next;
			}
		}
		if (wait) {
			bremfree(bp);
			if ((error = bwrite(bp)) != 0)
				return (error);
		} else if ((bp->b_flags & B_CLUSTEROK)) {
			(void) vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			(void) bawrite(bp);
		}
next:
		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		BO_LOCK(bo);
		nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
	}
	if (waitfor != MNT_WAIT) {
		BO_UNLOCK(bo);
		if ((flags & NO_INO_UPDT) != 0)
			return (0);
		else
			return (ffs_update(vp, 0));
	}
	/* Drain IO to see if we're done. */
	bufobj_wwait(bo, 0, 0);
	/*
	 * Block devices associated with filesystems may have new I/O
	 * requests posted for them even if the vnode is locked, so no
	 * amount of trying will get them clean.  We make several passes
	 * as a best effort.
	 *
	 * Regular files may need multiple passes to flush all dependency
	 * work as it is possible that we must write once per indirect
	 * level, once for the leaf, and once for the inode and each of
	 * these will be done with one sync and one async pass.
	 */
	if (bo->bo_dirty.bv_cnt > 0) {
		/* Write the inode after sync passes to flush deps. */
		if (wait && DOINGSOFTDEP(vp) && (flags & NO_INO_UPDT) == 0) {
			BO_UNLOCK(bo);
			ffs_update(vp, 1);
			BO_LOCK(bo);
		}
		/* switch between sync/async. */
		wait = !wait;
		if (wait == 1 || ++passes < NIADDR + 2)
			goto loop;
#ifdef INVARIANTS
		if (!vn_isdisk(vp, NULL))
			vprint("ffs_fsync: dirty", vp);
#endif
	}
	BO_UNLOCK(bo);
	error = 0;
	if ((flags & NO_INO_UPDT) == 0)
		error = ffs_update(vp, 1);
	if (DOINGSUJ(vp))
		softdep_journal_fsync(VTOI(vp));
	return (error);
}

static int
ffs_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
#ifndef NO_FFS_SNAPSHOT
	struct vnode *vp;
	int flags;
	struct lock *lkp;
	int result;

	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		vp = ap->a_vp;
		flags = ap->a_flags;
		for (;;) {
#ifdef DEBUG_VFS_LOCKS
			KASSERT(vp->v_holdcnt != 0,
			    ("ffs_lock %p: zero hold count", vp));
#endif
			lkp = vp->v_vnlock;
			result = _lockmgr_args(lkp, flags, VI_MTX(vp),
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept.  The lock currently held is not the
			 * right lock.  Release it, and try to get the
			 * new lock.
			 */
			(void) _lockmgr_args(lkp, LK_RELEASE, NULL,
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
			    (LK_INTERLOCK | LK_NOWAIT))
				return (EBUSY);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
		break;
	default:
		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
	}
	return (result);
#else
	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
#endif
}

/*
 * Vnode op for reading.
 */
static int
ffs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	ssize_t orig_resid;
	int error;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return (error);
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread_gb(vp, lbn, size, NOCRED,
			    GB_UNMAPPED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, blkoffset + uio->uio_resid,
			    seqcount, GB_UNMAPPED, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			u_int nextsize = blksize(fs, ip, nextlbn);
			error = breadn_flags(vp, lbn, size, &nextlbn,
			    &nextsize, 1, NOCRED, GB_UNMAPPED, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for. Interestingly, the same as
			 * the first option above.
			 */
			error = bread_gb(vp, lbn, size, NOCRED,
			    GB_UNMAPPED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		if ((bp->b_flags & B_UNMAPPED) == 0) {
			error = vn_io_fault_uiomove((char *)bp->b_data +
			    blkoffset, (int)xfersize, uio);
		} else {
			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
			    (int)xfersize, uio);
		}
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  For non-direct VMIO reads, the VM
			 * has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0 &&
	    (ip->i_flag & IN_ACCESS) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Vnode op for writing.
 */
static int
ffs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	ssize_t resid;
	int seqcount;
	int blkoffset, error, flags, ioflag, size, xfersize;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		    (int)uio->uio_offset, (int)uio->uio_resid);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
		return (EFBIG);

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;
	flags |= BA_UNMAPPED;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0) {
			vnode_pager_setsize(vp, ip->i_size);
			break;
		}
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		if ((bp->b_flags & B_UNMAPPED) == 0) {
			error = vn_io_fault_uiomove((char *)bp->b_data +
			    blkoffset, (int)xfersize, uio);
		} else {
			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
			    (int)xfersize, uio);
		}
		/*
		 * If the buffer is not already filled and we encounter an
		 * error while trying to fill it, we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland mmap.
		 *
		 * Note that we need only clear buffers with a transfer size
		 * equal to the block size because buffers with a shorter
		 * transfer size were cleared above by the call to UFS_BALLOC()
		 * with the BA_CLRBUF flag set.
		 *
		 * If the source region for uiomove identically mmaps the
		 * buffer, uiomove() performed the NOP copy, and the buffer
		 * content remains valid because the page fault handler
		 * validated the pages.
		 */
		if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
		    fs->fs_bsize == xfersize)
			vfs_bio_clrbuf(bp);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount,
				    GB_UNMAPPED);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			DIP_SET(ip, i_mode, ip->i_mode);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * get page routine
 */
static int
ffs_getpages(ap)
	struct vop_getpages_args *ap;
{
	int i;
	vm_page_t mreq;
	int pcount;

	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];

	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.  Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page.  We have to zero that data.
	 */
	VM_OBJECT_WLOCK(mreq->object);
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_lock(ap->a_m[i]);
				vm_page_free(ap->a_m[i]);
				vm_page_unlock(ap->a_m[i]);
			}
		}
		VM_OBJECT_WUNLOCK(mreq->object);
		return (VM_PAGER_OK);
	}
	VM_OBJECT_WUNLOCK(mreq->object);

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage));
}

/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	ssize_t orig_resid;
	int error;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			u_int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  For non-direct VMIO reads, the VM
			 * has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}
	return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	ssize_t resid;
	int blkoffset, error, flags, size, xfersize;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		if (uio->uio_offset + xfersize > dp->di_extsize)
			dp->di_extsize = uio->uio_offset + xfersize;

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    xfersize + blkoffset == fs->fs_bsize ||
			    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			dp->di_mode = ip->i_mode;
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag & IO_SYNC), ucred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
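/*
 * As parsed below (and built by ffs_setextattr()), each EA record is:
 * a 32-bit total record length, one byte of namespace, one byte giving
 * the length of the trailing data padding (eapad2), one byte of name
 * length, the name itself, zero padding to an 8-byte boundary (eapad1),
 * the data, and eapad2 bytes of trailing padding.
 */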
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    u_char **eap, u_char **eac)
{
	u_char *p, *pe, *pn, *p0;
	int eapad1, eapad2, ealength, ealen, nlen;
	uint32_t ul;

	pe = ptr + length;
	nlen = strlen(name);

	for (p = ptr; p < pe; p = pn) {
		p0 = p;
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		/* make sure this entry is complete */
		if (pn > pe)
			break;
		p += sizeof(uint32_t);
		if (*p != nspace)
			continue;
		p++;
		eapad2 = *p++;
		if (*p != nlen)
			continue;
		p++;
		if (bcmp(p, name, nlen))
			continue;
		ealength = sizeof(uint32_t) + 3 + nlen;
		eapad1 = 8 - (ealength % 8);
		if (eapad1 == 8)
			eapad1 = 0;
		ealength += eapad1;
		ealen = ul - ealength - eapad2;
		p += nlen + eapad1;
		if (eap != NULL)
			*eap = p0;
		if (eac != NULL)
			*eac = p;
		return (ealen);
	}
	return (-1);
}

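/*
 * Read the extended attribute area of an inode into a buffer allocated
 * with malloc(9), leaving 'extra' bytes of headroom for the caller.
 */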
static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct uio luio;
	struct iovec liovec;
	u_int easize;
	int error;
	u_char *eae;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;
	easize = dp->di_extsize;
	if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
		return (EFBIG);

	eae = malloc(easize + extra, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	*p = eae;
	return (0);
}

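/*
 * Serialize access to the in-core extended attribute area.  The "lock"
 * is the IN_EA_LOCKED flag, protected by the vnode interlock; waiters
 * sleep on i_ea_refs until ffs_unlock_ea() wakes them.
 */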
static void
ffs_lock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	while (ip->i_flag & IN_EA_LOCKED) {
		ip->i_flag |= IN_EA_LOCKWAIT;
		msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
		    0);
	}
	ip->i_flag |= IN_EA_LOCKED;
	VI_UNLOCK(vp);
}

static void
ffs_unlock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	if (ip->i_flag & IN_EA_LOCKWAIT)
		wakeup(&ip->i_ea_refs);
	ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
	VI_UNLOCK(vp);
}

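/*
 * Acquire a reference on the in-core extended attribute area, reading
 * it in from disk on first use.  Each successful ffs_open_ea() is paired
 * with an ffs_close_ea(), which drops the reference and, on the last
 * close, frees (and optionally commits) the area.
 */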
static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area != NULL) {
		ip->i_ea_refs++;
		ffs_unlock_ea(vp);
		return (0);
	}
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
	if (error) {
		ffs_unlock_ea(vp);
		return (error);
	}
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	ip->i_ea_refs++;
	ffs_unlock_ea(vp);
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area == NULL) {
		ffs_unlock_ea(vp);
		return (EINVAL);
	}
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred);
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	if (--ip->i_ea_refs == 0) {
		free(ip->i_ea_area, M_TEMP);
		ip->i_ea_area = NULL;
		ip->i_ea_len = 0;
		ip->i_ea_error = 0;
	}
	ffs_unlock_ea(vp);
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
	    lbn < 0 && lbn >= -NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction start.
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed here, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealength = eapad1 = ealen = eapad2 = 0;

	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &p, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	bcopy(p, &ul, sizeof ul);
	i = p - eae + ul;
	if (ul != ealength) {
		bcopy(p + ul, p + ealength, easize - i);
		easize += (ealength - ul);
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;

	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p, *pe, *pn;
	unsigned easize;
	uint32_t ul;
	int error, ealen;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;
	pe = eae + easize;
	for (p = eae; error == 0 && p < pe; p = pn) {
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		if (pn > pe)
			break;
		p += sizeof(ul);
		if (*p++ != ap->a_attrnamespace)
			continue;
		p++;	/* pad2 */
		ealen = *p;
		if (ap->a_size != NULL) {
			*ap->a_size += ealen + 1;
		} else if (ap->a_uio != NULL) {
			error = uiomove(p, ealen + 1, ap->a_uio);
		}
	}
	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to set a named attribute.
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	ssize_t ealen;
	int olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	/* XXX Now unsupported API to delete EAs using NULL uio. */
	if (ap->a_uio == NULL)
		return (EOPNOTSUPP);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	ealen = ap->a_uio->uio_resid;
	if (ealen < 0 || ealen > lblktosize(fs, NXADDR))
		return (EINVAL);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed here, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
	eapad1 = 8 - (ealength % 8);
	if (eapad1 == 8)
		eapad1 = 0;
	eapad2 = 8 - (ealen % 8);
	if (eapad2 == 8)
		eapad2 = 0;
	ealength += eapad1 + ealen + eapad2;

	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize,
	    ap->a_attrnamespace, ap->a_name, &p, NULL);
	if (olen == -1) {
		/* new, append at end */
		p = eae + easize;
		easize += ealength;
	} else {
		bcopy(p, &ul, sizeof ul);
		i = p - eae + ul;
		if (ul != ealength) {
			bcopy(p + ul, p + ealength, easize - i);
			easize += (ealength - ul);
		}
	}
	if (easize > lblktosize(fs, NXADDR)) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	bcopy(&ealength, p, sizeof(ealength));
	p += sizeof(ealength);
	*p++ = ap->a_attrnamespace;
	*p++ = eapad2;
	*p++ = strlen(ap->a_name);
	strcpy(p, ap->a_name);
	p += strlen(ap->a_name);
	bzero(p, eapad1);
	p += eapad1;
	error = uiomove(p, ealen, ap->a_uio);
	if (error) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}
	p += ealen;
	bzero(p, eapad2);

	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode pointer to File handle
 */
static int
ffs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
	IN struct vnode *a_vp;
	IN struct fid *a_fhp;
};
*/
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(ap->a_vp);
	ufhp = (struct ufid *)ap->a_fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}