zvol.c revision 265678
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/zvol/dsk/<pool_name>/<dataset_name>
 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * FreeBSD notes.
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include <geom/geom.h>

#include "zfs_namecheck.h"

struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * The spa_namespace_lock protects the zfsdev_state structure from being
 * modified while it's being used, e.g. an open that comes in before a
 * create finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static uint32_t zvol_minors;
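
/*
 * The open/close paths below must cope with the caller possibly holding
 * spa_namespace_lock already (e.g. a pool being imported on top of a local
 * zvol).  A minimal sketch of that pattern, mirroring zvol_open() below:
 */
#if 0	/* illustrative sketch only, not compiled */
	boolean_t locked = B_FALSE;

	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}
	/* ... look up and manipulate zvol state ... */
	if (locked)
		mutex_exit(&spa_namespace_lock);
#endif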

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int	volmode = ZFS_VOLMODE_GEOM;
TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
    "Expose as GEOM providers (1), device files (2) or neither");

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	LIST_ENTRY(zvol_state)	zv_links;
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	struct cdev	*zv_dev;	/* non-GEOM device */
	struct g_provider *zv_provider;	/* GEOM provider */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
	int		zv_state;
	int		zv_volmode;	/* Provide GEOM or cdev */
	struct bio_queue_head zv_queue;
	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
} zvol_state_t;

static LIST_HEAD(, zvol_state) all_zvols;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

static d_open_t		zvol_d_open;
static d_close_t	zvol_d_close;
static d_read_t		zvol_read;
static d_write_t	zvol_write;
static d_ioctl_t	zvol_d_ioctl;
static d_strategy_t	zvol_strategy;

static struct cdevsw zvol_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	zvol_d_open,
	.d_close =	zvol_d_close,
	.d_read =	zvol_read,
	.d_write =	zvol_write,
	.d_ioctl =	zvol_d_ioctl,
	.d_strategy =	zvol_strategy,
	.d_name =	"zvol",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
    uint64_t len, boolean_t sync);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);

static void
zvol_size_changed(zvol_state_t *zv)
{
#ifdef sun
	dev_t dev = makedevice(maj, min);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
#else	/* !sun */
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct g_provider *pp;

		pp = zv->zv_provider;
		if (pp == NULL)
			return;
		g_topology_lock();
		g_resize_provider(pp, zv->zv_volsize);
		g_topology_unlock();
	}
#endif	/* !sun */
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
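
/*
 * A minimal sketch of how a caller creating a volume would pair the two
 * validators above (hypothetical values):
 */
#if 0	/* illustrative sketch only, not compiled */
	uint64_t volblocksize = 8192;		/* must be a power of 2 */
	uint64_t volsize = 1024 * volblocksize;	/* must be a multiple */

	if (zvol_check_volblocksize(volblocksize) != 0 ||
	    zvol_check_volsize(volsize, volblocksize) != 0)
		return (SET_ERROR(EINVAL));
#endif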

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	LIST_FOREACH(zv, &all_zvols, zv_links) {
		if (strcmp(zv->zv_name, name) == 0)
			break;
	}

	return (zv);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}
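
/*
 * Worked example of the coalescing test above, assuming an 8K
 * volblocksize: if the tail extent starts at DVA offset 0x10000 with
 * ze_nblks == 2, the next block coalesces only if it lives on the same
 * vdev at offset 0x10000 + 2 * 8192 == 0x14000; anything else starts a
 * new extent.
 */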

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while ((ze = list_head(&zv->zv_extents)) != NULL) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}
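
/*
 * Worked example of the rounding above: with an 8K block (blocksize ==
 * 8192), a record for offset 12288, length 4096 is widened to offset
 * 8192 (12288 - 12288 % 8192) and length 8192, so the whole
 * dmu_sync()'d block is rewritten.
 */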

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};
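
/*
 * zil_replay() indexes this table by each record's transaction type, so
 * replaying one zvol log record conceptually reduces to the following
 * (sketch; lr and byteswap come from the log record being replayed):
 */
#if 0	/* illustrative sketch only, not compiled */
	error = zvol_replay_vector[lr->lrc_txtype](zv, lr, byteswap);
#endif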

#ifdef sun
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&spa_namespace_lock);
	return (zv ? 0 : -1);
}
#endif	/* sun */

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	struct cdev *dev;
	struct g_provider *pp;
	struct g_geom *gp;
	dmu_object_info_t doi;
	uint64_t volsize, mode;
	int error;

	ZFS_LOG(1, "Creating ZVOL %s...", name);

	mutex_enter(&spa_namespace_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

#ifdef sun
	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !sun */

	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
	zv->zv_state = 0;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		kmem_free(zv, sizeof(*zv));
		/* disown tag must match the dmu_objset_own() tag above */
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}
	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
		mode = volmode;

	DROP_GIANT();
	zv->zv_volsize = volsize;
	zv->zv_volmode = mode;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
		gp->start = zvol_geom_start;
		gp->access = zvol_geom_access;
		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = zv->zv_volsize;
		pp->private = zv;

		zv->zv_provider = pp;
		bioq_init(&zv->zv_queue);
		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
		    0640, "%s/%s", ZVOL_DRIVER, name) != 0) {
			kmem_free(zv, sizeof(*zv));
			dmu_objset_disown(os, FTAG);
			mutex_exit(&spa_namespace_lock);
			return (SET_ERROR(ENXIO));
		}
		zv->zv_dev = dev;
		dev->si_iosize_max = MAXPHYS;
		dev->si_drv2 = zv;
	}
	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
#endif	/* !sun */

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&spa_namespace_lock);

#ifndef sun
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		zvol_geom_run(zv);
		g_topology_unlock();
	}
	PICKUP_GIANT();
#endif

	ZFS_LOG(1, "ZVOL %s created.", name);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef sun
	minor_t minor = zv->zv_minor;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

#ifdef sun
	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);
#else
	LIST_REMOVE(zv, zv_links);
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		zvol_geom_destroy(zv);
		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV)
		destroy_dev(zv->zv_dev);
#endif	/* sun */

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof(*zv));

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&spa_namespace_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&spa_namespace_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);	/* shouldn't happen; fires on DEBUG */
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	zvol_size_changed(zv);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

#ifdef sun
int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
#endif	/* sun */

static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv, *tzv;
	size_t namelen;

	namelen = strlen(name);

	DROP_GIANT();
	mutex_enter(&spa_namespace_lock);

	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		     zv->zv_name[namelen] == '/')) {
			(void) zvol_remove_zv(zv);
		}
	}

	mutex_exit(&spa_namespace_lock);
	PICKUP_GIANT();
}

int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;
	uint64_t readonly;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)
		goto out;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly) {
		error = SET_ERROR(EROFS);
		goto out;
	}

	error = zvol_update_volsize(os, volsize);
	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.
	 */
	if (zv && error == 0) {
#ifdef ZVOL_DUMP
		if (zv->zv_flags & ZVOL_DUMPIFIED) {
			old_volsize = zv->zv_volsize;
			zv->zv_volsize = volsize;
			if ((error = zvol_dumpify(zv)) != 0 ||
			    (error = dumpvp_resize()) != 0) {
				(void) zvol_update_volsize(os, old_volsize);
				zv->zv_volsize = old_volsize;
				error = zvol_dumpify(zv);
			}
		}
#endif	/* ZVOL_DUMP */
		if (error == 0) {
			zv->zv_volsize = volsize;
			zvol_size_changed(zv);
		}
	}

#ifdef sun
	/*
	 * Generate a LUN expansion event.
	 */
	if (zv && error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif	/* sun */

out:
	dmu_objset_rele(os, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (error);
}

/*ARGSUSED*/
static int
zvol_open(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int err = 0;
	boolean_t locked = B_FALSE;

	/*
	 * Protect against recursively entering spa_namespace_lock
	 * when spa_open() is used for a pool backed by (local) ZVOLs.
	 * This is needed since we replaced the upstream zfsdev_state_lock
	 * with spa_namespace_lock in the ZVOL code.
	 * We are using the same trick as spa_open().
	 * Note that calls in zvol_first_open which need to resolve
	 * a pool name to a spa object will enter spa_open()
	 * recursively, but that function already has all the
	 * necessary protection.
	 */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0) {
		err = zvol_first_open(zv);
		if (err) {
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (err);
		}
		pp->mediasize = zv->zv_volsize;
		pp->stripeoffset = 0;
		pp->stripesize = zv->zv_volblocksize;
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
#ifdef FEXCL
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}
#endif

	zv->zv_total_opens += count;
	if (locked)
		mutex_exit(&spa_namespace_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	if (locked)
		mutex_exit(&spa_namespace_lock);
	return (err);
}

/*ARGSUSED*/
static int
zvol_close(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t locked = B_FALSE;

	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_total_opens -= count;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	if (locked)
		mutex_exit(&spa_namespace_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
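
/*
 * Rough decision summary for zvol_log_write() below (assuming the
 * latency logbias default, so immediate_write_sz == 32K):
 *
 *	whole-block, block-aligned write,
 *	    blocksize > 32K, no slog	-> WR_INDIRECT (via dmu_sync)
 *	synchronous write		-> WR_COPIED (data stored in itx)
 *	otherwise			-> WR_NEED_COPY (copied at commit)
 */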

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

#ifdef sun
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}
#endif	/* sun */

void
zvol_strategy(struct bio *bp)
{
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = 0;
	boolean_t is_dumpified;
	boolean_t sync;

	if (bp->bio_to)
		zv = bp->bio_to->private;
	else
		zv = bp->bio_dev->si_drv2;

	if (zv == NULL) {
		error = ENXIO;
		goto out;
	}

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		error = EROFS;
		goto out;
	}

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		goto sync;
	case BIO_READ:
		doread = 1;
		/* FALLTHROUGH */
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		error = EOPNOTSUPP;
		goto out;
	}

	off = bp->bio_offset;
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		error = EIO;
		goto out;
	}

#ifdef illumos
	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
#else
	is_dumpified = B_FALSE;
#endif
	sync = !doread && !is_dumpified &&
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	if (bp->bio_cmd == BIO_DELETE) {
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, off, resid, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    off, resid);
			resid = 0;
		}
		goto unlock;
	}

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
#ifdef illumos
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
#else
		if (doread) {
#endif
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
unlock:
	zfs_range_unlock(rl);

	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length && off > volsize)
		error = EINVAL;

	if (sync) {
sync:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	}
out:
	if (bp->bio_to)
		g_io_deliver(bp, error);
	else
		biofinish(bp, NULL, error);
}

#ifdef sun
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else
int
zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

#ifdef sun
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}
#endif

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

#ifdef sun
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else
int
zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

#ifdef sun
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}
#endif

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

#ifdef sun
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */
#endif	/* sun */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

#ifdef sun
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&spa_namespace_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&spa_namespace_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&spa_namespace_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&spa_namespace_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&spa_namespace_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller really wants synchronous writes, and
			 * can't wait for them, don't return until the write
			 * is done.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&spa_namespace_lock);
	return (error);
}
#endif	/* sun */
2030
2031int
2032zvol_busy(void)
2033{
2034	return (zvol_minors != 0);
2035}
2036
2037void
2038zvol_init(void)
2039{
2040	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
2041	    1) == 0);
2042	ZFS_LOG(1, "ZVOL Initialized.");
2043}
2044
2045void
2046zvol_fini(void)
2047{
2048	ddi_soft_state_fini(&zfsdev_state);
2049	ZFS_LOG(1, "ZVOL Deinitialized.");
2050}
2051
2052#ifdef sun
2053/*ARGSUSED*/
2054static int
2055zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2056{
2057	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2058
2059	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2060		return (1);
2061	return (0);
2062}
2063
2064/*ARGSUSED*/
2065static void
2066zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2067{
2068	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2069
2070	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2071}
2072
2073static int
2074zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2075{
2076	dmu_tx_t *tx;
2077	int error;
2078	objset_t *os = zv->zv_objset;
2079	spa_t *spa = dmu_objset_spa(os);
2080	vdev_t *vd = spa->spa_root_vdev;
2081	nvlist_t *nv = NULL;
2082	uint64_t version = spa_version(spa);
2083	enum zio_checksum checksum;
2084
2085	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2086	ASSERT(vd->vdev_ops == &vdev_root_ops);
2087
2088	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2089	    DMU_OBJECT_END);
2090	/* wait for dmu_free_long_range to actually free the blocks */
2091	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2092
2093	/*
2094	 * If the pool on which the dump device is being initialized has more
2095	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2096	 * enabled.  If so, bump that feature's counter to indicate that the
2097	 * feature is active. We also check the vdev type to handle the
2098	 * following case:
2099	 *   # zpool create test raidz disk1 disk2 disk3
	 *   Now spa_root_vdev->vdev_children == 1 (the raidz vdev), and
	 *   the raidz vdev itself has 3 children.
2102	 */
2103	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
2104		if (!spa_feature_is_enabled(spa,
2105		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2106			return (SET_ERROR(ENOTSUP));
2107		(void) dsl_sync_task(spa_name(spa),
2108		    zfs_mvdev_dump_feature_check,
2109		    zfs_mvdev_dump_activate_feature_sync, NULL, 2);
2110	}
2111
2112	tx = dmu_tx_create(os);
2113	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2114	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2115	error = dmu_tx_assign(tx, TXG_WAIT);
2116	if (error) {
2117		dmu_tx_abort(tx);
2118		return (error);
2119	}
2120
2121	/*
2122	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2123	 * function.  Otherwise, use the old default -- OFF.
2124	 */
2125	checksum = spa_feature_is_active(spa,
2126	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2127	    ZIO_CHECKSUM_OFF;
2128
	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the new volume size.
	 * Otherwise, we save off the zvol's original properties so
	 * that we can restore them if the zvol is ever undumpified.
	 */
2135	if (resize) {
2136		error = zap_update(os, ZVOL_ZAP_OBJ,
2137		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2138		    &zv->zv_volsize, tx);
2139	} else {
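		/*
		 * Note: these locals deliberately shadow the function-level
		 * 'checksum'; here we capture the dataset's original
		 * property values so zvol_dump_fini() can restore them.
		 */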
2140		uint64_t checksum, compress, refresrv, vbs, dedup;
2141
2142		error = dsl_prop_get_integer(zv->zv_name,
2143		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2144		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2145		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
2146		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2147		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
2148		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2149		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
2150		if (version >= SPA_VERSION_DEDUP) {
2151			error = error ? error :
2152			    dsl_prop_get_integer(zv->zv_name,
2153			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2154		}
2155
2156		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2157		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2158		    &compress, tx);
2159		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2160		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
2161		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2162		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2163		    &refresrv, tx);
2164		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2165		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2166		    &vbs, tx);
2167		error = error ? error : dmu_object_set_blocksize(
2168		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
2169		if (version >= SPA_VERSION_DEDUP) {
2170			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2171			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2172			    &dedup, tx);
2173		}
2174		if (error == 0)
2175			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
2176	}
2177	dmu_tx_commit(tx);
2178
	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
2183	if (!resize) {
2184		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2185		VERIFY(nvlist_add_uint64(nv,
2186		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2187		VERIFY(nvlist_add_uint64(nv,
2188		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2189		    ZIO_COMPRESS_OFF) == 0);
2190		VERIFY(nvlist_add_uint64(nv,
2191		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2192		    checksum) == 0);
2193		if (version >= SPA_VERSION_DEDUP) {
2194			VERIFY(nvlist_add_uint64(nv,
2195			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2196			    ZIO_CHECKSUM_OFF) == 0);
2197		}
2198
2199		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2200		    nv, NULL);
2201		nvlist_free(nv);
2202
2203		if (error)
2204			return (error);
2205	}
2206
2207	/* Allocate the space for the dump */
2208	error = zvol_prealloc(zv);
2209	return (error);
2210}
2211
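/*
 * Convert a zvol into a dump device.  If the on-disk dump size doesn't
 * match the current volume size, the dump area is (re)initialized; the
 * LBA mapping is then rebuilt and ZVOL_DUMPSIZE is recorded in the ZAP.
 * Any failure along the way undoes the partial work via zvol_dump_fini().
 */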
2212static int
2213zvol_dumpify(zvol_state_t *zv)
2214{
2215	int error = 0;
2216	uint64_t dumpsize = 0;
2217	dmu_tx_t *tx;
2218	objset_t *os = zv->zv_objset;
2219
2220	if (zv->zv_flags & ZVOL_RDONLY)
2221		return (SET_ERROR(EROFS));
2222
2223	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2224	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2225		boolean_t resize = (dumpsize > 0);
2226
2227		if ((error = zvol_dump_init(zv, resize)) != 0) {
2228			(void) zvol_dump_fini(zv);
2229			return (error);
2230		}
2231	}
2232
	/*
	 * Build up our LBA mapping.
	 */
2236	error = zvol_get_lbas(zv);
2237	if (error) {
2238		(void) zvol_dump_fini(zv);
2239		return (error);
2240	}
2241
2242	tx = dmu_tx_create(os);
2243	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2244	error = dmu_tx_assign(tx, TXG_WAIT);
2245	if (error) {
2246		dmu_tx_abort(tx);
2247		(void) zvol_dump_fini(zv);
2248		return (error);
2249	}
2250
2251	zv->zv_flags |= ZVOL_DUMPIFIED;
2252	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2253	    &zv->zv_volsize, tx);
2254	dmu_tx_commit(tx);
2255
2256	if (error) {
2257		(void) zvol_dump_fini(zv);
2258		return (error);
2259	}
2260
2261	txg_wait_synced(dmu_objset_pool(os), 0);
2262	return (0);
2263}
2264
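/*
 * Restore a dumpified zvol to its pre-dump state: drop ZVOL_DUMPSIZE,
 * reapply the property values saved by zvol_dump_init(), free the
 * preallocated blocks and restore the original block size.
 */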
2265static int
2266zvol_dump_fini(zvol_state_t *zv)
2267{
2268	dmu_tx_t *tx;
2269	objset_t *os = zv->zv_objset;
2270	nvlist_t *nv;
2271	int error = 0;
2272	uint64_t checksum, compress, refresrv, vbs, dedup;
2273	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2274
	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt, as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (e.g. if there was an error during zvol_dump_init).
	 */
2281
2282	tx = dmu_tx_create(os);
2283	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2284	error = dmu_tx_assign(tx, TXG_WAIT);
2285	if (error) {
2286		dmu_tx_abort(tx);
2287		return (error);
2288	}
2289	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2290	dmu_tx_commit(tx);
2291
2292	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2293	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2294	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2295	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2296	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2297	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2298	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2299	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2300
2301	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2302	(void) nvlist_add_uint64(nv,
2303	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2304	(void) nvlist_add_uint64(nv,
2305	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2306	(void) nvlist_add_uint64(nv,
2307	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2308	if (version >= SPA_VERSION_DEDUP &&
2309	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2310	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2311		(void) nvlist_add_uint64(nv,
2312		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2313	}
2314	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2315	    nv, NULL);
2316	nvlist_free(nv);
2317
2318	zvol_free_extents(zv);
2319	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2320	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2321	/* wait for dmu_free_long_range to actually free the blocks */
2322	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2323	tx = dmu_tx_create(os);
2324	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2325	error = dmu_tx_assign(tx, TXG_WAIT);
2326	if (error) {
2327		dmu_tx_abort(tx);
2328		return (error);
2329	}
2330	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2331		zv->zv_volblocksize = vbs;
2332	dmu_tx_commit(tx);
2333
2334	return (0);
2335}
2336#endif	/* sun */
2337
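/*
 * Mark the GEOM provider usable and start the worker thread that
 * services bios queued by zvol_geom_start().
 */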
2338static void
2339zvol_geom_run(zvol_state_t *zv)
2340{
2341	struct g_provider *pp;
2342
2343	pp = zv->zv_provider;
2344	g_error_provider(pp, 0);
2345
2346	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2347	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2348}
2349
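/*
 * Tear down the GEOM provider: ask the worker thread to exit, wait
 * for it, then wither the geom so GEOM forgets about us.
 */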
2350static void
2351zvol_geom_destroy(zvol_state_t *zv)
2352{
2353	struct g_provider *pp;
2354
2355	g_topology_assert();
2356
2357	mtx_lock(&zv->zv_queue_mtx);
2358	zv->zv_state = 1;
2359	wakeup_one(&zv->zv_queue);
2360	while (zv->zv_state != 2)
2361		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2362	mtx_destroy(&zv->zv_queue_mtx);
2363
2364	pp = zv->zv_provider;
2365	zv->zv_provider = NULL;
2366	pp->private = NULL;
2367	g_wither_geom(pp->geom, ENXIO);
2368}
2369
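/*
 * GEOM access method.  The acr/acw/ace arguments are deltas to the
 * provider's read, write and exclusive access counts; a positive sum
 * is translated into zvol_open() and a negative one into zvol_close().
 */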
2370static int
2371zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2372{
2373	int count, error, flags;
2374
2375	g_topology_assert();
2376
	/*
	 * To keep things simple we expect either an open or a close, but
	 * not both at the same time.
	 */
2381	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2382	    (acr <= 0 && acw <= 0 && ace <= 0),
2383	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2384	    pp->name, acr, acw, ace));
2385
2386	if (pp->private == NULL) {
2387		if (acr <= 0 && acw <= 0 && ace <= 0)
2388			return (0);
2389		return (pp->error);
2390	}
2391
	/*
	 * We don't pass the FEXCL flag to zvol_open()/zvol_close() if
	 * ace != 0, because GEOM already handles that and handles it a bit
	 * differently.  GEOM allows for multiple read/exclusive consumers,
	 * while ZFS allows only one exclusive consumer, no matter whether
	 * it is a reader or a writer.  The GEOM behavior is preferable
	 * here, so we leave the decision to GEOM.
	 */
2400
2401	count = acr + acw + ace;
2402	if (count == 0)
2403		return (0);
2404
2405	flags = 0;
2406	if (acr != 0 || ace != 0)
2407		flags |= FREAD;
2408	if (acw != 0)
2409		flags |= FWRITE;
2410
2411	g_topology_unlock();
2412	if (count > 0)
2413		error = zvol_open(pp, flags, count);
2414	else
2415		error = zvol_close(pp, flags, -count);
2416	g_topology_lock();
2417	return (error);
2418}
2419
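/*
 * GEOM start method.  Bios that can be served in this context are
 * handled inline; if the calling thread can't sleep they are queued
 * for the per-volume worker thread instead.
 */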
2420static void
2421zvol_geom_start(struct bio *bp)
2422{
2423	zvol_state_t *zv;
2424	boolean_t first;
2425
2426	zv = bp->bio_to->private;
2427	ASSERT(zv != NULL);
2428	switch (bp->bio_cmd) {
2429	case BIO_FLUSH:
2430		if (!THREAD_CAN_SLEEP())
2431			goto enqueue;
2432		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2433		g_io_deliver(bp, 0);
2434		break;
2435	case BIO_READ:
2436	case BIO_WRITE:
2437	case BIO_DELETE:
2438		if (!THREAD_CAN_SLEEP())
2439			goto enqueue;
2440		zvol_strategy(bp);
2441		break;
2442	case BIO_GETATTR:
2443		if (g_handleattr_int(bp, "GEOM::candelete", 1))
2444			return;
2445		/* FALLTHROUGH */
2446	default:
2447		g_io_deliver(bp, EOPNOTSUPP);
2448		break;
2449	}
2450	return;
2451
2452enqueue:
2453	mtx_lock(&zv->zv_queue_mtx);
2454	first = (bioq_first(&zv->zv_queue) == NULL);
2455	bioq_insert_tail(&zv->zv_queue, bp);
2456	mtx_unlock(&zv->zv_queue_mtx);
2457	if (first)
2458		wakeup_one(&zv->zv_queue);
2459}
2460
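/*
 * Per-volume worker thread: drain the bio queue, exiting once
 * zvol_geom_destroy() sets zv_state to 1.
 */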
2461static void
2462zvol_geom_worker(void *arg)
2463{
2464	zvol_state_t *zv;
2465	struct bio *bp;
2466
2467	thread_lock(curthread);
2468	sched_prio(curthread, PRIBIO);
2469	thread_unlock(curthread);
2470
2471	zv = arg;
2472	for (;;) {
2473		mtx_lock(&zv->zv_queue_mtx);
2474		bp = bioq_takefirst(&zv->zv_queue);
2475		if (bp == NULL) {
2476			if (zv->zv_state == 1) {
2477				zv->zv_state = 2;
2478				wakeup(&zv->zv_state);
2479				mtx_unlock(&zv->zv_queue_mtx);
2480				kthread_exit();
2481			}
2482			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2483			    "zvol:io", 0);
2484			continue;
2485		}
2486		mtx_unlock(&zv->zv_queue_mtx);
2487		switch (bp->bio_cmd) {
2488		case BIO_FLUSH:
2489			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2490			g_io_deliver(bp, 0);
2491			break;
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			zvol_strategy(bp);
			break;
2496		}
2497	}
2498}
2499
2500extern boolean_t dataset_name_hidden(const char *name);
2501
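/*
 * Create a /dev minor for every snapshot of the given zvol dataset.
 */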
2502static int
2503zvol_create_snapshots(objset_t *os, const char *name)
2504{
2505	uint64_t cookie, obj;
2506	char *sname;
2507	int error, len;
2508
2509	cookie = obj = 0;
2510	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2511
2512#if 0
2513	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2514	    DS_FIND_SNAPSHOTS);
2515#endif
2516
2517	for (;;) {
		len = snprintf(sname, MAXPATHLEN, "%s@", name);
		if (len >= MAXPATHLEN) {
			/*
			 * The caller still holds the objset; dropping it
			 * here as well would release it twice.
			 */
			error = SET_ERROR(ENAMETOOLONG);
			break;
		}
2524
2525		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2526		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2527		    sname + len, &obj, &cookie, NULL);
2528		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2529		if (error != 0) {
2530			if (error == ENOENT)
2531				error = 0;
2532			break;
2533		}
2534
2535		if ((error = zvol_create_minor(sname)) != 0) {
2536			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2537			    sname, error);
2538			break;
2539		}
2540	}
2541
2542	kmem_free(sname, MAXPATHLEN);
2543	return (error);
2544}
2545
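/*
 * Recursively create minors: a single minor (plus snapshot minors)
 * for a zvol dataset, or a walk over the children of a filesystem
 * dataset.
 */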
2546int
2547zvol_create_minors(const char *name)
2548{
2549	uint64_t cookie;
2550	objset_t *os;
2551	char *osname, *p;
2552	int error, len;
2553
2554	if (dataset_name_hidden(name))
2555		return (0);
2556
2557	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2558		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2559		    name, error);
2560		return (error);
2561	}
2562	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2563		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
2564		dsl_pool_rele(dmu_objset_pool(os), FTAG);
2565		if ((error = zvol_create_minor(name)) == 0)
2566			error = zvol_create_snapshots(os, name);
2567		else {
2568			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2569			    name, error);
2570		}
2571		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
2572		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
2573		return (error);
2574	}
2575	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2576		dmu_objset_rele(os, FTAG);
2577		return (0);
2578	}
2579
2580	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2581	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2582		dmu_objset_rele(os, FTAG);
2583		kmem_free(osname, MAXPATHLEN);
		return (SET_ERROR(ENAMETOOLONG));
2585	}
2586	p = osname + strlen(osname);
2587	len = MAXPATHLEN - (p - osname);
2588
2589#if 0
2590	/* Prefetch the datasets. */
2591	cookie = 0;
2592	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2593		if (!dataset_name_hidden(osname))
2594			(void) dmu_objset_prefetch(osname, NULL);
2595	}
2596#endif
2597
2598	cookie = 0;
2599	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2600	    &cookie) == 0) {
2601		dmu_objset_rele(os, FTAG);
		(void) zvol_create_minors(osname);
2603		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2604			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2605			    name, error);
2606			return (error);
2607		}
2608	}
2609
2610	dmu_objset_rele(os, FTAG);
2611	kmem_free(osname, MAXPATHLEN);
2612	return (0);
2613}
2614
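/*
 * Re-create the GEOM provider or character device under the new name.
 * The old provider is withered (GEOM) or destroyed (cdev) first, so
 * outstanding consumers see ENXIO rather than a stale name.
 */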
2615static void
2616zvol_rename_minor(zvol_state_t *zv, const char *newname)
2617{
2618	struct g_geom *gp;
2619	struct g_provider *pp;
2620	struct cdev *dev;
2621
2622	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2623
2624	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
2625		g_topology_lock();
2626		pp = zv->zv_provider;
2627		ASSERT(pp != NULL);
2628		gp = pp->geom;
2629		ASSERT(gp != NULL);
2630
2631		zv->zv_provider = NULL;
2632		g_wither_provider(pp, ENXIO);
2633
2634		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2635		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
2636		pp->sectorsize = DEV_BSIZE;
2637		pp->mediasize = zv->zv_volsize;
2638		pp->private = zv;
2639		zv->zv_provider = pp;
2640		g_error_provider(pp, 0);
2641		g_topology_unlock();
2642	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
2643		dev = zv->zv_dev;
2644		ASSERT(dev != NULL);
2645		zv->zv_dev = NULL;
2646		destroy_dev(dev);
2647
2648		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
2649		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
2650		    0640, "%s/%s", ZVOL_DRIVER, newname) == 0) {
2651			zv->zv_dev = dev;
2652			dev->si_iosize_max = MAXPHYS;
2653			dev->si_drv2 = zv;
2654		}
2655	}
2656	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2657}
2658
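/*
 * Rename every minor whose dataset name equals oldname or lives
 * beneath it ('/' child or '@' snapshot), preserving the suffix.
 */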
2659void
2660zvol_rename_minors(const char *oldname, const char *newname)
2661{
2662	char name[MAXPATHLEN];
2663	struct g_provider *pp;
2664	struct g_geom *gp;
2665	size_t oldnamelen, newnamelen;
2666	zvol_state_t *zv;
2667	char *namebuf;
2668
2669	oldnamelen = strlen(oldname);
2670	newnamelen = strlen(newname);
2671
2672	DROP_GIANT();
2673	mutex_enter(&spa_namespace_lock);
2674
2675	LIST_FOREACH(zv, &all_zvols, zv_links) {
2676		if (strcmp(zv->zv_name, oldname) == 0) {
2677			zvol_rename_minor(zv, newname);
2678		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
2679		    (zv->zv_name[oldnamelen] == '/' ||
2680		     zv->zv_name[oldnamelen] == '@')) {
2681			snprintf(name, sizeof(name), "%s%c%s", newname,
2682			    zv->zv_name[oldnamelen],
2683			    zv->zv_name + oldnamelen + 1);
2684			zvol_rename_minor(zv, name);
2685		}
2686	}
2687
2688	mutex_exit(&spa_namespace_lock);
2689	PICKUP_GIANT();
2690}
2691
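/*
 * cdev(9) entry points, used when the volume is exposed as a plain
 * character device (ZFS_VOLMODE_DEV) instead of a GEOM provider.
 */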
2692static int
2693zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2694{
2695	zvol_state_t *zv;
2696	int err = 0;
2697
2698	mutex_enter(&spa_namespace_lock);
2699	zv = dev->si_drv2;
2700	if (zv == NULL) {
2701		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));	/* zvol_create_minor() not done yet */
2703	}
2704
2705	if (zv->zv_total_opens == 0)
2706		err = zvol_first_open(zv);
2707	if (err) {
2708		mutex_exit(&spa_namespace_lock);
2709		return (err);
2710	}
2711	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
2712		err = SET_ERROR(EROFS);
2713		goto out;
2714	}
2715	if (zv->zv_flags & ZVOL_EXCL) {
2716		err = SET_ERROR(EBUSY);
2717		goto out;
2718	}
2719#ifdef FEXCL
2720	if (flags & FEXCL) {
2721		if (zv->zv_total_opens != 0) {
2722			err = SET_ERROR(EBUSY);
2723			goto out;
2724		}
2725		zv->zv_flags |= ZVOL_EXCL;
2726	}
2727#endif
2728
2729	zv->zv_total_opens++;
2730	mutex_exit(&spa_namespace_lock);
2731	return (err);
2732out:
2733	if (zv->zv_total_opens == 0)
2734		zvol_last_close(zv);
2735	mutex_exit(&spa_namespace_lock);
2736	return (err);
2737}
2738
2739static int
2740zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2741{
2742	zvol_state_t *zv;
2743	int err = 0;
2744
2745	mutex_enter(&spa_namespace_lock);
2746	zv = dev->si_drv2;
2747	if (zv == NULL) {
2748		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
2750	}
2751
2752	if (zv->zv_flags & ZVOL_EXCL) {
2753		ASSERT(zv->zv_total_opens == 1);
2754		zv->zv_flags &= ~ZVOL_EXCL;
2755	}
2756
2757	/*
2758	 * If the open count is zero, this is a spurious close.
2759	 * That indicates a bug in the kernel / DDI framework.
2760	 */
2761	ASSERT(zv->zv_total_opens != 0);
2762
2763	/*
2764	 * You may get multiple opens, but only one close.
2765	 */
2766	zv->zv_total_opens--;
2767
2768	if (zv->zv_total_opens == 0)
2769		zvol_last_close(zv);
2770
2771	mutex_exit(&spa_namespace_lock);
2772	return (0);
2773}
2774
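/*
 * Character-device ioctl handler.  Supports the standard disk(9)
 * geometry queries plus DIOCGFLUSH and DIOCGDELETE (TRIM).
 *
 * Illustrative userland sketch (not part of this file) for freeing a
 * block range through DIOCGDELETE, which takes an off_t[2] of
 * { byte offset, byte length }, both DEV_BSIZE-aligned:
 *
 *	off_t arg[2] = { offset, length };
 *	if (ioctl(fd, DIOCGDELETE, arg) == -1)
 *		err(1, "DIOCGDELETE");
 */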
2775static int
2776zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
2777{
2778	zvol_state_t *zv;
2779	rl_t *rl;
2780	off_t offset, length, chunk;
2781	int i, error;
2782	u_int u;
2783
2784	zv = dev->si_drv2;
2785
2786	error = 0;
2787	KASSERT(zv->zv_total_opens > 0,
2788	    ("Device with zero access count in zvol_d_ioctl"));
2789
2790	i = IOCPARM_LEN(cmd);
2791	switch (cmd) {
2792	case DIOCGSECTORSIZE:
2793		*(u_int *)data = DEV_BSIZE;
2794		break;
2795	case DIOCGMEDIASIZE:
2796		*(off_t *)data = zv->zv_volsize;
2797		break;
2798	case DIOCGFLUSH:
2799		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2800		break;
2801	case DIOCGDELETE:
2802		offset = ((off_t *)data)[0];
2803		length = ((off_t *)data)[1];
2804		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
2805		    offset < 0 || offset >= zv->zv_volsize ||
2806		    length <= 0) {
2807			printf("%s: offset=%jd length=%jd\n", __func__, offset,
2808			    length);
2809			error = EINVAL;
2810			break;
2811		}
2812
2813		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
2814		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
2815		error = dmu_tx_assign(tx, TXG_WAIT);
2816		if (error != 0) {
2817			dmu_tx_abort(tx);
2818		} else {
2819			zvol_log_truncate(zv, tx, offset, length, B_TRUE);
2820			dmu_tx_commit(tx);
2821			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2822			    offset, length);
2823		}
2824		zfs_range_unlock(rl);
2825		if (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
2826			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2827		break;
2828	case DIOCGSTRIPESIZE:
2829		*(off_t *)data = zv->zv_volblocksize;
2830		break;
2831	case DIOCGSTRIPEOFFSET:
2832		*(off_t *)data = 0;
2833		break;
2834	default:
2835		error = ENOIOCTL;
2836	}
2837
2838	return (error);
2839}
2840