zvol.c revision 268274
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/zvol/dsk/<pool_name>/<dataset_name>
 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * FreeBSD notes.
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include <geom/geom.h>

#include "zfs_namecheck.h"

struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * The spa_namespace_lock protects the zfsdev_state structure from being
 * modified while it's being used, e.g. an open that comes in before a
 * create finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static uint32_t zvol_minors;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int	volmode = ZFS_VOLMODE_GEOM;
TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
    "Expose as GEOM providers (1), device files (2) or neither");

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	LIST_ENTRY(zvol_state)	zv_links;
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	struct cdev	*zv_dev;	/* non-GEOM device */
	struct g_provider *zv_provider;	/* GEOM provider */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
	int		zv_state;
	int		zv_volmode;	/* Provide GEOM or cdev */
	struct bio_queue_head zv_queue;
	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
} zvol_state_t;

static LIST_HEAD(, zvol_state) all_zvols;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

static d_open_t		zvol_d_open;
static d_close_t	zvol_d_close;
static d_read_t		zvol_read;
static d_write_t	zvol_write;
static d_ioctl_t	zvol_d_ioctl;
static d_strategy_t	zvol_strategy;

static struct cdevsw zvol_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	zvol_d_open,
	.d_close =	zvol_d_close,
	.d_read =	zvol_read,
	.d_write =	zvol_write,
	.d_ioctl =	zvol_d_ioctl,
	.d_strategy =	zvol_strategy,
	.d_name =	"zvol",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
    uint64_t len, boolean_t sync);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);

static void
zvol_size_changed(zvol_state_t *zv)
{
#ifdef sun
	dev_t dev = makedevice(maj, min);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
#else	/* !sun */
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct g_provider *pp;

		pp = zv->zv_provider;
		if (pp == NULL)
			return;
		g_topology_lock();
		g_resize_provider(pp, zv->zv_volsize);
		g_topology_unlock();
	}
#endif	/* !sun */
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
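
/*
 * Editor's note: at this revision SPA_MINBLOCKSIZE is 512 and
 * SPA_MAXBLOCKSIZE is 128K, so the check above accepts exactly the
 * power-of-two volblocksize values 512, 1K, 2K, ..., 128K.
 */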

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	LIST_FOREACH(zv, &all_zvols, zv_links) {
		if (strcmp(zv->zv_name, name) == 0)
			break;
	}

	return (zv);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}
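
/*
 * Editor's note: the EFRAGS above is why dumpify fails on gang blocks.
 * A gang block splits one logical block across several DVAs, so it
 * cannot be described by the single (vdev, offset, nblks) extent
 * records that the dump code relies on for direct physical I/O.
 */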

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

#ifdef sun
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&spa_namespace_lock);
	return (zv ? 0 : -1);
}
#endif	/* sun */

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	struct cdev *dev;
	struct g_provider *pp;
	struct g_geom *gp;
	dmu_object_info_t doi;
	uint64_t volsize, mode;
	int error;

	ZFS_LOG(1, "Creating ZVOL %s...", name);

	mutex_enter(&spa_namespace_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

#ifdef sun
	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !sun */

	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
	zv->zv_state = 0;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		kmem_free(zv, sizeof(*zv));
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}
	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
		mode = volmode;

	DROP_GIANT();
	zv->zv_volsize = volsize;
	zv->zv_volmode = mode;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
		gp->start = zvol_geom_start;
		gp->access = zvol_geom_access;
		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = zv->zv_volsize;
		pp->private = zv;

		zv->zv_provider = pp;
		bioq_init(&zv->zv_queue);
		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
		    0640, "%s/%s", ZVOL_DRIVER, name) != 0) {
			kmem_free(zv, sizeof(*zv));
			dmu_objset_disown(os, FTAG);
			mutex_exit(&spa_namespace_lock);
			return (SET_ERROR(ENXIO));
		}
		zv->zv_dev = dev;
		dev->si_iosize_max = MAXPHYS;
		dev->si_drv2 = zv;
	}
	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
#endif	/* !sun */

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&spa_namespace_lock);

#ifndef sun
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		zvol_geom_run(zv);
		g_topology_unlock();
	}
	PICKUP_GIANT();
#endif

	ZFS_LOG(1, "ZVOL %s created.", name);

	return (0);
}
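
/*
 * Editor's note: zvol_create_minor() deliberately disowns the objset
 * before returning (zv->zv_objset is left NULL); the dataset is only
 * re-owned, and the ZIL opened, in zvol_first_open() when the device
 * receives its first open.
 */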

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef sun
	minor_t minor = zv->zv_minor;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

#ifdef sun
	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);
#else
	LIST_REMOVE(zv, zv_links);
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		zvol_geom_destroy(zv);
		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV)
		destroy_dev(zv->zv_dev);
#endif	/* sun */

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof(*zv));

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&spa_namespace_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&spa_namespace_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	zvol_size_changed(zv);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

#ifdef sun
int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
#endif	/* sun */

static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv, *tzv;
	size_t namelen;

	namelen = strlen(name);

	DROP_GIANT();
	mutex_enter(&spa_namespace_lock);

	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		     zv->zv_name[namelen] == '/')) {
			(void) zvol_remove_zv(zv);
		}
	}

	mutex_exit(&spa_namespace_lock);
	PICKUP_GIANT();
}

int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;
	uint64_t readonly;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)
		goto out;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly) {
		error = EROFS;
		goto out;
	}

	error = zvol_update_volsize(os, volsize);
	/*
	 * Reinitialize the dump area to the new size.  If we fail to
	 * resize the dump area, restore it to its original size.
	 */
	if (zv && error == 0) {
#ifdef ZVOL_DUMP
		if (zv->zv_flags & ZVOL_DUMPIFIED) {
			old_volsize = zv->zv_volsize;
			zv->zv_volsize = volsize;
			if ((error = zvol_dumpify(zv)) != 0 ||
			    (error = dumpvp_resize()) != 0) {
				(void) zvol_update_volsize(os, old_volsize);
				zv->zv_volsize = old_volsize;
				error = zvol_dumpify(zv);
			}
		}
#endif	/* ZVOL_DUMP */
		if (error == 0) {
			zv->zv_volsize = volsize;
			zvol_size_changed(zv);
		}
	}

#ifdef sun
	/*
	 * Generate a LUN expansion event.
	 */
	if (zv && error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif	/* sun */

out:
	dmu_objset_rele(os, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (error);
}

/*ARGSUSED*/
static int
zvol_open(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int err = 0;
	boolean_t locked = B_FALSE;

	/*
	 * Protect against recursively entering spa_namespace_lock
	 * when spa_open() is used for a pool on top of (local) zvols.
	 * This is needed since we replaced upstream zfsdev_state_lock
	 * with spa_namespace_lock in the ZVOL code.
	 * We are using the same trick as spa_open().
	 * Note that calls in zvol_first_open which need to resolve
	 * a pool name to a spa object will enter spa_open()
	 * recursively, but that function already has all the
	 * necessary protection.
	 */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0) {
		err = zvol_first_open(zv);
		if (err) {
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (err);
		}
		pp->mediasize = zv->zv_volsize;
		pp->stripeoffset = 0;
		pp->stripesize = zv->zv_volblocksize;
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
#ifdef FEXCL
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}
#endif

	zv->zv_total_opens += count;
	if (locked)
		mutex_exit(&spa_namespace_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	if (locked)
		mutex_exit(&spa_namespace_lock);
	return (err);
}

/*ARGSUSED*/
static int
zvol_close(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t locked = B_FALSE;

	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_total_opens -= count;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	if (locked)
		mutex_exit(&spa_namespace_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;
		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}
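
/*
 * Editor's note: a sketch of the write-state choice made above:
 *
 *   WR_INDIRECT  - whole, aligned blocks when there is no slog and the
 *                  blocksize exceeds the immediate-write threshold; the
 *                  data is later synced via dmu_sync().
 *   WR_COPIED    - synchronous writes; the data is copied into the log
 *                  record right here (falling back to WR_NEED_COPY if
 *                  the dmu_read() fails).
 *   WR_NEED_COPY - everything else; the data is copied only if and when
 *                  the itx is actually committed.
 */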

#ifdef sun
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}
#endif	/* sun */

void
zvol_strategy(struct bio *bp)
{
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = 0;
	boolean_t is_dumpified;
	boolean_t sync;

	if (bp->bio_to)
		zv = bp->bio_to->private;
	else
		zv = bp->bio_dev->si_drv2;

	if (zv == NULL) {
		error = ENXIO;
		goto out;
	}

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		error = EROFS;
		goto out;
	}

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		goto sync;
	case BIO_READ:
		doread = 1;
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		error = EOPNOTSUPP;
		goto out;
	}

	off = bp->bio_offset;
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		error = EIO;
		goto out;
	}

#ifdef illumos
	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
#else
	is_dumpified = B_FALSE;
#endif
	sync = !doread && !is_dumpified &&
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	if (bp->bio_cmd == BIO_DELETE) {
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, off, resid, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    off, resid);
			resid = 0;
		}
		goto unlock;
	}

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
#ifdef illumos
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
#else
		if (doread) {
#endif
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
unlock:
	zfs_range_unlock(rl);

	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length && off > volsize)
		error = EINVAL;

	if (sync) {
sync:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	}
out:
	if (bp->bio_to)
		g_io_deliver(bp, error);
	else
		biofinish(bp, NULL, error);
}

#ifdef sun
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else
int
zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

#ifdef sun
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}
#endif

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

#ifdef sun
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else
int
zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

#ifdef sun
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}
#endif

#ifdef sun
	sync = !(zv->zv_flags & ZVOL_WCE) ||
#else
	sync =
#endif
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}
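
/*
 * Editor's note: unlike the illumos side, the FreeBSD branch of the
 * "sync" expression above does not consult ZVOL_WCE, so writes through
 * the cdev are made synchronous only when the dataset's sync property
 * is set to "always".
 */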

#ifdef sun
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */
#endif	/* sun */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

#ifdef sun
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&spa_namespace_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&spa_namespace_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&spa_namespace_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&spa_namespace_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&spa_namespace_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller really wants synchronous writes, and
			 * can't wait for them, don't return until the write
			 * is done.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&spa_namespace_lock);
	return (error);
}
#endif	/* sun */
2034
2035int
2036zvol_busy(void)
2037{
2038	return (zvol_minors != 0);
2039}
2040
2041void
2042zvol_init(void)
2043{
2044	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
2045	    1) == 0);
2046	ZFS_LOG(1, "ZVOL Initialized.");
2047}
2048
2049void
2050zvol_fini(void)
2051{
2052	ddi_soft_state_fini(&zfsdev_state);
2053	ZFS_LOG(1, "ZVOL Deinitialized.");
2054}
2055
2056#ifdef sun
2057/*ARGSUSED*/
2058static int
2059zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2060{
2061	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2062
2063	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2064		return (1);
2065	return (0);
2066}
2067
2068/*ARGSUSED*/
2069static void
2070zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2071{
2072	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2073
2074	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2075}
2076
2077static int
2078zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2079{
2080	dmu_tx_t *tx;
2081	int error;
2082	objset_t *os = zv->zv_objset;
2083	spa_t *spa = dmu_objset_spa(os);
2084	vdev_t *vd = spa->spa_root_vdev;
2085	nvlist_t *nv = NULL;
2086	uint64_t version = spa_version(spa);
2087	enum zio_checksum checksum;
2088
2089	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2090	ASSERT(vd->vdev_ops == &vdev_root_ops);
2091
2092	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2093	    DMU_OBJECT_END);
2094	/* wait for dmu_free_long_range to actually free the blocks */
2095	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2096
2097	/*
2098	 * If the pool on which the dump device is being initialized has more
2099	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2100	 * enabled.  If so, bump that feature's counter to indicate that the
2101	 * feature is active. We also check the vdev type to handle the
2102	 * following case:
2103	 *   # zpool create test raidz disk1 disk2 disk3
2104	 *   Now spa_root_vdev->vdev_children == 1 (the raidz vdev),
2105	 *   but the raidz vdev itself has 3 children.
2106	 */
2107	if (vd->vdev_children > 1 ||
	    vd->vdev_child[0]->vdev_ops == &vdev_raidz_ops) {
2108		if (!spa_feature_is_enabled(spa,
2109		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2110			return (SET_ERROR(ENOTSUP));
2111		(void) dsl_sync_task(spa_name(spa),
2112		    zfs_mvdev_dump_feature_check,
2113		    zfs_mvdev_dump_activate_feature_sync, NULL, 2);
2114	}
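	/*
	 * For reference, on a feature-flags pool an administrator can
	 * observe this transition from userland (command is illustrative):
	 *
	 *   # zpool get feature@multi_vdev_crash_dump test
	 *
	 * The dsl_sync_task() above is what moves the feature from
	 * "enabled" to "active" the first time a multi-vdev pool is
	 * dumpified.
	 */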
2115
2116	tx = dmu_tx_create(os);
2117	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2118	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2119	error = dmu_tx_assign(tx, TXG_WAIT);
2120	if (error) {
2121		dmu_tx_abort(tx);
2122		return (error);
2123	}
2124
2125	/*
2126	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2127	 * function.  Otherwise, use the old default -- OFF.
2128	 */
2129	checksum = spa_feature_is_active(spa,
2130	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2131	    ZIO_CHECKSUM_OFF;
2132
2133	/*
2134	 * If we are resizing the dump device, we only need to update the
2135	 * refreservation to match the new volume size.  Otherwise, we
2136	 * save off the zvol's original properties so that we can restore
2137	 * them if the zvol is ever undumpified.
2138	 */
2139	if (resize) {
2140		error = zap_update(os, ZVOL_ZAP_OBJ,
2141		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2142		    &zv->zv_volsize, tx);
2143	} else {
2144		uint64_t checksum, compress, refresrv, vbs, dedup;
2145
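		/*
		 * A note on the "error = error ? error : <call>" chains
		 * below: each step runs only if all prior steps succeeded,
		 * and the first failure is the one preserved.  Each line is
		 * shorthand for:
		 *
		 *	if (error == 0)
		 *		error = <call>;
		 */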
2146		error = dsl_prop_get_integer(zv->zv_name,
2147		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2148		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2149		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
2150		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2151		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
2152		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2153		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
2154		if (version >= SPA_VERSION_DEDUP) {
2155			error = error ? error :
2156			    dsl_prop_get_integer(zv->zv_name,
2157			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2158		}
2159
2160		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2161		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2162		    &compress, tx);
2163		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2164		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
2165		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2166		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2167		    &refresrv, tx);
2168		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2169		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2170		    &vbs, tx);
2171		error = error ? error : dmu_object_set_blocksize(
2172		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
2173		if (version >= SPA_VERSION_DEDUP) {
2174			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2175			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2176			    &dedup, tx);
2177		}
2178		if (error == 0)
2179			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
2180	}
2181	dmu_tx_commit(tx);
2182
2183	/*
2184	 * We only need to update the zvol's properties if we are
2185	 * initializing the dump area for the first time.
2186	 */
2187	if (!resize) {
2188		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2189		VERIFY(nvlist_add_uint64(nv,
2190		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2191		VERIFY(nvlist_add_uint64(nv,
2192		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2193		    ZIO_COMPRESS_OFF) == 0);
2194		VERIFY(nvlist_add_uint64(nv,
2195		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2196		    checksum) == 0);
2197		if (version >= SPA_VERSION_DEDUP) {
2198			VERIFY(nvlist_add_uint64(nv,
2199			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2200			    ZIO_CHECKSUM_OFF) == 0);
2201		}
2202
2203		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2204		    nv, NULL);
2205		nvlist_free(nv);
2206
2207		if (error)
2208			return (error);
2209	}
2210
2211	/* Allocate the space for the dump */
2212	error = zvol_prealloc(zv);
2213	return (error);
2214}
2215
2216static int
2217zvol_dumpify(zvol_state_t *zv)
2218{
2219	int error = 0;
2220	uint64_t dumpsize = 0;
2221	dmu_tx_t *tx;
2222	objset_t *os = zv->zv_objset;
2223
2224	if (zv->zv_flags & ZVOL_RDONLY)
2225		return (SET_ERROR(EROFS));
2226
2227	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2228	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2229		boolean_t resize = (dumpsize > 0);
2230
2231		if ((error = zvol_dump_init(zv, resize)) != 0) {
2232			(void) zvol_dump_fini(zv);
2233			return (error);
2234		}
2235	}
2236
2237	/*
2238	 * Build up our LBA mapping.
2239	 */
2240	error = zvol_get_lbas(zv);
2241	if (error) {
2242		(void) zvol_dump_fini(zv);
2243		return (error);
2244	}
2245
2246	tx = dmu_tx_create(os);
2247	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2248	error = dmu_tx_assign(tx, TXG_WAIT);
2249	if (error) {
2250		dmu_tx_abort(tx);
2251		(void) zvol_dump_fini(zv);
2252		return (error);
2253	}
2254
2255	zv->zv_flags |= ZVOL_DUMPIFIED;
2256	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2257	    &zv->zv_volsize, tx);
2258	dmu_tx_commit(tx);
2259
2260	if (error) {
2261		(void) zvol_dump_fini(zv);
2262		return (error);
2263	}
2264
2265	txg_wait_synced(dmu_objset_pool(os), 0);
2266	return (0);
2267}
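/*
 * Usage sketch (illumos-side; dataset name and size are illustrative):
 * configuring a zvol as the system dump device, e.g.
 *
 *	# zfs create -V 2G rpool/dump
 *	# dumpadm -d /dev/zvol/dsk/rpool/dump
 *
 * ultimately issues DKIOCDUMPINIT against the volume, which arrives via
 * zvol_ioctl() above and lands here in zvol_dumpify().
 */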
2268
2269static int
2270zvol_dump_fini(zvol_state_t *zv)
2271{
2272	dmu_tx_t *tx;
2273	objset_t *os = zv->zv_objset;
2274	nvlist_t *nv;
2275	int error = 0;
2276	uint64_t checksum, compress, refresrv, vbs, dedup;
2277	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2278
2279	/*
2280	 * Attempt to restore the zvol to its pre-dumpified state.
2281	 * This is best-effort, as it's possible that not all of these
2282	 * properties were initialized during the dumpify process
2283	 * (e.g. due to an error in zvol_dump_init).
2284	 */
2285
2286	tx = dmu_tx_create(os);
2287	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2288	error = dmu_tx_assign(tx, TXG_WAIT);
2289	if (error) {
2290		dmu_tx_abort(tx);
2291		return (error);
2292	}
2293	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2294	dmu_tx_commit(tx);
2295
2296	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2297	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2298	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2299	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2300	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2301	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2302	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2303	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2304
2305	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2306	(void) nvlist_add_uint64(nv,
2307	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2308	(void) nvlist_add_uint64(nv,
2309	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2310	(void) nvlist_add_uint64(nv,
2311	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2312	if (version >= SPA_VERSION_DEDUP &&
2313	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2314	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2315		(void) nvlist_add_uint64(nv,
2316		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2317	}
2318	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2319	    nv, NULL);
2320	nvlist_free(nv);
2321
2322	zvol_free_extents(zv);
2323	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2324	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2325	/* wait for dmu_free_long_range to actually free the blocks */
2326	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2327	tx = dmu_tx_create(os);
2328	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2329	error = dmu_tx_assign(tx, TXG_WAIT);
2330	if (error) {
2331		dmu_tx_abort(tx);
2332		return (error);
2333	}
2334	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2335		zv->zv_volblocksize = vbs;
2336	dmu_tx_commit(tx);
2337
2338	return (0);
2339}
2340#endif	/* sun */
2341
2342static void
2343zvol_geom_run(zvol_state_t *zv)
2344{
2345	struct g_provider *pp;
2346
2347	pp = zv->zv_provider;
2348	g_error_provider(pp, 0);
2349
2350	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2351	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2352}
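/*
 * Naming note: ZVOL_DRIVER is "zvol", so for a provider named
 * "zvol/tank/vol" the "pp->name + sizeof(ZVOL_DRIVER)" above skips past
 * "zvol/" (sizeof includes the terminating NUL, which stands in for the
 * '/'), and the worker thread shows up under zfskern as "zvol tank/vol".
 */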
2353
2354static void
2355zvol_geom_destroy(zvol_state_t *zv)
2356{
2357	struct g_provider *pp;
2358
2359	g_topology_assert();
2360
2361	mtx_lock(&zv->zv_queue_mtx);
2362	zv->zv_state = 1;
2363	wakeup_one(&zv->zv_queue);
2364	while (zv->zv_state != 2)
2365		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2366	mtx_destroy(&zv->zv_queue_mtx);
2367
2368	pp = zv->zv_provider;
2369	zv->zv_provider = NULL;
2370	pp->private = NULL;
2371	g_wither_geom(pp->geom, ENXIO);
2372}
2373
2374static int
2375zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2376{
2377	int count, error, flags;
2378
2379	g_topology_assert();
2380
2381	/*
2382	 * To keep things simple we expect either an open or a close, but
2383	 * not both at the same time.
2384	 */
2385	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2386	    (acr <= 0 && acw <= 0 && ace <= 0),
2387	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2388	    pp->name, acr, acw, ace));
2389
2390	if (pp->private == NULL) {
2391		if (acr <= 0 && acw <= 0 && ace <= 0)
2392			return (0);
2393		return (pp->error);
2394	}
2395
2396	/*
2397	 * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0,
2398	 * because GEOM already handles that, and handles it a bit
2399	 * differently.  GEOM allows for multiple read/exclusive consumers,
2400	 * whereas ZFS allows only one exclusive consumer, no matter
2401	 * whether it is a reader or a writer.  GEOM's model works better
2402	 * here, so we leave it to GEOM to decide what to do.
2403	 */
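	/*
	 * Example of the delta accounting below: a consumer opening
	 * read-only calls g_access(cp, 1, 0, 0), which arrives here as
	 * acr=1, acw=0, ace=0 and maps to zvol_open(pp, FREAD, 1);
	 * dropping that access later arrives as acr=-1 and maps to
	 * zvol_close(pp, FREAD, 1).
	 */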
2404
2405	count = acr + acw + ace;
2406	if (count == 0)
2407		return (0);
2408
2409	flags = 0;
2410	if (acr != 0 || ace != 0)
2411		flags |= FREAD;
2412	if (acw != 0)
2413		flags |= FWRITE;
2414
2415	g_topology_unlock();
2416	if (count > 0)
2417		error = zvol_open(pp, flags, count);
2418	else
2419		error = zvol_close(pp, flags, -count);
2420	g_topology_lock();
2421	return (error);
2422}
2423
2424static void
2425zvol_geom_start(struct bio *bp)
2426{
2427	zvol_state_t *zv;
2428	boolean_t first;
2429
2430	zv = bp->bio_to->private;
2431	ASSERT(zv != NULL);
2432	switch (bp->bio_cmd) {
2433	case BIO_FLUSH:
2434		if (!THREAD_CAN_SLEEP())
2435			goto enqueue;
2436		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2437		g_io_deliver(bp, 0);
2438		break;
2439	case BIO_READ:
2440	case BIO_WRITE:
2441	case BIO_DELETE:
2442		if (!THREAD_CAN_SLEEP())
2443			goto enqueue;
2444		zvol_strategy(bp);
2445		break;
2446	case BIO_GETATTR:
2447		if (g_handleattr_int(bp, "GEOM::candelete", 1))
2448			return;
2449		/* FALLTHROUGH */
2450	default:
2451		g_io_deliver(bp, EOPNOTSUPP);
2452		break;
2453	}
2454	return;
2455
2456enqueue:
2457	mtx_lock(&zv->zv_queue_mtx);
2458	first = (bioq_first(&zv->zv_queue) == NULL);
2459	bioq_insert_tail(&zv->zv_queue, bp);
2460	mtx_unlock(&zv->zv_queue_mtx);
2461	if (first)
2462		wakeup_one(&zv->zv_queue);
2463}
2464
2465static void
2466zvol_geom_worker(void *arg)
2467{
2468	zvol_state_t *zv;
2469	struct bio *bp;
2470
2471	thread_lock(curthread);
2472	sched_prio(curthread, PRIBIO);
2473	thread_unlock(curthread);
2474
2475	zv = arg;
2476	for (;;) {
2477		mtx_lock(&zv->zv_queue_mtx);
2478		bp = bioq_takefirst(&zv->zv_queue);
2479		if (bp == NULL) {
2480			if (zv->zv_state == 1) {
2481				zv->zv_state = 2;
2482				wakeup(&zv->zv_state);
2483				mtx_unlock(&zv->zv_queue_mtx);
2484				kthread_exit();
2485			}
2486			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2487			    "zvol:io", 0);
2488			continue;
2489		}
2490		mtx_unlock(&zv->zv_queue_mtx);
2491		switch (bp->bio_cmd) {
2492		case BIO_FLUSH:
2493			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2494			g_io_deliver(bp, 0);
2495			break;
2496		case BIO_READ:
2497		case BIO_WRITE:
		case BIO_DELETE:	/* deferred from zvol_geom_start() */
2498			zvol_strategy(bp);
2499			break;
2500		}
2501	}
2502}
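/*
 * Worker shutdown handshake, for reference: zvol_geom_destroy() sets
 * zv_state to 1 and wakes the queue; the worker, on finding the queue
 * empty with zv_state == 1, sets zv_state to 2, wakes the waiter and
 * exits; zvol_geom_destroy() then tears down the provider.
 */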
2503
2504extern boolean_t dataset_name_hidden(const char *name);
2505
2506static int
2507zvol_create_snapshots(objset_t *os, const char *name)
2508{
2509	uint64_t cookie, obj;
2510	char *sname;
2511	int error, len;
2512
2513	cookie = obj = 0;
2514	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2515
2516#if 0
2517	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2518	    DS_FIND_SNAPSHOTS);
2519#endif
2520
2521	for (;;) {
2522		len = snprintf(sname, MAXPATHLEN, "%s@", name);
2523		if (len >= MAXPATHLEN) {
2524			dmu_objset_rele(os, FTAG);
2525			error = ENAMETOOLONG;
2526			break;
2527		}
2528
2529		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2530		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2531		    sname + len, &obj, &cookie, NULL);
2532		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2533		if (error != 0) {
2534			if (error == ENOENT)
2535				error = 0;
2536			break;
2537		}
2538
2539		if ((error = zvol_create_minor(sname)) != 0) {
2540			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2541			    sname, error);
2542			break;
2543		}
2544	}
2545
2546	kmem_free(sname, MAXPATHLEN);
2547	return (error);
2548}
2549
2550int
2551zvol_create_minors(const char *name)
2552{
2553	uint64_t cookie;
2554	objset_t *os;
2555	char *osname, *p;
2556	int error, len;
2557
2558	if (dataset_name_hidden(name))
2559		return (0);
2560
2561	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2562		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2563		    name, error);
2564		return (error);
2565	}
2566	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2567		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
2568		dsl_pool_rele(dmu_objset_pool(os), FTAG);
2569		if ((error = zvol_create_minor(name)) == 0)
2570			error = zvol_create_snapshots(os, name);
2571		else {
2572			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2573			    name, error);
2574		}
2575		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
2576		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
2577		return (error);
2578	}
2579	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2580		dmu_objset_rele(os, FTAG);
2581		return (0);
2582	}
2583
2584	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2585	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2586		dmu_objset_rele(os, FTAG);
2587		kmem_free(osname, MAXPATHLEN);
2588		return (ENOENT);
2589	}
2590	p = osname + strlen(osname);
2591	len = MAXPATHLEN - (p - osname);
2592
2593#if 0
2594	/* Prefetch the datasets. */
2595	cookie = 0;
2596	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2597		if (!dataset_name_hidden(osname))
2598			(void) dmu_objset_prefetch(osname, NULL);
2599	}
2600#endif
2601
2602	cookie = 0;
2603	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2604	    &cookie) == 0) {
2605		dmu_objset_rele(os, FTAG);
2606		(void) zvol_create_minors(osname);
2607		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2608			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2609			    name, error);
2610			return (error);
2611		}
2612	}
2613
2614	dmu_objset_rele(os, FTAG);
2615	kmem_free(osname, MAXPATHLEN);
2616	return (0);
2617}
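/*
 * Example (hypothetical layout): calling zvol_create_minors("tank") on
 * a pool containing the filesystem tank/fs, the volumes tank/vol and
 * tank/fs/vol2, and the snapshot tank/vol@snap recurses through the
 * dataset hierarchy and ends up creating minors for tank/vol,
 * tank/vol@snap and tank/fs/vol2.
 */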
2618
2619static void
2620zvol_rename_minor(zvol_state_t *zv, const char *newname)
2621{
2622	struct g_geom *gp;
2623	struct g_provider *pp;
2624	struct cdev *dev;
2625
2626	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2627
2628	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
2629		g_topology_lock();
2630		pp = zv->zv_provider;
2631		ASSERT(pp != NULL);
2632		gp = pp->geom;
2633		ASSERT(gp != NULL);
2634
2635		zv->zv_provider = NULL;
2636		g_wither_provider(pp, ENXIO);
2637
2638		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2639		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
2640		pp->sectorsize = DEV_BSIZE;
2641		pp->mediasize = zv->zv_volsize;
2642		pp->private = zv;
2643		zv->zv_provider = pp;
2644		g_error_provider(pp, 0);
2645		g_topology_unlock();
2646	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
2647		dev = zv->zv_dev;
2648		ASSERT(dev != NULL);
2649		zv->zv_dev = NULL;
2650		destroy_dev(dev);
2651
2652		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
2653		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
2654		    0640, "%s/%s", ZVOL_DRIVER, newname) == 0) {
2655			zv->zv_dev = dev;
2656			dev->si_iosize_max = MAXPHYS;
2657			dev->si_drv2 = zv;
2658		}
2659	}
2660	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2661}
2662
2663void
2664zvol_rename_minors(const char *oldname, const char *newname)
2665{
2666	char name[MAXPATHLEN];
2667	struct g_provider *pp;
2668	struct g_geom *gp;
2669	size_t oldnamelen, newnamelen;
2670	zvol_state_t *zv;
2671	char *namebuf;
2672
2673	oldnamelen = strlen(oldname);
2674	newnamelen = strlen(newname);
2675
2676	DROP_GIANT();
2677	mutex_enter(&spa_namespace_lock);
2678
2679	LIST_FOREACH(zv, &all_zvols, zv_links) {
2680		if (strcmp(zv->zv_name, oldname) == 0) {
2681			zvol_rename_minor(zv, newname);
2682		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
2683		    (zv->zv_name[oldnamelen] == '/' ||
2684		     zv->zv_name[oldnamelen] == '@')) {
2685			snprintf(name, sizeof(name), "%s%c%s", newname,
2686			    zv->zv_name[oldnamelen],
2687			    zv->zv_name + oldnamelen + 1);
2688			zvol_rename_minor(zv, name);
2689		}
2690	}
2691
2692	mutex_exit(&spa_namespace_lock);
2693	PICKUP_GIANT();
2694}
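/*
 * Example: after "zfs rename tank/a tank/b", the walk above renames the
 * minor for tank/a itself plus any child or snapshot minors, e.g.
 * tank/a/vol -> tank/b/vol and tank/a@snap -> tank/b@snap.  A zvol named
 * tank/ab is left alone, since the character following the old prefix
 * must be '/' or '@'.
 */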
2695
2696static int
2697zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2698{
2699	zvol_state_t *zv;
2700	int err = 0;
2701
2702	mutex_enter(&spa_namespace_lock);
2703	zv = dev->si_drv2;
2704	if (zv == NULL) {
2705		mutex_exit(&spa_namespace_lock);
2706		return (ENXIO);		/* zvol_create_minor() not done yet */
2707	}
2708
2709	if (zv->zv_total_opens == 0)
2710		err = zvol_first_open(zv);
2711	if (err) {
2712		mutex_exit(&spa_namespace_lock);
2713		return (err);
2714	}
2715	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
2716		err = SET_ERROR(EROFS);
2717		goto out;
2718	}
2719	if (zv->zv_flags & ZVOL_EXCL) {
2720		err = SET_ERROR(EBUSY);
2721		goto out;
2722	}
2723#ifdef FEXCL
2724	if (flags & FEXCL) {
2725		if (zv->zv_total_opens != 0) {
2726			err = SET_ERROR(EBUSY);
2727			goto out;
2728		}
2729		zv->zv_flags |= ZVOL_EXCL;
2730	}
2731#endif
2732
2733	zv->zv_total_opens++;
2734	mutex_exit(&spa_namespace_lock);
2735	return (err);
2736out:
2737	if (zv->zv_total_opens == 0)
2738		zvol_last_close(zv);
2739	mutex_exit(&spa_namespace_lock);
2740	return (err);
2741}
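/*
 * Note: the FEXCL block above is only compiled where the platform
 * defines FEXCL (illumos does; FreeBSD's fcntl headers, as far as we
 * know, do not).  For geom-mode zvols on FreeBSD, exclusive access is
 * instead mediated by GEOM -- see zvol_geom_access().
 */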
2742
2743static int
2744zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2745{
2746	zvol_state_t *zv;
2747	int err = 0;
2748
2749	mutex_enter(&spa_namespace_lock);
2750	zv = dev->si_drv2;
2751	if (zv == NULL) {
2752		mutex_exit(&spa_namespace_lock);
2753		return (ENXIO);
2754	}
2755
2756	if (zv->zv_flags & ZVOL_EXCL) {
2757		ASSERT(zv->zv_total_opens == 1);
2758		zv->zv_flags &= ~ZVOL_EXCL;
2759	}
2760
2761	/*
2762	 * If the open count is zero, this is a spurious close.
2763	 * That indicates a bug in the kernel / DDI framework.
2764	 */
2765	ASSERT(zv->zv_total_opens != 0);
2766
2767	/*
2768	 * You may get multiple opens, but only one close.
2769	 */
2770	zv->zv_total_opens--;
2771
2772	if (zv->zv_total_opens == 0)
2773		zvol_last_close(zv);
2774
2775	mutex_exit(&spa_namespace_lock);
2776	return (0);
2777}
2778
2779static int
2780zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
2781{
2782	zvol_state_t *zv;
2783	rl_t *rl;
2784	off_t offset, length, chunk;
2785	int i, error;
2786	u_int u;
2787
2788	zv = dev->si_drv2;
2789
2790	error = 0;
2791	KASSERT(zv->zv_total_opens > 0,
2792	    ("Device with zero access count in zvol_d_ioctl"));
2793
2794	i = IOCPARM_LEN(cmd);
2795	switch (cmd) {
2796	case DIOCGSECTORSIZE:
2797		*(u_int *)data = DEV_BSIZE;
2798		break;
2799	case DIOCGMEDIASIZE:
2800		*(off_t *)data = zv->zv_volsize;
2801		break;
2802	case DIOCGFLUSH:
2803		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2804		break;
2805	case DIOCGDELETE:
2806		offset = ((off_t *)data)[0];
2807		length = ((off_t *)data)[1];
2808		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
2809		    offset < 0 || offset >= zv->zv_volsize ||
2810		    length <= 0) {
2811			printf("%s: offset=%jd length=%jd\n", __func__, offset,
2812			    length);
2813			error = EINVAL;
2814			break;
2815		}
2816
2817		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
2818		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
2819		error = dmu_tx_assign(tx, TXG_WAIT);
2820		if (error != 0) {
2821			dmu_tx_abort(tx);
2822		} else {
2823			zvol_log_truncate(zv, tx, offset, length, B_TRUE);
2824			dmu_tx_commit(tx);
2825			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2826			    offset, length);
2827		}
2828		zfs_range_unlock(rl);
2829		if (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
2830			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2831		break;
2832	case DIOCGSTRIPESIZE:
2833		*(off_t *)data = zv->zv_volblocksize;
2834		break;
2835	case DIOCGSTRIPEOFFSET:
2836		*(off_t *)data = 0;
2837		break;
2838	default:
2839		error = ENOIOCTL;
2840	}
2841
2842	return (error);
2843}
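/*
 * Usage sketch (userland, not part of this driver; the device path and
 * sizes are illustrative): with volmode=dev, a consumer could query the
 * volume size and punch a hole roughly as follows.
 *
 *	#include <sys/param.h>
 *	#include <sys/disk.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/zvol/tank/vol", O_RDWR);
 *	off_t mediasize;
 *	(void) ioctl(fd, DIOCGMEDIASIZE, &mediasize);
 *	off_t args[2] = { 0, 1024 * DEV_BSIZE };	-- offset, length
 *	(void) ioctl(fd, DIOCGDELETE, args);
 */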
2844