zvol.c revision 269006
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/zvol/dsk/<pool_name>/<dataset_name>
 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * FreeBSD notes.
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */
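/*
 * Example (assuming a pool named "tank"): "zfs create -V 10g tank/vol"
 * creates a volume that this driver exposes as /dev/zvol/tank/vol, either
 * as a GEOM provider or as a plain character device depending on the
 * volmode setting below.
 */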

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include <geom/geom.h>

#include "zfs_namecheck.h"

struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * The spa_namespace_lock protects the zfsdev_state structure from being
 * modified while it's being used, e.g. an open that comes in before a
 * create finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
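/* Count of zvol minors currently instantiated (see zvol_busy()). */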
static uint32_t zvol_minors;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int	volmode = ZFS_VOLMODE_GEOM;
TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
    "Expose as GEOM providers (1), device files (2) or neither");

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	LIST_ENTRY(zvol_state)	zv_links;
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	struct cdev	*zv_dev;	/* non-GEOM device */
	struct g_provider *zv_provider;	/* GEOM provider */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
	int		zv_state;
	int		zv_volmode;	/* Provide GEOM or cdev */
	struct bio_queue_head zv_queue;
	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
} zvol_state_t;

static LIST_HEAD(, zvol_state) all_zvols;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

static d_open_t		zvol_d_open;
static d_close_t	zvol_d_close;
static d_read_t		zvol_read;
static d_write_t	zvol_write;
static d_ioctl_t	zvol_d_ioctl;
static d_strategy_t	zvol_strategy;

static struct cdevsw zvol_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	zvol_d_open,
	.d_close =	zvol_d_close,
	.d_read =	zvol_read,
	.d_write =	zvol_write,
	.d_ioctl =	zvol_d_ioctl,
	.d_strategy =	zvol_strategy,
	.d_name =	"zvol",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
    uint64_t len, boolean_t sync);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);

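/*
 * Propagate a change in zv_volsize to the outside world; on FreeBSD this
 * means resizing the GEOM provider so consumers see the new media size.
 */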
static void
zvol_size_changed(zvol_state_t *zv)
{
#ifdef sun
	dev_t dev = makedevice(maj, min);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
#else	/* !sun */
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct g_provider *pp;

		pp = zv->zv_provider;
		if (pp == NULL)
			return;
		g_topology_lock();
		g_resize_provider(pp, zv->zv_volsize);
		g_topology_unlock();
	}
#endif	/* !sun */
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	LIST_FOREACH(zv, &all_zvols, zv_links) {
		if (strcmp(zv->zv_name, name) == 0)
			break;
	}

	return (zv);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while ((ze = list_head(&zv->zv_extents)) != NULL) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

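/*
 * Build the physical extent list (zv_extents) for the whole volume by
 * walking every L0 block pointer; the dump code uses these extents to do
 * raw device I/O without going through the DMU.
 */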
static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

#ifdef sun
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&spa_namespace_lock);
	return (zv ? 0 : -1);
}
#endif	/* sun */

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	struct cdev *dev;
	struct g_provider *pp;
	struct g_geom *gp;
	dmu_object_info_t doi;
	uint64_t volsize, mode;
	int error;

	ZFS_LOG(1, "Creating ZVOL %s...", name);

	mutex_enter(&spa_namespace_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

#ifdef sun
	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !sun */

	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
	zv->zv_state = 0;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		kmem_free(zv, sizeof(*zv));
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}
	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
		mode = volmode;

	DROP_GIANT();
	zv->zv_volsize = volsize;
	zv->zv_volmode = mode;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
		gp->start = zvol_geom_start;
		gp->access = zvol_geom_access;
		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = zv->zv_volsize;
		pp->private = zv;

		zv->zv_provider = pp;
		bioq_init(&zv->zv_queue);
		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
		    0640, "%s/%s", ZVOL_DRIVER, name) != 0) {
			kmem_free(zv, sizeof(*zv));
			dmu_objset_disown(os, FTAG);
			mutex_exit(&spa_namespace_lock);
			return (SET_ERROR(ENXIO));
		}
		zv->zv_dev = dev;
		dev->si_iosize_max = MAXPHYS;
		dev->si_drv2 = zv;
	}
	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
#endif	/* !sun */

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&spa_namespace_lock);

#ifndef sun
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		zvol_geom_run(zv);
		g_topology_unlock();
	}
	PICKUP_GIANT();
#endif

	ZFS_LOG(1, "ZVOL %s created.", name);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef sun
	minor_t minor = zv->zv_minor;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

#ifdef sun
	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);
#else
	LIST_REMOVE(zv, zv_links);
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		zvol_geom_destroy(zv);
		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV)
		destroy_dev(zv->zv_dev);
#endif	/* sun */

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof(*zv));

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&spa_namespace_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&spa_namespace_lock);
	return (rc);
}

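/*
 * Transition from zero to one open: take ownership of the objset, cache
 * the volume size, hold the bonus buffer and open the ZIL.
 */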
int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	zvol_size_changed(zv);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

#ifdef sun
int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
#endif	/* sun */

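/*
 * Record the new size in the objset's ZAP and, when shrinking, free any
 * blocks that now lie past the end of the volume.
 */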
static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

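/*
 * Remove the minors for the named dataset and all of its descendants
 * (every zvol whose name matches "name" or begins with "name/").
 */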
void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv, *tzv;
	size_t namelen;

	namelen = strlen(name);

	DROP_GIANT();
	mutex_enter(&spa_namespace_lock);

	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		     zv->zv_name[namelen] == '/')) {
			(void) zvol_remove_zv(zv);
		}
	}

	mutex_exit(&spa_namespace_lock);
	PICKUP_GIANT();
}

int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;
	uint64_t readonly;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)
		goto out;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly) {
		error = SET_ERROR(EROFS);
		goto out;
	}

	error = zvol_update_volsize(os, volsize);
	/*
	 * Reinitialize the dump area to the new size. If we
	 * failed to resize the dump area then restore it back to
	 * its original size.
	 */
	if (zv && error == 0) {
#ifdef ZVOL_DUMP
		if (zv->zv_flags & ZVOL_DUMPIFIED) {
			old_volsize = zv->zv_volsize;
			zv->zv_volsize = volsize;
			if ((error = zvol_dumpify(zv)) != 0 ||
			    (error = dumpvp_resize()) != 0) {
				(void) zvol_update_volsize(os, old_volsize);
				zv->zv_volsize = old_volsize;
				error = zvol_dumpify(zv);
			}
		}
#endif	/* ZVOL_DUMP */
		if (error == 0) {
			zv->zv_volsize = volsize;
			zvol_size_changed(zv);
		}
	}

#ifdef sun
	/*
	 * Generate a LUN expansion event.
	 */
	if (zv && error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif	/* sun */

out:
	dmu_objset_rele(os, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (error);
}

/*ARGSUSED*/
static int
zvol_open(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int err = 0;
	boolean_t locked = B_FALSE;

	/*
	 * Protect against recursively entering spa_namespace_lock
	 * when spa_open() is used for a pool on a (local) ZVOL(s).
	 * This is needed since we replaced upstream zfsdev_state_lock
	 * with spa_namespace_lock in the ZVOL code.
	 * We are using the same trick as spa_open().
	 * Note that calls in zvol_first_open which need to resolve
	 * pool name to a spa object will enter spa_open()
	 * recursively, but that function already has all the
	 * necessary protection.
	 */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0) {
		err = zvol_first_open(zv);
		if (err) {
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (err);
		}
		pp->mediasize = zv->zv_volsize;
		pp->stripeoffset = 0;
		pp->stripesize = zv->zv_volblocksize;
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
#ifdef FEXCL
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}
#endif

	zv->zv_total_opens += count;
	if (locked)
		mutex_exit(&spa_namespace_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	if (locked)
		mutex_exit(&spa_namespace_lock);
	return (err);
}

/*ARGSUSED*/
static int
zvol_close(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t locked = B_FALSE;

	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_total_opens -= count;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	if (locked)
		mutex_exit(&spa_namespace_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;
		/*
		 * Unlike zfs_log_write(), we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
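		/*
		 * Three cases follow: WR_INDIRECT records only a block
		 * pointer and relies on dmu_sync() to write the data,
		 * WR_COPIED embeds the data in the itx right away, and
		 * WR_NEED_COPY defers the copy until log write time.
		 */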
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

#ifdef sun
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and must not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}
#endif	/* sun */

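/*
 * Common I/O path for both GEOM bios and cdev strategy requests: reads and
 * writes go through the DMU, BIO_DELETE becomes a free-long-range (logged
 * as TX_TRUNCATE), and BIO_FLUSH commits the ZIL.
 */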
void
zvol_strategy(struct bio *bp)
{
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = B_FALSE;
	boolean_t is_dumpified;
	boolean_t sync;

	if (bp->bio_to)
		zv = bp->bio_to->private;
	else
		zv = bp->bio_dev->si_drv2;

	if (zv == NULL) {
		error = ENXIO;
		goto out;
	}

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		error = EROFS;
		goto out;
	}

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		goto sync;
	case BIO_READ:
		doread = B_TRUE;
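		/* FALLTHROUGH */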
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		error = EOPNOTSUPP;
		goto out;
	}

	off = bp->bio_offset;
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		error = EIO;
		goto out;
	}

#ifdef illumos
	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
#else
	is_dumpified = B_FALSE;
#endif
	sync = !doread && !is_dumpified &&
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	if (bp->bio_cmd == BIO_DELETE) {
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, off, resid, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    off, resid);
			resid = 0;
		}
		goto unlock;
	}

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
#ifdef illumos
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
#else
		if (doread) {
#endif
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
unlock:
	zfs_range_unlock(rl);

	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length && off > volsize)
		error = EINVAL;

	if (sync) {
sync:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	}
out:
	if (bp->bio_to)
		g_io_deliver(bp, error);
	else
		biofinish(bp, NULL, error);
}

#ifdef sun
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else
int
zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

#ifdef sun
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}
#endif

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

#ifdef sun
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else
int
zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

#ifdef sun
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}
#endif

#ifdef sun
	sync = !(zv->zv_flags & ZVOL_WCE) ||
#else
	sync =
#endif
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

#ifdef sun
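/*
 * Fabricate a minimal EFI label (a GPT header plus a single reserved
 * partition entry spanning the volume) in response to DKIOCGETEFI.
 */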
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */
#endif	/* sun */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

#ifdef sun
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&spa_namespace_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&spa_namespace_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&spa_namespace_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&spa_namespace_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&spa_namespace_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		dmu_tx_mark_netfree(tx);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller really wants synchronous writes, and
			 * can't wait for them, don't return until the write
			 * is done.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&spa_namespace_lock);
	return (error);
}
#endif	/* sun */

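/*
 * Nonzero while any zvol minor exists (e.g. to prevent the module from
 * being torn down while volumes are configured).
 */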
int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	ZFS_LOG(1, "ZVOL Initialized.");
}

void
zvol_fini(void)
{
	ddi_soft_state_fini(&zfsdev_state);
	ZFS_LOG(1, "ZVOL Deinitialized.");
}

#ifdef sun
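/*
 * dsl_sync_task check function: returning nonzero here when the
 * MULTI_VDEV_CRASH_DUMP feature is already active makes the activation
 * sync task a no-op the second time around.
 */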
2061/*ARGSUSED*/
2062static int
2063zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2064{
2065	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2066
2067	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2068		return (1);
2069	return (0);
2070}
2071
2072/*ARGSUSED*/
2073static void
2074zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2075{
2076	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2077
2078	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2079}
2080
2081static int
2082zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2083{
2084	dmu_tx_t *tx;
2085	int error;
2086	objset_t *os = zv->zv_objset;
2087	spa_t *spa = dmu_objset_spa(os);
2088	vdev_t *vd = spa->spa_root_vdev;
2089	nvlist_t *nv = NULL;
2090	uint64_t version = spa_version(spa);
2091	enum zio_checksum checksum;
2092
2093	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2094	ASSERT(vd->vdev_ops == &vdev_root_ops);
2095
2096	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2097	    DMU_OBJECT_END);
2098	/* wait for dmu_free_long_range to actually free the blocks */
2099	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2100
2101	/*
2102	 * If the pool on which the dump device is being initialized has more
2103	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2104	 * enabled.  If so, bump that feature's counter to indicate that the
2105	 * feature is active. We also check the vdev type to handle the
2106	 * following case:
2107	 *   # zpool create test raidz disk1 disk2 disk3
2108	 *   Here spa_root_vdev->vdev_children == 1 (the raidz vdev), but
2109	 *   the raidz vdev itself has 3 children.
2110	 */
2111	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
2112		if (!spa_feature_is_enabled(spa,
2113		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2114			return (SET_ERROR(ENOTSUP));
2115		(void) dsl_sync_task(spa_name(spa),
2116		    zfs_mvdev_dump_feature_check,
2117		    zfs_mvdev_dump_activate_feature_sync, NULL,
2118		    2, ZFS_SPACE_CHECK_RESERVED);
2119	}
2120
2121	tx = dmu_tx_create(os);
2122	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2123	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2124	error = dmu_tx_assign(tx, TXG_WAIT);
2125	if (error) {
2126		dmu_tx_abort(tx);
2127		return (error);
2128	}
2129
2130	/*
2131	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2132	 * function.  Otherwise, use the old default -- OFF.
2133	 */
2134	checksum = spa_feature_is_active(spa,
2135	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2136	    ZIO_CHECKSUM_OFF;
2137
2138	/*
2139	 * If we are resizing the dump device then we only need to
2140	 * update the refreservation to match the newly updated
2141	 * volsize.  Otherwise, we save off the zvol's original properties
2142	 * so that we can restore them if the zvol is ever undumpified.
2143	 */
2144	if (resize) {
2145		error = zap_update(os, ZVOL_ZAP_OBJ,
2146		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2147		    &zv->zv_volsize, tx);
2148	} else {
2149		uint64_t checksum, compress, refresrv, vbs, dedup;
2150
2151		error = dsl_prop_get_integer(zv->zv_name,
2152		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2153		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2154		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
2155		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2156		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
2157		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2158		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
2159		if (version >= SPA_VERSION_DEDUP) {
2160			error = error ? error :
2161			    dsl_prop_get_integer(zv->zv_name,
2162			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2163		}
2164
2165		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2166		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2167		    &compress, tx);
2168		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2169		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
2170		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2171		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2172		    &refresrv, tx);
2173		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2174		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2175		    &vbs, tx);
2176		error = error ? error : dmu_object_set_blocksize(
2177		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
2178		if (version >= SPA_VERSION_DEDUP) {
2179			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2180			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2181			    &dedup, tx);
2182		}
2183		if (error == 0)
2184			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
2185	}
2186	dmu_tx_commit(tx);
2187
2188	/*
2189	 * We only need to update the zvol's properties if we are initializing
2190	 * the dump area for the first time.
2191	 */
2192	if (!resize) {
2193		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2194		VERIFY(nvlist_add_uint64(nv,
2195		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2196		VERIFY(nvlist_add_uint64(nv,
2197		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2198		    ZIO_COMPRESS_OFF) == 0);
2199		VERIFY(nvlist_add_uint64(nv,
2200		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2201		    checksum) == 0);
2202		if (version >= SPA_VERSION_DEDUP) {
2203			VERIFY(nvlist_add_uint64(nv,
2204			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2205			    ZIO_CHECKSUM_OFF) == 0);
2206		}
2207
2208		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2209		    nv, NULL);
2210		nvlist_free(nv);
2211
2212		if (error)
2213			return (error);
2214	}
2215
2216	/* Allocate the space for the dump */
2217	error = zvol_prealloc(zv);
2218	return (error);
2219}
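
/*
 * A minimal sketch of the `error = error ? error : ...' idiom used
 * above (hypothetical helper, not part of the driver): each step runs
 * only if every earlier step succeeded, so the first failure is
 * preserved without early returns, and the assigned transaction is
 * still committed on exactly one path.
 */
#if 0
static int
zvol_chain_example(objset_t *os, dmu_tx_t *tx, uint64_t *valp)
{
	int error = 0;

	error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, valp, tx);
	error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, valp, tx);
	return (error);		/* first failure wins */
}
#endif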
2220
2221static int
2222zvol_dumpify(zvol_state_t *zv)
2223{
2224	int error = 0;
2225	uint64_t dumpsize = 0;
2226	dmu_tx_t *tx;
2227	objset_t *os = zv->zv_objset;
2228
2229	if (zv->zv_flags & ZVOL_RDONLY)
2230		return (SET_ERROR(EROFS));
2231
2232	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2233	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2234		boolean_t resize = (dumpsize > 0);
2235
2236		if ((error = zvol_dump_init(zv, resize)) != 0) {
2237			(void) zvol_dump_fini(zv);
2238			return (error);
2239		}
2240	}
2241
2242	/*
2243	 * Build up our LBA mapping.
2244	 */
2245	error = zvol_get_lbas(zv);
2246	if (error) {
2247		(void) zvol_dump_fini(zv);
2248		return (error);
2249	}
2250
2251	tx = dmu_tx_create(os);
2252	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2253	error = dmu_tx_assign(tx, TXG_WAIT);
2254	if (error) {
2255		dmu_tx_abort(tx);
2256		(void) zvol_dump_fini(zv);
2257		return (error);
2258	}
2259
2260	zv->zv_flags |= ZVOL_DUMPIFIED;
2261	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2262	    &zv->zv_volsize, tx);
2263	dmu_tx_commit(tx);
2264
2265	if (error) {
2266		(void) zvol_dump_fini(zv);
2267		return (error);
2268	}
2269
2270	txg_wait_synced(dmu_objset_pool(os), 0);
2271	return (0);
2272}
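
/*
 * The check at the top of zvol_dumpify() encodes a small decision
 * table keyed on the ZVOL_DUMPSIZE ZAP entry, which records the volume
 * size at the time of the last dumpification.  A sketch of that logic
 * with a hypothetical helper:
 */
#if 0
typedef enum { DUMPIFY_NONE, DUMPIFY_INIT, DUMPIFY_RESIZE } dumpify_action_t;

static dumpify_action_t
zvol_dumpify_action(objset_t *os, uint64_t volsize)
{
	uint64_t dumpsize = 0;

	if (zap_lookup(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &dumpsize) != 0)
		return (DUMPIFY_INIT);		/* never dumpified */
	if (dumpsize != volsize)
		return (DUMPIFY_RESIZE);	/* volume grew or shrank */
	return (DUMPIFY_NONE);			/* layout already current */
}
#endif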
2273
2274static int
2275zvol_dump_fini(zvol_state_t *zv)
2276{
2277	dmu_tx_t *tx;
2278	objset_t *os = zv->zv_objset;
2279	nvlist_t *nv;
2280	int error = 0;
2281	uint64_t checksum, compress, refresrv, vbs, dedup;
2282	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2283
2284	/*
2285	 * Attempt to restore the zvol back to its pre-dumpified state.
2286	 * This is a best-effort attempt as it's possible that not all
2287	 * of these properties were initialized during the dumpify process
2288	 * (e.g. an error during zvol_dump_init()).
2289	 */
2290
2291	tx = dmu_tx_create(os);
2292	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2293	error = dmu_tx_assign(tx, TXG_WAIT);
2294	if (error) {
2295		dmu_tx_abort(tx);
2296		return (error);
2297	}
2298	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2299	dmu_tx_commit(tx);
2300
2301	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2302	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2303	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2304	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2305	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2306	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2307	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2308	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2309
2310	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2311	(void) nvlist_add_uint64(nv,
2312	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2313	(void) nvlist_add_uint64(nv,
2314	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2315	(void) nvlist_add_uint64(nv,
2316	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2317	if (version >= SPA_VERSION_DEDUP &&
2318	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2319	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2320		(void) nvlist_add_uint64(nv,
2321		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2322	}
2323	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2324	    nv, NULL);
2325	nvlist_free(nv);
2326
2327	zvol_free_extents(zv);
2328	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2329	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2330	/* wait for dmu_free_long_range to actually free the blocks */
2331	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2332	tx = dmu_tx_create(os);
2333	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2334	error = dmu_tx_assign(tx, TXG_WAIT);
2335	if (error) {
2336		dmu_tx_abort(tx);
2337		return (error);
2338	}
2339	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2340		zv->zv_volblocksize = vbs;
2341	dmu_tx_commit(tx);
2342
2343	return (0);
2344}
2345#endif	/* sun */
2346
2347static void
2348zvol_geom_run(zvol_state_t *zv)
2349{
2350	struct g_provider *pp;
2351
2352	pp = zv->zv_provider;
2353	g_error_provider(pp, 0);
2354
2355	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2356	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2357}
2358
2359static void
2360zvol_geom_destroy(zvol_state_t *zv)
2361{
2362	struct g_provider *pp;
2363
2364	g_topology_assert();
2365
2366	mtx_lock(&zv->zv_queue_mtx);
2367	zv->zv_state = 1;
2368	wakeup_one(&zv->zv_queue);
2369	while (zv->zv_state != 2)
2370		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2371	mtx_destroy(&zv->zv_queue_mtx);
2372
2373	pp = zv->zv_provider;
2374	zv->zv_provider = NULL;
2375	pp->private = NULL;
2376	g_wither_geom(pp->geom, ENXIO);
2377}
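
/*
 * The teardown above is a two-phase handshake with zvol_geom_worker():
 * zv_state 0 means the worker is running, 1 asks it to exit, and 2
 * acknowledges that it has.  The destroying thread sets 1, wakes the
 * worker, and sleeps until the worker drains its queue, advances the
 * state to 2, and calls kthread_exit(); only then is it safe to
 * destroy the queue mutex.
 */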
2378
2379static int
2380zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2381{
2382	int count, error, flags;
2383
2384	g_topology_assert();
2385
2386	/*
2387	 * To keep things simple we expect either an open or a close, but
2388	 * not both at the same time.
2389	 */
2390	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2391	    (acr <= 0 && acw <= 0 && ace <= 0),
2392	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2393	    pp->name, acr, acw, ace));
2394
2395	if (pp->private == NULL) {
2396		if (acr <= 0 && acw <= 0 && ace <= 0)
2397			return (0);
2398		return (pp->error);
2399	}
2400
2401	/*
2402	 * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0,
2403	 * because GEOM already handles that and handles it a bit differently.
2404	 * GEOM allows for multiple read/exclusive consumers and ZFS allows
2405	 * only one exclusive consumer, no matter if it is reader or writer.
2406	 * The GEOM semantics are preferable here, so we leave it to GEOM
2407	 * to decide what to do.
2408	 */
2409
2410	count = acr + acw + ace;
2411	if (count == 0)
2412		return (0);
2413
2414	flags = 0;
2415	if (acr != 0 || ace != 0)
2416		flags |= FREAD;
2417	if (acw != 0)
2418		flags |= FWRITE;
2419
2420	g_topology_unlock();
2421	if (count > 0)
2422		error = zvol_open(pp, flags, count);
2423	else
2424		error = zvol_close(pp, flags, -count);
2425	g_topology_lock();
2426	return (error);
2427}
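
/*
 * A condensed sketch of the delta-to-flags mapping above (hypothetical
 * helper, not part of the driver).  GEOM passes reference-count deltas,
 * so acr + acw + ace is positive for an open and negative for a close;
 * exclusive consumers are treated as readers for the purpose of FREAD:
 */
#if 0
static int
zvol_access_flags(int acr, int acw, int ace)
{
	int flags = 0;

	if (acr != 0 || ace != 0)
		flags |= FREAD;
	if (acw != 0)
		flags |= FWRITE;
	return (flags);
}
#endif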
2428
2429static void
2430zvol_geom_start(struct bio *bp)
2431{
2432	zvol_state_t *zv;
2433	boolean_t first;
2434
2435	zv = bp->bio_to->private;
2436	ASSERT(zv != NULL);
2437	switch (bp->bio_cmd) {
2438	case BIO_FLUSH:
2439		if (!THREAD_CAN_SLEEP())
2440			goto enqueue;
2441		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2442		g_io_deliver(bp, 0);
2443		break;
2444	case BIO_READ:
2445	case BIO_WRITE:
2446	case BIO_DELETE:
2447		if (!THREAD_CAN_SLEEP())
2448			goto enqueue;
2449		zvol_strategy(bp);
2450		break;
2451	case BIO_GETATTR:
2452		if (g_handleattr_int(bp, "GEOM::candelete", 1))
2453			return;
2454		/* FALLTHROUGH */
2455	default:
2456		g_io_deliver(bp, EOPNOTSUPP);
2457		break;
2458	}
2459	return;
2460
2461enqueue:
2462	mtx_lock(&zv->zv_queue_mtx);
2463	first = (bioq_first(&zv->zv_queue) == NULL);
2464	bioq_insert_tail(&zv->zv_queue, bp);
2465	mtx_unlock(&zv->zv_queue_mtx);
2466	if (first)
2467		wakeup_one(&zv->zv_queue);
2468}
2469
2470static void
2471zvol_geom_worker(void *arg)
2472{
2473	zvol_state_t *zv;
2474	struct bio *bp;
2475
2476	thread_lock(curthread);
2477	sched_prio(curthread, PRIBIO);
2478	thread_unlock(curthread);
2479
2480	zv = arg;
2481	for (;;) {
2482		mtx_lock(&zv->zv_queue_mtx);
2483		bp = bioq_takefirst(&zv->zv_queue);
2484		if (bp == NULL) {
2485			if (zv->zv_state == 1) {
2486				zv->zv_state = 2;
2487				wakeup(&zv->zv_state);
2488				mtx_unlock(&zv->zv_queue_mtx);
2489				kthread_exit();
2490			}
2491			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2492			    "zvol:io", 0);
2493			continue;
2494		}
2495		mtx_unlock(&zv->zv_queue_mtx);
2496		switch (bp->bio_cmd) {
2497		case BIO_FLUSH:
2498			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2499			g_io_deliver(bp, 0);
2500			break;
2501		case BIO_READ:
2502		case BIO_WRITE:
		case BIO_DELETE:
2503			zvol_strategy(bp);
2504			break;
2505		}
2506	}
2507}
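
/*
 * zvol_geom_start() and zvol_geom_worker() form a single-mutex
 * producer/consumer pair: the producer issues wakeup_one() only on the
 * empty-to-non-empty transition, since the worker can only be asleep
 * when the queue is empty, and the worker sleeps with PDROP so the
 * queue mutex is released atomically with the msleep().
 */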
2508
2509extern boolean_t dataset_name_hidden(const char *name);
2510
2511static int
2512zvol_create_snapshots(objset_t *os, const char *name)
2513{
2514	uint64_t cookie, obj;
2515	char *sname;
2516	int error, len;
2517
2518	cookie = obj = 0;
2519	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2520
2521#if 0
2522	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2523	    DS_FIND_SNAPSHOTS);
2524#endif
2525
2526	for (;;) {
2527		len = snprintf(sname, MAXPATHLEN, "%s@", name);
2528		if (len >= MAXPATHLEN) {
2529			dmu_objset_rele(os, FTAG);
2530			error = SET_ERROR(ENAMETOOLONG);
2531			break;
2532		}
2533
2534		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2535		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2536		    sname + len, &obj, &cookie, NULL);
2537		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2538		if (error != 0) {
2539			if (error == ENOENT)
2540				error = 0;
2541			break;
2542		}
2543
2544		if ((error = zvol_create_minor(sname)) != 0) {
2545			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2546			    sname, error);
2547			break;
2548		}
2549	}
2550
2551	kmem_free(sname, MAXPATHLEN);
2552	return (error);
2553}
2554
2555int
2556zvol_create_minors(const char *name)
2557{
2558	uint64_t cookie;
2559	objset_t *os;
2560	char *osname, *p;
2561	int error, len;
2562
2563	if (dataset_name_hidden(name))
2564		return (0);
2565
2566	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2567		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2568		    name, error);
2569		return (error);
2570	}
2571	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2572		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
2573		dsl_pool_rele(dmu_objset_pool(os), FTAG);
2574		if ((error = zvol_create_minor(name)) == 0)
2575			error = zvol_create_snapshots(os, name);
2576		else {
2577			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2578			    name, error);
2579		}
2580		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
2581		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
2582		return (error);
2583	}
2584	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2585		dmu_objset_rele(os, FTAG);
2586		return (0);
2587	}
2588
2589	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2590	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2591		dmu_objset_rele(os, FTAG);
2592		kmem_free(osname, MAXPATHLEN);
2593		return (SET_ERROR(ENAMETOOLONG));
2594	}
2595	p = osname + strlen(osname);
2596	len = MAXPATHLEN - (p - osname);
2597
2598#if 0
2599	/* Prefetch the datasets. */
2600	cookie = 0;
2601	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2602		if (!dataset_name_hidden(osname))
2603			(void) dmu_objset_prefetch(osname, NULL);
2604	}
2605#endif
2606
2607	cookie = 0;
2608	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2609	    &cookie) == 0) {
2610		dmu_objset_rele(os, FTAG);
2611		(void) zvol_create_minors(osname);
2612		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2613			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2614			    name, error);
2615			return (error);
2616		}
2617	}
2618
2619	dmu_objset_rele(os, FTAG);
2620	kmem_free(osname, MAXPATHLEN);
2621	return (0);
2622}
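
/*
 * Note the hold discipline in the loop above: the hold on `os' is
 * dropped before recursing into each child and re-acquired afterwards,
 * so the recursion never nests dataset holds (zvol_create_minors()
 * takes its own).  The directory cursor is a plain integer offset into
 * the directory ZAP, so it remains valid across the rele/hold cycle.
 */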
2623
2624static void
2625zvol_rename_minor(zvol_state_t *zv, const char *newname)
2626{
2627	struct g_geom *gp;
2628	struct g_provider *pp;
2629	struct cdev *dev;
2630
2631	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2632
2633	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
2634		g_topology_lock();
2635		pp = zv->zv_provider;
2636		ASSERT(pp != NULL);
2637		gp = pp->geom;
2638		ASSERT(gp != NULL);
2639
2640		zv->zv_provider = NULL;
2641		g_wither_provider(pp, ENXIO);
2642
2643		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2644		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
2645		pp->sectorsize = DEV_BSIZE;
2646		pp->mediasize = zv->zv_volsize;
2647		pp->private = zv;
2648		zv->zv_provider = pp;
2649		g_error_provider(pp, 0);
2650		g_topology_unlock();
2651	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
2652		dev = zv->zv_dev;
2653		ASSERT(dev != NULL);
2654		zv->zv_dev = NULL;
2655		destroy_dev(dev);
2656
2657		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
2658		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
2659		    0640, "%s/%s", ZVOL_DRIVER, newname) == 0) {
2660			zv->zv_dev = dev;
2661			dev->si_iosize_max = MAXPHYS;
2662			dev->si_drv2 = zv;
2663		}
2664	}
2665	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2666}
2667
2668void
2669zvol_rename_minors(const char *oldname, const char *newname)
2670{
2671	char name[MAXPATHLEN];
2672	struct g_provider *pp;
2673	struct g_geom *gp;
2674	size_t oldnamelen, newnamelen;
2675	zvol_state_t *zv;
2676	char *namebuf;
2677
2678	oldnamelen = strlen(oldname);
2679	newnamelen = strlen(newname);
2680
2681	DROP_GIANT();
2682	mutex_enter(&spa_namespace_lock);
2683
2684	LIST_FOREACH(zv, &all_zvols, zv_links) {
2685		if (strcmp(zv->zv_name, oldname) == 0) {
2686			zvol_rename_minor(zv, newname);
2687		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
2688		    (zv->zv_name[oldnamelen] == '/' ||
2689		     zv->zv_name[oldnamelen] == '@')) {
2690			snprintf(name, sizeof(name), "%s%c%s", newname,
2691			    zv->zv_name[oldnamelen],
2692			    zv->zv_name + oldnamelen + 1);
2693			zvol_rename_minor(zv, name);
2694		}
2695	}
2696
2697	mutex_exit(&spa_namespace_lock);
2698	PICKUP_GIANT();
2699}
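
/*
 * For example, renaming "tank/vol" to "tank/newvol" rewrites every
 * minor that matches the old prefix:
 *
 *	tank/vol	-> tank/newvol
 *	tank/vol@snap	-> tank/newvol@snap
 *	tank/vol/child	-> tank/newvol/child
 *
 * The '/' or '@' check after the prefix keeps an unrelated sibling
 * such as "tank/vol2" from being renamed along with "tank/vol".
 */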
2700
2701static int
2702zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2703{
2704	zvol_state_t *zv;
2705	int err = 0;
2706
2707	mutex_enter(&spa_namespace_lock);
2708	zv = dev->si_drv2;
2709	if (zv == NULL) {
2710		mutex_exit(&spa_namespace_lock);
2711		return (ENXIO);		/* zvol_create_minor() not done yet */
2712	}
2713
2714	if (zv->zv_total_opens == 0)
2715		err = zvol_first_open(zv);
2716	if (err) {
2717		mutex_exit(&spa_namespace_lock);
2718		return (err);
2719	}
2720	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
2721		err = SET_ERROR(EROFS);
2722		goto out;
2723	}
2724	if (zv->zv_flags & ZVOL_EXCL) {
2725		err = SET_ERROR(EBUSY);
2726		goto out;
2727	}
2728#ifdef FEXCL
2729	if (flags & FEXCL) {
2730		if (zv->zv_total_opens != 0) {
2731			err = SET_ERROR(EBUSY);
2732			goto out;
2733		}
2734		zv->zv_flags |= ZVOL_EXCL;
2735	}
2736#endif
2737
2738	zv->zv_total_opens++;
2739	mutex_exit(&spa_namespace_lock);
2740	return (err);
2741out:
2742	if (zv->zv_total_opens == 0)
2743		zvol_last_close(zv);
2744	mutex_exit(&spa_namespace_lock);
2745	return (err);
2746}
2747
2748static int
2749zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2750{
2751	zvol_state_t *zv;
2752	int err = 0;
2753
2754	mutex_enter(&spa_namespace_lock);
2755	zv = dev->si_drv2;
2756	if (zv == NULL) {
2757		mutex_exit(&spa_namespace_lock);
2758		return (ENXIO);
2759	}
2760
2761	if (zv->zv_flags & ZVOL_EXCL) {
2762		ASSERT(zv->zv_total_opens == 1);
2763		zv->zv_flags &= ~ZVOL_EXCL;
2764	}
2765
2766	/*
2767	 * If the open count is zero, this is a spurious close.
2768	 * That indicates a bug in the kernel / DDI framework.
2769	 */
2770	ASSERT(zv->zv_total_opens != 0);
2771
2772	/*
2773	 * You may get multiple opens, but only one close.
2774	 */
2775	zv->zv_total_opens--;
2776
2777	if (zv->zv_total_opens == 0)
2778		zvol_last_close(zv);
2779
2780	mutex_exit(&spa_namespace_lock);
2781	return (0);
2782}
2783
2784static int
2785zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
2786{
2787	zvol_state_t *zv;
2788	rl_t *rl;
2789	off_t offset, length, chunk;
2790	int i, error;
2791	u_int u;
2792
2793	zv = dev->si_drv2;
2794
2795	error = 0;
2796	KASSERT(zv->zv_total_opens > 0,
2797	    ("Device with zero access count in zvol_d_ioctl"));
2798
2799	i = IOCPARM_LEN(cmd);
2800	switch (cmd) {
2801	case DIOCGSECTORSIZE:
2802		*(u_int *)data = DEV_BSIZE;
2803		break;
2804	case DIOCGMEDIASIZE:
2805		*(off_t *)data = zv->zv_volsize;
2806		break;
2807	case DIOCGFLUSH:
2808		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2809		break;
2810	case DIOCGDELETE:
2811		offset = ((off_t *)data)[0];
2812		length = ((off_t *)data)[1];
2813		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
2814		    offset < 0 || offset >= zv->zv_volsize ||
2815		    length <= 0) {
2816			printf("%s: offset=%jd length=%jd\n", __func__, offset,
2817			    length);
2818			error = EINVAL;
2819			break;
2820		}
2821
2822		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
2823		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
2824		error = dmu_tx_assign(tx, TXG_WAIT);
2825		if (error != 0) {
2826			dmu_tx_abort(tx);
2827		} else {
2828			zvol_log_truncate(zv, tx, offset, length, B_TRUE);
2829			dmu_tx_commit(tx);
2830			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2831			    offset, length);
2832		}
2833		zfs_range_unlock(rl);
2834		if (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
2835			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2836		break;
2837	case DIOCGSTRIPESIZE:
2838		*(off_t *)data = zv->zv_volblocksize;
2839		break;
2840	case DIOCGSTRIPEOFFSET:
2841		*(off_t *)data = 0;
2842		break;
2843	default:
2844		error = ENOIOCTL;
2845	}
2846
2847	return (error);
2848}
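
/*
 * An illustrative userland sketch of the cdev interface above (not
 * part of the driver; the device path is hypothetical and assumes a
 * volume exposed with volmode=dev).  DIOCGDELETE takes an off_t[2] of
 * {offset, length}, both multiples of DEV_BSIZE, matching the checks
 * in zvol_d_ioctl().
 */
#if 0
#include <sys/types.h>
#include <sys/disk.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	off_t mediasize, range[2];
	u_int sectorsize;
	int fd;

	fd = open("/dev/zvol/tank/vol", O_RDWR);
	if (fd == -1)
		return (1);
	if (ioctl(fd, DIOCGSECTORSIZE, &sectorsize) == 0 &&
	    ioctl(fd, DIOCGMEDIASIZE, &mediasize) == 0)
		printf("%u-byte sectors, %jd bytes\n", sectorsize,
		    (intmax_t)mediasize);
	range[0] = 0;			/* offset */
	range[1] = sectorsize;		/* length */
	(void) ioctl(fd, DIOCGDELETE, range);	/* unmap the first sector */
	close(fd);
	return (0);
}
#endif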
2849