zvol.c revision 320496
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 *
24 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
25 * All rights reserved.
26 *
27 * Portions Copyright 2010 Robert Milkowski
28 *
29 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
30 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
31 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
32 * Copyright (c) 2014 Integros [integros.com]
33 */
34
35/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
36
37/*
38 * ZFS volume emulation driver.
39 *
40 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
41 * Volumes are accessed through the symbolic links named:
42 *
43 * /dev/zvol/dsk/<pool_name>/<dataset_name>
44 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
45 *
46 * These links are created by the /dev filesystem (sdev_zvolops.c).
47 * Volumes are persistent through reboot.  No user command needs to be
48 * run before opening and using a device.
49 *
50 * FreeBSD notes.
51 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
52 * in the system.
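 * Both the GEOM provider and the cdev are created as ZVOL_DRIVER "/<pool>/<dataset>"
 * (see zvol_create_minor()), so a volume such as tank/vol typically shows up
 * as /dev/zvol/tank/vol.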
53 */
54
55#include <sys/types.h>
56#include <sys/param.h>
57#include <sys/kernel.h>
58#include <sys/errno.h>
59#include <sys/uio.h>
60#include <sys/bio.h>
61#include <sys/buf.h>
62#include <sys/kmem.h>
63#include <sys/conf.h>
64#include <sys/cmn_err.h>
65#include <sys/stat.h>
66#include <sys/zap.h>
67#include <sys/spa.h>
68#include <sys/spa_impl.h>
69#include <sys/zio.h>
70#include <sys/disk.h>
71#include <sys/dmu_traverse.h>
72#include <sys/dnode.h>
73#include <sys/dsl_dataset.h>
74#include <sys/dsl_prop.h>
75#include <sys/dkio.h>
76#include <sys/byteorder.h>
77#include <sys/sunddi.h>
78#include <sys/dirent.h>
79#include <sys/policy.h>
80#include <sys/queue.h>
81#include <sys/fs/zfs.h>
82#include <sys/zfs_ioctl.h>
83#include <sys/zil.h>
84#include <sys/refcount.h>
85#include <sys/zfs_znode.h>
86#include <sys/zfs_rlock.h>
87#include <sys/vdev_impl.h>
88#include <sys/vdev_raidz.h>
89#include <sys/zvol.h>
90#include <sys/zil_impl.h>
91#include <sys/dbuf.h>
92#include <sys/dmu_tx.h>
93#include <sys/zfeature.h>
94#include <sys/zio_checksum.h>
95#include <sys/filio.h>
96
97#include <geom/geom.h>
98
99#include "zfs_namecheck.h"
100
101#ifndef illumos
102struct g_class zfs_zvol_class = {
103	.name = "ZFS::ZVOL",
104	.version = G_VERSION,
105};
106
107DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
108
109#endif
110void *zfsdev_state;
111static char *zvol_tag = "zvol_tag";
112
113#define	ZVOL_DUMPSIZE		"dumpsize"
114
115/*
116 * This lock protects the zfsdev_state structure from being modified
117 * while it's being used, e.g. an open that comes in before a create
118 * finishes.  It also protects temporary opens of the dataset so that,
119 * e.g., an open doesn't get a spurious EBUSY.
120 */
121#ifdef illumos
122kmutex_t zfsdev_state_lock;
123#else
124/*
125 * In FreeBSD we've replaced the upstream zfsdev_state_lock with the
126 * spa_namespace_lock in the ZVOL code.
127 */
128#define zfsdev_state_lock spa_namespace_lock
129#endif
130static uint32_t zvol_minors;
131
132#ifndef illumos
133SYSCTL_DECL(_vfs_zfs);
134SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
135static int	volmode = ZFS_VOLMODE_GEOM;
136TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
137SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
138    "Expose as GEOM providers (1), device files (2) or neither");
139
140#endif
141typedef struct zvol_extent {
142	list_node_t	ze_node;
143	dva_t		ze_dva;		/* dva associated with this extent */
144	uint64_t	ze_nblks;	/* number of blocks in extent */
145} zvol_extent_t;
146
147/*
148 * The in-core state of each volume.
149 */
150typedef struct zvol_state {
151#ifndef illumos
152	LIST_ENTRY(zvol_state)	zv_links;
153#endif
154	char		zv_name[MAXPATHLEN]; /* pool/dd name */
155	uint64_t	zv_volsize;	/* amount of space we advertise */
156	uint64_t	zv_volblocksize; /* volume block size */
157#ifdef illumos
158	minor_t		zv_minor;	/* minor number */
159#else
160	struct cdev	*zv_dev;	/* non-GEOM device */
161	struct g_provider *zv_provider;	/* GEOM provider */
162#endif
163	uint8_t		zv_min_bs;	/* minimum addressable block shift */
164	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
165	objset_t	*zv_objset;	/* objset handle */
166#ifdef illumos
167	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
168#endif
169	uint32_t	zv_total_opens;	/* total open count */
170	uint32_t	zv_sync_cnt;	/* synchronous open count */
171	zilog_t		*zv_zilog;	/* ZIL handle */
172	list_t		zv_extents;	/* List of extents for dump */
173	znode_t		zv_znode;	/* for range locking */
174	dmu_buf_t	*zv_dbuf;	/* bonus handle */
175#ifndef illumos
176	int		zv_state;
177	int		zv_volmode;	/* Provide GEOM or cdev */
178	struct bio_queue_head zv_queue;
179	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
180#endif
181} zvol_state_t;
182
183#ifndef illumos
184static LIST_HEAD(, zvol_state) all_zvols;
185#endif
186/*
187 * zvol specific flags
188 */
189#define	ZVOL_RDONLY	0x1
190#define	ZVOL_DUMPIFIED	0x2
191#define	ZVOL_EXCL	0x4
192#define	ZVOL_WCE	0x8
193
194/*
195 * zvol maximum transfer in one DMU tx.
196 */
197int zvol_maxphys = DMU_MAX_ACCESS/2;
198
199/*
200 * Toggle unmap functionality.
201 */
202boolean_t zvol_unmap_enabled = B_TRUE;
203#ifndef illumos
204SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
205    &zvol_unmap_enabled, 0,
206    "Enable UNMAP functionality");
207
208static d_open_t		zvol_d_open;
209static d_close_t	zvol_d_close;
210static d_read_t		zvol_read;
211static d_write_t	zvol_write;
212static d_ioctl_t	zvol_d_ioctl;
213static d_strategy_t	zvol_strategy;
214
215static struct cdevsw zvol_cdevsw = {
216	.d_version =	D_VERSION,
217	.d_open =	zvol_d_open,
218	.d_close =	zvol_d_close,
219	.d_read =	zvol_read,
220	.d_write =	zvol_write,
221	.d_ioctl =	zvol_d_ioctl,
222	.d_strategy =	zvol_strategy,
223	.d_name =	"zvol",
224	.d_flags =	D_DISK | D_TRACKCLOSE,
225};
226
227static void zvol_geom_run(zvol_state_t *zv);
228static void zvol_geom_destroy(zvol_state_t *zv);
229static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
230static void zvol_geom_start(struct bio *bp);
231static void zvol_geom_worker(void *arg);
232static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
233    uint64_t len, boolean_t sync);
234#endif	/* !illumos */
235
236extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
237    nvlist_t *, nvlist_t *);
238static int zvol_remove_zv(zvol_state_t *);
239static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
240static int zvol_dumpify(zvol_state_t *zv);
241static int zvol_dump_fini(zvol_state_t *zv);
242static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
243
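/*
 * Record a new volume size and propagate it to the device layer: on illumos
 * this updates the "Size"/"Nblocks" device properties and invalidates the
 * cached specfs size, on FreeBSD it resizes the GEOM provider when one exists.
 */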
244static void
245zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
246{
247#ifdef illumos
248	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
249
250	zv->zv_volsize = volsize;
251	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
252	    "Size", volsize) == DDI_SUCCESS);
253	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
254	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
255
256	/* Notify specfs to invalidate the cached size */
257	spec_size_invalidate(dev, VBLK);
258	spec_size_invalidate(dev, VCHR);
259#else	/* !illumos */
260	zv->zv_volsize = volsize;
261	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
262		struct g_provider *pp;
263
264		pp = zv->zv_provider;
265		if (pp == NULL)
266			return;
267		g_topology_lock();
268		g_resize_provider(pp, zv->zv_volsize);
269		g_topology_unlock();
270	}
271#endif	/* illumos */
272}
273
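/*
 * Validate a requested volume size: it must be a non-zero multiple of the
 * volume block size (and addressable on 32-bit kernels).
 */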
274int
275zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
276{
277	if (volsize == 0)
278		return (SET_ERROR(EINVAL));
279
280	if (volsize % blocksize != 0)
281		return (SET_ERROR(EINVAL));
282
283#ifdef _ILP32
284	if (volsize - 1 > SPEC_MAXOFFSET_T)
285		return (SET_ERROR(EOVERFLOW));
286#endif
287	return (0);
288}
289
290int
291zvol_check_volblocksize(uint64_t volblocksize)
292{
293	if (volblocksize < SPA_MINBLOCKSIZE ||
294	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
295	    !ISP2(volblocksize))
296		return (SET_ERROR(EDOM));
297
298	return (0);
299}
300
301int
302zvol_get_stats(objset_t *os, nvlist_t *nv)
303{
304	int error;
305	dmu_object_info_t doi;
306	uint64_t val;
307
308	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
309	if (error)
310		return (error);
311
312	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
313
314	error = dmu_object_info(os, ZVOL_OBJ, &doi);
315
316	if (error == 0) {
317		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
318		    doi.doi_data_block_size);
319	}
320
321	return (error);
322}
323
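/*
 * Find the in-core state for the named volume, or return NULL if no minor
 * exists for it.  The caller must hold zfsdev_state_lock.
 */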
324static zvol_state_t *
325zvol_minor_lookup(const char *name)
326{
327#ifdef illumos
328	minor_t minor;
329#endif
330	zvol_state_t *zv;
331
332	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
333
334#ifdef illumos
335	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
336		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
337		if (zv == NULL)
338			continue;
339#else
340	LIST_FOREACH(zv, &all_zvols, zv_links) {
341#endif
342		if (strcmp(zv->zv_name, name) == 0)
343			return (zv);
344	}
345
346	return (NULL);
347}
348
349/* extent mapping arg */
350struct maparg {
351	zvol_state_t	*ma_zv;
352	uint64_t	ma_blks;
353};
354
355/*ARGSUSED*/
356static int
357zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
358    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
359{
360	struct maparg *ma = arg;
361	zvol_extent_t *ze;
362	int bs = ma->ma_zv->zv_volblocksize;
363
364	if (bp == NULL || BP_IS_HOLE(bp) ||
365	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
366		return (0);
367
368	VERIFY(!BP_IS_EMBEDDED(bp));
369
370	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
371	ma->ma_blks++;
372
373	/* Abort immediately if we have encountered gang blocks */
374	if (BP_IS_GANG(bp))
375		return (SET_ERROR(EFRAGS));
376
377	/*
378	 * See if the block is at the end of the previous extent.
379	 */
380	ze = list_tail(&ma->ma_zv->zv_extents);
381	if (ze &&
382	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
383	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
384	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
385		ze->ze_nblks++;
386		return (0);
387	}
388
389	dprintf_bp(bp, "%s", "next blkptr:");
390
391	/* start a new extent */
392	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
393	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
394	ze->ze_nblks = 1;
395	list_insert_tail(&ma->ma_zv->zv_extents, ze);
396	return (0);
397}
398
399static void
400zvol_free_extents(zvol_state_t *zv)
401{
402	zvol_extent_t *ze;
403
404	while ((ze = list_head(&zv->zv_extents)) != NULL) {
405		list_remove(&zv->zv_extents, ze);
406		kmem_free(ze, sizeof (zvol_extent_t));
407	}
408}
409
410static int
411zvol_get_lbas(zvol_state_t *zv)
412{
413	objset_t *os = zv->zv_objset;
414	struct maparg	ma;
415	int		err;
416
417	ma.ma_zv = zv;
418	ma.ma_blks = 0;
419	zvol_free_extents(zv);
420
421	/* commit any in-flight changes before traversing the dataset */
422	txg_wait_synced(dmu_objset_pool(os), 0);
423	err = traverse_dataset(dmu_objset_ds(os), 0,
424	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
425	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
426		zvol_free_extents(zv);
427		return (err ? err : EIO);
428	}
429
430	return (0);
431}
432
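/*
 * ZVOL dataset-creation callback: claim the data object and the property
 * ZAP object and record the initial volume size.
 */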
433/* ARGSUSED */
434void
435zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
436{
437	zfs_creat_t *zct = arg;
438	nvlist_t *nvprops = zct->zct_props;
439	int error;
440	uint64_t volblocksize, volsize;
441
442	VERIFY(nvlist_lookup_uint64(nvprops,
443	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
444	if (nvlist_lookup_uint64(nvprops,
445	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
446		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
447
448	/*
449	 * These properties must be removed from the list so the generic
450	 * property setting step won't apply to them.
451	 */
452	VERIFY(nvlist_remove_all(nvprops,
453	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
454	(void) nvlist_remove_all(nvprops,
455	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
456
457	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
458	    DMU_OT_NONE, 0, tx);
459	ASSERT(error == 0);
460
461	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
462	    DMU_OT_NONE, 0, tx);
463	ASSERT(error == 0);
464
465	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
466	ASSERT(error == 0);
467}
468
469/*
470 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
471 * implement DKIOCFREE/free-long-range.
472 */
473static int
474zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
475{
476	uint64_t offset, length;
477
478	if (byteswap)
479		byteswap_uint64_array(lr, sizeof (*lr));
480
481	offset = lr->lr_offset;
482	length = lr->lr_length;
483
484	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
485}
486
487/*
488 * Replay a TX_WRITE ZIL transaction that didn't get committed
489 * after a system failure.
490 */
491static int
492zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
493{
494	objset_t *os = zv->zv_objset;
495	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
496	uint64_t offset, length;
497	dmu_tx_t *tx;
498	int error;
499
500	if (byteswap)
501		byteswap_uint64_array(lr, sizeof (*lr));
502
503	offset = lr->lr_offset;
504	length = lr->lr_length;
505
506	/* If it's a dmu_sync() block, write the whole block */
507	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
508		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
509		if (length < blocksize) {
510			offset -= offset % blocksize;
511			length = blocksize;
512		}
513	}
514
515	tx = dmu_tx_create(os);
516	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
517	error = dmu_tx_assign(tx, TXG_WAIT);
518	if (error) {
519		dmu_tx_abort(tx);
520	} else {
521		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
522		dmu_tx_commit(tx);
523	}
524
525	return (error);
526}
527
528/* ARGSUSED */
529static int
530zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
531{
532	return (SET_ERROR(ENOTSUP));
533}
534
535/*
536 * Callback vectors for replaying records.
537 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
538 */
539zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
540	zvol_replay_err,	/* 0 no such transaction type */
541	zvol_replay_err,	/* TX_CREATE */
542	zvol_replay_err,	/* TX_MKDIR */
543	zvol_replay_err,	/* TX_MKXATTR */
544	zvol_replay_err,	/* TX_SYMLINK */
545	zvol_replay_err,	/* TX_REMOVE */
546	zvol_replay_err,	/* TX_RMDIR */
547	zvol_replay_err,	/* TX_LINK */
548	zvol_replay_err,	/* TX_RENAME */
549	zvol_replay_write,	/* TX_WRITE */
550	zvol_replay_truncate,	/* TX_TRUNCATE */
551	zvol_replay_err,	/* TX_SETATTR */
552	zvol_replay_err,	/* TX_ACL */
553	zvol_replay_err,	/* TX_CREATE_ACL */
554	zvol_replay_err,	/* TX_CREATE_ATTR */
555	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
556	zvol_replay_err,	/* TX_MKDIR_ACL */
557	zvol_replay_err,	/* TX_MKDIR_ATTR */
558	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
559	zvol_replay_err,	/* TX_WRITE2 */
560};
561
562#ifdef illumos
563int
564zvol_name2minor(const char *name, minor_t *minor)
565{
566	zvol_state_t *zv;
567
568	mutex_enter(&zfsdev_state_lock);
569	zv = zvol_minor_lookup(name);
570	if (minor && zv)
571		*minor = zv->zv_minor;
572	mutex_exit(&zfsdev_state_lock);
573	return (zv ? 0 : -1);
574}
575#endif	/* illumos */
576
577/*
578 * Create a minor node (plus a whole lot more) for the specified volume.
579 */
580int
581zvol_create_minor(const char *name)
582{
583	zfs_soft_state_t *zs;
584	zvol_state_t *zv;
585	objset_t *os;
586	dmu_object_info_t doi;
587#ifdef illumos
588	minor_t minor = 0;
589	char chrbuf[30], blkbuf[30];
590#else
591	struct g_provider *pp;
592	struct g_geom *gp;
593	uint64_t volsize, mode;
594#endif
595	int error;
596
597#ifndef illumos
598	ZFS_LOG(1, "Creating ZVOL %s...", name);
599#endif
600
601	mutex_enter(&zfsdev_state_lock);
602
603	if (zvol_minor_lookup(name) != NULL) {
604		mutex_exit(&zfsdev_state_lock);
605		return (SET_ERROR(EEXIST));
606	}
607
608	/* lie and say we're read-only */
609	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
610
611	if (error) {
612		mutex_exit(&zfsdev_state_lock);
613		return (error);
614	}
615
616#ifdef illumos
617	if ((minor = zfsdev_minor_alloc()) == 0) {
618		dmu_objset_disown(os, FTAG);
619		mutex_exit(&zfsdev_state_lock);
620		return (SET_ERROR(ENXIO));
621	}
622
623	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
624		dmu_objset_disown(os, FTAG);
625		mutex_exit(&zfsdev_state_lock);
626		return (SET_ERROR(EAGAIN));
627	}
628	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
629	    (char *)name);
630
631	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
632
633	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
634	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
635		ddi_soft_state_free(zfsdev_state, minor);
636		dmu_objset_disown(os, FTAG);
637		mutex_exit(&zfsdev_state_lock);
638		return (SET_ERROR(EAGAIN));
639	}
640
641	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
642
643	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
644	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
645		ddi_remove_minor_node(zfs_dip, chrbuf);
646		ddi_soft_state_free(zfsdev_state, minor);
647		dmu_objset_disown(os, FTAG);
648		mutex_exit(&zfsdev_state_lock);
649		return (SET_ERROR(EAGAIN));
650	}
651
652	zs = ddi_get_soft_state(zfsdev_state, minor);
653	zs->zss_type = ZSST_ZVOL;
654	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
655#else	/* !illumos */
656
657	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
658	zv->zv_state = 0;
659	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
660	if (error) {
661		kmem_free(zv, sizeof(*zv));
662		dmu_objset_disown(os, FTAG);
663		mutex_exit(&zfsdev_state_lock);
664		return (error);
665	}
666	error = dsl_prop_get_integer(name,
667	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
668	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
669		mode = volmode;
670
671	DROP_GIANT();
672	zv->zv_volsize = volsize;
673	zv->zv_volmode = mode;
674	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
675		g_topology_lock();
676		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
677		gp->start = zvol_geom_start;
678		gp->access = zvol_geom_access;
679		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
680		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
681		pp->sectorsize = DEV_BSIZE;
682		pp->mediasize = zv->zv_volsize;
683		pp->private = zv;
684
685		zv->zv_provider = pp;
686		bioq_init(&zv->zv_queue);
687		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
688	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
689		struct make_dev_args args;
690
691		make_dev_args_init(&args);
692		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
693		args.mda_devsw = &zvol_cdevsw;
694		args.mda_cr = NULL;
695		args.mda_uid = UID_ROOT;
696		args.mda_gid = GID_OPERATOR;
697		args.mda_mode = 0640;
698		args.mda_si_drv2 = zv;
699		error = make_dev_s(&args, &zv->zv_dev,
700		    "%s/%s", ZVOL_DRIVER, name);
701		if (error != 0) {
702			kmem_free(zv, sizeof(*zv));
703			dmu_objset_disown(os, FTAG);
704			mutex_exit(&zfsdev_state_lock);
705			return (error);
706		}
707		zv->zv_dev->si_iosize_max = MAXPHYS;
708	}
709	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
710#endif	/* illumos */
711
712	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
713	zv->zv_min_bs = DEV_BSHIFT;
714#ifdef illumos
715	zv->zv_minor = minor;
716#endif
717	zv->zv_objset = os;
718	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
719		zv->zv_flags |= ZVOL_RDONLY;
720	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
721	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
722	    sizeof (rl_t), offsetof(rl_t, r_node));
723	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
724	    offsetof(zvol_extent_t, ze_node));
725	/* get and cache the blocksize */
726	error = dmu_object_info(os, ZVOL_OBJ, &doi);
727	ASSERT(error == 0);
728	zv->zv_volblocksize = doi.doi_data_block_size;
729
730	if (spa_writeable(dmu_objset_spa(os))) {
731		if (zil_replay_disable)
732			zil_destroy(dmu_objset_zil(os), B_FALSE);
733		else
734			zil_replay(os, zv, zvol_replay_vector);
735	}
736	dmu_objset_disown(os, FTAG);
737	zv->zv_objset = NULL;
738
739	zvol_minors++;
740
741	mutex_exit(&zfsdev_state_lock);
742#ifndef illumos
743	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
744		zvol_geom_run(zv);
745		g_topology_unlock();
746	}
747	PICKUP_GIANT();
748
749	ZFS_LOG(1, "ZVOL %s created.", name);
750#endif
751
752	return (0);
753}
754
755/*
756 * Remove minor node for the specified volume.
757 */
758static int
759zvol_remove_zv(zvol_state_t *zv)
760{
761#ifdef illumos
762	char nmbuf[20];
763	minor_t minor = zv->zv_minor;
764#endif
765
766	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
767	if (zv->zv_total_opens != 0)
768		return (SET_ERROR(EBUSY));
769
770#ifdef illumos
771	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
772	ddi_remove_minor_node(zfs_dip, nmbuf);
773
774	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
775	ddi_remove_minor_node(zfs_dip, nmbuf);
776#else
777	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
778
779	LIST_REMOVE(zv, zv_links);
780	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
781		g_topology_lock();
782		zvol_geom_destroy(zv);
783		g_topology_unlock();
784	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
785		if (zv->zv_dev != NULL)
786			destroy_dev(zv->zv_dev);
787	}
788#endif
789
790	avl_destroy(&zv->zv_znode.z_range_avl);
791	mutex_destroy(&zv->zv_znode.z_range_lock);
792
793	kmem_free(zv, sizeof (zvol_state_t));
794#ifdef illumos
795	ddi_soft_state_free(zfsdev_state, minor);
796#endif
797	zvol_minors--;
798	return (0);
799}
800
801int
802zvol_remove_minor(const char *name)
803{
804	zvol_state_t *zv;
805	int rc;
806
807	mutex_enter(&zfsdev_state_lock);
808	if ((zv = zvol_minor_lookup(name)) == NULL) {
809		mutex_exit(&zfsdev_state_lock);
810		return (SET_ERROR(ENXIO));
811	}
812	rc = zvol_remove_zv(zv);
813	mutex_exit(&zfsdev_state_lock);
814	return (rc);
815}
816
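/*
 * First open of a volume: own the objset, read the cached size from the
 * ZAP, hold the bonus dbuf, open the ZIL and recompute the read-only flag.
 */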
817int
818zvol_first_open(zvol_state_t *zv)
819{
820	objset_t *os;
821	uint64_t volsize;
822	int error;
823	uint64_t readonly;
824
825	/* lie and say we're read-only */
826	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
827	    zvol_tag, &os);
828	if (error)
829		return (error);
830
831	zv->zv_objset = os;
832	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
833	if (error) {
834		ASSERT(error == 0);
835		dmu_objset_disown(os, zvol_tag);
836		return (error);
837	}
838
839	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
840	if (error) {
841		dmu_objset_disown(os, zvol_tag);
842		return (error);
843	}
844
845	zvol_size_changed(zv, volsize);
846	zv->zv_zilog = zil_open(os, zvol_get_data);
847
848	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
849	    NULL) == 0);
850	if (readonly || dmu_objset_is_snapshot(os) ||
851	    !spa_writeable(dmu_objset_spa(os)))
852		zv->zv_flags |= ZVOL_RDONLY;
853	else
854		zv->zv_flags &= ~ZVOL_RDONLY;
855	return (error);
856}
857
858void
859zvol_last_close(zvol_state_t *zv)
860{
861	zil_close(zv->zv_zilog);
862	zv->zv_zilog = NULL;
863
864	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
865	zv->zv_dbuf = NULL;
866
867	/*
868	 * Evict cached data
869	 */
870	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
871	    !(zv->zv_flags & ZVOL_RDONLY))
872		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
873	dmu_objset_evict_dbufs(zv->zv_objset);
874
875	dmu_objset_disown(zv->zv_objset, zvol_tag);
876	zv->zv_objset = NULL;
877}
878
879#ifdef illumos
880int
881zvol_prealloc(zvol_state_t *zv)
882{
883	objset_t *os = zv->zv_objset;
884	dmu_tx_t *tx;
885	uint64_t refd, avail, usedobjs, availobjs;
886	uint64_t resid = zv->zv_volsize;
887	uint64_t off = 0;
888
889	/* Check the space usage before attempting to allocate the space */
890	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
891	if (avail < zv->zv_volsize)
892		return (SET_ERROR(ENOSPC));
893
894	/* Free old extents if they exist */
895	zvol_free_extents(zv);
896
897	while (resid != 0) {
898		int error;
899		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
900
901		tx = dmu_tx_create(os);
902		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
903		error = dmu_tx_assign(tx, TXG_WAIT);
904		if (error) {
905			dmu_tx_abort(tx);
906			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
907			return (error);
908		}
909		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
910		dmu_tx_commit(tx);
911		off += bytes;
912		resid -= bytes;
913	}
914	txg_wait_synced(dmu_objset_pool(os), 0);
915
916	return (0);
917}
918#endif	/* illumos */
919
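/*
 * Persist a new volume size in the ZAP and free any blocks beyond the new
 * end of the volume.  Called with zfsdev_state_lock held.
 */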
920static int
921zvol_update_volsize(objset_t *os, uint64_t volsize)
922{
923	dmu_tx_t *tx;
924	int error;
925
926	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
927
928	tx = dmu_tx_create(os);
929	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
930	dmu_tx_mark_netfree(tx);
931	error = dmu_tx_assign(tx, TXG_WAIT);
932	if (error) {
933		dmu_tx_abort(tx);
934		return (error);
935	}
936
937	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
938	    &volsize, tx);
939	dmu_tx_commit(tx);
940
941	if (error == 0)
942		error = dmu_free_long_range(os,
943		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
944	return (error);
945}
946
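/*
 * Remove the minors for all volumes under the given pool or dataset name.
 */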
947void
948zvol_remove_minors(const char *name)
949{
950#ifdef illumos
951	zvol_state_t *zv;
952	char *namebuf;
953	minor_t minor;
954
955	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
956	(void) strncpy(namebuf, name, strlen(name));
957	(void) strcat(namebuf, "/");
958	mutex_enter(&zfsdev_state_lock);
959	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
960
961		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
962		if (zv == NULL)
963			continue;
964		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
965			(void) zvol_remove_zv(zv);
966	}
967	kmem_free(namebuf, strlen(name) + 2);
968
969	mutex_exit(&zfsdev_state_lock);
970#else	/* !illumos */
971	zvol_state_t *zv, *tzv;
972	size_t namelen;
973
974	namelen = strlen(name);
975
976	DROP_GIANT();
977	mutex_enter(&zfsdev_state_lock);
978
979	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
980		if (strcmp(zv->zv_name, name) == 0 ||
981		    (strncmp(zv->zv_name, name, namelen) == 0 &&
982		    strlen(zv->zv_name) > namelen && (zv->zv_name[namelen] == '/' ||
983		    zv->zv_name[namelen] == '@'))) {
984			(void) zvol_remove_zv(zv);
985		}
986	}
987
988	mutex_exit(&zfsdev_state_lock);
989	PICKUP_GIANT();
990#endif	/* illumos */
991}
992
993static int
994zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
995{
996	uint64_t old_volsize = 0ULL;
997	int error = 0;
998
999	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1000
1001	/*
1002	 * Reinitialize the dump area to the new size. If we
1003	 * failed to resize the dump area then restore it back to
1004	 * its original size.  We must set the new volsize prior
1005	 * to calling dumpvp_resize() to ensure that the devices'
1006	 * size(9P) is not visible to the dump subsystem.
1007	 */
1008	old_volsize = zv->zv_volsize;
1009	zvol_size_changed(zv, volsize);
1010
1011#ifdef ZVOL_DUMP
1012	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1013		if ((error = zvol_dumpify(zv)) != 0 ||
1014		    (error = dumpvp_resize()) != 0) {
1015			int dumpify_error;
1016
1017			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
1018			zvol_size_changed(zv, old_volsize);
1019			dumpify_error = zvol_dumpify(zv);
1020			error = dumpify_error ? dumpify_error : error;
1021		}
1022	}
1023#endif	/* ZVOL_DUMP */
1024
1025#ifdef illumos
1026	/*
1027	 * Generate a LUN expansion event.
1028	 */
1029	if (error == 0) {
1030		sysevent_id_t eid;
1031		nvlist_t *attr;
1032		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1033
1034		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
1035		    zv->zv_minor);
1036
1037		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1038		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
1039
1040		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
1041		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
1042
1043		nvlist_free(attr);
1044		kmem_free(physpath, MAXPATHLEN);
1045	}
1046#endif	/* illumos */
1047	return (error);
1048}
1049
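/*
 * Set a new volume size: validate it against the block size, update the
 * on-disk ZAP entry and, if the volume has a live minor, propagate the
 * change to the device.
 */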
1050int
1051zvol_set_volsize(const char *name, uint64_t volsize)
1052{
1053	zvol_state_t *zv = NULL;
1054	objset_t *os;
1055	int error;
1056	dmu_object_info_t doi;
1057	uint64_t readonly;
1058	boolean_t owned = B_FALSE;
1059
1060	error = dsl_prop_get_integer(name,
1061	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
1062	if (error != 0)
1063		return (error);
1064	if (readonly)
1065		return (SET_ERROR(EROFS));
1066
1067	mutex_enter(&zfsdev_state_lock);
1068	zv = zvol_minor_lookup(name);
1069
1070	if (zv == NULL || zv->zv_objset == NULL) {
1071		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
1072		    FTAG, &os)) != 0) {
1073			mutex_exit(&zfsdev_state_lock);
1074			return (error);
1075		}
1076		owned = B_TRUE;
1077		if (zv != NULL)
1078			zv->zv_objset = os;
1079	} else {
1080		os = zv->zv_objset;
1081	}
1082
1083	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
1084	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
1085		goto out;
1086
1087	error = zvol_update_volsize(os, volsize);
1088
1089	if (error == 0 && zv != NULL)
1090		error = zvol_update_live_volsize(zv, volsize);
1091out:
1092	if (owned) {
1093		dmu_objset_disown(os, FTAG);
1094		if (zv != NULL)
1095			zv->zv_objset = NULL;
1096	}
1097	mutex_exit(&zfsdev_state_lock);
1098	return (error);
1099}
1100
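/*
 * Open the volume.  The first open takes ownership of the objset via
 * zvol_first_open(); subsequent opens only adjust the open counts.
 * Read-only and exclusive-open restrictions are enforced here.
 */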
1101/*ARGSUSED*/
1102#ifdef illumos
1103int
1104zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
1105#else
1106static int
1107zvol_open(struct g_provider *pp, int flag, int count)
1108#endif
1109{
1110	zvol_state_t *zv;
1111	int err = 0;
1112#ifdef illumos
1113
1114	mutex_enter(&zfsdev_state_lock);
1115
1116	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
1117	if (zv == NULL) {
1118		mutex_exit(&zfsdev_state_lock);
1119		return (SET_ERROR(ENXIO));
1120	}
1121
1122	if (zv->zv_total_opens == 0)
1123		err = zvol_first_open(zv);
1124	if (err) {
1125		mutex_exit(&zfsdev_state_lock);
1126		return (err);
1127	}
1128#else	/* !illumos */
1129	if (tsd_get(zfs_geom_probe_vdev_key) != NULL) {
1130		/*
1131		 * If zfs_geom_probe_vdev_key is set, that means that zfs is
1132		 * attempting to probe geom providers while looking for a
1133		 * replacement for a missing VDEV.  In this case, the
1134		 * spa_namespace_lock will not be held, but it is still illegal
1135		 * to use a zvol as a vdev.  Deadlocks can result if another
1136		 * thread holds spa_namespace_lock.
1137		 */
1138		return (EOPNOTSUPP);
1139	}
1140
1141	mutex_enter(&zfsdev_state_lock);
1142
1143	zv = pp->private;
1144	if (zv == NULL) {
1145		mutex_exit(&zfsdev_state_lock);
1146		return (SET_ERROR(ENXIO));
1147	}
1148
1149	if (zv->zv_total_opens == 0) {
1150		err = zvol_first_open(zv);
1151		if (err) {
1152			mutex_exit(&zfsdev_state_lock);
1153			return (err);
1154		}
1155		pp->mediasize = zv->zv_volsize;
1156		pp->stripeoffset = 0;
1157		pp->stripesize = zv->zv_volblocksize;
1158	}
1159#endif	/* illumos */
1160	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
1161		err = SET_ERROR(EROFS);
1162		goto out;
1163	}
1164	if (zv->zv_flags & ZVOL_EXCL) {
1165		err = SET_ERROR(EBUSY);
1166		goto out;
1167	}
1168#ifdef FEXCL
1169	if (flag & FEXCL) {
1170		if (zv->zv_total_opens != 0) {
1171			err = SET_ERROR(EBUSY);
1172			goto out;
1173		}
1174		zv->zv_flags |= ZVOL_EXCL;
1175	}
1176#endif
1177
1178#ifdef illumos
1179	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
1180		zv->zv_open_count[otyp]++;
1181		zv->zv_total_opens++;
1182	}
1183	mutex_exit(&zfsdev_state_lock);
1184#else
1185	zv->zv_total_opens += count;
1186	mutex_exit(&zfsdev_state_lock);
1187#endif
1188
1189	return (err);
1190out:
1191	if (zv->zv_total_opens == 0)
1192		zvol_last_close(zv);
1193#ifdef illumos
1194	mutex_exit(&zfsdev_state_lock);
1195#else
1196	mutex_exit(&zfsdev_state_lock);
1197#endif
1198	return (err);
1199}
1200
1201/*ARGSUSED*/
1202#ifdef illumos
1203int
1204zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
1205{
1206	minor_t minor = getminor(dev);
1207	zvol_state_t *zv;
1208	int error = 0;
1209
1210	mutex_enter(&zfsdev_state_lock);
1211
1212	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1213	if (zv == NULL) {
1214		mutex_exit(&zfsdev_state_lock);
1215#else	/* !illumos */
1216static int
1217zvol_close(struct g_provider *pp, int flag, int count)
1218{
1219	zvol_state_t *zv;
1220	int error = 0;
1221	boolean_t locked = B_FALSE;
1222
1223	/* See comment in zvol_open(). */
1224	if (!MUTEX_HELD(&zfsdev_state_lock)) {
1225		mutex_enter(&zfsdev_state_lock);
1226		locked = B_TRUE;
1227	}
1228
1229	zv = pp->private;
1230	if (zv == NULL) {
1231		if (locked)
1232			mutex_exit(&zfsdev_state_lock);
1233#endif	/* illumos */
1234		return (SET_ERROR(ENXIO));
1235	}
1236
1237	if (zv->zv_flags & ZVOL_EXCL) {
1238		ASSERT(zv->zv_total_opens == 1);
1239		zv->zv_flags &= ~ZVOL_EXCL;
1240	}
1241
1242	/*
1243	 * If the open count is zero, this is a spurious close.
1244	 * That indicates a bug in the kernel / DDI framework.
1245	 */
1246#ifdef illumos
1247	ASSERT(zv->zv_open_count[otyp] != 0);
1248#endif
1249	ASSERT(zv->zv_total_opens != 0);
1250
1251	/*
1252	 * You may get multiple opens, but only one close.
1253	 */
1254#ifdef illumos
1255	zv->zv_open_count[otyp]--;
1256	zv->zv_total_opens--;
1257#else
1258	zv->zv_total_opens -= count;
1259#endif
1260
1261	if (zv->zv_total_opens == 0)
1262		zvol_last_close(zv);
1263
1264#ifdef illumos
1265	mutex_exit(&zfsdev_state_lock);
1266#else
1267	if (locked)
1268		mutex_exit(&zfsdev_state_lock);
1269#endif
1270	return (error);
1271}
1272
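/*
 * dmu_sync()/zvol_get_data() completion: release the dbuf and range lock
 * and, on success, record the resulting block in the ZIL.
 */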
1273static void
1274zvol_get_done(zgd_t *zgd, int error)
1275{
1276	if (zgd->zgd_db)
1277		dmu_buf_rele(zgd->zgd_db, zgd);
1278
1279	zfs_range_unlock(zgd->zgd_rl);
1280
1281	if (error == 0 && zgd->zgd_bp)
1282		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1283
1284	kmem_free(zgd, sizeof (zgd_t));
1285}
1286
1287/*
1288 * Get data to generate a TX_WRITE intent log record.
1289 */
1290static int
1291zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1292{
1293	zvol_state_t *zv = arg;
1294	objset_t *os = zv->zv_objset;
1295	uint64_t object = ZVOL_OBJ;
1296	uint64_t offset = lr->lr_offset;
1297	uint64_t size = lr->lr_length;	/* length of user data */
1298	blkptr_t *bp = &lr->lr_blkptr;
1299	dmu_buf_t *db;
1300	zgd_t *zgd;
1301	int error;
1302
1303	ASSERT(zio != NULL);
1304	ASSERT(size != 0);
1305
1306	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1307	zgd->zgd_zilog = zv->zv_zilog;
1308	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
1309
1310	/*
1311	 * Write records come in two flavors: immediate and indirect.
1312	 * For small writes it's cheaper to store the data with the
1313	 * log record (immediate); for large writes it's cheaper to
1314	 * sync the data and get a pointer to it (indirect) so that
1315	 * we don't have to write the data twice.
1316	 */
1317	if (buf != NULL) {	/* immediate write */
1318		error = dmu_read(os, object, offset, size, buf,
1319		    DMU_READ_NO_PREFETCH);
1320	} else {
1321		size = zv->zv_volblocksize;
1322		offset = P2ALIGN(offset, size);
1323		error = dmu_buf_hold(os, object, offset, zgd, &db,
1324		    DMU_READ_NO_PREFETCH);
1325		if (error == 0) {
1326			blkptr_t *obp = dmu_buf_get_blkptr(db);
1327			if (obp) {
1328				ASSERT(BP_IS_HOLE(bp));
1329				*bp = *obp;
1330			}
1331
1332			zgd->zgd_db = db;
1333			zgd->zgd_bp = bp;
1334
1335			ASSERT(db->db_offset == offset);
1336			ASSERT(db->db_size == size);
1337
1338			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1339			    zvol_get_done, zgd);
1340
1341			if (error == 0)
1342				return (0);
1343		}
1344	}
1345
1346	zvol_get_done(zgd, error);
1347
1348	return (error);
1349}
1350
1351/*
1352 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1353 *
1354 * We store data in the log buffers if it's small enough.
1355 * Otherwise we will later flush the data out via dmu_sync().
1356 */
1357ssize_t zvol_immediate_write_sz = 32768;
1358
1359static void
1360zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1361    boolean_t sync)
1362{
1363	uint32_t blocksize = zv->zv_volblocksize;
1364	zilog_t *zilog = zv->zv_zilog;
1365	itx_wr_state_t write_state;
1366
1367	if (zil_replaying(zilog, tx))
1368		return;
1369
1370	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1371		write_state = WR_INDIRECT;
1372	else if (!spa_has_slogs(zilog->zl_spa) &&
1373	    resid >= blocksize && blocksize > zvol_immediate_write_sz)
1374		write_state = WR_INDIRECT;
1375	else if (sync)
1376		write_state = WR_COPIED;
1377	else
1378		write_state = WR_NEED_COPY;
1379
1380	while (resid) {
1381		itx_t *itx;
1382		lr_write_t *lr;
1383		itx_wr_state_t wr_state = write_state;
1384		ssize_t len = resid;
1385
1386		if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
1387			wr_state = WR_NEED_COPY;
1388		else if (wr_state == WR_INDIRECT)
1389			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
1390
1391		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1392		    (wr_state == WR_COPIED ? len : 0));
1393		lr = (lr_write_t *)&itx->itx_lr;
1394		if (wr_state == WR_COPIED && dmu_read(zv->zv_objset,
1395		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1396			zil_itx_destroy(itx);
1397			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1398			lr = (lr_write_t *)&itx->itx_lr;
1399			wr_state = WR_NEED_COPY;
1400		}
1401
1402		itx->itx_wr_state = wr_state;
1403		lr->lr_foid = ZVOL_OBJ;
1404		lr->lr_offset = off;
1405		lr->lr_length = len;
1406		lr->lr_blkoff = 0;
1407		BP_ZERO(&lr->lr_blkptr);
1408
1409		itx->itx_private = zv;
1410
1411		if (!sync && (zv->zv_sync_cnt == 0))
1412			itx->itx_sync = B_FALSE;
1413
1414		zil_itx_assign(zilog, itx, tx);
1415
1416		off += len;
1417		resid -= len;
1418	}
1419}
1420
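/*
 * Raw I/O to the vdevs backing a dumpified zvol, bypassing the DMU.  Used
 * by the crash-dump path, where no DMU transactions can be issued.
 */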
1421#ifdef illumos
1422static int
1423zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1424    uint64_t size, boolean_t doread, boolean_t isdump)
1425{
1426	vdev_disk_t *dvd;
1427	int c;
1428	int numerrors = 0;
1429
1430	if (vd->vdev_ops == &vdev_mirror_ops ||
1431	    vd->vdev_ops == &vdev_replacing_ops ||
1432	    vd->vdev_ops == &vdev_spare_ops) {
1433		for (c = 0; c < vd->vdev_children; c++) {
1434			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1435			    addr, offset, origoffset, size, doread, isdump);
1436			if (err != 0) {
1437				numerrors++;
1438			} else if (doread) {
1439				break;
1440			}
1441		}
1442	}
1443
1444	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1445		return (numerrors < vd->vdev_children ? 0 : EIO);
1446
1447	if (doread && !vdev_readable(vd))
1448		return (SET_ERROR(EIO));
1449	else if (!doread && !vdev_writeable(vd))
1450		return (SET_ERROR(EIO));
1451
1452	if (vd->vdev_ops == &vdev_raidz_ops) {
1453		return (vdev_raidz_physio(vd,
1454		    addr, size, offset, origoffset, doread, isdump));
1455	}
1456
1457	offset += VDEV_LABEL_START_SIZE;
1458
1459	if (ddi_in_panic() || isdump) {
1460		ASSERT(!doread);
1461		if (doread)
1462			return (SET_ERROR(EIO));
1463		dvd = vd->vdev_tsd;
1464		ASSERT3P(dvd, !=, NULL);
1465		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1466		    lbtodb(size)));
1467	} else {
1468		dvd = vd->vdev_tsd;
1469		ASSERT3P(dvd, !=, NULL);
1470		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1471		    offset, doread ? B_READ : B_WRITE));
1472	}
1473}
1474
1475static int
1476zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1477    boolean_t doread, boolean_t isdump)
1478{
1479	vdev_t *vd;
1480	int error;
1481	zvol_extent_t *ze;
1482	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1483
1484	/* Must be sector aligned, and not straddle a block boundary. */
1485	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1486	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1487		return (SET_ERROR(EINVAL));
1488	}
1489	ASSERT(size <= zv->zv_volblocksize);
1490
1491	/* Locate the extent this belongs to */
1492	ze = list_head(&zv->zv_extents);
1493	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1494		offset -= ze->ze_nblks * zv->zv_volblocksize;
1495		ze = list_next(&zv->zv_extents, ze);
1496	}
1497
1498	if (ze == NULL)
1499		return (SET_ERROR(EINVAL));
1500
1501	if (!ddi_in_panic())
1502		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1503
1504	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1505	offset += DVA_GET_OFFSET(&ze->ze_dva);
1506	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1507	    size, doread, isdump);
1508
1509	if (!ddi_in_panic())
1510		spa_config_exit(spa, SCL_STATE, FTAG);
1511
1512	return (error);
1513}
1514
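/*
 * Main block I/O path: translate a buf (illumos) or bio (FreeBSD) into DMU
 * reads, writes or frees, taking a range lock over the request and logging
 * writes and truncates to the ZIL (committed here if synchronous).
 */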
1515int
1516zvol_strategy(buf_t *bp)
1517{
1518	zfs_soft_state_t *zs = NULL;
1519#else	/* !illumos */
1520void
1521zvol_strategy(struct bio *bp)
1522{
1523#endif	/* illumos */
1524	zvol_state_t *zv;
1525	uint64_t off, volsize;
1526	size_t resid;
1527	char *addr;
1528	objset_t *os;
1529	rl_t *rl;
1530	int error = 0;
1531#ifdef illumos
1532	boolean_t doread = bp->b_flags & B_READ;
1533#else
1534	boolean_t doread = 0;
1535#endif
1536	boolean_t is_dumpified;
1537	boolean_t sync;
1538
1539#ifdef illumos
1540	if (getminor(bp->b_edev) == 0) {
1541		error = SET_ERROR(EINVAL);
1542	} else {
1543		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1544		if (zs == NULL)
1545			error = SET_ERROR(ENXIO);
1546		else if (zs->zss_type != ZSST_ZVOL)
1547			error = SET_ERROR(EINVAL);
1548	}
1549
1550	if (error) {
1551		bioerror(bp, error);
1552		biodone(bp);
1553		return (0);
1554	}
1555
1556	zv = zs->zss_data;
1557
1558	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1559		bioerror(bp, EROFS);
1560		biodone(bp);
1561		return (0);
1562	}
1563
1564	off = ldbtob(bp->b_blkno);
1565#else	/* !illumos */
1566	if (bp->bio_to)
1567		zv = bp->bio_to->private;
1568	else
1569		zv = bp->bio_dev->si_drv2;
1570
1571	if (zv == NULL) {
1572		error = SET_ERROR(ENXIO);
1573		goto out;
1574	}
1575
1576	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
1577		error = SET_ERROR(EROFS);
1578		goto out;
1579	}
1580
1581	switch (bp->bio_cmd) {
1582	case BIO_FLUSH:
1583		goto sync;
1584	case BIO_READ:
1585		doread = 1;
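		/* FALLTHROUGH */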
1586	case BIO_WRITE:
1587	case BIO_DELETE:
1588		break;
1589	default:
1590		error = EOPNOTSUPP;
1591		goto out;
1592	}
1593
1594	off = bp->bio_offset;
1595#endif	/* illumos */
1596	volsize = zv->zv_volsize;
1597
1598	os = zv->zv_objset;
1599	ASSERT(os != NULL);
1600
1601#ifdef illumos
1602	bp_mapin(bp);
1603	addr = bp->b_un.b_addr;
1604	resid = bp->b_bcount;
1605
1606	if (resid > 0 && (off < 0 || off >= volsize)) {
1607		bioerror(bp, EIO);
1608		biodone(bp);
1609		return (0);
1610	}
1611
1612	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1613	sync = ((!(bp->b_flags & B_ASYNC) &&
1614	    !(zv->zv_flags & ZVOL_WCE)) ||
1615	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1616	    !doread && !is_dumpified;
1617#else	/* !illumos */
1618	addr = bp->bio_data;
1619	resid = bp->bio_length;
1620
1621	if (resid > 0 && (off < 0 || off >= volsize)) {
1622		error = SET_ERROR(EIO);
1623		goto out;
1624	}
1625
1626	is_dumpified = B_FALSE;
1627	sync = !doread && !is_dumpified &&
1628	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
1629#endif	/* illumos */
1630
1631	/*
1632	 * There must be no buffer changes when doing a dmu_sync() because
1633	 * we can't change the data whilst calculating the checksum.
1634	 */
1635	rl = zfs_range_lock(&zv->zv_znode, off, resid,
1636	    doread ? RL_READER : RL_WRITER);
1637
1638#ifndef illumos
1639	if (bp->bio_cmd == BIO_DELETE) {
1640		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1641		error = dmu_tx_assign(tx, TXG_WAIT);
1642		if (error != 0) {
1643			dmu_tx_abort(tx);
1644		} else {
1645			zvol_log_truncate(zv, tx, off, resid, sync);
1646			dmu_tx_commit(tx);
1647			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1648			    off, resid);
1649			resid = 0;
1650		}
1651		goto unlock;
1652	}
1653#endif
1654	while (resid != 0 && off < volsize) {
1655		size_t size = MIN(resid, zvol_maxphys);
1656#ifdef illumos
1657		if (is_dumpified) {
1658			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1659			error = zvol_dumpio(zv, addr, off, size,
1660			    doread, B_FALSE);
1661		} else if (doread) {
1662#else
1663		if (doread) {
1664#endif
1665			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1666			    DMU_READ_PREFETCH);
1667		} else {
1668			dmu_tx_t *tx = dmu_tx_create(os);
1669			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1670			error = dmu_tx_assign(tx, TXG_WAIT);
1671			if (error) {
1672				dmu_tx_abort(tx);
1673			} else {
1674				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1675				zvol_log_write(zv, tx, off, size, sync);
1676				dmu_tx_commit(tx);
1677			}
1678		}
1679		if (error) {
1680			/* convert checksum errors into IO errors */
1681			if (error == ECKSUM)
1682				error = SET_ERROR(EIO);
1683			break;
1684		}
1685		off += size;
1686		addr += size;
1687		resid -= size;
1688	}
1689#ifndef illumos
1690unlock:
1691#endif
1692	zfs_range_unlock(rl);
1693
1694#ifdef illumos
1695	if ((bp->b_resid = resid) == bp->b_bcount)
1696		bioerror(bp, off > volsize ? EINVAL : error);
1697
1698	if (sync)
1699		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1700	biodone(bp);
1701
1702	return (0);
1703#else	/* !illumos */
1704	bp->bio_completed = bp->bio_length - resid;
1705	if (bp->bio_completed < bp->bio_length && off > volsize)
1706		error = EINVAL;
1707
1708	if (sync) {
1709sync:
1710		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1711	}
1712out:
1713	if (bp->bio_to)
1714		g_io_deliver(bp, error);
1715	else
1716		biofinish(bp, NULL, error);
1717#endif	/* illumos */
1718}
1719
1720#ifdef illumos
1721/*
1722 * Set the buffer count to the zvol maximum transfer.
1723 * Using our own routine instead of the default minphys()
1724 * means that for larger writes we write bigger buffers on X86
1725 * (128K instead of 56K) and flush the disk write cache less often
1726 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1727 * 56K on X86 and 128K on sparc).
1728 */
1729void
1730zvol_minphys(struct buf *bp)
1731{
1732	if (bp->b_bcount > zvol_maxphys)
1733		bp->b_bcount = zvol_maxphys;
1734}
1735
1736int
1737zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1738{
1739	minor_t minor = getminor(dev);
1740	zvol_state_t *zv;
1741	int error = 0;
1742	uint64_t size;
1743	uint64_t boff;
1744	uint64_t resid;
1745
1746	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1747	if (zv == NULL)
1748		return (SET_ERROR(ENXIO));
1749
1750	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1751		return (SET_ERROR(EINVAL));
1752
1753	boff = ldbtob(blkno);
1754	resid = ldbtob(nblocks);
1755
1756	VERIFY3U(boff + resid, <=, zv->zv_volsize);
1757
1758	while (resid) {
1759		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1760		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1761		if (error)
1762			break;
1763		boff += size;
1764		addr += size;
1765		resid -= size;
1766	}
1767
1768	return (error);
1769}
1770
1771/*ARGSUSED*/
1772int
1773zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1774{
1775	minor_t minor = getminor(dev);
1776#else	/* !illumos */
1777int
1778zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
1779{
1780#endif	/* illumos */
1781	zvol_state_t *zv;
1782	uint64_t volsize;
1783	rl_t *rl;
1784	int error = 0;
1785
1786#ifdef illumos
1787	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1788	if (zv == NULL)
1789		return (SET_ERROR(ENXIO));
1790#else
1791	zv = dev->si_drv2;
1792#endif
1793
1794	volsize = zv->zv_volsize;
1795	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
1796	if (uio->uio_resid > 0 &&
1797	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
1798		return (SET_ERROR(EIO));
1799
1800#ifdef illumos
1801	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1802		error = physio(zvol_strategy, NULL, dev, B_READ,
1803		    zvol_minphys, uio);
1804		return (error);
1805	}
1806#endif
1807
1808	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1809	    RL_READER);
1810	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1811		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1812
1813		/* don't read past the end */
1814		if (bytes > volsize - uio->uio_loffset)
1815			bytes = volsize - uio->uio_loffset;
1816
1817		error =  dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
1818		if (error) {
1819			/* convert checksum errors into IO errors */
1820			if (error == ECKSUM)
1821				error = SET_ERROR(EIO);
1822			break;
1823		}
1824	}
1825	zfs_range_unlock(rl);
1826	return (error);
1827}
1828
1829#ifdef illumos
1830/*ARGSUSED*/
1831int
1832zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1833{
1834	minor_t minor = getminor(dev);
1835#else	/* !illumos */
1836int
1837zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
1838{
1839#endif	/* illumos */
1840	zvol_state_t *zv;
1841	uint64_t volsize;
1842	rl_t *rl;
1843	int error = 0;
1844	boolean_t sync;
1845
1846#ifdef illumos
1847	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1848	if (zv == NULL)
1849		return (SET_ERROR(ENXIO));
1850#else
1851	zv = dev->si_drv2;
1852#endif
1853
1854	volsize = zv->zv_volsize;
1855	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
1856	if (uio->uio_resid > 0 &&
1857	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
1858		return (SET_ERROR(EIO));
1859
1860#ifdef illumos
1861	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1862		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1863		    zvol_minphys, uio);
1864		return (error);
1865	}
1866
1867	sync = !(zv->zv_flags & ZVOL_WCE) ||
1868#else
1869	sync = (ioflag & IO_SYNC) ||
1870#endif
1871	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1872
1873	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1874	    RL_WRITER);
1875	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1876		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1877		uint64_t off = uio->uio_loffset;
1878		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1879
1880		if (bytes > volsize - off)	/* don't write past the end */
1881			bytes = volsize - off;
1882
1883		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1884		error = dmu_tx_assign(tx, TXG_WAIT);
1885		if (error) {
1886			dmu_tx_abort(tx);
1887			break;
1888		}
1889		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1890		if (error == 0)
1891			zvol_log_write(zv, tx, off, bytes, sync);
1892		dmu_tx_commit(tx);
1893
1894		if (error)
1895			break;
1896	}
1897	zfs_range_unlock(rl);
1898	if (sync)
1899		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1900	return (error);
1901}
1902
1903#ifdef illumos
1904int
1905zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1906{
1907	struct uuid uuid = EFI_RESERVED;
1908	efi_gpe_t gpe = { 0 };
1909	uint32_t crc;
1910	dk_efi_t efi;
1911	int length;
1912	char *ptr;
1913
1914	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1915		return (SET_ERROR(EFAULT));
1916	ptr = (char *)(uintptr_t)efi.dki_data_64;
1917	length = efi.dki_length;
1918	/*
1919	 * Some clients may attempt to request a PMBR for the
1920	 * zvol.  Currently this interface will return EINVAL to
1921	 * such requests.  These requests could be supported by
1922	 * adding a check for lba == 0 and consing up an appropriate
1923	 * PMBR.
1924	 */
1925	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1926		return (SET_ERROR(EINVAL));
1927
1928	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1929	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1930	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1931
1932	if (efi.dki_lba == 1) {
1933		efi_gpt_t gpt = { 0 };
1934
1935		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1936		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1937		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1938		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1939		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1940		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1941		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1942		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1943		gpt.efi_gpt_SizeOfPartitionEntry =
1944		    LE_32(sizeof (efi_gpe_t));
1945		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1946		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1947		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1948		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1949		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1950		    flag))
1951			return (SET_ERROR(EFAULT));
1952		ptr += sizeof (gpt);
1953		length -= sizeof (gpt);
1954	}
1955	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1956	    length), flag))
1957		return (SET_ERROR(EFAULT));
1958	return (0);
1959}
1960
1961/*
1962 * BEGIN entry points to allow external callers access to the volume.
1963 */
1964/*
1965 * Return the volume parameters needed for access from an external caller.
1966 * These values are invariant as long as the volume is held open.
1967 */
1968int
1969zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1970    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1971    void **rl_hdl, void **bonus_hdl)
1972{
1973	zvol_state_t *zv;
1974
1975	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1976	if (zv == NULL)
1977		return (SET_ERROR(ENXIO));
1978	if (zv->zv_flags & ZVOL_DUMPIFIED)
1979		return (SET_ERROR(ENXIO));
1980
1981	ASSERT(blksize && max_xfer_len && minor_hdl &&
1982	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1983
1984	*blksize = zv->zv_volblocksize;
1985	*max_xfer_len = (uint64_t)zvol_maxphys;
1986	*minor_hdl = zv;
1987	*objset_hdl = zv->zv_objset;
1988	*zil_hdl = zv->zv_zilog;
1989	*rl_hdl = &zv->zv_znode;
1990	*bonus_hdl = zv->zv_dbuf;
1991	return (0);
1992}
1993
1994/*
1995 * Return the current volume size to an external caller.
1996 * The size can change while the volume is open.
1997 */
1998uint64_t
1999zvol_get_volume_size(void *minor_hdl)
2000{
2001	zvol_state_t *zv = minor_hdl;
2002
2003	return (zv->zv_volsize);
2004}
2005
2006/*
2007 * Return the current WCE setting to an external caller.
2008 * The WCE setting can change while the volume is open.
2009 */
2010int
2011zvol_get_volume_wce(void *minor_hdl)
2012{
2013	zvol_state_t *zv = minor_hdl;
2014
2015	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
2016}
2017
2018/*
2019 * Entry point for external callers to zvol_log_write
2020 */
2021void
2022zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
2023    boolean_t sync)
2024{
2025	zvol_state_t *zv = minor_hdl;
2026
2027	zvol_log_write(zv, tx, off, resid, sync);
2028}
2029/*
2030 * END entry points to allow external callers access to the volume.
2031 */
2032#endif	/* illumos */
2033
2034/*
2035 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
2036 */
2037static void
2038zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
2039    boolean_t sync)
2040{
2041	itx_t *itx;
2042	lr_truncate_t *lr;
2043	zilog_t *zilog = zv->zv_zilog;
2044
2045	if (zil_replaying(zilog, tx))
2046		return;
2047
2048	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
2049	lr = (lr_truncate_t *)&itx->itx_lr;
2050	lr->lr_foid = ZVOL_OBJ;
2051	lr->lr_offset = off;
2052	lr->lr_length = len;
2053
2054	itx->itx_sync = (sync || zv->zv_sync_cnt != 0);
2055	zil_itx_assign(zilog, itx, tx);
2056}
2057
2058#ifdef illumos
2059/*
2060 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
2061 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
2062 */
2063/*ARGSUSED*/
2064int
2065zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
2066{
2067	zvol_state_t *zv;
2068	struct dk_callback *dkc;
2069	int error = 0;
2070	rl_t *rl;
2071
2072	mutex_enter(&zfsdev_state_lock);
2073
2074	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
2075
2076	if (zv == NULL) {
2077		mutex_exit(&zfsdev_state_lock);
2078		return (SET_ERROR(ENXIO));
2079	}
2080	ASSERT(zv->zv_total_opens > 0);
2081
2082	switch (cmd) {
2083
2084	case DKIOCINFO:
2085	{
2086		struct dk_cinfo dki;
2087
2088		bzero(&dki, sizeof (dki));
2089		(void) strcpy(dki.dki_cname, "zvol");
2090		(void) strcpy(dki.dki_dname, "zvol");
2091		dki.dki_ctype = DKC_UNKNOWN;
2092		dki.dki_unit = getminor(dev);
2093		dki.dki_maxtransfer =
2094		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
2095		mutex_exit(&zfsdev_state_lock);
2096		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
2097			error = SET_ERROR(EFAULT);
2098		return (error);
2099	}
2100
2101	case DKIOCGMEDIAINFO:
2102	{
2103		struct dk_minfo dkm;
2104
2105		bzero(&dkm, sizeof (dkm));
2106		dkm.dki_lbsize = 1U << zv->zv_min_bs;
2107		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2108		dkm.dki_media_type = DK_UNKNOWN;
2109		mutex_exit(&zfsdev_state_lock);
2110		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
2111			error = SET_ERROR(EFAULT);
2112		return (error);
2113	}
2114
2115	case DKIOCGMEDIAINFOEXT:
2116	{
2117		struct dk_minfo_ext dkmext;
2118
2119		bzero(&dkmext, sizeof (dkmext));
2120		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
2121		dkmext.dki_pbsize = zv->zv_volblocksize;
2122		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2123		dkmext.dki_media_type = DK_UNKNOWN;
2124		mutex_exit(&zfsdev_state_lock);
2125		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
2126			error = SET_ERROR(EFAULT);
2127		return (error);
2128	}
2129
2130	case DKIOCGETEFI:
2131	{
2132		uint64_t vs = zv->zv_volsize;
2133		uint8_t bs = zv->zv_min_bs;
2134
2135		mutex_exit(&zfsdev_state_lock);
2136		error = zvol_getefi((void *)arg, flag, vs, bs);
2137		return (error);
2138	}
2139
2140	case DKIOCFLUSHWRITECACHE:
2141		dkc = (struct dk_callback *)arg;
2142		mutex_exit(&zfsdev_state_lock);
2143		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2144		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
2145			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
2146			error = 0;
2147		}
2148		return (error);
2149
2150	case DKIOCGETWCE:
2151	{
2152		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
2153		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
2154		    flag))
2155			error = SET_ERROR(EFAULT);
2156		break;
2157	}
2158	case DKIOCSETWCE:
2159	{
2160		int wce;
2161		if (ddi_copyin((void *)arg, &wce, sizeof (int),
2162		    flag)) {
2163			error = SET_ERROR(EFAULT);
2164			break;
2165		}
2166		if (wce) {
2167			zv->zv_flags |= ZVOL_WCE;
2168			mutex_exit(&zfsdev_state_lock);
2169		} else {
2170			zv->zv_flags &= ~ZVOL_WCE;
2171			mutex_exit(&zfsdev_state_lock);
2172			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2173		}
2174		return (0);
2175	}
2176
2177	case DKIOCGGEOM:
2178	case DKIOCGVTOC:
2179		/*
2180		 * commands using these (like prtvtoc) expect ENOTSUP
2181		 * since we're emulating an EFI label
2182		 */
2183		error = SET_ERROR(ENOTSUP);
2184		break;
2185
2186	case DKIOCDUMPINIT:
2187		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2188		    RL_WRITER);
2189		error = zvol_dumpify(zv);
2190		zfs_range_unlock(rl);
2191		break;
2192
2193	case DKIOCDUMPFINI:
2194		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
2195			break;
2196		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2197		    RL_WRITER);
2198		error = zvol_dump_fini(zv);
2199		zfs_range_unlock(rl);
2200		break;
2201
2202	case DKIOCFREE:
2203	{
2204		dkioc_free_t df;
2205		dmu_tx_t *tx;
2206
2207		if (!zvol_unmap_enabled)
2208			break;
2209
2210		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
2211			error = SET_ERROR(EFAULT);
2212			break;
2213		}
2214
2215		/*
2216		 * Apply Postel's Law to length-checking: if the caller's range
2217		 * overshoots the end of the volume, just free up to the end;
2218		 * if it starts at or beyond the end, there is nothing to do.
2219		 */
2220		if (df.df_start >= zv->zv_volsize)
2221			break;	/* No need to do anything... */
2222
2223		mutex_exit(&zfsdev_state_lock);
2224
2225		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
2226		    RL_WRITER);
2227		tx = dmu_tx_create(zv->zv_objset);
2228		dmu_tx_mark_netfree(tx);
2229		error = dmu_tx_assign(tx, TXG_WAIT);
2230		if (error != 0) {
2231			dmu_tx_abort(tx);
2232		} else {
2233			zvol_log_truncate(zv, tx, df.df_start,
2234			    df.df_length, B_TRUE);
2235			dmu_tx_commit(tx);
2236			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2237			    df.df_start, df.df_length);
2238		}
2239
2240		zfs_range_unlock(rl);
2241
2242		if (error == 0) {
2243			/*
2244			 * If the write-cache is disabled or 'sync' property
2245			 * is set to 'always' then treat this as a synchronous
2246			 * operation (i.e. commit to zil).
2247			 */
2248			if (!(zv->zv_flags & ZVOL_WCE) ||
2249			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
2250				zil_commit(zv->zv_zilog, ZVOL_OBJ);
2251
2252			/*
2253			 * If the caller really wants synchronous writes, and
2254			 * can't wait for them, don't return until the write
2255			 * is done.
2256			 */
2257			if (df.df_flags & DF_WAIT_SYNC) {
2258				txg_wait_synced(
2259				    dmu_objset_pool(zv->zv_objset), 0);
2260			}
2261		}
2262		return (error);
2263	}
2264
2265	default:
2266		error = SET_ERROR(ENOTTY);
2267		break;
2268
2269	}
2270	mutex_exit(&zfsdev_state_lock);
2271	return (error);
2272}
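/*
 * Illustrative sketch (not part of this file): issuing DKIOCFREE from
 * userland against a zvol character device.  The device path is an
 * example; error handling and some headers are omitted.
 *
 *	#include <sys/dkio.h>
 *	#include <fcntl.h>
 *
 *	dkioc_free_t df = { 0 };
 *	int fd = open("/dev/zvol/rdsk/tank/vol0", O_RDWR);
 *
 *	df.df_start = 0;		// byte offset into the volume
 *	df.df_length = 1ULL << 20;	// number of bytes to free
 *	df.df_flags = DF_WAIT_SYNC;	// wait for the free to reach disk
 *	(void) ioctl(fd, DKIOCFREE, &df);
 */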
2273#endif	/* illumos */
2274
2275int
2276zvol_busy(void)
2277{
2278	return (zvol_minors != 0);
2279}
2280
2281void
2282zvol_init(void)
2283{
2284	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
2285	    1) == 0);
2286#ifdef illumos
2287	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
2288#else
2289	ZFS_LOG(1, "ZVOL Initialized.");
2290#endif
2291}
2292
2293void
2294zvol_fini(void)
2295{
2296#ifdef illumos
2297	mutex_destroy(&zfsdev_state_lock);
2298#endif
2299	ddi_soft_state_fini(&zfsdev_state);
2300	ZFS_LOG(1, "ZVOL Deinitialized.");
2301}
2302
2303#ifdef illumos
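/*
 * Check/sync callback pair handed to dsl_sync_task() by zvol_dump_init():
 * the check callback returns nonzero (aborting the task) when
 * MULTI_VDEV_CRASH_DUMP is already active, so the sync callback bumps the
 * feature's reference count only on first activation.
 */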
2304/*ARGSUSED*/
2305static int
2306zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2307{
2308	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2309
2310	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2311		return (1);
2312	return (0);
2313}
2314
2315/*ARGSUSED*/
2316static void
2317zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2318{
2319	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2320
2321	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2322}
2323
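/*
 * Prepare the zvol for use as a dump device: free any existing data,
 * activate MULTI_VDEV_CRASH_DUMP when the pool layout requires it, save
 * the current property values (or just update the refreservation when
 * resizing) in the volume's ZAP object, switch to dump-friendly property
 * settings, and preallocate the dump space.
 */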
2324static int
2325zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2326{
2327	dmu_tx_t *tx;
2328	int error;
2329	objset_t *os = zv->zv_objset;
2330	spa_t *spa = dmu_objset_spa(os);
2331	vdev_t *vd = spa->spa_root_vdev;
2332	nvlist_t *nv = NULL;
2333	uint64_t version = spa_version(spa);
2334	uint64_t checksum, compress, refresrv, vbs, dedup;
2335
2336	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2337	ASSERT(vd->vdev_ops == &vdev_root_ops);
2338
2339	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2340	    DMU_OBJECT_END);
2341	if (error != 0)
2342		return (error);
2343	/* wait for dmu_free_long_range to actually free the blocks */
2344	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2345
2346	/*
2347	 * If the pool on which the dump device is being initialized has more
2348	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2349	 * enabled.  If so, bump that feature's counter to indicate that the
2350	 * feature is active. We also check the vdev type to handle the
2351	 * following case:
2352	 *   # zpool create test raidz disk1 disk2 disk3
2353	 *   Here spa_root_vdev->vdev_children == 1 (the raidz vdev), but
2354	 *   the raidz vdev itself has 3 children.
2355	 */
2356	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
2357		if (!spa_feature_is_enabled(spa,
2358		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2359			return (SET_ERROR(ENOTSUP));
2360		(void) dsl_sync_task(spa_name(spa),
2361		    zfs_mvdev_dump_feature_check,
2362		    zfs_mvdev_dump_activate_feature_sync, NULL,
2363		    2, ZFS_SPACE_CHECK_RESERVED);
2364	}
2365
2366	if (!resize) {
2367		error = dsl_prop_get_integer(zv->zv_name,
2368		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2369		if (error == 0) {
2370			error = dsl_prop_get_integer(zv->zv_name,
2371			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
2372			    NULL);
2373		}
2374		if (error == 0) {
2375			error = dsl_prop_get_integer(zv->zv_name,
2376			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
2377			    &refresrv, NULL);
2378		}
2379		if (error == 0) {
2380			error = dsl_prop_get_integer(zv->zv_name,
2381			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
2382			    NULL);
2383		}
2384		if (version >= SPA_VERSION_DEDUP && error == 0) {
2385			error = dsl_prop_get_integer(zv->zv_name,
2386			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2387		}
2388	}
2389	if (error != 0)
2390		return (error);
2391
2392	tx = dmu_tx_create(os);
2393	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2394	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2395	error = dmu_tx_assign(tx, TXG_WAIT);
2396	if (error != 0) {
2397		dmu_tx_abort(tx);
2398		return (error);
2399	}
2400
2401	/*
2402	 * If we are resizing the dump device then we only need to
2403	 * update the refreservation to match the newly updated
2404	 * zvol size.  Otherwise, we save off the zvol's original property
2405	 * values so that we can restore them if the zvol is ever undumpified.
2406	 */
2407	if (resize) {
2408		error = zap_update(os, ZVOL_ZAP_OBJ,
2409		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2410		    &zv->zv_volsize, tx);
2411	} else {
2412		error = zap_update(os, ZVOL_ZAP_OBJ,
2413		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2414		    &compress, tx);
2415		if (error == 0) {
2416			error = zap_update(os, ZVOL_ZAP_OBJ,
2417			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2418			    &checksum, tx);
2419		}
2420		if (error == 0) {
2421			error = zap_update(os, ZVOL_ZAP_OBJ,
2422			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2423			    &refresrv, tx);
2424		}
2425		if (error == 0) {
2426			error = zap_update(os, ZVOL_ZAP_OBJ,
2427			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2428			    &vbs, tx);
2429		}
2430		if (error == 0) {
2431			error = dmu_object_set_blocksize(
2432			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2433		}
2434		if (version >= SPA_VERSION_DEDUP && error == 0) {
2435			error = zap_update(os, ZVOL_ZAP_OBJ,
2436			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2437			    &dedup, tx);
2438		}
2439		if (error == 0)
2440			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2441	}
2442	dmu_tx_commit(tx);
2443
2444	/*
2445	 * We only need to update the zvol's properties if we are
2446	 * initializing the dump area for the first time.
2447	 */
2448	if (error == 0 && !resize) {
2449		/*
2450		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2451		 * function.  Otherwise, use the old default -- OFF.
2452		 */
2453		checksum = spa_feature_is_active(spa,
2454		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2455		    ZIO_CHECKSUM_OFF;
2456
2457		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2458		VERIFY(nvlist_add_uint64(nv,
2459		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2460		VERIFY(nvlist_add_uint64(nv,
2461		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2462		    ZIO_COMPRESS_OFF) == 0);
2463		VERIFY(nvlist_add_uint64(nv,
2464		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2465		    checksum) == 0);
2466		if (version >= SPA_VERSION_DEDUP) {
2467			VERIFY(nvlist_add_uint64(nv,
2468			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2469			    ZIO_CHECKSUM_OFF) == 0);
2470		}
2471
2472		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2473		    nv, NULL);
2474		nvlist_free(nv);
2475	}
2476
2477	/* Allocate the space for the dump */
2478	if (error == 0)
2479		error = zvol_prealloc(zv);
2480	return (error);
2481}
2482
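/*
 * Turn the zvol into a dump device: (re)initialize the dump area when the
 * recorded dump size does not match the current volume size, build the
 * LBA extent mapping, and record the new dump size in the volume's ZAP
 * object.
 */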
2483static int
2484zvol_dumpify(zvol_state_t *zv)
2485{
2486	int error = 0;
2487	uint64_t dumpsize = 0;
2488	dmu_tx_t *tx;
2489	objset_t *os = zv->zv_objset;
2490
2491	if (zv->zv_flags & ZVOL_RDONLY)
2492		return (SET_ERROR(EROFS));
2493
2494	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2495	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2496		boolean_t resize = (dumpsize > 0);
2497
2498		if ((error = zvol_dump_init(zv, resize)) != 0) {
2499			(void) zvol_dump_fini(zv);
2500			return (error);
2501		}
2502	}
2503
2504	/*
2505	 * Build up our LBA mapping.
2506	 */
2507	error = zvol_get_lbas(zv);
2508	if (error) {
2509		(void) zvol_dump_fini(zv);
2510		return (error);
2511	}
2512
2513	tx = dmu_tx_create(os);
2514	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2515	error = dmu_tx_assign(tx, TXG_WAIT);
2516	if (error) {
2517		dmu_tx_abort(tx);
2518		(void) zvol_dump_fini(zv);
2519		return (error);
2520	}
2521
2522	zv->zv_flags |= ZVOL_DUMPIFIED;
2523	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2524	    &zv->zv_volsize, tx);
2525	dmu_tx_commit(tx);
2526
2527	if (error) {
2528		(void) zvol_dump_fini(zv);
2529		return (error);
2530	}
2531
2532	txg_wait_synced(dmu_objset_pool(os), 0);
2533	return (0);
2534}
2535
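/*
 * Undo zvol_dumpify(): drop the dump-size ZAP entry, restore the saved
 * property values, free the preallocated blocks, and restore the original
 * volume block size.
 */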
2536static int
2537zvol_dump_fini(zvol_state_t *zv)
2538{
2539	dmu_tx_t *tx;
2540	objset_t *os = zv->zv_objset;
2541	nvlist_t *nv;
2542	int error = 0;
2543	uint64_t checksum, compress, refresrv, vbs, dedup;
2544	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2545
2546	/*
2547	 * Attempt to restore the zvol to its pre-dumpified state.
2548	 * This is a best-effort attempt as it's possible that not all
2549	 * of these properties were initialized during the dumpify process
2550	 * (e.g. an error occurred during zvol_dump_init).
2551	 */
2552
2553	tx = dmu_tx_create(os);
2554	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2555	error = dmu_tx_assign(tx, TXG_WAIT);
2556	if (error) {
2557		dmu_tx_abort(tx);
2558		return (error);
2559	}
2560	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2561	dmu_tx_commit(tx);
2562
2563	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2564	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2565	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2566	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2567	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2568	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2569	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2570	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2571
2572	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2573	(void) nvlist_add_uint64(nv,
2574	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2575	(void) nvlist_add_uint64(nv,
2576	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2577	(void) nvlist_add_uint64(nv,
2578	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2579	if (version >= SPA_VERSION_DEDUP &&
2580	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2581	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2582		(void) nvlist_add_uint64(nv,
2583		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2584	}
2585	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2586	    nv, NULL);
2587	nvlist_free(nv);
2588
2589	zvol_free_extents(zv);
2590	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2591	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2592	/* wait for dmu_free_long_range to actually free the blocks */
2593	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2594	tx = dmu_tx_create(os);
2595	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2596	error = dmu_tx_assign(tx, TXG_WAIT);
2597	if (error) {
2598		dmu_tx_abort(tx);
2599		return (error);
2600	}
2601	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2602		zv->zv_volblocksize = vbs;
2603	dmu_tx_commit(tx);
2604
2605	return (0);
2606}
2607#else	/* !illumos */
2608
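/*
 * Mark the GEOM provider ready for I/O and start the per-volume worker
 * thread that services bios which cannot be handled in the caller's
 * context.
 */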
2609static void
2610zvol_geom_run(zvol_state_t *zv)
2611{
2612	struct g_provider *pp;
2613
2614	pp = zv->zv_provider;
2615	g_error_provider(pp, 0);
2616
2617	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2618	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2619}
2620
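/*
 * Ask the worker thread to exit, wait for it to do so, then detach the
 * provider from the zvol state and wither it.
 */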
2621static void
2622zvol_geom_destroy(zvol_state_t *zv)
2623{
2624	struct g_provider *pp;
2625
2626	g_topology_assert();
2627
2628	mtx_lock(&zv->zv_queue_mtx);
2629	zv->zv_state = 1;
2630	wakeup_one(&zv->zv_queue);
2631	while (zv->zv_state != 2)
2632		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2633	mtx_destroy(&zv->zv_queue_mtx);
2634
2635	pp = zv->zv_provider;
2636	zv->zv_provider = NULL;
2637	pp->private = NULL;
2638	g_wither_geom(pp->geom, ENXIO);
2639}
2640
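/*
 * GEOM access method.  The acr/acw/ace arguments are deltas to the
 * provider's read, write and exclusive access counts; they are translated
 * here into zvol_open()/zvol_close() calls with the matching FREAD/FWRITE
 * flags.
 */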
2641static int
2642zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2643{
2644	int count, error, flags;
2645
2646	g_topology_assert();
2647
2648	/*
2649	 * To keep things simple we expect either an open or a close request,
2650	 * but not both at the same time.
2651	 */
2652	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2653	    (acr <= 0 && acw <= 0 && ace <= 0),
2654	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2655	    pp->name, acr, acw, ace));
2656
2657	if (pp->private == NULL) {
2658		if (acr <= 0 && acw <= 0 && ace <= 0)
2659			return (0);
2660		return (pp->error);
2661	}
2662
2663	/*
2664	 * We don't pass the FEXCL flag to zvol_open()/zvol_close() if ace != 0,
2665	 * because GEOM already handles that and handles it a bit differently.
2666	 * GEOM allows for multiple read/exclusive consumers while ZFS allows
2667	 * only one exclusive consumer, no matter whether it is a reader or a
2668	 * writer.  I like the way GEOM works better, so I'll leave it to GEOM
2669	 * to decide what to do.
2670	 */
2671
2672	count = acr + acw + ace;
2673	if (count == 0)
2674		return (0);
2675
2676	flags = 0;
2677	if (acr != 0 || ace != 0)
2678		flags |= FREAD;
2679	if (acw != 0)
2680		flags |= FWRITE;
2681
2682	g_topology_unlock();
2683	if (count > 0)
2684		error = zvol_open(pp, flags, count);
2685	else
2686		error = zvol_close(pp, flags, -count);
2687	g_topology_lock();
2688	return (error);
2689}
2690
2691static void
2692zvol_geom_start(struct bio *bp)
2693{
2694	zvol_state_t *zv;
2695	boolean_t first;
2696
2697	zv = bp->bio_to->private;
2698	ASSERT(zv != NULL);
2699	switch (bp->bio_cmd) {
2700	case BIO_FLUSH:
2701		if (!THREAD_CAN_SLEEP())
2702			goto enqueue;
2703		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2704		g_io_deliver(bp, 0);
2705		break;
2706	case BIO_READ:
2707	case BIO_WRITE:
2708	case BIO_DELETE:
2709		if (!THREAD_CAN_SLEEP())
2710			goto enqueue;
2711		zvol_strategy(bp);
2712		break;
2713	case BIO_GETATTR: {
2714		spa_t *spa = dmu_objset_spa(zv->zv_objset);
2715		uint64_t refd, avail, usedobjs, availobjs, val;
2716
2717		if (g_handleattr_int(bp, "GEOM::candelete", 1))
2718			return;
2719		if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
2720			dmu_objset_space(zv->zv_objset, &refd, &avail,
2721			    &usedobjs, &availobjs);
2722			if (g_handleattr_off_t(bp, "blocksavail",
2723			    avail / DEV_BSIZE))
2724				return;
2725		} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
2726			dmu_objset_space(zv->zv_objset, &refd, &avail,
2727			    &usedobjs, &availobjs);
2728			if (g_handleattr_off_t(bp, "blocksused",
2729			    refd / DEV_BSIZE))
2730				return;
2731		} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
2732			avail = metaslab_class_get_space(spa_normal_class(spa));
2733			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
2734			if (g_handleattr_off_t(bp, "poolblocksavail",
2735			    avail / DEV_BSIZE))
2736				return;
2737		} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
2738			refd = metaslab_class_get_alloc(spa_normal_class(spa));
2739			if (g_handleattr_off_t(bp, "poolblocksused",
2740			    refd / DEV_BSIZE))
2741				return;
2742		}
2743		/* FALLTHROUGH */
2744	}
2745	default:
2746		g_io_deliver(bp, EOPNOTSUPP);
2747		break;
2748	}
2749	return;
2750
2751enqueue:
2752	mtx_lock(&zv->zv_queue_mtx);
2753	first = (bioq_first(&zv->zv_queue) == NULL);
2754	bioq_insert_tail(&zv->zv_queue, bp);
2755	mtx_unlock(&zv->zv_queue_mtx);
2756	if (first)
2757		wakeup_one(&zv->zv_queue);
2758}
2759
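/*
 * Per-volume worker thread: services bios queued by zvol_geom_start()
 * when it could not sleep, and exits once zvol_geom_destroy() sets
 * zv_state.
 */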
2760static void
2761zvol_geom_worker(void *arg)
2762{
2763	zvol_state_t *zv;
2764	struct bio *bp;
2765
2766	thread_lock(curthread);
2767	sched_prio(curthread, PRIBIO);
2768	thread_unlock(curthread);
2769
2770	zv = arg;
2771	for (;;) {
2772		mtx_lock(&zv->zv_queue_mtx);
2773		bp = bioq_takefirst(&zv->zv_queue);
2774		if (bp == NULL) {
2775			if (zv->zv_state == 1) {
2776				zv->zv_state = 2;
2777				wakeup(&zv->zv_state);
2778				mtx_unlock(&zv->zv_queue_mtx);
2779				kthread_exit();
2780			}
2781			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2782			    "zvol:io", 0);
2783			continue;
2784		}
2785		mtx_unlock(&zv->zv_queue_mtx);
2786		switch (bp->bio_cmd) {
2787		case BIO_FLUSH:
2788			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2789			g_io_deliver(bp, 0);
2790			break;
2791		case BIO_READ:
2792		case BIO_WRITE:
2793		case BIO_DELETE:
2794			zvol_strategy(bp);
2795			break;
2796		default:
2797			g_io_deliver(bp, EOPNOTSUPP);
2798			break;
2799		}
2800	}
2801}
2802
2803extern boolean_t dataset_name_hidden(const char *name);
2804
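/*
 * Create a minor for every snapshot of the given volume.
 */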
2805static int
2806zvol_create_snapshots(objset_t *os, const char *name)
2807{
2808	uint64_t cookie, obj;
2809	char *sname;
2810	int error, len;
2811
2812	cookie = obj = 0;
2813	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2814
2815#if 0
2816	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2817	    DS_FIND_SNAPSHOTS);
2818#endif
2819
2820	for (;;) {
2821		len = snprintf(sname, MAXPATHLEN, "%s@", name);
2822		if (len >= MAXPATHLEN) {
2823			dmu_objset_rele(os, FTAG);
2824			error = ENAMETOOLONG;
2825			break;
2826		}
2827
2828		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2829		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2830		    sname + len, &obj, &cookie, NULL);
2831		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2832		if (error != 0) {
2833			if (error == ENOENT)
2834				error = 0;
2835			break;
2836		}
2837
2838		error = zvol_create_minor(sname);
2839		if (error != 0 && error != EEXIST) {
2840			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2841			    sname, error);
2842			break;
2843		}
2844	}
2845
2846	kmem_free(sname, MAXPATHLEN);
2847	return (error);
2848}
2849
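/*
 * Recursively create minors for the named dataset: create the minor for a
 * volume and its snapshots, or descend into the children of a filesystem.
 */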
2850int
2851zvol_create_minors(const char *name)
2852{
2853	uint64_t cookie;
2854	objset_t *os;
2855	char *osname, *p;
2856	int error, len;
2857
2858	if (dataset_name_hidden(name))
2859		return (0);
2860
2861	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2862		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2863		    name, error);
2864		return (error);
2865	}
2866	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2867		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
2868		dsl_pool_rele(dmu_objset_pool(os), FTAG);
2869		error = zvol_create_minor(name);
2870		if (error == 0 || error == EEXIST) {
2871			error = zvol_create_snapshots(os, name);
2872		} else {
2873			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2874			    name, error);
2875		}
2876		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
2877		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
2878		return (error);
2879	}
2880	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2881		dmu_objset_rele(os, FTAG);
2882		return (0);
2883	}
2884
2885	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2886	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2887		dmu_objset_rele(os, FTAG);
2888		kmem_free(osname, MAXPATHLEN);
2889		return (ENOENT);
2890	}
2891	p = osname + strlen(osname);
2892	len = MAXPATHLEN - (p - osname);
2893
2894#if 0
2895	/* Prefetch the datasets. */
2896	cookie = 0;
2897	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2898		if (!dataset_name_hidden(osname))
2899			(void) dmu_objset_prefetch(osname, NULL);
2900	}
2901#endif
2902
2903	cookie = 0;
2904	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2905	    &cookie) == 0) {
2906		dmu_objset_rele(os, FTAG);
2907		(void) zvol_create_minors(osname);
2908		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2909			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2910			    name, error);
			kmem_free(osname, MAXPATHLEN);
2911			return (error);
2912		}
2913	}
2914
2915	dmu_objset_rele(os, FTAG);
2916	kmem_free(osname, MAXPATHLEN);
2917	return (0);
2918}
2919
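/*
 * Rename a single minor: destroy the old GEOM provider or character
 * device and create a new one under the new name.
 */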
2920static void
2921zvol_rename_minor(zvol_state_t *zv, const char *newname)
2922{
2923	struct g_geom *gp;
2924	struct g_provider *pp;
2925	struct cdev *dev;
2926
2927	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2928
2929	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
2930		g_topology_lock();
2931		pp = zv->zv_provider;
2932		ASSERT(pp != NULL);
2933		gp = pp->geom;
2934		ASSERT(gp != NULL);
2935
2936		zv->zv_provider = NULL;
2937		g_wither_provider(pp, ENXIO);
2938
2939		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2940		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
2941		pp->sectorsize = DEV_BSIZE;
2942		pp->mediasize = zv->zv_volsize;
2943		pp->private = zv;
2944		zv->zv_provider = pp;
2945		g_error_provider(pp, 0);
2946		g_topology_unlock();
2947	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
2948		struct make_dev_args args;
2949
2950		if ((dev = zv->zv_dev) != NULL) {
2951			zv->zv_dev = NULL;
2952			destroy_dev(dev);
2953			if (zv->zv_total_opens > 0) {
2954				zv->zv_flags &= ~ZVOL_EXCL;
2955				zv->zv_total_opens = 0;
2956				zvol_last_close(zv);
2957			}
2958		}
2959
2960		make_dev_args_init(&args);
2961		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
2962		args.mda_devsw = &zvol_cdevsw;
2963		args.mda_cr = NULL;
2964		args.mda_uid = UID_ROOT;
2965		args.mda_gid = GID_OPERATOR;
2966		args.mda_mode = 0640;
2967		args.mda_si_drv2 = zv;
2968		if (make_dev_s(&args, &zv->zv_dev,
2969		    "%s/%s", ZVOL_DRIVER, newname) == 0)
2970			zv->zv_dev->si_iosize_max = MAXPHYS;
2971	}
2972	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2973}
2974
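/*
 * Rename the minors of the named dataset and of everything below it
 * (child datasets and snapshots).
 */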
2975void
2976zvol_rename_minors(const char *oldname, const char *newname)
2977{
2978	char name[MAXPATHLEN];
2979	struct g_provider *pp;
2980	struct g_geom *gp;
2981	size_t oldnamelen, newnamelen;
2982	zvol_state_t *zv;
2983	char *namebuf;
2984	boolean_t locked = B_FALSE;
2985
2986	oldnamelen = strlen(oldname);
2987	newnamelen = strlen(newname);
2988
2989	DROP_GIANT();
2990	/* See comment in zvol_open(). */
2991	if (!MUTEX_HELD(&zfsdev_state_lock)) {
2992		mutex_enter(&zfsdev_state_lock);
2993		locked = B_TRUE;
2994	}
2995
2996	LIST_FOREACH(zv, &all_zvols, zv_links) {
2997		if (strcmp(zv->zv_name, oldname) == 0) {
2998			zvol_rename_minor(zv, newname);
2999		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
3000		    (zv->zv_name[oldnamelen] == '/' ||
3001		     zv->zv_name[oldnamelen] == '@')) {
3002			snprintf(name, sizeof(name), "%s%c%s", newname,
3003			    zv->zv_name[oldnamelen],
3004			    zv->zv_name + oldnamelen + 1);
3005			zvol_rename_minor(zv, name);
3006		}
3007	}
3008
3009	if (locked)
3010		mutex_exit(&zfsdev_state_lock);
3011	PICKUP_GIANT();
3012}
3013
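/*
 * Character-device (cdevsw) entry points, used when the volume is exposed
 * with volmode=dev rather than as a GEOM provider.
 */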
3014static int
3015zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
3016{
3017	zvol_state_t *zv = dev->si_drv2;
3018	int err = 0;
3019
3020	mutex_enter(&zfsdev_state_lock);
3021	if (zv->zv_total_opens == 0)
3022		err = zvol_first_open(zv);
3023	if (err) {
3024		mutex_exit(&zfsdev_state_lock);
3025		return (err);
3026	}
3027	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
3028		err = SET_ERROR(EROFS);
3029		goto out;
3030	}
3031	if (zv->zv_flags & ZVOL_EXCL) {
3032		err = SET_ERROR(EBUSY);
3033		goto out;
3034	}
3035#ifdef FEXCL
3036	if (flags & FEXCL) {
3037		if (zv->zv_total_opens != 0) {
3038			err = SET_ERROR(EBUSY);
3039			goto out;
3040		}
3041		zv->zv_flags |= ZVOL_EXCL;
3042	}
3043#endif
3044
3045	zv->zv_total_opens++;
3046	if (flags & (FSYNC | FDSYNC)) {
3047		zv->zv_sync_cnt++;
3048		if (zv->zv_sync_cnt == 1)
3049			zil_async_to_sync(zv->zv_zilog, ZVOL_OBJ);
3050	}
3051	mutex_exit(&zfsdev_state_lock);
3052	return (err);
3053out:
3054	if (zv->zv_total_opens == 0)
3055		zvol_last_close(zv);
3056	mutex_exit(&zfsdev_state_lock);
3057	return (err);
3058}
3059
3060static int
3061zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3062{
3063	zvol_state_t *zv = dev->si_drv2;
3064
3065	mutex_enter(&zfsdev_state_lock);
3066	if (zv->zv_flags & ZVOL_EXCL) {
3067		ASSERT(zv->zv_total_opens == 1);
3068		zv->zv_flags &= ~ZVOL_EXCL;
3069	}
3070
3071	/*
3072	 * If the open count is zero, this is a spurious close.
3073	 * That indicates a bug in the kernel / DDI framework.
3074	 */
3075	ASSERT(zv->zv_total_opens != 0);
3076
3077	/*
3078	 * You may get multiple opens, but only one close.
3079	 */
3080	zv->zv_total_opens--;
3081	if (flags & (FSYNC | FDSYNC))
3082		zv->zv_sync_cnt--;
3083
3084	if (zv->zv_total_opens == 0)
3085		zvol_last_close(zv);
3086
3087	mutex_exit(&zfsdev_state_lock);
3088	return (0);
3089}
3090
3091static int
3092zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
3093{
3094	zvol_state_t *zv;
3095	rl_t *rl;
3096	off_t offset, length;
3097	int i, error;
3098	boolean_t sync;
3099
3100	zv = dev->si_drv2;
3101
3102	error = 0;
3103	KASSERT(zv->zv_total_opens > 0,
3104	    ("Device with zero access count in zvol_d_ioctl"));
3105
3106	i = IOCPARM_LEN(cmd);
3107	switch (cmd) {
3108	case DIOCGSECTORSIZE:
3109		*(u_int *)data = DEV_BSIZE;
3110		break;
3111	case DIOCGMEDIASIZE:
3112		*(off_t *)data = zv->zv_volsize;
3113		break;
3114	case DIOCGFLUSH:
3115		zil_commit(zv->zv_zilog, ZVOL_OBJ);
3116		break;
3117	case DIOCGDELETE:
3118		if (!zvol_unmap_enabled)
3119			break;
3120
3121		offset = ((off_t *)data)[0];
3122		length = ((off_t *)data)[1];
3123		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
3124		    offset < 0 || offset >= zv->zv_volsize ||
3125		    length <= 0) {
3126			printf("%s: offset=%jd length=%jd\n", __func__,
3127			    (intmax_t)offset, (intmax_t)length);
3128			error = EINVAL;
3129			break;
3130		}
3131
3132		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
3133		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
3134		error = dmu_tx_assign(tx, TXG_WAIT);
3135		if (error != 0) {
3136			sync = FALSE;
3137			dmu_tx_abort(tx);
3138		} else {
3139			sync = (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
3140			zvol_log_truncate(zv, tx, offset, length, sync);
3141			dmu_tx_commit(tx);
3142			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
3143			    offset, length);
3144		}
3145		zfs_range_unlock(rl);
3146		if (sync)
3147			zil_commit(zv->zv_zilog, ZVOL_OBJ);
3148		break;
3149	case DIOCGSTRIPESIZE:
3150		*(off_t *)data = zv->zv_volblocksize;
3151		break;
3152	case DIOCGSTRIPEOFFSET:
3153		*(off_t *)data = 0;
3154		break;
3155	case DIOCGATTR: {
3156		spa_t *spa = dmu_objset_spa(zv->zv_objset);
3157		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
3158		uint64_t refd, avail, usedobjs, availobjs;
3159
3160		if (strcmp(arg->name, "GEOM::candelete") == 0)
3161			arg->value.i = 1;
3162		else if (strcmp(arg->name, "blocksavail") == 0) {
3163			dmu_objset_space(zv->zv_objset, &refd, &avail,
3164			    &usedobjs, &availobjs);
3165			arg->value.off = avail / DEV_BSIZE;
3166		} else if (strcmp(arg->name, "blocksused") == 0) {
3167			dmu_objset_space(zv->zv_objset, &refd, &avail,
3168			    &usedobjs, &availobjs);
3169			arg->value.off = refd / DEV_BSIZE;
3170		} else if (strcmp(arg->name, "poolblocksavail") == 0) {
3171			avail = metaslab_class_get_space(spa_normal_class(spa));
3172			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
3173			arg->value.off = avail / DEV_BSIZE;
3174		} else if (strcmp(arg->name, "poolblocksused") == 0) {
3175			refd = metaslab_class_get_alloc(spa_normal_class(spa));
3176			arg->value.off = refd / DEV_BSIZE;
3177		} else
3178			error = ENOIOCTL;
3179		break;
3180	}
3181	case FIOSEEKHOLE:
3182	case FIOSEEKDATA: {
3183		off_t *off = (off_t *)data;
3184		uint64_t noff;
3185		boolean_t hole;
3186
3187		hole = (cmd == FIOSEEKHOLE);
3188		noff = *off;
3189		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
3190		*off = noff;
3191		break;
3192	}
3193	default:
3194		error = ENOIOCTL;
3195	}
3196
3197	return (error);
3198}
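/*
 * Illustrative sketch (not part of this file): issuing DIOCGDELETE from
 * FreeBSD userland to unmap a range of a zvol exposed with volmode=dev.
 * The device path is an example and error handling is omitted.
 *
 *	#include <sys/disk.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	off_t args[2];
 *	int fd = open("/dev/zvol/tank/vol0", O_RDWR);
 *
 *	args[0] = 0;			// byte offset, DEV_BSIZE-aligned
 *	args[1] = 1024 * 1024;		// length in bytes, DEV_BSIZE-aligned
 *	(void) ioctl(fd, DIOCGDELETE, args);
 */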
3199#endif	/* illumos */
3200