zvol.c revision 288571
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/zvol/dsk/<pool_name>/<dataset_name>
 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * FreeBSD notes.
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */
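/*
 * For example, a volume created from userland with
 *
 *	zfs create -V 10g tank/vol
 *
 * should appear as /dev/zvol/tank/vol (in the default GEOM mode) with no
 * further configuration.
 */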

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <sys/filio.h>

#include <geom/geom.h>

#include "zfs_namecheck.h"

#ifndef illumos
struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

#endif
void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
#ifdef illumos
kmutex_t zfsdev_state_lock;
#else
/*
 * In FreeBSD we've replaced the upstream zfsdev_state_lock with the
 * spa_namespace_lock in the ZVOL code.
 */
#define	zfsdev_state_lock spa_namespace_lock
#endif
static uint32_t zvol_minors;

#ifndef illumos
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int	volmode = ZFS_VOLMODE_GEOM;
TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
    "Expose as GEOM providers (1), device files (2) or neither");

#endif
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
#ifndef illumos
	LIST_ENTRY(zvol_state)	zv_links;
#endif
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
#ifdef illumos
	minor_t		zv_minor;	/* minor number */
#else
	struct cdev	*zv_dev;	/* non-GEOM device */
	struct g_provider *zv_provider;	/* GEOM provider */
#endif
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
#ifdef illumos
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
#endif
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
#ifndef illumos
	int		zv_state;
	int		zv_volmode;	/* Provide GEOM or cdev */
	struct bio_queue_head zv_queue;
	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
#endif
} zvol_state_t;

#ifndef illumos
static LIST_HEAD(, zvol_state) all_zvols;
#endif
/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

/*
 * Toggle unmap functionality.
 */
boolean_t zvol_unmap_enabled = B_TRUE;
#ifndef illumos
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
    &zvol_unmap_enabled, 0,
    "Enable UNMAP functionality");

static d_open_t		zvol_d_open;
static d_close_t	zvol_d_close;
static d_read_t		zvol_read;
static d_write_t	zvol_write;
static d_ioctl_t	zvol_d_ioctl;
static d_strategy_t	zvol_strategy;

static struct cdevsw zvol_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	zvol_d_open,
	.d_close =	zvol_d_close,
	.d_read =	zvol_read,
	.d_write =	zvol_write,
	.d_ioctl =	zvol_d_ioctl,
	.d_strategy =	zvol_strategy,
	.d_name =	"zvol",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);
static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
    uint64_t len, boolean_t sync);
#endif	/* !illumos */

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
#ifdef illumos
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
#else	/* !illumos */
	zv->zv_volsize = volsize;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct g_provider *pp;

		pp = zv->zv_provider;
		if (pp == NULL)
			return;
		g_topology_lock();
		g_resize_provider(pp, zv->zv_volsize);
		g_topology_unlock();
	}
#endif	/* illumos */
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

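/*
 * A valid volblocksize is a power of two between SPA_MINBLOCKSIZE (512)
 * and SPA_OLD_MAXBLOCKSIZE (128K), enforced below.
 */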
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
#ifdef illumos
	minor_t minor;
#endif
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

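	/*
	 * The loop header differs per platform, but the body is shared:
	 * walk every known zvol and match on dataset name.
	 */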
#ifdef illumos
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
#else
	LIST_FOREACH(zv, &all_zvols, zv_links) {
#endif
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while ((ze = list_head(&zv->zv_extents)) != NULL) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
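	/*
	 * Every block of the volume must have been visited, or the extent
	 * list doesn't cover the whole device and can't be used for dumps.
	 */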
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

#ifdef illumos
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}
#endif	/* illumos */

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
#ifdef illumos
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
#else
	struct cdev *dev;
	struct g_provider *pp;
	struct g_geom *gp;
	uint64_t volsize, mode;
#endif
	int error;

#ifndef illumos
	ZFS_LOG(1, "Creating ZVOL %s...", name);
#endif

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

#ifdef illumos
	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !illumos */

	zv = kmem_zalloc(sizeof (*zv), KM_SLEEP);
	zv->zv_state = 0;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		kmem_free(zv, sizeof (*zv));
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}
	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
		mode = volmode;

	DROP_GIANT();
	zv->zv_volsize = volsize;
	zv->zv_volmode = mode;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
		gp->start = zvol_geom_start;
		gp->access = zvol_geom_access;
		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = zv->zv_volsize;
		pp->private = zv;

		zv->zv_provider = pp;
		bioq_init(&zv->zv_queue);
		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
		    0640, "%s/%s", ZVOL_DRIVER, name) != 0) {
			kmem_free(zv, sizeof (*zv));
			dmu_objset_disown(os, FTAG);
			mutex_exit(&zfsdev_state_lock);
			return (SET_ERROR(ENXIO));
		}
		zv->zv_dev = dev;
		dev->si_iosize_max = MAXPHYS;
		dev->si_drv2 = zv;
	}
	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
#endif	/* illumos */

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
#ifdef illumos
	zv->zv_minor = minor;
#endif
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);
#ifndef illumos
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		zvol_geom_run(zv);
		g_topology_unlock();
	}
	PICKUP_GIANT();

	ZFS_LOG(1, "ZVOL %s created.", name);
#endif

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef illumos
	char nmbuf[20];
	minor_t minor = zv->zv_minor;
#endif

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

#ifdef illumos
	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);
#else
	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

	LIST_REMOVE(zv, zv_links);
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		zvol_geom_destroy(zv);
		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV)
		destroy_dev(zv->zv_dev);
#endif

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));
#ifdef illumos
	ddi_soft_state_free(zfsdev_state, minor);
#endif
	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

#ifdef illumos
int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
#endif	/* illumos */

static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
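	/* A shrink frees space on balance, so let this in on a full pool. */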
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
#ifdef illumos
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
#else	/* !illumos */
	zvol_state_t *zv, *tzv;
	size_t namelen;

	namelen = strlen(name);

	DROP_GIANT();
	mutex_enter(&zfsdev_state_lock);

	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		    strlen(zv->zv_name) > namelen &&
		    (zv->zv_name[namelen] == '/' ||
		    zv->zv_name[namelen] == '@'))) {
			(void) zvol_remove_zv(zv);
		}
	}

	mutex_exit(&zfsdev_state_lock);
	PICKUP_GIANT();
#endif	/* illumos */
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() to ensure that the device's
	 * size(9P) is not visible to the dump subsystem.
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

#ifdef ZVOL_DUMP
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}
#endif	/* ZVOL_DUMP */

#ifdef illumos
	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif	/* illumos */
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

/*ARGSUSED*/
#ifdef illumos
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
#else
static int
zvol_open(struct g_provider *pp, int flag, int count)
#endif
{
	zvol_state_t *zv;
	int err = 0;
#ifdef illumos

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
#else	/* !illumos */
	boolean_t locked = B_FALSE;

	/*
	 * Protect against recursively entering spa_namespace_lock
	 * when spa_open() is used for a pool backed by (local) ZVOLs.
	 * This is needed since we replaced the upstream zfsdev_state_lock
	 * with spa_namespace_lock in the ZVOL code.
	 * We are using the same trick as spa_open().
	 * Note that calls in zvol_first_open() that need to resolve
	 * a pool name to a spa object will enter spa_open()
	 * recursively, but that function already has all the
	 * necessary protection.
	 */
	if (!MUTEX_HELD(&zfsdev_state_lock)) {
		mutex_enter(&zfsdev_state_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0) {
		err = zvol_first_open(zv);
		if (err) {
			if (locked)
				mutex_exit(&zfsdev_state_lock);
			return (err);
		}
		pp->mediasize = zv->zv_volsize;
		pp->stripeoffset = 0;
		pp->stripesize = zv->zv_volblocksize;
	}
#endif	/* illumos */
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
#ifdef FEXCL
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}
#endif

#ifdef illumos
	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);
#else
	zv->zv_total_opens += count;
	if (locked)
		mutex_exit(&zfsdev_state_lock);
#endif

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
#ifdef illumos
	mutex_exit(&zfsdev_state_lock);
#else
	if (locked)
		mutex_exit(&zfsdev_state_lock);
#endif
	return (err);
}

/*ARGSUSED*/
#ifdef illumos
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
#else	/* !illumos */
static int
zvol_close(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t locked = B_FALSE;

	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&zfsdev_state_lock)) {
		mutex_enter(&zfsdev_state_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&zfsdev_state_lock);
#endif	/* illumos */
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
#ifdef illumos
	ASSERT(zv->zv_open_count[otyp] != 0);
#endif
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
#ifdef illumos
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;
#else
	zv->zv_total_opens -= count;
#endif

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

#ifdef illumos
	mutex_exit(&zfsdev_state_lock);
#else
	if (locked)
		mutex_exit(&zfsdev_state_lock);
#endif
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

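	/*
	 * With logbias=throughput, full aligned blocks below are always
	 * pushed out via dmu_sync() instead of being logged immediately.
	 */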
	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
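		/*
		 * If we can't read the data back for a WR_COPIED record
		 * (e.g. it hasn't been written out yet), fall back to
		 * WR_NEED_COPY and let zil_commit() fetch it later.
		 */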
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

#ifdef illumos
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (ze != NULL && offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
#else	/* !illumos */
void
zvol_strategy(struct bio *bp)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
#ifdef illumos
	boolean_t doread = bp->b_flags & B_READ;
#else
	boolean_t doread = 0;
#endif
	boolean_t is_dumpified;
	boolean_t sync;

#ifdef illumos
	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
#else	/* !illumos */
	if (bp->bio_to)
		zv = bp->bio_to->private;
	else
		zv = bp->bio_dev->si_drv2;

	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		error = SET_ERROR(EROFS);
		goto out;
	}

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		goto sync;
	case BIO_READ:
		doread = 1;
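		/* FALLTHROUGH */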
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		error = SET_ERROR(EOPNOTSUPP);
		goto out;
	}

	off = bp->bio_offset;
#endif	/* illumos */
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

#ifdef illumos
	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dumpified;
#else	/* !illumos */
	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		error = SET_ERROR(EIO);
		goto out;
	}

	is_dumpified = B_FALSE;
	sync = !doread && !is_dumpified &&
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
#endif	/* illumos */

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

#ifndef illumos
	if (bp->bio_cmd == BIO_DELETE) {
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, off, resid, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    off, resid);
			resid = 0;
		}
		goto unlock;
	}
#endif
	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
#ifdef illumos
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
#else
		if (doread) {
#endif
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
#ifndef illumos
unlock:
#endif
	zfs_range_unlock(rl);

#ifdef illumos
	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
#else	/* !illumos */
	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length && off > volsize)
		error = SET_ERROR(EINVAL);

	if (sync) {
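		/* BIO_FLUSH jumps straight here to push the ZIL out. */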
sync:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	}
out:
	if (bp->bio_to)
		g_io_deliver(bp, error);
	else
		biofinish(bp, NULL, error);
#endif	/* illumos */
}

#ifdef illumos
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else	/* !illumos */
int
zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

#ifdef illumos
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	/*
	 * uio_loffset == volsize isn't an error as it's required for EOF
	 * processing.
	 */
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}
#endif

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

#ifdef illumos
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else	/* !illumos */
int
zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

#ifdef illumos
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	/*
	 * uio_loffset == volsize isn't an error as it's required for EOF
	 * processing.
	 */
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
#else
	sync = (ioflag & IO_SYNC) ||
#endif
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

#ifdef illumos
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
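		/*
		 * The header CRC covers the partition-array CRC field,
		 * so the array CRC must be computed first.
		 */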
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */
#endif	/* illumos */
2038
2039/*
2040 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
2041 */
2042static void
2043zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
2044    boolean_t sync)
2045{
2046	itx_t *itx;
2047	lr_truncate_t *lr;
2048	zilog_t *zilog = zv->zv_zilog;
2049
2050	if (zil_replaying(zilog, tx))
2051		return;
2052
2053	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
2054	lr = (lr_truncate_t *)&itx->itx_lr;
2055	lr->lr_foid = ZVOL_OBJ;
2056	lr->lr_offset = off;
2057	lr->lr_length = len;
2058
2059	itx->itx_sync = sync;
2060	zil_itx_assign(zilog, itx, tx);
2061}
2062
2063#ifdef illumos
2064/*
2065 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
2066 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
2067 */
2068/*ARGSUSED*/
2069int
2070zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
2071{
2072	zvol_state_t *zv;
2073	struct dk_callback *dkc;
2074	int error = 0;
2075	rl_t *rl;
2076
2077	mutex_enter(&zfsdev_state_lock);
2078
2079	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
2080
2081	if (zv == NULL) {
2082		mutex_exit(&zfsdev_state_lock);
2083		return (SET_ERROR(ENXIO));
2084	}
2085	ASSERT(zv->zv_total_opens > 0);
2086
2087	switch (cmd) {
2088
2089	case DKIOCINFO:
2090	{
2091		struct dk_cinfo dki;
2092
2093		bzero(&dki, sizeof (dki));
2094		(void) strcpy(dki.dki_cname, "zvol");
2095		(void) strcpy(dki.dki_dname, "zvol");
2096		dki.dki_ctype = DKC_UNKNOWN;
2097		dki.dki_unit = getminor(dev);
2098		dki.dki_maxtransfer =
2099		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
2100		mutex_exit(&zfsdev_state_lock);
2101		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
2102			error = SET_ERROR(EFAULT);
2103		return (error);
2104	}
2105
2106	case DKIOCGMEDIAINFO:
2107	{
2108		struct dk_minfo dkm;
2109
2110		bzero(&dkm, sizeof (dkm));
2111		dkm.dki_lbsize = 1U << zv->zv_min_bs;
2112		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2113		dkm.dki_media_type = DK_UNKNOWN;
2114		mutex_exit(&zfsdev_state_lock);
2115		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
2116			error = SET_ERROR(EFAULT);
2117		return (error);
2118	}
2119
2120	case DKIOCGMEDIAINFOEXT:
2121	{
2122		struct dk_minfo_ext dkmext;
2123
2124		bzero(&dkmext, sizeof (dkmext));
2125		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
2126		dkmext.dki_pbsize = zv->zv_volblocksize;
2127		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2128		dkmext.dki_media_type = DK_UNKNOWN;
2129		mutex_exit(&zfsdev_state_lock);
2130		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
2131			error = SET_ERROR(EFAULT);
2132		return (error);
2133	}
2134
2135	case DKIOCGETEFI:
2136	{
2137		uint64_t vs = zv->zv_volsize;
2138		uint8_t bs = zv->zv_min_bs;
2139
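		/*
		 * The values were snapshotted above while the lock was held;
		 * zvol_getefi() does a copyout, which may sleep, so drop the
		 * lock first.
		 */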
2140		mutex_exit(&zfsdev_state_lock);
2141		error = zvol_getefi((void *)arg, flag, vs, bs);
2142		return (error);
2143	}
2144
2145	case DKIOCFLUSHWRITECACHE:
2146		dkc = (struct dk_callback *)arg;
2147		mutex_exit(&zfsdev_state_lock);
2148		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2149		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
2150			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
2151			error = 0;
2152		}
2153		return (error);
2154
2155	case DKIOCGETWCE:
2156	{
2157		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
2158		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
2159		    flag))
2160			error = SET_ERROR(EFAULT);
2161		break;
2162	}
2163	case DKIOCSETWCE:
2164	{
2165		int wce;
2166		if (ddi_copyin((void *)arg, &wce, sizeof (int),
2167		    flag)) {
2168			error = SET_ERROR(EFAULT);
2169			break;
2170		}
2171		if (wce) {
2172			zv->zv_flags |= ZVOL_WCE;
2173			mutex_exit(&zfsdev_state_lock);
2174		} else {
2175			zv->zv_flags &= ~ZVOL_WCE;
2176			mutex_exit(&zfsdev_state_lock);
2177			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2178		}
2179		return (0);
2180	}
2181
2182	case DKIOCGGEOM:
2183	case DKIOCGVTOC:
2184		/*
2185		 * commands using these (like prtvtoc) expect ENOTSUP
2186		 * since we're emulating an EFI label
2187		 */
2188		error = SET_ERROR(ENOTSUP);
2189		break;
2190
2191	case DKIOCDUMPINIT:
2192		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2193		    RL_WRITER);
2194		error = zvol_dumpify(zv);
2195		zfs_range_unlock(rl);
2196		break;
2197
2198	case DKIOCDUMPFINI:
2199		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
2200			break;
2201		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2202		    RL_WRITER);
2203		error = zvol_dump_fini(zv);
2204		zfs_range_unlock(rl);
2205		break;
2206
2207	case DKIOCFREE:
2208	{
2209		dkioc_free_t df;
2210		dmu_tx_t *tx;
2211
2212		if (!zvol_unmap_enabled)
2213			break;
2214
2215		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
2216			error = SET_ERROR(EFAULT);
2217			break;
2218		}
2219
2220		/*
2221		 * Apply Postel's Law to length-checking.  If the caller
2222		 * overshoots, just free up to the end of the volume, if
2223		 * there's anything to free at all.
2224		 */
2225		if (df.df_start >= zv->zv_volsize)
2226			break;	/* No need to do anything... */
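
		/*
		 * A hypothetical example of the overshoot case: on a 1 GB
		 * zvol, df_start = 1G - 512 with df_length = 1M frees only
		 * the final 512 bytes, since dmu_free_long_range() clips the
		 * range at the end of the object.
		 */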
2227
2228		mutex_exit(&zfsdev_state_lock);
2229
2230		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
2231		    RL_WRITER);
2232		tx = dmu_tx_create(zv->zv_objset);
2233		dmu_tx_mark_netfree(tx);
2234		error = dmu_tx_assign(tx, TXG_WAIT);
2235		if (error != 0) {
2236			dmu_tx_abort(tx);
2237		} else {
2238			zvol_log_truncate(zv, tx, df.df_start,
2239			    df.df_length, B_TRUE);
2240			dmu_tx_commit(tx);
2241			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2242			    df.df_start, df.df_length);
2243		}
2244
2245		zfs_range_unlock(rl);
2246
2247		if (error == 0) {
2248			/*
2249			 * If the write-cache is disabled or 'sync' property
2250			 * is set to 'always' then treat this as a synchronous
2251			 * operation (i.e. commit to zil).
2252			 */
2253			if (!(zv->zv_flags & ZVOL_WCE) ||
2254			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
2255				zil_commit(zv->zv_zilog, ZVOL_OBJ);
2256
2257			/*
2258			 * If the caller asked to wait for the free to reach
2259			 * stable storage (DF_WAIT_SYNC), don't return until
2260			 * the containing txg has synced.
2261			 */
2262			if (df.df_flags & DF_WAIT_SYNC) {
2263				txg_wait_synced(
2264				    dmu_objset_pool(zv->zv_objset), 0);
2265			}
2266		}
2267		return (error);
2268	}
2269
2270	default:
2271		error = SET_ERROR(ENOTTY);
2272		break;
2273
2274	}
2275	mutex_exit(&zfsdev_state_lock);
2276	return (error);
2277}
2278#endif	/* illumos */
2279
2280int
2281zvol_busy(void)
2282{
2283	return (zvol_minors != 0);
2284}
2285
2286void
2287zvol_init(void)
2288{
2289	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
2290	    1) == 0);
2291#ifdef illumos
2292	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
2293#else
2294	ZFS_LOG(1, "ZVOL Initialized.");
2295#endif
2296}
2297
2298void
2299zvol_fini(void)
2300{
2301#ifdef illumos
2302	mutex_destroy(&zfsdev_state_lock);
2303#endif
2304	ddi_soft_state_fini(&zfsdev_state);
2305	ZFS_LOG(1, "ZVOL Deinitialized.");
2306}
2307
2308#ifdef illumos
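/*
 * check/sync callback pair for the dsl_sync_task() in zvol_dump_init():
 * the check returning nonzero (feature already active) aborts the sync
 * task, so the feature's reference count is bumped at most once here.
 */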
2309/*ARGSUSED*/
2310static int
2311zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2312{
2313	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2314
2315	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2316		return (1);
2317	return (0);
2318}
2319
2320/*ARGSUSED*/
2321static void
2322zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2323{
2324	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2325
2326	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2327}
2328
2329static int
2330zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2331{
2332	dmu_tx_t *tx;
2333	int error;
2334	objset_t *os = zv->zv_objset;
2335	spa_t *spa = dmu_objset_spa(os);
2336	vdev_t *vd = spa->spa_root_vdev;
2337	nvlist_t *nv = NULL;
2338	uint64_t version = spa_version(spa);
2339	enum zio_checksum checksum;
2340
2341	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2342	ASSERT(vd->vdev_ops == &vdev_root_ops);
2343
2344	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2345	    DMU_OBJECT_END);
2346	/* wait for dmu_free_long_range to actually free the blocks */
2347	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2348
2349	/*
2350	 * If the pool on which the dump device is being initialized has more
2351	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2352	 * enabled.  If so, bump that feature's counter to indicate that the
2353	 * feature is active. We also check the vdev type to handle the
2354	 * following case:
2355	 *   # zpool create test raidz disk1 disk2 disk3
2356	 *   Here spa_root_vdev->vdev_children == 1 (the raidz vdev), but
2357	 *   the raidz vdev itself has 3 children.
2358	 */
2359	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
2360		if (!spa_feature_is_enabled(spa,
2361		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2362			return (SET_ERROR(ENOTSUP));
2363		(void) dsl_sync_task(spa_name(spa),
2364		    zfs_mvdev_dump_feature_check,
2365		    zfs_mvdev_dump_activate_feature_sync, NULL,
2366		    2, ZFS_SPACE_CHECK_RESERVED);
2367	}
2368
2369	tx = dmu_tx_create(os);
2370	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2371	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2372	error = dmu_tx_assign(tx, TXG_WAIT);
2373	if (error) {
2374		dmu_tx_abort(tx);
2375		return (error);
2376	}
2377
2378	/*
2379	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2380	 * function.  Otherwise, use the old default -- OFF.
2381	 */
2382	checksum = spa_feature_is_active(spa,
2383	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2384	    ZIO_CHECKSUM_OFF;
2385
2386	/*
2387	 * If we are resizing the dump device then we only need to
2388	 * update the refreservation to match the newly updated
2389	 * zvol size. Otherwise, we save off the zvol's original properties
2390	 * so that we can restore them if the zvol is ever undumpified.
2391	 */
2392	if (resize) {
2393		error = zap_update(os, ZVOL_ZAP_OBJ,
2394		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2395		    &zv->zv_volsize, tx);
2396	} else {
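		/*
		 * Note: this checksum saves the dataset's current checksum
		 * property for a later undumpify; it shadows the enum
		 * zio_checksum above, which holds the value being installed.
		 */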
2397		uint64_t checksum, compress, refresrv, vbs, dedup;
2398
2399		error = dsl_prop_get_integer(zv->zv_name,
2400		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2401		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2402		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
2403		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2404		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
2405		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2406		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
2407		if (version >= SPA_VERSION_DEDUP) {
2408			error = error ? error :
2409			    dsl_prop_get_integer(zv->zv_name,
2410			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2411		}
2412
2413		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2414		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2415		    &compress, tx);
2416		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2417		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
2418		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2419		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2420		    &refresrv, tx);
2421		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2422		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2423		    &vbs, tx);
2424		error = error ? error : dmu_object_set_blocksize(
2425		    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2426		if (version >= SPA_VERSION_DEDUP) {
2427			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2428			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2429			    &dedup, tx);
2430		}
2431		if (error == 0)
2432			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2433	}
2434	dmu_tx_commit(tx);
2435
2436	/*
2437	 * We only need to update the zvol's properties if we are initializing
2438	 * the dump area for the first time.
2439	 */
2440	if (!resize) {
2441		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2442		VERIFY(nvlist_add_uint64(nv,
2443		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2444		VERIFY(nvlist_add_uint64(nv,
2445		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2446		    ZIO_COMPRESS_OFF) == 0);
2447		VERIFY(nvlist_add_uint64(nv,
2448		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2449		    checksum) == 0);
2450		if (version >= SPA_VERSION_DEDUP) {
2451			VERIFY(nvlist_add_uint64(nv,
2452			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2453			    ZIO_CHECKSUM_OFF) == 0);
2454		}
2455
2456		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2457		    nv, NULL);
2458		nvlist_free(nv);
2459
2460		if (error)
2461			return (error);
2462	}
2463
2464	/* Allocate the space for the dump */
2465	error = zvol_prealloc(zv);
2466	return (error);
2467}
2468
2469static int
2470zvol_dumpify(zvol_state_t *zv)
2471{
2472	int error = 0;
2473	uint64_t dumpsize = 0;
2474	dmu_tx_t *tx;
2475	objset_t *os = zv->zv_objset;
2476
2477	if (zv->zv_flags & ZVOL_RDONLY)
2478		return (SET_ERROR(EROFS));
2479
2480	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2481	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2482		boolean_t resize = (dumpsize > 0);
2483
2484		if ((error = zvol_dump_init(zv, resize)) != 0) {
2485			(void) zvol_dump_fini(zv);
2486			return (error);
2487		}
2488	}
2489
2490	/*
2491	 * Build up our lba mapping.
2492	 */
2493	error = zvol_get_lbas(zv);
2494	if (error) {
2495		(void) zvol_dump_fini(zv);
2496		return (error);
2497	}
2498
2499	tx = dmu_tx_create(os);
2500	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2501	error = dmu_tx_assign(tx, TXG_WAIT);
2502	if (error) {
2503		dmu_tx_abort(tx);
2504		(void) zvol_dump_fini(zv);
2505		return (error);
2506	}
2507
2508	zv->zv_flags |= ZVOL_DUMPIFIED;
2509	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2510	    &zv->zv_volsize, tx);
2511	dmu_tx_commit(tx);
2512
2513	if (error) {
2514		(void) zvol_dump_fini(zv);
2515		return (error);
2516	}
2517
2518	txg_wait_synced(dmu_objset_pool(os), 0);
2519	return (0);
2520}
2521
2522static int
2523zvol_dump_fini(zvol_state_t *zv)
2524{
2525	dmu_tx_t *tx;
2526	objset_t *os = zv->zv_objset;
2527	nvlist_t *nv;
2528	int error = 0;
2529	uint64_t checksum, compress, refresrv, vbs, dedup;
2530	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2531
2532	/*
2533	 * Attempt to restore the zvol back to its pre-dumpified state.
2534	 * This is a best-effort attempt as it's possible that not all
2535	 * of these properties were initialized during the dumpify process
2536	 * (e.g. an error during zvol_dump_init).
2537	 */
2538
2539	tx = dmu_tx_create(os);
2540	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2541	error = dmu_tx_assign(tx, TXG_WAIT);
2542	if (error) {
2543		dmu_tx_abort(tx);
2544		return (error);
2545	}
2546	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2547	dmu_tx_commit(tx);
2548
2549	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2550	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2551	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2552	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2553	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2554	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2555	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2556	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2557
2558	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2559	(void) nvlist_add_uint64(nv,
2560	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2561	(void) nvlist_add_uint64(nv,
2562	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2563	(void) nvlist_add_uint64(nv,
2564	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2565	if (version >= SPA_VERSION_DEDUP &&
2566	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2567	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2568		(void) nvlist_add_uint64(nv,
2569		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2570	}
2571	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2572	    nv, NULL);
2573	nvlist_free(nv);
2574
2575	zvol_free_extents(zv);
2576	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2577	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2578	/* wait for dmu_free_long_range to actually free the blocks */
2579	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2580	tx = dmu_tx_create(os);
2581	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2582	error = dmu_tx_assign(tx, TXG_WAIT);
2583	if (error) {
2584		dmu_tx_abort(tx);
2585		return (error);
2586	}
2587	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2588		zv->zv_volblocksize = vbs;
2589	dmu_tx_commit(tx);
2590
2591	return (0);
2592}
2593#else	/* !illumos */
2594
2595static void
2596zvol_geom_run(zvol_state_t *zv)
2597{
2598	struct g_provider *pp;
2599
2600	pp = zv->zv_provider;
2601	g_error_provider(pp, 0);
2602
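	/*
	 * pp->name has the form "zvol/<dataset>"; advancing by
	 * sizeof(ZVOL_DRIVER) skips the prefix and the '/' (the NUL counted
	 * by sizeof stands in for the separator), leaving just the dataset
	 * name for the thread title.
	 */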
2603	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2604	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2605}
2606
2607static void
2608zvol_geom_destroy(zvol_state_t *zv)
2609{
2610	struct g_provider *pp;
2611
2612	g_topology_assert();
2613
2614	mtx_lock(&zv->zv_queue_mtx);
2615	zv->zv_state = 1;
2616	wakeup_one(&zv->zv_queue);
2617	while (zv->zv_state != 2)
2618		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2619	mtx_destroy(&zv->zv_queue_mtx);
2620
2621	pp = zv->zv_provider;
2622	zv->zv_provider = NULL;
2623	pp->private = NULL;
2624	g_wither_geom(pp->geom, ENXIO);
2625}
2626
2627static int
2628zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2629{
2630	int count, error, flags;
2631
2632	g_topology_assert();
2633
2634	/*
2635	 * To make it easier we expect either open or close, but not both
2636	 * at the same time.
2637	 */
2638	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2639	    (acr <= 0 && acw <= 0 && ace <= 0),
2640	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2641	    pp->name, acr, acw, ace));
2642
2643	if (pp->private == NULL) {
2644		if (acr <= 0 && acw <= 0 && ace <= 0)
2645			return (0);
2646		return (pp->error);
2647	}
2648
2649	/*
2650	 * We don't pass the FEXCL flag to zvol_open()/zvol_close() if ace != 0,
2651	 * because GEOM already handles that, and handles it a bit differently.
2652	 * GEOM allows for multiple read/exclusive consumers, while ZFS allows
2653	 * only one exclusive consumer, no matter whether it is a reader or a
2654	 * writer.  I prefer the way GEOM works, so I'll leave it to GEOM to
2655	 * decide what to do.
2656	 */
2657
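	/*
	 * The acr/acw/ace arguments are deltas, not absolute counts: the
	 * first read-only consumer shows up as (1, 0, 0), giving count == 1
	 * and zvol_open(pp, FREAD, 1); its departure shows up as (-1, 0, 0)
	 * and becomes zvol_close(pp, FREAD, 1).
	 */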
2658	count = acr + acw + ace;
2659	if (count == 0)
2660		return (0);
2661
2662	flags = 0;
2663	if (acr != 0 || ace != 0)
2664		flags |= FREAD;
2665	if (acw != 0)
2666		flags |= FWRITE;
2667
2668	g_topology_unlock();
2669	if (count > 0)
2670		error = zvol_open(pp, flags, count);
2671	else
2672		error = zvol_close(pp, flags, -count);
2673	g_topology_lock();
2674	return (error);
2675}
2676
2677static void
2678zvol_geom_start(struct bio *bp)
2679{
2680	zvol_state_t *zv;
2681	boolean_t first;
2682
2683	zv = bp->bio_to->private;
2684	ASSERT(zv != NULL);
2685	switch (bp->bio_cmd) {
2686	case BIO_FLUSH:
2687		if (!THREAD_CAN_SLEEP())
2688			goto enqueue;
2689		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2690		g_io_deliver(bp, 0);
2691		break;
2692	case BIO_READ:
2693	case BIO_WRITE:
2694	case BIO_DELETE:
2695		if (!THREAD_CAN_SLEEP())
2696			goto enqueue;
2697		zvol_strategy(bp);
2698		break;
2699	case BIO_GETATTR: {
2700		spa_t *spa = dmu_objset_spa(zv->zv_objset);
2701		uint64_t refd, avail, usedobjs, availobjs, val;
2702
2703		if (g_handleattr_int(bp, "GEOM::candelete", 1))
2704			return;
2705		if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
2706			dmu_objset_space(zv->zv_objset, &refd, &avail,
2707			    &usedobjs, &availobjs);
2708			if (g_handleattr_off_t(bp, "blocksavail",
2709			    avail / DEV_BSIZE))
2710				return;
2711		} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
2712			dmu_objset_space(zv->zv_objset, &refd, &avail,
2713			    &usedobjs, &availobjs);
2714			if (g_handleattr_off_t(bp, "blocksused",
2715			    refd / DEV_BSIZE))
2716				return;
2717		} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
2718			avail = metaslab_class_get_space(spa_normal_class(spa));
2719			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
2720			if (g_handleattr_off_t(bp, "poolblocksavail",
2721			    avail / DEV_BSIZE))
2722				return;
2723		} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
2724			refd = metaslab_class_get_alloc(spa_normal_class(spa));
2725			if (g_handleattr_off_t(bp, "poolblocksused",
2726			    refd / DEV_BSIZE))
2727				return;
2728		}
2729		/* FALLTHROUGH */
2730	}
2731	default:
2732		g_io_deliver(bp, EOPNOTSUPP);
2733		break;
2734	}
2735	return;
2736
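/*
 * Requests arriving from a context that may not sleep (e.g. GEOM's g_down
 * thread) land here: queue the bio for the per-volume worker thread and
 * wake it only on the empty-to-non-empty transition.
 */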
2737enqueue:
2738	mtx_lock(&zv->zv_queue_mtx);
2739	first = (bioq_first(&zv->zv_queue) == NULL);
2740	bioq_insert_tail(&zv->zv_queue, bp);
2741	mtx_unlock(&zv->zv_queue_mtx);
2742	if (first)
2743		wakeup_one(&zv->zv_queue);
2744}
2745
2746static void
2747zvol_geom_worker(void *arg)
2748{
2749	zvol_state_t *zv;
2750	struct bio *bp;
2751
2752	thread_lock(curthread);
2753	sched_prio(curthread, PRIBIO);
2754	thread_unlock(curthread);
2755
2756	zv = arg;
2757	for (;;) {
2758		mtx_lock(&zv->zv_queue_mtx);
2759		bp = bioq_takefirst(&zv->zv_queue);
2760		if (bp == NULL) {
2761			if (zv->zv_state == 1) {
2762				zv->zv_state = 2;
2763				wakeup(&zv->zv_state);
2764				mtx_unlock(&zv->zv_queue_mtx);
2765				kthread_exit();
2766			}
2767			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2768			    "zvol:io", 0);
2769			continue;
2770		}
2771		mtx_unlock(&zv->zv_queue_mtx);
2772		switch (bp->bio_cmd) {
2773		case BIO_FLUSH:
2774			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2775			g_io_deliver(bp, 0);
2776			break;
2777		case BIO_READ:
2778		case BIO_WRITE:
2779		case BIO_DELETE:
2780			zvol_strategy(bp);
2781			break;
2782		default:
2783			g_io_deliver(bp, EOPNOTSUPP);
2784			break;
2785		}
2786	}
2787}
2788
2789extern boolean_t dataset_name_hidden(const char *name);
2790
2791static int
2792zvol_create_snapshots(objset_t *os, const char *name)
2793{
2794	uint64_t cookie, obj;
2795	char *sname;
2796	int error, len;
2797
2798	cookie = obj = 0;
2799	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2800
2801#if 0
2802	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2803	    DS_FIND_SNAPSHOTS);
2804#endif
2805
2806	for (;;) {
2807		len = snprintf(sname, MAXPATHLEN, "%s@", name);
2808		if (len >= MAXPATHLEN) {
2809			/* The caller owns the hold on "os"; don't release it here. */
2810			error = ENAMETOOLONG;
2811			break;
2812		}
2813
2814		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2815		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2816		    sname + len, &obj, &cookie, NULL);
2817		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2818		if (error != 0) {
2819			if (error == ENOENT)
2820				error = 0;
2821			break;
2822		}
2823
2824		if ((error = zvol_create_minor(sname)) != 0) {
2825			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2826			    sname, error);
2827			break;
2828		}
2829	}
2830
2831	kmem_free(sname, MAXPATHLEN);
2832	return (error);
2833}
2834
2835int
2836zvol_create_minors(const char *name)
2837{
2838	uint64_t cookie;
2839	objset_t *os;
2840	char *osname, *p;
2841	int error, len;
2842
2843	if (dataset_name_hidden(name))
2844		return (0);
2845
2846	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2847		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2848		    name, error);
2849		return (error);
2850	}
2851	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2852		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
2853		dsl_pool_rele(dmu_objset_pool(os), FTAG);
2854		error = zvol_create_minor(name);
2855		if (error == 0 || error == EEXIST) {
2856			error = zvol_create_snapshots(os, name);
2857		} else {
2858			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2859			    name, error);
2860		}
2861		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
2862		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
2863		return (error);
2864	}
2865	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2866		dmu_objset_rele(os, FTAG);
2867		return (0);
2868	}
2869
2870	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2871	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2872		dmu_objset_rele(os, FTAG);
2873		kmem_free(osname, MAXPATHLEN);
2874		return (ENOENT);
2875	}
2876	p = osname + strlen(osname);
2877	len = MAXPATHLEN - (p - osname);
2878
2879#if 0
2880	/* Prefetch the datasets. */
2881	cookie = 0;
2882	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2883		if (!dataset_name_hidden(osname))
2884			(void) dmu_objset_prefetch(osname, NULL);
2885	}
2886#endif
2887
2888	cookie = 0;
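	/*
	 * Walk this filesystem's children.  The objset hold is dropped
	 * across each recursive call (which takes its own holds) and
	 * reacquired before fetching the next child.
	 */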
2889	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2890	    &cookie) == 0) {
2891		dmu_objset_rele(os, FTAG);
2892		(void) zvol_create_minors(osname);
2893		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2894			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2895			    name, error);
			kmem_free(osname, MAXPATHLEN);	/* don't leak the name buffer */
2896			return (error);
2897		}
2898	}
2899
2900	dmu_objset_rele(os, FTAG);
2901	kmem_free(osname, MAXPATHLEN);
2902	return (0);
2903}
2904
2905static void
2906zvol_rename_minor(zvol_state_t *zv, const char *newname)
2907{
2908	struct g_geom *gp;
2909	struct g_provider *pp;
2910	struct cdev *dev;
2911
2912	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2913
2914	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
2915		g_topology_lock();
2916		pp = zv->zv_provider;
2917		ASSERT(pp != NULL);
2918		gp = pp->geom;
2919		ASSERT(gp != NULL);
2920
2921		zv->zv_provider = NULL;
2922		g_wither_provider(pp, ENXIO);
2923
2924		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2925		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
2926		pp->sectorsize = DEV_BSIZE;
2927		pp->mediasize = zv->zv_volsize;
2928		pp->private = zv;
2929		zv->zv_provider = pp;
2930		g_error_provider(pp, 0);
2931		g_topology_unlock();
2932	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
2933		dev = zv->zv_dev;
2934		ASSERT(dev != NULL);
2935		zv->zv_dev = NULL;
2936		destroy_dev(dev);
2937
2938		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
2939		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
2940		    0640, "%s/%s", ZVOL_DRIVER, newname) == 0) {
2941			zv->zv_dev = dev;
2942			dev->si_iosize_max = MAXPHYS;
2943			dev->si_drv2 = zv;
2944		}
2945	}
2946	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2947}
2948
2949void
2950zvol_rename_minors(const char *oldname, const char *newname)
2951{
2952	char name[MAXPATHLEN];
2953	struct g_provider *pp;
2954	struct g_geom *gp;
2955	size_t oldnamelen, newnamelen;
2956	zvol_state_t *zv;
2957	char *namebuf;
2958	boolean_t locked = B_FALSE;
2959
2960	oldnamelen = strlen(oldname);
2961	newnamelen = strlen(newname);
2962
2963	DROP_GIANT();
2964	/* See comment in zvol_open(). */
2965	if (!MUTEX_HELD(&zfsdev_state_lock)) {
2966		mutex_enter(&zfsdev_state_lock);
2967		locked = B_TRUE;
2968	}
2969
2970	LIST_FOREACH(zv, &all_zvols, zv_links) {
2971		if (strcmp(zv->zv_name, oldname) == 0) {
2972			zvol_rename_minor(zv, newname);
2973		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
2974		    (zv->zv_name[oldnamelen] == '/' ||
2975		     zv->zv_name[oldnamelen] == '@')) {
2976			snprintf(name, sizeof(name), "%s%c%s", newname,
2977			    zv->zv_name[oldnamelen],
2978			    zv->zv_name + oldnamelen + 1);
2979			zvol_rename_minor(zv, name);
2980		}
2981	}
2982
2983	if (locked)
2984		mutex_exit(&zfsdev_state_lock);
2985	PICKUP_GIANT();
2986}
2987
2988static int
2989zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2990{
2991	zvol_state_t *zv;
2992	int err = 0;
2993
2994	mutex_enter(&zfsdev_state_lock);
2995	zv = dev->si_drv2;
2996	if (zv == NULL) {
2997		mutex_exit(&zfsdev_state_lock);
2998		return (ENXIO);		/* zvol_create_minor() not done yet */
2999	}
3000
3001	if (zv->zv_total_opens == 0)
3002		err = zvol_first_open(zv);
3003	if (err) {
3004		mutex_exit(&zfsdev_state_lock);
3005		return (err);
3006	}
3007	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
3008		err = SET_ERROR(EROFS);
3009		goto out;
3010	}
3011	if (zv->zv_flags & ZVOL_EXCL) {
3012		err = SET_ERROR(EBUSY);
3013		goto out;
3014	}
3015#ifdef FEXCL
3016	if (flags & FEXCL) {
3017		if (zv->zv_total_opens != 0) {
3018			err = SET_ERROR(EBUSY);
3019			goto out;
3020		}
3021		zv->zv_flags |= ZVOL_EXCL;
3022	}
3023#endif
3024
3025	zv->zv_total_opens++;
3026	mutex_exit(&zfsdev_state_lock);
3027	return (err);
3028out:
3029	if (zv->zv_total_opens == 0)
3030		zvol_last_close(zv);
3031	mutex_exit(&zfsdev_state_lock);
3032	return (err);
3033}
3034
3035static int
3036zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3037{
3038	zvol_state_t *zv;
3039	int err = 0;
3040
3041	mutex_enter(&zfsdev_state_lock);
3042	zv = dev->si_drv2;
3043	if (zv == NULL) {
3044		mutex_exit(&zfsdev_state_lock);
3045		return (ENXIO);
3046	}
3047
3048	if (zv->zv_flags & ZVOL_EXCL) {
3049		ASSERT(zv->zv_total_opens == 1);
3050		zv->zv_flags &= ~ZVOL_EXCL;
3051	}
3052
3053	/*
3054	 * If the open count is zero, this is a spurious close.
3055	 * That indicates a bug in the kernel / DDI framework.
3056	 */
3057	ASSERT(zv->zv_total_opens != 0);
3058
3059	/*
3060	 * You may get multiple opens, but only one close.
3061	 */
3062	zv->zv_total_opens--;
3063
3064	if (zv->zv_total_opens == 0)
3065		zvol_last_close(zv);
3066
3067	mutex_exit(&zfsdev_state_lock);
3068	return (0);
3069}
3070
3071static int
3072zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
3073{
3074	zvol_state_t *zv;
3075	rl_t *rl;
3076	off_t offset, length;
3077	int error;
3079
3080	zv = dev->si_drv2;
3081
3082	error = 0;
3083	KASSERT(zv->zv_total_opens > 0,
3084	    ("Device with zero access count in zvol_d_ioctl"));
3085
3087	switch (cmd) {
3088	case DIOCGSECTORSIZE:
3089		*(u_int *)data = DEV_BSIZE;
3090		break;
3091	case DIOCGMEDIASIZE:
3092		*(off_t *)data = zv->zv_volsize;
3093		break;
3094	case DIOCGFLUSH:
3095		zil_commit(zv->zv_zilog, ZVOL_OBJ);
3096		break;
3097	case DIOCGDELETE:
3098		if (!zvol_unmap_enabled)
3099			break;
3100
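		/*
		 * DIOCGDELETE carries an off_t[2] of { offset, length }.  A
		 * minimal userland sketch (error handling elided):
		 *
		 *	off_t args[2] = { 0, 1024 * 1024 };
		 *	(void) ioctl(fd, DIOCGDELETE, args);
		 */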
3101		offset = ((off_t *)data)[0];
3102		length = ((off_t *)data)[1];
3103		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
3104		    offset < 0 || offset >= zv->zv_volsize ||
3105		    length <= 0) {
3106			printf("%s: offset=%jd length=%jd\n", __func__, offset,
3107			    length);
3108			error = EINVAL;
3109			break;
3110		}
3111
3112		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
3113		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
3114		error = dmu_tx_assign(tx, TXG_WAIT);
3115		if (error != 0) {
3116			dmu_tx_abort(tx);
3117		} else {
3118			zvol_log_truncate(zv, tx, offset, length, B_TRUE);
3119			dmu_tx_commit(tx);
3120			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
3121			    offset, length);
3122		}
3123		zfs_range_unlock(rl);
3124		if (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
3125			zil_commit(zv->zv_zilog, ZVOL_OBJ);
3126		break;
3127	case DIOCGSTRIPESIZE:
3128		*(off_t *)data = zv->zv_volblocksize;
3129		break;
3130	case DIOCGSTRIPEOFFSET:
3131		*(off_t *)data = 0;
3132		break;
3133	case DIOCGATTR: {
3134		spa_t *spa = dmu_objset_spa(zv->zv_objset);
3135		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
3136		uint64_t refd, avail, usedobjs, availobjs;
3137
3138		if (strcmp(arg->name, "GEOM::candelete") == 0)
3139			arg->value.i = 1;
3140		else if (strcmp(arg->name, "blocksavail") == 0) {
3141			dmu_objset_space(zv->zv_objset, &refd, &avail,
3142			    &usedobjs, &availobjs);
3143			arg->value.off = avail / DEV_BSIZE;
3144		} else if (strcmp(arg->name, "blocksused") == 0) {
3145			dmu_objset_space(zv->zv_objset, &refd, &avail,
3146			    &usedobjs, &availobjs);
3147			arg->value.off = refd / DEV_BSIZE;
3148		} else if (strcmp(arg->name, "poolblocksavail") == 0) {
3149			avail = metaslab_class_get_space(spa_normal_class(spa));
3150			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
3151			arg->value.off = avail / DEV_BSIZE;
3152		} else if (strcmp(arg->name, "poolblocksused") == 0) {
3153			refd = metaslab_class_get_alloc(spa_normal_class(spa));
3154			arg->value.off = refd / DEV_BSIZE;
3155		} else
3156			error = ENOIOCTL;
3157		break;
3158	}
3159	case FIOSEEKHOLE:
3160	case FIOSEEKDATA: {
3161		off_t *off = (off_t *)data;
3162		uint64_t noff;
3163		boolean_t hole;
3164
3165		hole = (cmd == FIOSEEKHOLE);
3166		noff = *off;
3167		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
3168		*off = noff;
3169		break;
3170	}
3171	default:
3172		error = ENOIOCTL;
3173	}
3174
3175	return (error);
3176}
3177#endif	/* illumos */
3178