zvol.c revision 297112
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/zvol/dsk/<pool_name>/<dataset_name>
 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * FreeBSD notes.
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <sys/filio.h>

#include <geom/geom.h>

#include "zfs_namecheck.h"

#ifndef illumos
struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

#endif
void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
#ifdef illumos
kmutex_t zfsdev_state_lock;
#else
/*
 * In FreeBSD we've replaced the upstream zfsdev_state_lock with the
 * spa_namespace_lock in the ZVOL code.
 */
#define zfsdev_state_lock spa_namespace_lock
#endif
static uint32_t zvol_minors;

#ifndef illumos
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int	volmode = ZFS_VOLMODE_GEOM;
TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
    "Expose as GEOM providers (1), device files (2) or neither");

#endif
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;
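/*
 * Extents are tracked only for dumpified volumes: zvol_get_lbas() builds
 * this list so that zvol_dumpio() can map a volume offset to a physical
 * vdev offset without going through the DMU.
 */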

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
#ifndef illumos
	LIST_ENTRY(zvol_state)	zv_links;
#endif
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
#ifdef illumos
	minor_t		zv_minor;	/* minor number */
#else
	struct cdev	*zv_dev;	/* non-GEOM device */
	struct g_provider *zv_provider;	/* GEOM provider */
#endif
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
#ifdef illumos
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
#endif
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
#ifndef illumos
	int		zv_state;
	int		zv_volmode;	/* Provide GEOM or cdev */
	struct bio_queue_head zv_queue;
	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
#endif
} zvol_state_t;

#ifndef illumos
static LIST_HEAD(, zvol_state) all_zvols;
#endif
/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

/*
 * Toggle unmap functionality.
 */
boolean_t zvol_unmap_enabled = B_TRUE;
#ifndef illumos
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
    &zvol_unmap_enabled, 0,
    "Enable UNMAP functionality");

static d_open_t		zvol_d_open;
static d_close_t	zvol_d_close;
static d_read_t		zvol_read;
static d_write_t	zvol_write;
static d_ioctl_t	zvol_d_ioctl;
static d_strategy_t	zvol_strategy;

static struct cdevsw zvol_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	zvol_d_open,
	.d_close =	zvol_d_close,
	.d_read =	zvol_read,
	.d_write =	zvol_write,
	.d_ioctl =	zvol_d_ioctl,
	.d_strategy =	zvol_strategy,
	.d_name =	"zvol",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);
static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
    uint64_t len, boolean_t sync);
#endif	/* !illumos */

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
#ifdef illumos
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
#else	/* !illumos */
	zv->zv_volsize = volsize;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct g_provider *pp;

		pp = zv->zv_provider;
		if (pp == NULL)
			return;
		g_topology_lock();
		g_resize_provider(pp, zv->zv_volsize);
		g_topology_unlock();
	}
#endif	/* illumos */
}

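/*
 * Sanity-check a proposed volume size against the block size: e.g. a 1G
 * volume with an 8K volblocksize passes, while 1G + 512 bytes fails with
 * EINVAL because the size is not a multiple of the block size.
 */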
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
#ifdef illumos
	minor_t minor;
#endif
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

#ifdef illumos
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
#else
	LIST_FOREACH(zv, &all_zvols, zv_links) {
#endif
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

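/*
 * Collect the on-disk extents backing the volume.  Physically contiguous
 * blocks are coalesced: e.g. two consecutive 8K blocks on the same vdev
 * collapse into a single extent with ze_nblks == 2.
 */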
/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while ((ze = list_head(&zv->zv_extents)) != NULL) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

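/*
 * For records logged through dmu_sync() (WR_INDIRECT), lrc_reclen is
 * exactly sizeof (lr_write_t) with no inline payload, so replay below
 * rounds the offset and length out to a full volume block.
 */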
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

#ifdef illumos
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}
#endif	/* illumos */

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
#ifdef illumos
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
#else
	struct cdev *dev;
	struct g_provider *pp;
	struct g_geom *gp;
	uint64_t volsize, mode;
#endif
	int error;

#ifndef illumos
	ZFS_LOG(1, "Creating ZVOL %s...", name);
#endif

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

#ifdef illumos
	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !illumos */

	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
	zv->zv_state = 0;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		kmem_free(zv, sizeof(*zv));
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}
	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
		mode = volmode;

	DROP_GIANT();
	zv->zv_volsize = volsize;
	zv->zv_volmode = mode;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
		gp->start = zvol_geom_start;
		gp->access = zvol_geom_access;
		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = zv->zv_volsize;
		pp->private = zv;

		zv->zv_provider = pp;
		bioq_init(&zv->zv_queue);
		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
		    0640, "%s/%s", ZVOL_DRIVER, name) != 0) {
			kmem_free(zv, sizeof(*zv));
			dmu_objset_disown(os, FTAG);
			mutex_exit(&zfsdev_state_lock);
			return (SET_ERROR(ENXIO));
		}
		zv->zv_dev = dev;
		dev->si_iosize_max = MAXPHYS;
		dev->si_drv2 = zv;
	}
	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
#endif	/* illumos */

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
#ifdef illumos
	zv->zv_minor = minor;
#endif
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);
#ifndef illumos
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		zvol_geom_run(zv);
		g_topology_unlock();
	}
	PICKUP_GIANT();

	ZFS_LOG(1, "ZVOL %s created.", name);
#endif

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef illumos
	char nmbuf[20];
	minor_t minor = zv->zv_minor;
#endif

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

#ifdef illumos
	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);
#else
	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

	LIST_REMOVE(zv, zv_links);
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		zvol_geom_destroy(zv);
		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV)
		destroy_dev(zv->zv_dev);
#endif

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));
#ifdef illumos
	ddi_soft_state_free(zfsdev_state, minor);
#endif
	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

#ifdef illumos
int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
#endif	/* illumos */

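/*
 * Persist a new volsize in the ZAP.  When shrinking, blocks beyond the
 * new end are freed with dmu_free_long_range(); the tx is marked
 * net-free so the assignment can succeed even on a nearly full pool.
 */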
static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
#ifdef illumos
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
#else	/* !illumos */
	zvol_state_t *zv, *tzv;
	size_t namelen;

	namelen = strlen(name);

	DROP_GIANT();
	mutex_enter(&zfsdev_state_lock);

	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		    strlen(zv->zv_name) > namelen &&
		    (zv->zv_name[namelen] == '/' ||
		    zv->zv_name[namelen] == '@'))) {
			(void) zvol_remove_zv(zv);
		}
	}

	mutex_exit(&zfsdev_state_lock);
	PICKUP_GIANT();
#endif	/* illumos */
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() to ensure that the devices'
	 * size(9P) is not visible to the dump subsystem.
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

#ifdef ZVOL_DUMP
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}
#endif	/* ZVOL_DUMP */

#ifdef illumos
	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif	/* illumos */
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

/*ARGSUSED*/
#ifdef illumos
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
#else
static int
zvol_open(struct g_provider *pp, int flag, int count)
#endif
{
	zvol_state_t *zv;
	int err = 0;
#ifdef illumos

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
#else	/* !illumos */
	boolean_t locked = B_FALSE;

	/*
	 * Protect against recursively entering spa_namespace_lock
	 * when spa_open() is used for a pool on a (local) ZVOL(s).
	 * This is needed since we replaced upstream zfsdev_state_lock
	 * with spa_namespace_lock in the ZVOL code.
	 * We are using the same trick as spa_open().
	 * Note that calls in zvol_first_open which need to resolve
	 * pool name to a spa object will enter spa_open()
	 * recursively, but that function already has all the
	 * necessary protection.
	 */
	if (!MUTEX_HELD(&zfsdev_state_lock)) {
		mutex_enter(&zfsdev_state_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0) {
		err = zvol_first_open(zv);
		if (err) {
			if (locked)
				mutex_exit(&zfsdev_state_lock);
			return (err);
		}
		pp->mediasize = zv->zv_volsize;
		pp->stripeoffset = 0;
		pp->stripesize = zv->zv_volblocksize;
	}
#endif	/* illumos */
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
#ifdef FEXCL
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}
#endif

#ifdef illumos
	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);
#else
	zv->zv_total_opens += count;
	if (locked)
		mutex_exit(&zfsdev_state_lock);
#endif

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
#ifdef illumos
	mutex_exit(&zfsdev_state_lock);
#else
	if (locked)
		mutex_exit(&zfsdev_state_lock);
#endif
	return (err);
}

/*ARGSUSED*/
#ifdef illumos
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
#else	/* !illumos */
static int
zvol_close(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t locked = B_FALSE;

	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&zfsdev_state_lock)) {
		mutex_enter(&zfsdev_state_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&zfsdev_state_lock);
#endif	/* illumos */
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
#ifdef illumos
	ASSERT(zv->zv_open_count[otyp] != 0);
#endif
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
#ifdef illumos
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;
#else
	zv->zv_total_opens -= count;
#endif

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

#ifdef illumos
	mutex_exit(&zfsdev_state_lock);
#else
	if (locked)
		mutex_exit(&zfsdev_state_lock);
#endif
	return (error);
}

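/*
 * Completion callback for zvol_get_data(): invoked directly on error or
 * from dmu_sync() once the block is committed; drops the dbuf hold and
 * the range lock taken for the TX_WRITE record.
 */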
static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

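/*
 * Example: with the common 8K volblocksize, blocksize is below the 32K
 * threshold above, so writes are logged as WR_COPIED or WR_NEED_COPY;
 * only a volume whose volblocksize exceeds the threshold (on a pool with
 * no separate log device) takes the WR_INDIRECT/dmu_sync() path, and then
 * only for block-aligned, full-block writes.
 */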
static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

#ifdef illumos
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (ze != NULL && offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
#else	/* !illumos */
void
zvol_strategy(struct bio *bp)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
#ifdef illumos
	boolean_t doread = bp->b_flags & B_READ;
#else
	boolean_t doread = 0;
#endif
	boolean_t is_dumpified;
	boolean_t sync;

#ifdef illumos
	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
#else	/* !illumos */
	if (bp->bio_to)
		zv = bp->bio_to->private;
	else
		zv = bp->bio_dev->si_drv2;

	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		error = SET_ERROR(EROFS);
		goto out;
	}

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		goto sync;
	case BIO_READ:
		doread = 1;
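		/* FALLTHROUGH */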
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		error = EOPNOTSUPP;
		goto out;
	}

	off = bp->bio_offset;
#endif	/* illumos */
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

#ifdef illumos
	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dumpified;
#else	/* !illumos */
	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		error = SET_ERROR(EIO);
		goto out;
	}

	is_dumpified = B_FALSE;
	sync = !doread && !is_dumpified &&
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
#endif	/* illumos */

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

#ifndef illumos
	if (bp->bio_cmd == BIO_DELETE) {
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, off, resid, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    off, resid);
			resid = 0;
		}
		goto unlock;
	}
#endif
	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
#ifdef illumos
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
#else
		if (doread) {
#endif
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
#ifndef illumos
unlock:
#endif
	zfs_range_unlock(rl);

#ifdef illumos
	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
#else	/* !illumos */
	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length && off > volsize)
		error = EINVAL;

	if (sync) {
sync:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	}
out:
	if (bp->bio_to)
		g_io_deliver(bp, error);
	else
		biofinish(bp, NULL, error);
#endif	/* illumos */
}

#ifdef illumos
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else	/* !illumos */
int
zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

#ifdef illumos
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}
#endif

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

#ifdef illumos
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else	/* !illumos */
int
zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

#ifdef illumos
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
#else
	sync = (ioflag & IO_SYNC) ||
#endif
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

#ifdef illumos
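/*
 * Fabricate the EFI label clients see: the GPT header sits at LBA 1, the
 * single partition entry array at LBA 2, and one reserved partition
 * spans LBA 34 through the last usable LBA, (vs >> bs) - 1.
 */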
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */
#endif	/* illumos */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

#ifdef illumos
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer =
		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zfsdev_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
2164	case DKIOCSETWCE:
2165	{
2166		int wce;
2167		if (ddi_copyin((void *)arg, &wce, sizeof (int),
2168		    flag)) {
2169			error = SET_ERROR(EFAULT);
2170			break;
2171		}
2172		if (wce) {
2173			zv->zv_flags |= ZVOL_WCE;
2174			mutex_exit(&zfsdev_state_lock);
2175		} else {
2176			zv->zv_flags &= ~ZVOL_WCE;
2177			mutex_exit(&zfsdev_state_lock);
2178			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2179		}
2180		return (0);
2181	}
2182
2183	case DKIOCGGEOM:
2184	case DKIOCGVTOC:
2185		/*
2186		 * Commands using these (like prtvtoc) expect ENOTSUP
2187		 * since we're emulating an EFI label.
2188		 */
2189		error = SET_ERROR(ENOTSUP);
2190		break;
2191
2192	case DKIOCDUMPINIT:
2193		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2194		    RL_WRITER);
2195		error = zvol_dumpify(zv);
2196		zfs_range_unlock(rl);
2197		break;
2198
2199	case DKIOCDUMPFINI:
2200		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
2201			break;
2202		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2203		    RL_WRITER);
2204		error = zvol_dump_fini(zv);
2205		zfs_range_unlock(rl);
2206		break;
2207
2208	case DKIOCFREE:
2209	{
2210		dkioc_free_t df;
2211		dmu_tx_t *tx;
2212
2213		if (!zvol_unmap_enabled)
2214			break;
2215
2216		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
2217			error = SET_ERROR(EFAULT);
2218			break;
2219		}
2220
2221		/*
2222		 * Apply Postel's Law to length-checking: if the caller
2223		 * overshoots, just blank out to the end of the volume, if
2224		 * there's anything to blank out at all.
2225		 */
2226		if (df.df_start >= zv->zv_volsize)
2227			break;	/* No need to do anything... */
2228
2229		mutex_exit(&zfsdev_state_lock);
2230
2231		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
2232		    RL_WRITER);
2233		tx = dmu_tx_create(zv->zv_objset);
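		/*
		 * This tx frees more space than it consumes; mark it so
		 * it can be assigned even when the pool is almost full.
		 */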
2234		dmu_tx_mark_netfree(tx);
2235		error = dmu_tx_assign(tx, TXG_WAIT);
2236		if (error != 0) {
2237			dmu_tx_abort(tx);
2238		} else {
2239			zvol_log_truncate(zv, tx, df.df_start,
2240			    df.df_length, B_TRUE);
2241			dmu_tx_commit(tx);
2242			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2243			    df.df_start, df.df_length);
2244		}
2245
2246		zfs_range_unlock(rl);
2247
2248		if (error == 0) {
2249			/*
2250			 * If the write-cache is disabled or 'sync' property
2251			 * is set to 'always' then treat this as a synchronous
2252			 * operation (i.e. commit to zil).
2253			 */
2254			if (!(zv->zv_flags & ZVOL_WCE) ||
2255			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
2256				zil_commit(zv->zv_zilog, ZVOL_OBJ);
2257
2258			/*
2259			 * If the caller really wants synchronous writes, and
2260			 * can't wait for them, don't return until the write
2261			 * is done.
2262			 */
2263			if (df.df_flags & DF_WAIT_SYNC) {
2264				txg_wait_synced(
2265				    dmu_objset_pool(zv->zv_objset), 0);
2266			}
2267		}
2268		return (error);
2269	}
2270
2271	default:
2272		error = SET_ERROR(ENOTTY);
2273		break;
2274
2275	}
2276	mutex_exit(&zfsdev_state_lock);
2277	return (error);
2278}
2279#endif	/* illumos */
2280
2281int
2282zvol_busy(void)
2283{
2284	return (zvol_minors != 0);
2285}
2286
2287void
2288zvol_init(void)
2289{
2290	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
2291	    1) == 0);
2292#ifdef illumos
2293	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
2294#else
2295	ZFS_LOG(1, "ZVOL Initialized.");
2296#endif
2297}
2298
2299void
2300zvol_fini(void)
2301{
2302#ifdef illumos
2303	mutex_destroy(&zfsdev_state_lock);
2304#endif
2305	ddi_soft_state_fini(&zfsdev_state);
2306	ZFS_LOG(1, "ZVOL Deinitialized.");
2307}
2308
2309#ifdef illumos
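/*
 * dsl_sync_task() check/sync pair used by zvol_dump_init() below: the check
 * function returns non-zero once MULTI_VDEV_CRASH_DUMP is already active,
 * which keeps the sync function from bumping the feature refcount more than
 * once.
 */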
2310/*ARGSUSED*/
2311static int
2312zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2313{
2314	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2315
2316	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2317		return (1);
2318	return (0);
2319}
2320
2321/*ARGSUSED*/
2322static void
2323zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2324{
2325	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2326
2327	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2328}
2329
2330static int
2331zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2332{
2333	dmu_tx_t *tx;
2334	int error;
2335	objset_t *os = zv->zv_objset;
2336	spa_t *spa = dmu_objset_spa(os);
2337	vdev_t *vd = spa->spa_root_vdev;
2338	nvlist_t *nv = NULL;
2339	uint64_t version = spa_version(spa);
2340	uint64_t checksum, compress, refresrv, vbs, dedup;
2341
2342	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2343	ASSERT(vd->vdev_ops == &vdev_root_ops);
2344
2345	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2346	    DMU_OBJECT_END);
2347	if (error != 0)
2348		return (error);
2349	/* wait for dmu_free_long_range to actually free the blocks */
2350	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2351
2352	/*
2353	 * If the pool on which the dump device is being initialized has more
2354	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2355	 * enabled.  If so, bump that feature's counter to indicate that the
2356	 * feature is active. We also check the vdev type to handle the
2357	 * following case:
2358	 *   # zpool create test raidz disk1 disk2 disk3
2359	 *   Here spa_root_vdev->vdev_children == 1 (the raidz vdev), but
2360	 *   the raidz vdev itself has 3 children.
2361	 */
2362	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
2363		if (!spa_feature_is_enabled(spa,
2364		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2365			return (SET_ERROR(ENOTSUP));
2366		(void) dsl_sync_task(spa_name(spa),
2367		    zfs_mvdev_dump_feature_check,
2368		    zfs_mvdev_dump_activate_feature_sync, NULL,
2369		    2, ZFS_SPACE_CHECK_RESERVED);
2370	}
2371
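	/*
	 * First-time dumpify: fetch the properties we are about to
	 * override, so they can be stashed in the ZAP below and later
	 * restored by zvol_dump_fini().
	 */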
2372	if (!resize) {
2373		error = dsl_prop_get_integer(zv->zv_name,
2374		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2375		if (error == 0) {
2376			error = dsl_prop_get_integer(zv->zv_name,
2377			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
2378			    NULL);
2379		}
2380		if (error == 0) {
2381			error = dsl_prop_get_integer(zv->zv_name,
2382			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
2383			    &refresrv, NULL);
2384		}
2385		if (error == 0) {
2386			error = dsl_prop_get_integer(zv->zv_name,
2387			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
2388			    NULL);
2389		}
2390		if (version >= SPA_VERSION_DEDUP && error == 0) {
2391			error = dsl_prop_get_integer(zv->zv_name,
2392			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2393		}
2394	}
2395	if (error != 0)
2396		return (error);
2397
2398	tx = dmu_tx_create(os);
2399	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2400	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2401	error = dmu_tx_assign(tx, TXG_WAIT);
2402	if (error != 0) {
2403		dmu_tx_abort(tx);
2404		return (error);
2405	}
2406
2407	/*
2408	 * If we are resizing the dump device then we only need to update
2409	 * the refreservation to match the newly updated volume size.
2410	 * Otherwise, save off the zvol's original properties so that they
2411	 * can be restored if the zvol is ever undumpified.
2412	 */
2413	if (resize) {
2414		error = zap_update(os, ZVOL_ZAP_OBJ,
2415		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2416		    &zv->zv_volsize, tx);
2417	} else {
2418		error = zap_update(os, ZVOL_ZAP_OBJ,
2419		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2420		    &compress, tx);
2421		if (error == 0) {
2422			error = zap_update(os, ZVOL_ZAP_OBJ,
2423			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2424			    &checksum, tx);
2425		}
2426		if (error == 0) {
2427			error = zap_update(os, ZVOL_ZAP_OBJ,
2428			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2429			    &refresrv, tx);
2430		}
2431		if (error == 0) {
2432			error = zap_update(os, ZVOL_ZAP_OBJ,
2433			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2434			    &vbs, tx);
2435		}
2436		if (error == 0) {
2437			error = dmu_object_set_blocksize(
2438			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2439		}
2440		if (version >= SPA_VERSION_DEDUP && error == 0) {
2441			error = zap_update(os, ZVOL_ZAP_OBJ,
2442			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2443			    &dedup, tx);
2444		}
2445		if (error == 0)
2446			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2447	}
2448	dmu_tx_commit(tx);
2449
2450	/*
2451	 * We only need to update the zvol's properties if we are initializing
2452	 * the dump area for the first time.
2453	 */
2454	if (error == 0 && !resize) {
2455		/*
2456		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2457		 * function.  Otherwise, use the old default -- OFF.
2458		 */
2459		checksum = spa_feature_is_active(spa,
2460		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2461		    ZIO_CHECKSUM_OFF;
2462
2463		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2464		VERIFY(nvlist_add_uint64(nv,
2465		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2466		VERIFY(nvlist_add_uint64(nv,
2467		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2468		    ZIO_COMPRESS_OFF) == 0);
2469		VERIFY(nvlist_add_uint64(nv,
2470		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2471		    checksum) == 0);
2472		if (version >= SPA_VERSION_DEDUP) {
2473			VERIFY(nvlist_add_uint64(nv,
2474			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2475			    ZIO_CHECKSUM_OFF) == 0);
2476		}
2477
2478		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2479		    nv, NULL);
2480		nvlist_free(nv);
2481	}
2482
2483	/* Allocate the space for the dump */
2484	if (error == 0)
2485		error = zvol_prealloc(zv);
2486	return (error);
2487}
2488
2489static int
2490zvol_dumpify(zvol_state_t *zv)
2491{
2492	int error = 0;
2493	uint64_t dumpsize = 0;
2494	dmu_tx_t *tx;
2495	objset_t *os = zv->zv_objset;
2496
2497	if (zv->zv_flags & ZVOL_RDONLY)
2498		return (SET_ERROR(EROFS));
2499
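	/*
	 * (Re)initialize the dump area if it has never been set up, or
	 * if the volume size has changed since it was last dumpified.
	 */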
2500	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2501	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2502		boolean_t resize = (dumpsize > 0);
2503
2504		if ((error = zvol_dump_init(zv, resize)) != 0) {
2505			(void) zvol_dump_fini(zv);
2506			return (error);
2507		}
2508	}
2509
2510	/*
2511	 * Build up our LBA mapping.
2512	 */
2513	error = zvol_get_lbas(zv);
2514	if (error) {
2515		(void) zvol_dump_fini(zv);
2516		return (error);
2517	}
2518
2519	tx = dmu_tx_create(os);
2520	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2521	error = dmu_tx_assign(tx, TXG_WAIT);
2522	if (error) {
2523		dmu_tx_abort(tx);
2524		(void) zvol_dump_fini(zv);
2525		return (error);
2526	}
2527
2528	zv->zv_flags |= ZVOL_DUMPIFIED;
2529	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2530	    &zv->zv_volsize, tx);
2531	dmu_tx_commit(tx);
2532
2533	if (error) {
2534		(void) zvol_dump_fini(zv);
2535		return (error);
2536	}
2537
2538	txg_wait_synced(dmu_objset_pool(os), 0);
2539	return (0);
2540}
2541
2542static int
2543zvol_dump_fini(zvol_state_t *zv)
2544{
2545	dmu_tx_t *tx;
2546	objset_t *os = zv->zv_objset;
2547	nvlist_t *nv;
2548	int error = 0;
2549	uint64_t checksum, compress, refresrv, vbs, dedup;
2550	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2551
2552	/*
2553	 * Attempt to restore the zvol back to its pre-dumpified state.
2554	 * This is a best-effort attempt as it's possible that not all
2555	 * of these properties were initialized during the dumpify process
2556	 * (e.g. an error during zvol_dump_init()).
2557	 */
2558
2559	tx = dmu_tx_create(os);
2560	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2561	error = dmu_tx_assign(tx, TXG_WAIT);
2562	if (error) {
2563		dmu_tx_abort(tx);
2564		return (error);
2565	}
2566	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2567	dmu_tx_commit(tx);
2568
2569	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2570	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2571	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2572	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2573	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2574	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2575	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2576	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2577
2578	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2579	(void) nvlist_add_uint64(nv,
2580	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2581	(void) nvlist_add_uint64(nv,
2582	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2583	(void) nvlist_add_uint64(nv,
2584	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2585	if (version >= SPA_VERSION_DEDUP &&
2586	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2587	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2588		(void) nvlist_add_uint64(nv,
2589		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2590	}
2591	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2592	    nv, NULL);
2593	nvlist_free(nv);
2594
2595	zvol_free_extents(zv);
2596	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2597	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2598	/* wait for dmu_free_long_range to actually free the blocks */
2599	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2600	tx = dmu_tx_create(os);
2601	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2602	error = dmu_tx_assign(tx, TXG_WAIT);
2603	if (error) {
2604		dmu_tx_abort(tx);
2605		return (error);
2606	}
2607	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2608		zv->zv_volblocksize = vbs;
2609	dmu_tx_commit(tx);
2610
2611	return (0);
2612}
2613#else	/* !illumos */
2614
2615static void
2616zvol_geom_run(zvol_state_t *zv)
2617{
2618	struct g_provider *pp;
2619
2620	pp = zv->zv_provider;
2621	g_error_provider(pp, 0);
2622
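	/*
	 * Hand I/O off to a dedicated worker thread for this provider;
	 * the thread name drops the leading "zvol/" from the provider
	 * name.
	 */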
2623	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2624	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2625}
2626
2627static void
2628zvol_geom_destroy(zvol_state_t *zv)
2629{
2630	struct g_provider *pp;
2631
2632	g_topology_assert();
2633
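	/*
	 * Ask the worker thread to exit (zv_state = 1) and wait for it
	 * to acknowledge (zv_state = 2) before destroying the queue
	 * mutex.
	 */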
2634	mtx_lock(&zv->zv_queue_mtx);
2635	zv->zv_state = 1;
2636	wakeup_one(&zv->zv_queue);
2637	while (zv->zv_state != 2)
2638		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2639	mtx_destroy(&zv->zv_queue_mtx);
2640
2641	pp = zv->zv_provider;
2642	zv->zv_provider = NULL;
2643	pp->private = NULL;
2644	g_wither_geom(pp->geom, ENXIO);
2645}
2646
2647static int
2648zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2649{
2650	int count, error, flags;
2651
2652	g_topology_assert();
2653
2654	/*
2655	 * To keep things simple we expect either an open or a close, but
2656	 * not both at the same time.
2657	 */
2658	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2659	    (acr <= 0 && acw <= 0 && ace <= 0),
2660	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2661	    pp->name, acr, acw, ace));
2662
2663	if (pp->private == NULL) {
2664		if (acr <= 0 && acw <= 0 && ace <= 0)
2665			return (0);
2666		return (pp->error);
2667	}
2668
2669	/*
2670	 * We don't pass the FEXCL flag to zvol_open()/zvol_close() if ace != 0,
2671	 * because GEOM already handles that, and handles it a bit differently.
2672	 * GEOM allows multiple read/exclusive consumers, whereas ZFS allows
2673	 * only one exclusive consumer, no matter whether it is a reader or a
2674	 * writer.  I prefer the way GEOM works, so I'll leave it to GEOM to
2675	 * decide what to do.
2676	 */
2677
2678	count = acr + acw + ace;
2679	if (count == 0)
2680		return (0);
2681
2682	flags = 0;
2683	if (acr != 0 || ace != 0)
2684		flags |= FREAD;
2685	if (acw != 0)
2686		flags |= FWRITE;
2687
2688	g_topology_unlock();
2689	if (count > 0)
2690		error = zvol_open(pp, flags, count);
2691	else
2692		error = zvol_close(pp, flags, -count);
2693	g_topology_lock();
2694	return (error);
2695}
2696
2697static void
2698zvol_geom_start(struct bio *bp)
2699{
2700	zvol_state_t *zv;
2701	boolean_t first;
2702
2703	zv = bp->bio_to->private;
2704	ASSERT(zv != NULL);
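	/*
	 * Bios may arrive in a context that cannot sleep; queue those
	 * for the worker thread (the "enqueue" path below) instead of
	 * servicing them here.
	 */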
2705	switch (bp->bio_cmd) {
2706	case BIO_FLUSH:
2707		if (!THREAD_CAN_SLEEP())
2708			goto enqueue;
2709		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2710		g_io_deliver(bp, 0);
2711		break;
2712	case BIO_READ:
2713	case BIO_WRITE:
2714	case BIO_DELETE:
2715		if (!THREAD_CAN_SLEEP())
2716			goto enqueue;
2717		zvol_strategy(bp);
2718		break;
2719	case BIO_GETATTR: {
2720		spa_t *spa = dmu_objset_spa(zv->zv_objset);
2721		uint64_t refd, avail, usedobjs, availobjs, val;
2722
2723		if (g_handleattr_int(bp, "GEOM::candelete", 1))
2724			return;
2725		if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
2726			dmu_objset_space(zv->zv_objset, &refd, &avail,
2727			    &usedobjs, &availobjs);
2728			if (g_handleattr_off_t(bp, "blocksavail",
2729			    avail / DEV_BSIZE))
2730				return;
2731		} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
2732			dmu_objset_space(zv->zv_objset, &refd, &avail,
2733			    &usedobjs, &availobjs);
2734			if (g_handleattr_off_t(bp, "blocksused",
2735			    refd / DEV_BSIZE))
2736				return;
2737		} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
2738			avail = metaslab_class_get_space(spa_normal_class(spa));
2739			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
2740			if (g_handleattr_off_t(bp, "poolblocksavail",
2741			    avail / DEV_BSIZE))
2742				return;
2743		} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
2744			refd = metaslab_class_get_alloc(spa_normal_class(spa));
2745			if (g_handleattr_off_t(bp, "poolblocksused",
2746			    refd / DEV_BSIZE))
2747				return;
2748		}
2749		/* FALLTHROUGH */
2750	}
2751	default:
2752		g_io_deliver(bp, EOPNOTSUPP);
2753		break;
2754	}
2755	return;
2756
2757enqueue:
2758	mtx_lock(&zv->zv_queue_mtx);
2759	first = (bioq_first(&zv->zv_queue) == NULL);
2760	bioq_insert_tail(&zv->zv_queue, bp);
2761	mtx_unlock(&zv->zv_queue_mtx);
2762	if (first)
2763		wakeup_one(&zv->zv_queue);
2764}
2765
2766static void
2767zvol_geom_worker(void *arg)
2768{
2769	zvol_state_t *zv;
2770	struct bio *bp;
2771
2772	thread_lock(curthread);
2773	sched_prio(curthread, PRIBIO);
2774	thread_unlock(curthread);
2775
2776	zv = arg;
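	/*
	 * Service the bio queue until zvol_geom_destroy() sets zv_state
	 * to 1, then acknowledge with zv_state = 2 and exit.
	 */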
2777	for (;;) {
2778		mtx_lock(&zv->zv_queue_mtx);
2779		bp = bioq_takefirst(&zv->zv_queue);
2780		if (bp == NULL) {
2781			if (zv->zv_state == 1) {
2782				zv->zv_state = 2;
2783				wakeup(&zv->zv_state);
2784				mtx_unlock(&zv->zv_queue_mtx);
2785				kthread_exit();
2786			}
2787			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2788			    "zvol:io", 0);
2789			continue;
2790		}
2791		mtx_unlock(&zv->zv_queue_mtx);
2792		switch (bp->bio_cmd) {
2793		case BIO_FLUSH:
2794			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2795			g_io_deliver(bp, 0);
2796			break;
2797		case BIO_READ:
2798		case BIO_WRITE:
2799		case BIO_DELETE:
2800			zvol_strategy(bp);
2801			break;
2802		default:
2803			g_io_deliver(bp, EOPNOTSUPP);
2804			break;
2805		}
2806	}
2807}
2808
2809extern boolean_t dataset_name_hidden(const char *name);
2810
2811static int
2812zvol_create_snapshots(objset_t *os, const char *name)
2813{
2814	uint64_t cookie, obj;
2815	char *sname;
2816	int error, len;
2817
2818	cookie = obj = 0;
2819	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2820
2821#if 0
2822	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2823	    DS_FIND_SNAPSHOTS);
2824#endif
2825
2826	for (;;) {
2827		len = snprintf(sname, MAXPATHLEN, "%s@", name);
2828		if (len >= MAXPATHLEN) {
2829			dmu_objset_rele(os, FTAG);
2830			error = ENAMETOOLONG;
2831			break;
2832		}
2833
2834		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2835		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2836		    sname + len, &obj, &cookie, NULL);
2837		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2838		if (error != 0) {
2839			if (error == ENOENT)
2840				error = 0;
2841			break;
2842		}
2843
2844		if ((error = zvol_create_minor(sname)) != 0) {
2845			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2846			    sname, error);
2847			break;
2848		}
2849	}
2850
2851	kmem_free(sname, MAXPATHLEN);
2852	return (error);
2853}
2854
2855int
2856zvol_create_minors(const char *name)
2857{
2858	uint64_t cookie;
2859	objset_t *os;
2860	char *osname, *p;
2861	int error, len;
2862
2863	if (dataset_name_hidden(name))
2864		return (0);
2865
2866	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2867		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2868		    name, error);
2869		return (error);
2870	}
2871	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2872		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
2873		dsl_pool_rele(dmu_objset_pool(os), FTAG);
2874		error = zvol_create_minor(name);
2875		if (error == 0 || error == EEXIST) {
2876			error = zvol_create_snapshots(os, name);
2877		} else {
2878			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2879			    name, error);
2880		}
2881		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
2882		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
2883		return (error);
2884	}
2885	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2886		dmu_objset_rele(os, FTAG);
2887		return (0);
2888	}
2889
2890	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2891	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2892		dmu_objset_rele(os, FTAG);
2893		kmem_free(osname, MAXPATHLEN);
2894		return (ENOENT);
2895	}
2896	p = osname + strlen(osname);
2897	len = MAXPATHLEN - (p - osname);
2898
2899#if 0
2900	/* Prefetch the datasets. */
2901	cookie = 0;
2902	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2903		if (!dataset_name_hidden(osname))
2904			(void) dmu_objset_prefetch(osname, NULL);
2905	}
2906#endif
2907
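	/*
	 * Recurse into child datasets.  The objset hold is dropped
	 * before each recursive call (zvol_create_minors() takes its
	 * own hold) and re-acquired afterwards to continue the
	 * iteration.
	 */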
2908	cookie = 0;
2909	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2910	    &cookie) == 0) {
2911		dmu_objset_rele(os, FTAG);
2912		(void) zvol_create_minors(osname);
2913		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2914			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2915			    name, error);
2916			return (error);
2917		}
2918	}
2919
2920	dmu_objset_rele(os, FTAG);
2921	kmem_free(osname, MAXPATHLEN);
2922	return (0);
2923}
2924
2925static void
2926zvol_rename_minor(zvol_state_t *zv, const char *newname)
2927{
2928	struct g_geom *gp;
2929	struct g_provider *pp;
2930	struct cdev *dev;
2931
2932	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2933
2934	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
2935		g_topology_lock();
2936		pp = zv->zv_provider;
2937		ASSERT(pp != NULL);
2938		gp = pp->geom;
2939		ASSERT(gp != NULL);
2940
2941		zv->zv_provider = NULL;
2942		g_wither_provider(pp, ENXIO);
2943
2944		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2945		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
2946		pp->sectorsize = DEV_BSIZE;
2947		pp->mediasize = zv->zv_volsize;
2948		pp->private = zv;
2949		zv->zv_provider = pp;
2950		g_error_provider(pp, 0);
2951		g_topology_unlock();
2952	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
2953		dev = zv->zv_dev;
2954		ASSERT(dev != NULL);
2955		zv->zv_dev = NULL;
2956		destroy_dev(dev);
2957
2958		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
2959		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
2960		    0640, "%s/%s", ZVOL_DRIVER, newname) == 0) {
2961			zv->zv_dev = dev;
2962			dev->si_iosize_max = MAXPHYS;
2963			dev->si_drv2 = zv;
2964		}
2965	}
2966	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2967}
2968
2969void
2970zvol_rename_minors(const char *oldname, const char *newname)
2971{
2972	char name[MAXPATHLEN];
2973	struct g_provider *pp;
2974	struct g_geom *gp;
2975	size_t oldnamelen, newnamelen;
2976	zvol_state_t *zv;
2977	char *namebuf;
2978	boolean_t locked = B_FALSE;
2979
2980	oldnamelen = strlen(oldname);
2981	newnamelen = strlen(newname);
2982
2983	DROP_GIANT();
2984	/* See comment in zvol_open(). */
2985	if (!MUTEX_HELD(&zfsdev_state_lock)) {
2986		mutex_enter(&zfsdev_state_lock);
2987		locked = B_TRUE;
2988	}
2989
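	/*
	 * Rename the zvol itself plus any descendants and snapshots,
	 * i.e. names beginning with oldname followed by '/' or '@'.
	 */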
2990	LIST_FOREACH(zv, &all_zvols, zv_links) {
2991		if (strcmp(zv->zv_name, oldname) == 0) {
2992			zvol_rename_minor(zv, newname);
2993		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
2994		    (zv->zv_name[oldnamelen] == '/' ||
2995		     zv->zv_name[oldnamelen] == '@')) {
2996			snprintf(name, sizeof(name), "%s%c%s", newname,
2997			    zv->zv_name[oldnamelen],
2998			    zv->zv_name + oldnamelen + 1);
2999			zvol_rename_minor(zv, name);
3000		}
3001	}
3002
3003	if (locked)
3004		mutex_exit(&zfsdev_state_lock);
3005	PICKUP_GIANT();
3006}
3007
3008static int
3009zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
3010{
3011	zvol_state_t *zv;
3012	int err = 0;
3013
3014	mutex_enter(&zfsdev_state_lock);
3015	zv = dev->si_drv2;
3016	if (zv == NULL) {
3017		mutex_exit(&zfsdev_state_lock);
3018		return (ENXIO);		/* zvol_create_minor() not done yet */
3019	}
3020
3021	if (zv->zv_total_opens == 0)
3022		err = zvol_first_open(zv);
3023	if (err) {
3024		mutex_exit(&zfsdev_state_lock);
3025		return (err);
3026	}
3027	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
3028		err = SET_ERROR(EROFS);
3029		goto out;
3030	}
3031	if (zv->zv_flags & ZVOL_EXCL) {
3032		err = SET_ERROR(EBUSY);
3033		goto out;
3034	}
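	/*
	 * Grant an exclusive open (FEXCL) only if nobody else has the
	 * volume open; ZVOL_EXCL then blocks any further opens above.
	 */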
3035#ifdef FEXCL
3036	if (flags & FEXCL) {
3037		if (zv->zv_total_opens != 0) {
3038			err = SET_ERROR(EBUSY);
3039			goto out;
3040		}
3041		zv->zv_flags |= ZVOL_EXCL;
3042	}
3043#endif
3044
3045	zv->zv_total_opens++;
3046	mutex_exit(&zfsdev_state_lock);
3047	return (err);
3048out:
3049	if (zv->zv_total_opens == 0)
3050		zvol_last_close(zv);
3051	mutex_exit(&zfsdev_state_lock);
3052	return (err);
3053}
3054
3055static int
3056zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3057{
3058	zvol_state_t *zv;
3059	int err = 0;
3060
3061	mutex_enter(&zfsdev_state_lock);
3062	zv = dev->si_drv2;
3063	if (zv == NULL) {
3064		mutex_exit(&zfsdev_state_lock);
3065		return (ENXIO);
3066	}
3067
3068	if (zv->zv_flags & ZVOL_EXCL) {
3069		ASSERT(zv->zv_total_opens == 1);
3070		zv->zv_flags &= ~ZVOL_EXCL;
3071	}
3072
3073	/*
3074	 * If the open count is zero, this is a spurious close.
3075	 * That indicates a bug in the kernel / DDI framework.
3076	 */
3077	ASSERT(zv->zv_total_opens != 0);
3078
3079	/*
3080	 * You may get multiple opens, but only one close.
3081	 */
3082	zv->zv_total_opens--;
3083
3084	if (zv->zv_total_opens == 0)
3085		zvol_last_close(zv);
3086
3087	mutex_exit(&zfsdev_state_lock);
3088	return (0);
3089}
3090
3091static int
3092zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
3093{
3094	zvol_state_t *zv;
3095	rl_t *rl;
3096	off_t offset, length, chunk;
3097	int i, error;
3098	u_int u;
3099
3100	zv = dev->si_drv2;
3101
3102	error = 0;
3103	KASSERT(zv->zv_total_opens > 0,
3104	    ("Device with zero access count in zvol_d_ioctl"));
3105
3106	i = IOCPARM_LEN(cmd);
3107	switch (cmd) {
3108	case DIOCGSECTORSIZE:
3109		*(u_int *)data = DEV_BSIZE;
3110		break;
3111	case DIOCGMEDIASIZE:
3112		*(off_t *)data = zv->zv_volsize;
3113		break;
3114	case DIOCGFLUSH:
3115		zil_commit(zv->zv_zilog, ZVOL_OBJ);
3116		break;
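	/*
	 * Backs BIO_DELETE: validate the range, log a TX_TRUNCATE record
	 * to the ZIL, then free the backing blocks; the same sequence as
	 * the illumos DKIOCFREE ioctl.
	 */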
3117	case DIOCGDELETE:
3118		if (!zvol_unmap_enabled)
3119			break;
3120
3121		offset = ((off_t *)data)[0];
3122		length = ((off_t *)data)[1];
3123		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
3124		    offset < 0 || offset >= zv->zv_volsize ||
3125		    length <= 0) {
3126			printf("%s: offset=%jd length=%jd\n", __func__,
3127			    (intmax_t)offset, (intmax_t)length);
3128			error = EINVAL;
3129			break;
3130		}
3131
3132		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
3133		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
3134		error = dmu_tx_assign(tx, TXG_WAIT);
3135		if (error != 0) {
3136			dmu_tx_abort(tx);
3137		} else {
3138			zvol_log_truncate(zv, tx, offset, length, B_TRUE);
3139			dmu_tx_commit(tx);
3140			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
3141			    offset, length);
3142		}
3143		zfs_range_unlock(rl);
3144		if (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
3145			zil_commit(zv->zv_zilog, ZVOL_OBJ);
3146		break;
3147	case DIOCGSTRIPESIZE:
3148		*(off_t *)data = zv->zv_volblocksize;
3149		break;
3150	case DIOCGSTRIPEOFFSET:
3151		*(off_t *)data = 0;
3152		break;
3153	case DIOCGATTR: {
3154		spa_t *spa = dmu_objset_spa(zv->zv_objset);
3155		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
3156		uint64_t refd, avail, usedobjs, availobjs;
3157
3158		if (strcmp(arg->name, "GEOM::candelete") == 0)
3159			arg->value.i = 1;
3160		else if (strcmp(arg->name, "blocksavail") == 0) {
3161			dmu_objset_space(zv->zv_objset, &refd, &avail,
3162			    &usedobjs, &availobjs);
3163			arg->value.off = avail / DEV_BSIZE;
3164		} else if (strcmp(arg->name, "blocksused") == 0) {
3165			dmu_objset_space(zv->zv_objset, &refd, &avail,
3166			    &usedobjs, &availobjs);
3167			arg->value.off = refd / DEV_BSIZE;
3168		} else if (strcmp(arg->name, "poolblocksavail") == 0) {
3169			avail = metaslab_class_get_space(spa_normal_class(spa));
3170			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
3171			arg->value.off = avail / DEV_BSIZE;
3172		} else if (strcmp(arg->name, "poolblocksused") == 0) {
3173			refd = metaslab_class_get_alloc(spa_normal_class(spa));
3174			arg->value.off = refd / DEV_BSIZE;
3175		} else
3176			error = ENOIOCTL;
3177		break;
3178	}
3179	case FIOSEEKHOLE:
3180	case FIOSEEKDATA: {
3181		off_t *off = (off_t *)data;
3182		uint64_t noff;
3183		boolean_t hole;
3184
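		/*
		 * Translate FIOSEEKHOLE/FIOSEEKDATA into dmu_offset_next():
		 * hole == B_TRUE seeks the next hole, otherwise the next
		 * data.
		 */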
3185		hole = (cmd == FIOSEEKHOLE);
3186		noff = *off;
3187		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
3188		*off = noff;
3189		break;
3190	}
3191	default:
3192		error = ENOIOCTL;
3193	}
3194
3195	return (error);
3196}
3197#endif	/* illumos */
3198