zvol.c revision 308448
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 *
24 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
25 * All rights reserved.
26 *
27 * Portions Copyright 2010 Robert Milkowski
28 *
29 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
30 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
31 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
32 * Copyright (c) 2014 Integros [integros.com]
33 */
34
35/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
36
37/*
38 * ZFS volume emulation driver.
39 *
40 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
41 * Volumes are accessed through the symbolic links named:
42 *
43 * /dev/zvol/dsk/<pool_name>/<dataset_name>
44 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
45 *
46 * These links are created by the /dev filesystem (sdev_zvolops.c).
47 * Volumes are persistent through reboot.  No user command needs to be
48 * run before opening and using a device.
49 *
50 * FreeBSD notes.
51 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
52 * in the system.
53 */
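
/*
 * For example, a volume named "tank/vol0" is reached on illumos via
 * /dev/zvol/dsk/tank/vol0 (or rdsk for the raw device), and on FreeBSD
 * via the single node /dev/zvol/tank/vol0 built from the
 * "%s/%s", ZVOL_DRIVER, name pattern used in zvol_create_minor() below,
 * in both the GEOM and cdev volmodes.
 */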
54
55#include <sys/types.h>
56#include <sys/param.h>
57#include <sys/kernel.h>
58#include <sys/errno.h>
59#include <sys/uio.h>
60#include <sys/bio.h>
61#include <sys/buf.h>
62#include <sys/kmem.h>
63#include <sys/conf.h>
64#include <sys/cmn_err.h>
65#include <sys/stat.h>
66#include <sys/zap.h>
67#include <sys/spa.h>
68#include <sys/spa_impl.h>
69#include <sys/zio.h>
70#include <sys/disk.h>
71#include <sys/dmu_traverse.h>
72#include <sys/dnode.h>
73#include <sys/dsl_dataset.h>
74#include <sys/dsl_prop.h>
75#include <sys/dkio.h>
76#include <sys/byteorder.h>
77#include <sys/sunddi.h>
78#include <sys/dirent.h>
79#include <sys/policy.h>
80#include <sys/queue.h>
81#include <sys/fs/zfs.h>
82#include <sys/zfs_ioctl.h>
83#include <sys/zil.h>
84#include <sys/refcount.h>
85#include <sys/zfs_znode.h>
86#include <sys/zfs_rlock.h>
87#include <sys/vdev_impl.h>
88#include <sys/vdev_raidz.h>
89#include <sys/zvol.h>
90#include <sys/zil_impl.h>
91#include <sys/dbuf.h>
92#include <sys/dmu_tx.h>
93#include <sys/zfeature.h>
94#include <sys/zio_checksum.h>
95#include <sys/filio.h>
96
97#include <geom/geom.h>
98
99#include "zfs_namecheck.h"
100
101#ifndef illumos
102struct g_class zfs_zvol_class = {
103	.name = "ZFS::ZVOL",
104	.version = G_VERSION,
105};
106
107DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
108
109#endif
110void *zfsdev_state;
111static char *zvol_tag = "zvol_tag";
112
113#define	ZVOL_DUMPSIZE		"dumpsize"
114
115/*
116 * This lock protects the zfsdev_state structure from being modified
117 * while it's being used, e.g. an open that comes in before a create
118 * finishes.  It also protects temporary opens of the dataset so that,
119 * e.g., an open doesn't get a spurious EBUSY.
120 */
121#ifdef illumos
122kmutex_t zfsdev_state_lock;
123#else
124/*
125 * In FreeBSD we've replaced the upstream zfsdev_state_lock with the
126 * spa_namespace_lock in the ZVOL code.
127 */
128#define zfsdev_state_lock spa_namespace_lock
129#endif
130static uint32_t zvol_minors;
131
132#ifndef illumos
133SYSCTL_DECL(_vfs_zfs);
134SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
135static int	volmode = ZFS_VOLMODE_GEOM;
136TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
137SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
138    "Expose as GEOM providers (1), device files (2) or neither");
139
140#endif
141typedef struct zvol_extent {
142	list_node_t	ze_node;
143	dva_t		ze_dva;		/* dva associated with this extent */
144	uint64_t	ze_nblks;	/* number of blocks in extent */
145} zvol_extent_t;
146
147/*
148 * The in-core state of each volume.
149 */
150typedef struct zvol_state {
151#ifndef illumos
152	LIST_ENTRY(zvol_state)	zv_links;
153#endif
154	char		zv_name[MAXPATHLEN]; /* pool/dd name */
155	uint64_t	zv_volsize;	/* amount of space we advertise */
156	uint64_t	zv_volblocksize; /* volume block size */
157#ifdef illumos
158	minor_t		zv_minor;	/* minor number */
159#else
160	struct cdev	*zv_dev;	/* non-GEOM device */
161	struct g_provider *zv_provider;	/* GEOM provider */
162#endif
163	uint8_t		zv_min_bs;	/* minimum addressable block shift */
164	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
165	objset_t	*zv_objset;	/* objset handle */
166#ifdef illumos
167	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
168#endif
169	uint32_t	zv_total_opens;	/* total open count */
170	zilog_t		*zv_zilog;	/* ZIL handle */
171	list_t		zv_extents;	/* List of extents for dump */
172	znode_t		zv_znode;	/* for range locking */
173	dmu_buf_t	*zv_dbuf;	/* bonus handle */
174#ifndef illumos
175	int		zv_state;
176	int		zv_volmode;	/* Provide GEOM or cdev */
177	struct bio_queue_head zv_queue;
178	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
179#endif
180} zvol_state_t;
181
182#ifndef illumos
183static LIST_HEAD(, zvol_state) all_zvols;
184#endif
185/*
186 * zvol specific flags
187 */
188#define	ZVOL_RDONLY	0x1
189#define	ZVOL_DUMPIFIED	0x2
190#define	ZVOL_EXCL	0x4
191#define	ZVOL_WCE	0x8
192
193/*
194 * zvol maximum transfer in one DMU tx.
195 */
196int zvol_maxphys = DMU_MAX_ACCESS/2;
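
/*
 * DMU_MAX_ACCESS/2 works out to 5MB (see the matching "(5MB)" note in
 * zvol_log_write() below), so zvol_strategy() and the uio paths chunk
 * their I/O into pieces of at most zvol_maxphys bytes per DMU
 * transaction.
 */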
197
198/*
199 * Toggle unmap functionality.
200 */
201boolean_t zvol_unmap_enabled = B_TRUE;
202#ifndef illumos
203SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
204    &zvol_unmap_enabled, 0,
205    "Enable UNMAP functionality");
206
207static d_open_t		zvol_d_open;
208static d_close_t	zvol_d_close;
209static d_read_t		zvol_read;
210static d_write_t	zvol_write;
211static d_ioctl_t	zvol_d_ioctl;
212static d_strategy_t	zvol_strategy;
213
214static struct cdevsw zvol_cdevsw = {
215	.d_version =	D_VERSION,
216	.d_open =	zvol_d_open,
217	.d_close =	zvol_d_close,
218	.d_read =	zvol_read,
219	.d_write =	zvol_write,
220	.d_ioctl =	zvol_d_ioctl,
221	.d_strategy =	zvol_strategy,
222	.d_name =	"zvol",
223	.d_flags =	D_DISK | D_TRACKCLOSE,
224};
225
226static void zvol_geom_run(zvol_state_t *zv);
227static void zvol_geom_destroy(zvol_state_t *zv);
228static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
229static void zvol_geom_start(struct bio *bp);
230static void zvol_geom_worker(void *arg);
231static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
232    uint64_t len, boolean_t sync);
233#endif	/* !illumos */
234
235extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
236    nvlist_t *, nvlist_t *);
237static int zvol_remove_zv(zvol_state_t *);
238static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
239static int zvol_dumpify(zvol_state_t *zv);
240static int zvol_dump_fini(zvol_state_t *zv);
241static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
242
243static void
244zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
245{
246#ifdef illumos
247	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
248
249	zv->zv_volsize = volsize;
250	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
251	    "Size", volsize) == DDI_SUCCESS);
252	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
253	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
254
255	/* Notify specfs to invalidate the cached size */
256	spec_size_invalidate(dev, VBLK);
257	spec_size_invalidate(dev, VCHR);
258#else	/* !illumos */
259	zv->zv_volsize = volsize;
260	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
261		struct g_provider *pp;
262
263		pp = zv->zv_provider;
264		if (pp == NULL)
265			return;
266		g_topology_lock();
267		g_resize_provider(pp, zv->zv_volsize);
268		g_topology_unlock();
269	}
270#endif	/* illumos */
271}
272
273int
274zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
275{
276	if (volsize == 0)
277		return (SET_ERROR(EINVAL));
278
279	if (volsize % blocksize != 0)
280		return (SET_ERROR(EINVAL));
281
282#ifdef _ILP32
283	if (volsize - 1 > SPEC_MAXOFFSET_T)
284		return (SET_ERROR(EOVERFLOW));
285#endif
286	return (0);
287}
288
289int
290zvol_check_volblocksize(uint64_t volblocksize)
291{
292	if (volblocksize < SPA_MINBLOCKSIZE ||
293	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
294	    !ISP2(volblocksize))
295		return (SET_ERROR(EDOM));
296
297	return (0);
298}
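
/*
 * A worked example of the two checks above: volblocksize = 8K is a
 * power of two within [SPA_MINBLOCKSIZE, SPA_OLD_MAXBLOCKSIZE], so it
 * passes zvol_check_volblocksize(); a 1G volsize is then accepted,
 * while 1G + 4K is rejected with EINVAL because it is not a whole
 * multiple of the 8K block size.
 */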
299
300int
301zvol_get_stats(objset_t *os, nvlist_t *nv)
302{
303	int error;
304	dmu_object_info_t doi;
305	uint64_t val;
306
307	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
308	if (error)
309		return (error);
310
311	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
312
313	error = dmu_object_info(os, ZVOL_OBJ, &doi);
314
315	if (error == 0) {
316		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
317		    doi.doi_data_block_size);
318	}
319
320	return (error);
321}
322
323static zvol_state_t *
324zvol_minor_lookup(const char *name)
325{
326#ifdef illumos
327	minor_t minor;
328#endif
329	zvol_state_t *zv;
330
331	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
332
333#ifdef illumos
334	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
335		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
336		if (zv == NULL)
337			continue;
338#else
339	LIST_FOREACH(zv, &all_zvols, zv_links) {
340#endif
341		if (strcmp(zv->zv_name, name) == 0)
342			return (zv);
343	}
344
345	return (NULL);
346}
347
348/* extent mapping arg */
349struct maparg {
350	zvol_state_t	*ma_zv;
351	uint64_t	ma_blks;
352};
353
354/*ARGSUSED*/
355static int
356zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
357    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
358{
359	struct maparg *ma = arg;
360	zvol_extent_t *ze;
361	int bs = ma->ma_zv->zv_volblocksize;
362
363	if (bp == NULL || BP_IS_HOLE(bp) ||
364	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
365		return (0);
366
367	VERIFY(!BP_IS_EMBEDDED(bp));
368
369	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
370	ma->ma_blks++;
371
372	/* Abort immediately if we have encountered gang blocks */
373	if (BP_IS_GANG(bp))
374		return (SET_ERROR(EFRAGS));
375
376	/*
377	 * See if the block is at the end of the previous extent.
378	 */
379	ze = list_tail(&ma->ma_zv->zv_extents);
380	if (ze &&
381	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
382	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
383	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
384		ze->ze_nblks++;
385		return (0);
386	}
387
388	dprintf_bp(bp, "%s", "next blkptr:");
389
390	/* start a new extent */
391	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
392	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
393	ze->ze_nblks = 1;
394	list_insert_tail(&ma->ma_zv->zv_extents, ze);
395	return (0);
396}
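
/*
 * Illustration of the merge test above: an extent whose DVA starts at
 * offset 0x10000 and covers 4 blocks of an 8K volblocksize ends at
 * 0x18000, so the next block allocated at offset 0x18000 on the same
 * vdev simply bumps ze_nblks to 5, while one at 0x20000 starts a new
 * extent.
 */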
397
398static void
399zvol_free_extents(zvol_state_t *zv)
400{
401	zvol_extent_t *ze;
402
403	while ((ze = list_head(&zv->zv_extents)) != NULL) {
404		list_remove(&zv->zv_extents, ze);
405		kmem_free(ze, sizeof (zvol_extent_t));
406	}
407}
408
409static int
410zvol_get_lbas(zvol_state_t *zv)
411{
412	objset_t *os = zv->zv_objset;
413	struct maparg	ma;
414	int		err;
415
416	ma.ma_zv = zv;
417	ma.ma_blks = 0;
418	zvol_free_extents(zv);
419
420	/* commit any in-flight changes before traversing the dataset */
421	txg_wait_synced(dmu_objset_pool(os), 0);
422	err = traverse_dataset(dmu_objset_ds(os), 0,
423	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
424	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
425		zvol_free_extents(zv);
426		return (err ? err : EIO);
427	}
428
429	return (0);
430}
431
432/* ARGSUSED */
433void
434zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
435{
436	zfs_creat_t *zct = arg;
437	nvlist_t *nvprops = zct->zct_props;
438	int error;
439	uint64_t volblocksize, volsize;
440
441	VERIFY(nvlist_lookup_uint64(nvprops,
442	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
443	if (nvlist_lookup_uint64(nvprops,
444	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
445		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
446
447	/*
448	 * These properties must be removed from the list so the generic
449	 * property setting step won't apply to them.
450	 */
451	VERIFY(nvlist_remove_all(nvprops,
452	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
453	(void) nvlist_remove_all(nvprops,
454	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
455
456	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
457	    DMU_OT_NONE, 0, tx);
458	ASSERT(error == 0);
459
460	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
461	    DMU_OT_NONE, 0, tx);
462	ASSERT(error == 0);
463
464	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
465	ASSERT(error == 0);
466}
467
468/*
469 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
470 * implement DKIOCFREE/free-long-range.
471 */
472static int
473zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
474{
475	uint64_t offset, length;
476
477	if (byteswap)
478		byteswap_uint64_array(lr, sizeof (*lr));
479
480	offset = lr->lr_offset;
481	length = lr->lr_length;
482
483	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
484}
485
486/*
487 * Replay a TX_WRITE ZIL transaction that didn't get committed
488 * after a system failure
489 */
490static int
491zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
492{
493	objset_t *os = zv->zv_objset;
494	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
495	uint64_t offset, length;
496	dmu_tx_t *tx;
497	int error;
498
499	if (byteswap)
500		byteswap_uint64_array(lr, sizeof (*lr));
501
502	offset = lr->lr_offset;
503	length = lr->lr_length;
504
505	/* If it's a dmu_sync() block, write the whole block */
506	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
507		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
508		if (length < blocksize) {
509			offset -= offset % blocksize;
510			length = blocksize;
511		}
512	}
513
514	tx = dmu_tx_create(os);
515	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
516	error = dmu_tx_assign(tx, TXG_WAIT);
517	if (error) {
518		dmu_tx_abort(tx);
519	} else {
520		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
521		dmu_tx_commit(tx);
522	}
523
524	return (error);
525}
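
/*
 * Example of the whole-block case above: an indirect record
 * (lrc_reclen == sizeof (lr_write_t)) may describe 2K at offset 10K
 * of an 8K-block volume.  Because the data was written via dmu_sync(),
 * replay rewrites the containing block, rounding the offset down to
 * 8K and widening the length to the 8K block size taken from
 * lr_blkptr.
 */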
526
527/* ARGSUSED */
528static int
529zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
530{
531	return (SET_ERROR(ENOTSUP));
532}
533
534/*
535 * Callback vectors for replaying records.
536 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
537 */
538zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
539	zvol_replay_err,	/* 0 no such transaction type */
540	zvol_replay_err,	/* TX_CREATE */
541	zvol_replay_err,	/* TX_MKDIR */
542	zvol_replay_err,	/* TX_MKXATTR */
543	zvol_replay_err,	/* TX_SYMLINK */
544	zvol_replay_err,	/* TX_REMOVE */
545	zvol_replay_err,	/* TX_RMDIR */
546	zvol_replay_err,	/* TX_LINK */
547	zvol_replay_err,	/* TX_RENAME */
548	zvol_replay_write,	/* TX_WRITE */
549	zvol_replay_truncate,	/* TX_TRUNCATE */
550	zvol_replay_err,	/* TX_SETATTR */
551	zvol_replay_err,	/* TX_ACL */
552	zvol_replay_err,	/* TX_CREATE_ACL */
553	zvol_replay_err,	/* TX_CREATE_ATTR */
554	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
555	zvol_replay_err,	/* TX_MKDIR_ACL */
556	zvol_replay_err,	/* TX_MKDIR_ATTR */
557	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
558	zvol_replay_err,	/* TX_WRITE2 */
559};
560
561#ifdef illumos
562int
563zvol_name2minor(const char *name, minor_t *minor)
564{
565	zvol_state_t *zv;
566
567	mutex_enter(&zfsdev_state_lock);
568	zv = zvol_minor_lookup(name);
569	if (minor && zv)
570		*minor = zv->zv_minor;
571	mutex_exit(&zfsdev_state_lock);
572	return (zv ? 0 : -1);
573}
574#endif	/* illumos */
575
576/*
577 * Create a minor node (plus a whole lot more) for the specified volume.
578 */
579int
580zvol_create_minor(const char *name)
581{
582	zfs_soft_state_t *zs;
583	zvol_state_t *zv;
584	objset_t *os;
585	dmu_object_info_t doi;
586#ifdef illumos
587	minor_t minor = 0;
588	char chrbuf[30], blkbuf[30];
589#else
590	struct g_provider *pp;
591	struct g_geom *gp;
592	uint64_t volsize, mode;
593#endif
594	int error;
595
596#ifndef illumos
597	ZFS_LOG(1, "Creating ZVOL %s...", name);
598#endif
599
600	mutex_enter(&zfsdev_state_lock);
601
602	if (zvol_minor_lookup(name) != NULL) {
603		mutex_exit(&zfsdev_state_lock);
604		return (SET_ERROR(EEXIST));
605	}
606
607	/* lie and say we're read-only */
608	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
609
610	if (error) {
611		mutex_exit(&zfsdev_state_lock);
612		return (error);
613	}
614
615#ifdef illumos
616	if ((minor = zfsdev_minor_alloc()) == 0) {
617		dmu_objset_disown(os, FTAG);
618		mutex_exit(&zfsdev_state_lock);
619		return (SET_ERROR(ENXIO));
620	}
621
622	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
623		dmu_objset_disown(os, FTAG);
624		mutex_exit(&zfsdev_state_lock);
625		return (SET_ERROR(EAGAIN));
626	}
627	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
628	    (char *)name);
629
630	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
631
632	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
633	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
634		ddi_soft_state_free(zfsdev_state, minor);
635		dmu_objset_disown(os, FTAG);
636		mutex_exit(&zfsdev_state_lock);
637		return (SET_ERROR(EAGAIN));
638	}
639
640	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
641
642	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
643	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
644		ddi_remove_minor_node(zfs_dip, chrbuf);
645		ddi_soft_state_free(zfsdev_state, minor);
646		dmu_objset_disown(os, FTAG);
647		mutex_exit(&zfsdev_state_lock);
648		return (SET_ERROR(EAGAIN));
649	}
650
651	zs = ddi_get_soft_state(zfsdev_state, minor);
652	zs->zss_type = ZSST_ZVOL;
653	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
654#else	/* !illumos */
655
656	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
657	zv->zv_state = 0;
658	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
659	if (error) {
660		kmem_free(zv, sizeof(*zv));
661		dmu_objset_disown(os, FTAG);
662		mutex_exit(&zfsdev_state_lock);
663		return (error);
664	}
665	error = dsl_prop_get_integer(name,
666	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
667	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
668		mode = volmode;
669
670	DROP_GIANT();
671	zv->zv_volsize = volsize;
672	zv->zv_volmode = mode;
673	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
674		g_topology_lock();
675		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
676		gp->start = zvol_geom_start;
677		gp->access = zvol_geom_access;
678		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
679		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
680		pp->sectorsize = DEV_BSIZE;
681		pp->mediasize = zv->zv_volsize;
682		pp->private = zv;
683
684		zv->zv_provider = pp;
685		bioq_init(&zv->zv_queue);
686		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
687	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
688		struct make_dev_args args;
689
690		make_dev_args_init(&args);
691		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
692		args.mda_devsw = &zvol_cdevsw;
693		args.mda_cr = NULL;
694		args.mda_uid = UID_ROOT;
695		args.mda_gid = GID_OPERATOR;
696		args.mda_mode = 0640;
697		args.mda_si_drv2 = zv;
698		error = make_dev_s(&args, &zv->zv_dev,
699		    "%s/%s", ZVOL_DRIVER, name);
700		if (error != 0) {
701			kmem_free(zv, sizeof(*zv));
702			dmu_objset_disown(os, FTAG);
703			mutex_exit(&zfsdev_state_lock);
704			return (error);
705		}
706		zv->zv_dev->si_iosize_max = MAXPHYS;
707	}
708	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
709#endif	/* illumos */
710
711	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
712	zv->zv_min_bs = DEV_BSHIFT;
713#ifdef illumos
714	zv->zv_minor = minor;
715#endif
716	zv->zv_objset = os;
717	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
718		zv->zv_flags |= ZVOL_RDONLY;
719	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
720	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
721	    sizeof (rl_t), offsetof(rl_t, r_node));
722	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
723	    offsetof(zvol_extent_t, ze_node));
724	/* get and cache the blocksize */
725	error = dmu_object_info(os, ZVOL_OBJ, &doi);
726	ASSERT(error == 0);
727	zv->zv_volblocksize = doi.doi_data_block_size;
728
729	if (spa_writeable(dmu_objset_spa(os))) {
730		if (zil_replay_disable)
731			zil_destroy(dmu_objset_zil(os), B_FALSE);
732		else
733			zil_replay(os, zv, zvol_replay_vector);
734	}
735	dmu_objset_disown(os, FTAG);
736	zv->zv_objset = NULL;
737
738	zvol_minors++;
739
740	mutex_exit(&zfsdev_state_lock);
741#ifndef illumos
742	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
743		zvol_geom_run(zv);
744		g_topology_unlock();
745	}
746	PICKUP_GIANT();
747
748	ZFS_LOG(1, "ZVOL %s created.", name);
749#endif
750
751	return (0);
752}
753
754/*
755 * Remove minor node for the specified volume.
756 */
757static int
758zvol_remove_zv(zvol_state_t *zv)
759{
760#ifdef illumos
761	char nmbuf[20];
762	minor_t minor = zv->zv_minor;
763#endif
764
765	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
766	if (zv->zv_total_opens != 0)
767		return (SET_ERROR(EBUSY));
768
769#ifdef illumos
770	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
771	ddi_remove_minor_node(zfs_dip, nmbuf);
772
773	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
774	ddi_remove_minor_node(zfs_dip, nmbuf);
775#else
776	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
777
778	LIST_REMOVE(zv, zv_links);
779	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
780		g_topology_lock();
781		zvol_geom_destroy(zv);
782		g_topology_unlock();
783	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
784		if (zv->zv_dev != NULL)
785			destroy_dev(zv->zv_dev);
786	}
787#endif
788
789	avl_destroy(&zv->zv_znode.z_range_avl);
790	mutex_destroy(&zv->zv_znode.z_range_lock);
791
792	kmem_free(zv, sizeof (zvol_state_t));
793#ifdef illumos
794	ddi_soft_state_free(zfsdev_state, minor);
795#endif
796	zvol_minors--;
797	return (0);
798}
799
800int
801zvol_remove_minor(const char *name)
802{
803	zvol_state_t *zv;
804	int rc;
805
806	mutex_enter(&zfsdev_state_lock);
807	if ((zv = zvol_minor_lookup(name)) == NULL) {
808		mutex_exit(&zfsdev_state_lock);
809		return (SET_ERROR(ENXIO));
810	}
811	rc = zvol_remove_zv(zv);
812	mutex_exit(&zfsdev_state_lock);
813	return (rc);
814}
815
816int
817zvol_first_open(zvol_state_t *zv)
818{
819	objset_t *os;
820	uint64_t volsize;
821	int error;
822	uint64_t readonly;
823
824	/* lie and say we're read-only */
825	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
826	    zvol_tag, &os);
827	if (error)
828		return (error);
829
830	zv->zv_objset = os;
831	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
832	if (error) {
833		ASSERT(error == 0);
834		dmu_objset_disown(os, zvol_tag);
835		return (error);
836	}
837
838	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
839	if (error) {
840		dmu_objset_disown(os, zvol_tag);
841		return (error);
842	}
843
844	zvol_size_changed(zv, volsize);
845	zv->zv_zilog = zil_open(os, zvol_get_data);
846
847	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
848	    NULL) == 0);
849	if (readonly || dmu_objset_is_snapshot(os) ||
850	    !spa_writeable(dmu_objset_spa(os)))
851		zv->zv_flags |= ZVOL_RDONLY;
852	else
853		zv->zv_flags &= ~ZVOL_RDONLY;
854	return (error);
855}
856
857void
858zvol_last_close(zvol_state_t *zv)
859{
860	zil_close(zv->zv_zilog);
861	zv->zv_zilog = NULL;
862
863	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
864	zv->zv_dbuf = NULL;
865
866	/*
867	 * Evict cached data
868	 */
869	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
870	    !(zv->zv_flags & ZVOL_RDONLY))
871		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
872	dmu_objset_evict_dbufs(zv->zv_objset);
873
874	dmu_objset_disown(zv->zv_objset, zvol_tag);
875	zv->zv_objset = NULL;
876}
877
878#ifdef illumos
879int
880zvol_prealloc(zvol_state_t *zv)
881{
882	objset_t *os = zv->zv_objset;
883	dmu_tx_t *tx;
884	uint64_t refd, avail, usedobjs, availobjs;
885	uint64_t resid = zv->zv_volsize;
886	uint64_t off = 0;
887
888	/* Check the space usage before attempting to allocate the space */
889	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
890	if (avail < zv->zv_volsize)
891		return (SET_ERROR(ENOSPC));
892
893	/* Free old extents if they exist */
894	zvol_free_extents(zv);
895
896	while (resid != 0) {
897		int error;
898		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
899
900		tx = dmu_tx_create(os);
901		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
902		error = dmu_tx_assign(tx, TXG_WAIT);
903		if (error) {
904			dmu_tx_abort(tx);
905			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
906			return (error);
907		}
908		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
909		dmu_tx_commit(tx);
910		off += bytes;
911		resid -= bytes;
912	}
913	txg_wait_synced(dmu_objset_pool(os), 0);
914
915	return (0);
916}
917#endif	/* illumos */
918
919static int
920zvol_update_volsize(objset_t *os, uint64_t volsize)
921{
922	dmu_tx_t *tx;
923	int error;
924
925	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
926
927	tx = dmu_tx_create(os);
928	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
929	dmu_tx_mark_netfree(tx);
930	error = dmu_tx_assign(tx, TXG_WAIT);
931	if (error) {
932		dmu_tx_abort(tx);
933		return (error);
934	}
935
936	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
937	    &volsize, tx);
938	dmu_tx_commit(tx);
939
940	if (error == 0)
941		error = dmu_free_long_range(os,
942		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
943	return (error);
944}
945
946void
947zvol_remove_minors(const char *name)
948{
949#ifdef illumos
950	zvol_state_t *zv;
951	char *namebuf;
952	minor_t minor;
953
954	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
955	(void) strncpy(namebuf, name, strlen(name));
956	(void) strcat(namebuf, "/");
957	mutex_enter(&zfsdev_state_lock);
958	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
959
960		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
961		if (zv == NULL)
962			continue;
963		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
964			(void) zvol_remove_zv(zv);
965	}
966	kmem_free(namebuf, strlen(name) + 2);
967
968	mutex_exit(&zfsdev_state_lock);
969#else	/* !illumos */
970	zvol_state_t *zv, *tzv;
971	size_t namelen;
972
973	namelen = strlen(name);
974
975	DROP_GIANT();
976	mutex_enter(&zfsdev_state_lock);
977
978	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
979		if (strcmp(zv->zv_name, name) == 0 ||
980		    (strncmp(zv->zv_name, name, namelen) == 0 &&
981		    strlen(zv->zv_name) > namelen && (zv->zv_name[namelen] == '/' ||
982		    zv->zv_name[namelen] == '@'))) {
983			(void) zvol_remove_zv(zv);
984		}
985	}
986
987	mutex_exit(&zfsdev_state_lock);
988	PICKUP_GIANT();
989#endif	/* illumos */
990}
991
992static int
993zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
994{
995	uint64_t old_volsize = 0ULL;
996	int error = 0;
997
998	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
999
1000	/*
1001	 * Reinitialize the dump area to the new size.  If we fail to
1002	 * resize the dump area, restore it to its original size.  We
1003	 * must set the new volsize prior to calling dumpvp_resize() to
1004	 * ensure that the device's size(9P) is not visible to the dump
1005	 * subsystem.
1006	 */
1007	old_volsize = zv->zv_volsize;
1008	zvol_size_changed(zv, volsize);
1009
1010#ifdef ZVOL_DUMP
1011	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1012		if ((error = zvol_dumpify(zv)) != 0 ||
1013		    (error = dumpvp_resize()) != 0) {
1014			int dumpify_error;
1015
1016			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
1017			zvol_size_changed(zv, old_volsize);
1018			dumpify_error = zvol_dumpify(zv);
1019			error = dumpify_error ? dumpify_error : error;
1020		}
1021	}
1022#endif	/* ZVOL_DUMP */
1023
1024#ifdef illumos
1025	/*
1026	 * Generate a LUN expansion event.
1027	 */
1028	if (error == 0) {
1029		sysevent_id_t eid;
1030		nvlist_t *attr;
1031		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1032
1033		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
1034		    zv->zv_minor);
1035
1036		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1037		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
1038
1039		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
1040		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
1041
1042		nvlist_free(attr);
1043		kmem_free(physpath, MAXPATHLEN);
1044	}
1045#endif	/* illumos */
1046	return (error);
1047}
1048
1049int
1050zvol_set_volsize(const char *name, uint64_t volsize)
1051{
1052	zvol_state_t *zv = NULL;
1053	objset_t *os;
1054	int error;
1055	dmu_object_info_t doi;
1056	uint64_t readonly;
1057	boolean_t owned = B_FALSE;
1058
1059	error = dsl_prop_get_integer(name,
1060	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
1061	if (error != 0)
1062		return (error);
1063	if (readonly)
1064		return (SET_ERROR(EROFS));
1065
1066	mutex_enter(&zfsdev_state_lock);
1067	zv = zvol_minor_lookup(name);
1068
1069	if (zv == NULL || zv->zv_objset == NULL) {
1070		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
1071		    FTAG, &os)) != 0) {
1072			mutex_exit(&zfsdev_state_lock);
1073			return (error);
1074		}
1075		owned = B_TRUE;
1076		if (zv != NULL)
1077			zv->zv_objset = os;
1078	} else {
1079		os = zv->zv_objset;
1080	}
1081
1082	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
1083	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
1084		goto out;
1085
1086	error = zvol_update_volsize(os, volsize);
1087
1088	if (error == 0 && zv != NULL)
1089		error = zvol_update_live_volsize(zv, volsize);
1090out:
1091	if (owned) {
1092		dmu_objset_disown(os, FTAG);
1093		if (zv != NULL)
1094			zv->zv_objset = NULL;
1095	}
1096	mutex_exit(&zfsdev_state_lock);
1097	return (error);
1098}
1099
1100/*ARGSUSED*/
1101#ifdef illumos
1102int
1103zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
1104#else
1105static int
1106zvol_open(struct g_provider *pp, int flag, int count)
1107#endif
1108{
1109	zvol_state_t *zv;
1110	int err = 0;
1111#ifdef illumos
1112
1113	mutex_enter(&zfsdev_state_lock);
1114
1115	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
1116	if (zv == NULL) {
1117		mutex_exit(&zfsdev_state_lock);
1118		return (SET_ERROR(ENXIO));
1119	}
1120
1121	if (zv->zv_total_opens == 0)
1122		err = zvol_first_open(zv);
1123	if (err) {
1124		mutex_exit(&zfsdev_state_lock);
1125		return (err);
1126	}
1127#else	/* !illumos */
1128	if (tsd_get(zfs_geom_probe_vdev_key) != NULL) {
1129		/*
1130		 * If zfs_geom_probe_vdev_key is set, ZFS is attempting to
1131		 * probe GEOM providers while looking for a replacement for a
1132		 * missing vdev.  In this case the spa_namespace_lock will not
1133		 * be held, but it is still illegal to use a zvol as a vdev.
1134		 * Deadlocks can result if another thread holds the
1135		 * spa_namespace_lock.
1136		 */
1137		return (EOPNOTSUPP);
1138	}
1139
1140	mutex_enter(&zfsdev_state_lock);
1141
1142	zv = pp->private;
1143	if (zv == NULL) {
1144		mutex_exit(&zfsdev_state_lock);
1145		return (SET_ERROR(ENXIO));
1146	}
1147
1148	if (zv->zv_total_opens == 0) {
1149		err = zvol_first_open(zv);
1150		if (err) {
1151			mutex_exit(&zfsdev_state_lock);
1152			return (err);
1153		}
1154		pp->mediasize = zv->zv_volsize;
1155		pp->stripeoffset = 0;
1156		pp->stripesize = zv->zv_volblocksize;
1157	}
1158#endif	/* illumos */
1159	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
1160		err = SET_ERROR(EROFS);
1161		goto out;
1162	}
1163	if (zv->zv_flags & ZVOL_EXCL) {
1164		err = SET_ERROR(EBUSY);
1165		goto out;
1166	}
1167#ifdef FEXCL
1168	if (flag & FEXCL) {
1169		if (zv->zv_total_opens != 0) {
1170			err = SET_ERROR(EBUSY);
1171			goto out;
1172		}
1173		zv->zv_flags |= ZVOL_EXCL;
1174	}
1175#endif
1176
1177#ifdef illumos
1178	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
1179		zv->zv_open_count[otyp]++;
1180		zv->zv_total_opens++;
1181	}
1182	mutex_exit(&zfsdev_state_lock);
1183#else
1184	zv->zv_total_opens += count;
1185	mutex_exit(&zfsdev_state_lock);
1186#endif
1187
1188	return (err);
1189out:
1190	if (zv->zv_total_opens == 0)
1191		zvol_last_close(zv);
1192	mutex_exit(&zfsdev_state_lock);
1197	return (err);
1198}
1199
1200/*ARGSUSED*/
1201#ifdef illumos
1202int
1203zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
1204{
1205	minor_t minor = getminor(dev);
1206	zvol_state_t *zv;
1207	int error = 0;
1208
1209	mutex_enter(&zfsdev_state_lock);
1210
1211	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1212	if (zv == NULL) {
1213		mutex_exit(&zfsdev_state_lock);
1214#else	/* !illumos */
1215static int
1216zvol_close(struct g_provider *pp, int flag, int count)
1217{
1218	zvol_state_t *zv;
1219	int error = 0;
1220	boolean_t locked = B_FALSE;
1221
1222	/* See comment in zvol_open(). */
1223	if (!MUTEX_HELD(&zfsdev_state_lock)) {
1224		mutex_enter(&zfsdev_state_lock);
1225		locked = B_TRUE;
1226	}
1227
1228	zv = pp->private;
1229	if (zv == NULL) {
1230		if (locked)
1231			mutex_exit(&zfsdev_state_lock);
1232#endif	/* illumos */
1233		return (SET_ERROR(ENXIO));
1234	}
1235
1236	if (zv->zv_flags & ZVOL_EXCL) {
1237		ASSERT(zv->zv_total_opens == 1);
1238		zv->zv_flags &= ~ZVOL_EXCL;
1239	}
1240
1241	/*
1242	 * If the open count is zero, this is a spurious close.
1243	 * That indicates a bug in the kernel / DDI framework.
1244	 */
1245#ifdef illumos
1246	ASSERT(zv->zv_open_count[otyp] != 0);
1247#endif
1248	ASSERT(zv->zv_total_opens != 0);
1249
1250	/*
1251	 * You may get multiple opens, but only one close.
1252	 */
1253#ifdef illumos
1254	zv->zv_open_count[otyp]--;
1255	zv->zv_total_opens--;
1256#else
1257	zv->zv_total_opens -= count;
1258#endif
1259
1260	if (zv->zv_total_opens == 0)
1261		zvol_last_close(zv);
1262
1263#ifdef illumos
1264	mutex_exit(&zfsdev_state_lock);
1265#else
1266	if (locked)
1267		mutex_exit(&zfsdev_state_lock);
1268#endif
1269	return (error);
1270}
1271
1272static void
1273zvol_get_done(zgd_t *zgd, int error)
1274{
1275	if (zgd->zgd_db)
1276		dmu_buf_rele(zgd->zgd_db, zgd);
1277
1278	zfs_range_unlock(zgd->zgd_rl);
1279
1280	if (error == 0 && zgd->zgd_bp)
1281		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1282
1283	kmem_free(zgd, sizeof (zgd_t));
1284}
1285
1286/*
1287 * Get data to generate a TX_WRITE intent log record.
1288 */
1289static int
1290zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1291{
1292	zvol_state_t *zv = arg;
1293	objset_t *os = zv->zv_objset;
1294	uint64_t object = ZVOL_OBJ;
1295	uint64_t offset = lr->lr_offset;
1296	uint64_t size = lr->lr_length;	/* length of user data */
1297	blkptr_t *bp = &lr->lr_blkptr;
1298	dmu_buf_t *db;
1299	zgd_t *zgd;
1300	int error;
1301
1302	ASSERT(zio != NULL);
1303	ASSERT(size != 0);
1304
1305	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1306	zgd->zgd_zilog = zv->zv_zilog;
1307	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
1308
1309	/*
1310	 * Write records come in two flavors: immediate and indirect.
1311	 * For small writes it's cheaper to store the data with the
1312	 * log record (immediate); for large writes it's cheaper to
1313	 * sync the data and get a pointer to it (indirect) so that
1314	 * we don't have to write the data twice.
1315	 */
1316	if (buf != NULL) {	/* immediate write */
1317		error = dmu_read(os, object, offset, size, buf,
1318		    DMU_READ_NO_PREFETCH);
1319	} else {
1320		size = zv->zv_volblocksize;
1321		offset = P2ALIGN(offset, size);
1322		error = dmu_buf_hold(os, object, offset, zgd, &db,
1323		    DMU_READ_NO_PREFETCH);
1324		if (error == 0) {
1325			blkptr_t *obp = dmu_buf_get_blkptr(db);
1326			if (obp) {
1327				ASSERT(BP_IS_HOLE(bp));
1328				*bp = *obp;
1329			}
1330
1331			zgd->zgd_db = db;
1332			zgd->zgd_bp = bp;
1333
1334			ASSERT(db->db_offset == offset);
1335			ASSERT(db->db_size == size);
1336
1337			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1338			    zvol_get_done, zgd);
1339
1340			if (error == 0)
1341				return (0);
1342		}
1343	}
1344
1345	zvol_get_done(zgd, error);
1346
1347	return (error);
1348}
1349
1350/*
1351 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1352 *
1353 * We store data in the log buffers if it's small enough.
1354 * Otherwise we will later flush the data out via dmu_sync().
1355 */
1356ssize_t zvol_immediate_write_sz = 32768;
1357
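/*
 * The loop below picks one of three itx write states:
 *
 *	WR_INDIRECT	block-aligned whole-block writes when the
 *			volume block size exceeds the immediate
 *			threshold and no separate log device is in
 *			use; the data is later flushed via dmu_sync().
 *	WR_COPIED	synchronous writes whose data is embedded in
 *			the log record itself (capped at
 *			ZIL_MAX_LOG_DATA per record).
 *	WR_NEED_COPY	everything else; the data is copied when the
 *			itx is committed.
 */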
1358static void
1359zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1360    boolean_t sync)
1361{
1362	uint32_t blocksize = zv->zv_volblocksize;
1363	zilog_t *zilog = zv->zv_zilog;
1364	boolean_t slogging;
1365	ssize_t immediate_write_sz;
1366
1367	if (zil_replaying(zilog, tx))
1368		return;
1369
1370	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1371	    ? 0 : zvol_immediate_write_sz;
1372
1373	slogging = spa_has_slogs(zilog->zl_spa) &&
1374	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
1375
1376	while (resid) {
1377		itx_t *itx;
1378		lr_write_t *lr;
1379		ssize_t len;
1380		itx_wr_state_t write_state;
1381
1382		/*
1383		 * Unlike zfs_log_write(), we can be called with up to
1384		 * DMU_MAX_ACCESS/2 (5MB) writes.
1385		 */
1386		if (blocksize > immediate_write_sz && !slogging &&
1387		    resid >= blocksize && off % blocksize == 0) {
1388			write_state = WR_INDIRECT; /* uses dmu_sync */
1389			len = blocksize;
1390		} else if (sync) {
1391			write_state = WR_COPIED;
1392			len = MIN(ZIL_MAX_LOG_DATA, resid);
1393		} else {
1394			write_state = WR_NEED_COPY;
1395			len = MIN(ZIL_MAX_LOG_DATA, resid);
1396		}
1397
1398		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1399		    (write_state == WR_COPIED ? len : 0));
1400		lr = (lr_write_t *)&itx->itx_lr;
1401		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
1402		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1403			zil_itx_destroy(itx);
1404			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1405			lr = (lr_write_t *)&itx->itx_lr;
1406			write_state = WR_NEED_COPY;
1407		}
1408
1409		itx->itx_wr_state = write_state;
1410		if (write_state == WR_NEED_COPY)
1411			itx->itx_sod += len;
1412		lr->lr_foid = ZVOL_OBJ;
1413		lr->lr_offset = off;
1414		lr->lr_length = len;
1415		lr->lr_blkoff = 0;
1416		BP_ZERO(&lr->lr_blkptr);
1417
1418		itx->itx_private = zv;
1419		itx->itx_sync = sync;
1420
1421		zil_itx_assign(zilog, itx, tx);
1422
1423		off += len;
1424		resid -= len;
1425	}
1426}
1427
1428#ifdef illumos
1429static int
1430zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1431    uint64_t size, boolean_t doread, boolean_t isdump)
1432{
1433	vdev_disk_t *dvd;
1434	int c;
1435	int numerrors = 0;
1436
1437	if (vd->vdev_ops == &vdev_mirror_ops ||
1438	    vd->vdev_ops == &vdev_replacing_ops ||
1439	    vd->vdev_ops == &vdev_spare_ops) {
1440		for (c = 0; c < vd->vdev_children; c++) {
1441			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1442			    addr, offset, origoffset, size, doread, isdump);
1443			if (err != 0) {
1444				numerrors++;
1445			} else if (doread) {
1446				break;
1447			}
1448		}
1449	}
1450
1451	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1452		return (numerrors < vd->vdev_children ? 0 : EIO);
1453
1454	if (doread && !vdev_readable(vd))
1455		return (SET_ERROR(EIO));
1456	else if (!doread && !vdev_writeable(vd))
1457		return (SET_ERROR(EIO));
1458
1459	if (vd->vdev_ops == &vdev_raidz_ops) {
1460		return (vdev_raidz_physio(vd,
1461		    addr, size, offset, origoffset, doread, isdump));
1462	}
1463
1464	offset += VDEV_LABEL_START_SIZE;
1465
1466	if (ddi_in_panic() || isdump) {
1467		ASSERT(!doread);
1468		if (doread)
1469			return (SET_ERROR(EIO));
1470		dvd = vd->vdev_tsd;
1471		ASSERT3P(dvd, !=, NULL);
1472		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1473		    lbtodb(size)));
1474	} else {
1475		dvd = vd->vdev_tsd;
1476		ASSERT3P(dvd, !=, NULL);
1477		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1478		    offset, doread ? B_READ : B_WRITE));
1479	}
1480}
1481
1482static int
1483zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1484    boolean_t doread, boolean_t isdump)
1485{
1486	vdev_t *vd;
1487	int error;
1488	zvol_extent_t *ze;
1489	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1490
1491	/* Must be sector-aligned, and must not straddle a block boundary. */
1492	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1493	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1494		return (SET_ERROR(EINVAL));
1495	}
1496	ASSERT(size <= zv->zv_volblocksize);
1497
1498	/* Locate the extent this belongs to */
1499	ze = list_head(&zv->zv_extents);
1500	while (ze != NULL && offset >= ze->ze_nblks * zv->zv_volblocksize) {
1501		offset -= ze->ze_nblks * zv->zv_volblocksize;
1502		ze = list_next(&zv->zv_extents, ze);
1503	}
1504
1505	if (ze == NULL)
1506		return (SET_ERROR(EINVAL));
1507
1508	if (!ddi_in_panic())
1509		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1510
1511	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1512	offset += DVA_GET_OFFSET(&ze->ze_dva);
1513	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1514	    size, doread, isdump);
1515
1516	if (!ddi_in_panic())
1517		spa_config_exit(spa, SCL_STATE, FTAG);
1518
1519	return (error);
1520}
1521
1522int
1523zvol_strategy(buf_t *bp)
1524{
1525	zfs_soft_state_t *zs = NULL;
1526#else	/* !illumos */
1527void
1528zvol_strategy(struct bio *bp)
1529{
1530#endif	/* illumos */
1531	zvol_state_t *zv;
1532	uint64_t off, volsize;
1533	size_t resid;
1534	char *addr;
1535	objset_t *os;
1536	rl_t *rl;
1537	int error = 0;
1538#ifdef illumos
1539	boolean_t doread = bp->b_flags & B_READ;
1540#else
1541	boolean_t doread = 0;
1542#endif
1543	boolean_t is_dumpified;
1544	boolean_t sync;
1545
1546#ifdef illumos
1547	if (getminor(bp->b_edev) == 0) {
1548		error = SET_ERROR(EINVAL);
1549	} else {
1550		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1551		if (zs == NULL)
1552			error = SET_ERROR(ENXIO);
1553		else if (zs->zss_type != ZSST_ZVOL)
1554			error = SET_ERROR(EINVAL);
1555	}
1556
1557	if (error) {
1558		bioerror(bp, error);
1559		biodone(bp);
1560		return (0);
1561	}
1562
1563	zv = zs->zss_data;
1564
1565	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1566		bioerror(bp, EROFS);
1567		biodone(bp);
1568		return (0);
1569	}
1570
1571	off = ldbtob(bp->b_blkno);
1572#else	/* !illumos */
1573	if (bp->bio_to)
1574		zv = bp->bio_to->private;
1575	else
1576		zv = bp->bio_dev->si_drv2;
1577
1578	if (zv == NULL) {
1579		error = SET_ERROR(ENXIO);
1580		goto out;
1581	}
1582
1583	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
1584		error = SET_ERROR(EROFS);
1585		goto out;
1586	}
1587
1588	switch (bp->bio_cmd) {
1589	case BIO_FLUSH:
1590		goto sync;
1591	case BIO_READ:
1592		doread = 1;	/* FALLTHROUGH */
1593	case BIO_WRITE:
1594	case BIO_DELETE:
1595		break;
1596	default:
1597		error = EOPNOTSUPP;
1598		goto out;
1599	}
1600
1601	off = bp->bio_offset;
1602#endif	/* illumos */
1603	volsize = zv->zv_volsize;
1604
1605	os = zv->zv_objset;
1606	ASSERT(os != NULL);
1607
1608#ifdef illumos
1609	bp_mapin(bp);
1610	addr = bp->b_un.b_addr;
1611	resid = bp->b_bcount;
1612
1613	if (resid > 0 && (off < 0 || off >= volsize)) {
1614		bioerror(bp, EIO);
1615		biodone(bp);
1616		return (0);
1617	}
1618
1619	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1620	sync = ((!(bp->b_flags & B_ASYNC) &&
1621	    !(zv->zv_flags & ZVOL_WCE)) ||
1622	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1623	    !doread && !is_dumpified;
1624#else	/* !illumos */
1625	addr = bp->bio_data;
1626	resid = bp->bio_length;
1627
1628	if (resid > 0 && (off < 0 || off >= volsize)) {
1629		error = SET_ERROR(EIO);
1630		goto out;
1631	}
1632
1633	is_dumpified = B_FALSE;
1634	sync = !doread && !is_dumpified &&
1635	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
1636#endif	/* illumos */
1637
1638	/*
1639	 * There must be no buffer changes when doing a dmu_sync() because
1640	 * we can't change the data whilst calculating the checksum.
1641	 */
1642	rl = zfs_range_lock(&zv->zv_znode, off, resid,
1643	    doread ? RL_READER : RL_WRITER);
1644
1645#ifndef illumos
1646	if (bp->bio_cmd == BIO_DELETE) {
1647		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1648		error = dmu_tx_assign(tx, TXG_WAIT);
1649		if (error != 0) {
1650			dmu_tx_abort(tx);
1651		} else {
1652			zvol_log_truncate(zv, tx, off, resid, B_TRUE);
1653			dmu_tx_commit(tx);
1654			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1655			    off, resid);
1656			resid = 0;
1657		}
1658		goto unlock;
1659	}
1660#endif
1661	while (resid != 0 && off < volsize) {
1662		size_t size = MIN(resid, zvol_maxphys);
1663#ifdef illumos
1664		if (is_dumpified) {
1665			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1666			error = zvol_dumpio(zv, addr, off, size,
1667			    doread, B_FALSE);
1668		} else if (doread) {
1669#else
1670		if (doread) {
1671#endif
1672			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1673			    DMU_READ_PREFETCH);
1674		} else {
1675			dmu_tx_t *tx = dmu_tx_create(os);
1676			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1677			error = dmu_tx_assign(tx, TXG_WAIT);
1678			if (error) {
1679				dmu_tx_abort(tx);
1680			} else {
1681				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1682				zvol_log_write(zv, tx, off, size, sync);
1683				dmu_tx_commit(tx);
1684			}
1685		}
1686		if (error) {
1687			/* convert checksum errors into IO errors */
1688			if (error == ECKSUM)
1689				error = SET_ERROR(EIO);
1690			break;
1691		}
1692		off += size;
1693		addr += size;
1694		resid -= size;
1695	}
1696#ifndef illumos
1697unlock:
1698#endif
1699	zfs_range_unlock(rl);
1700
1701#ifdef illumos
1702	if ((bp->b_resid = resid) == bp->b_bcount)
1703		bioerror(bp, off > volsize ? EINVAL : error);
1704
1705	if (sync)
1706		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1707	biodone(bp);
1708
1709	return (0);
1710#else	/* !illumos */
1711	bp->bio_completed = bp->bio_length - resid;
1712	if (bp->bio_completed < bp->bio_length && off > volsize)
1713		error = EINVAL;
1714
1715	if (sync) {
1716sync:
1717		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1718	}
1719out:
1720	if (bp->bio_to)
1721		g_io_deliver(bp, error);
1722	else
1723		biofinish(bp, NULL, error);
1724#endif	/* illumos */
1725}
1726
1727#ifdef illumos
1728/*
1729 * Set the buffer count to the zvol maximum transfer.
1730 * Using our own routine instead of the default minphys()
1731 * means that for larger writes we write bigger buffers on X86
1732 * (128K instead of 56K) and flush the disk write cache less often
1733 * (every zvol_maxphys - currently 5MB) instead of minphys (currently
1734 * 56K on X86 and 128K on sparc).
1735 */
1736void
1737zvol_minphys(struct buf *bp)
1738{
1739	if (bp->b_bcount > zvol_maxphys)
1740		bp->b_bcount = zvol_maxphys;
1741}
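
/*
 * zvol_minphys() is the minphys routine handed to physio() by the
 * dumpified zvol_read()/zvol_write() paths below, e.g.:
 *
 *	error = physio(zvol_strategy, NULL, dev, B_READ,
 *	    zvol_minphys, uio);
 */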
1742
1743int
1744zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1745{
1746	minor_t minor = getminor(dev);
1747	zvol_state_t *zv;
1748	int error = 0;
1749	uint64_t size;
1750	uint64_t boff;
1751	uint64_t resid;
1752
1753	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1754	if (zv == NULL)
1755		return (SET_ERROR(ENXIO));
1756
1757	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1758		return (SET_ERROR(EINVAL));
1759
1760	boff = ldbtob(blkno);
1761	resid = ldbtob(nblocks);
1762
1763	VERIFY3U(boff + resid, <=, zv->zv_volsize);
1764
1765	while (resid) {
1766		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1767		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1768		if (error)
1769			break;
1770		boff += size;
1771		addr += size;
1772		resid -= size;
1773	}
1774
1775	return (error);
1776}
1777
1778/*ARGSUSED*/
1779int
1780zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1781{
1782	minor_t minor = getminor(dev);
1783#else	/* !illumos */
1784int
1785zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
1786{
1787#endif	/* illumos */
1788	zvol_state_t *zv;
1789	uint64_t volsize;
1790	rl_t *rl;
1791	int error = 0;
1792
1793#ifdef illumos
1794	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1795	if (zv == NULL)
1796		return (SET_ERROR(ENXIO));
1797#else
1798	zv = dev->si_drv2;
1799#endif
1800
1801	volsize = zv->zv_volsize;
1802	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
1803	if (uio->uio_resid > 0 &&
1804	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
1805		return (SET_ERROR(EIO));
1806
1807#ifdef illumos
1808	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1809		error = physio(zvol_strategy, NULL, dev, B_READ,
1810		    zvol_minphys, uio);
1811		return (error);
1812	}
1813#endif
1814
1815	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1816	    RL_READER);
1817	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1818		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1819
1820		/* don't read past the end */
1821		if (bytes > volsize - uio->uio_loffset)
1822			bytes = volsize - uio->uio_loffset;
1823
1824		error =  dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
1825		if (error) {
1826			/* convert checksum errors into IO errors */
1827			if (error == ECKSUM)
1828				error = SET_ERROR(EIO);
1829			break;
1830		}
1831	}
1832	zfs_range_unlock(rl);
1833	return (error);
1834}
1835
1836#ifdef illumos
1837/*ARGSUSED*/
1838int
1839zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1840{
1841	minor_t minor = getminor(dev);
1842#else	/* !illumos */
1843int
1844zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
1845{
1846#endif	/* illumos */
1847	zvol_state_t *zv;
1848	uint64_t volsize;
1849	rl_t *rl;
1850	int error = 0;
1851	boolean_t sync;
1852
1853#ifdef illumos
1854	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1855	if (zv == NULL)
1856		return (SET_ERROR(ENXIO));
1857#else
1858	zv = dev->si_drv2;
1859#endif
1860
1861	volsize = zv->zv_volsize;
1862	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
1863	if (uio->uio_resid > 0 &&
1864	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
1865		return (SET_ERROR(EIO));
1866
1867#ifdef illumos
1868	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1869		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1870		    zvol_minphys, uio);
1871		return (error);
1872	}
1873
1874	sync = !(zv->zv_flags & ZVOL_WCE) ||
1875#else
1876	sync = (ioflag & IO_SYNC) ||
1877#endif
1878	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1879
1880	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1881	    RL_WRITER);
1882	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1883		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1884		uint64_t off = uio->uio_loffset;
1885		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1886
1887		if (bytes > volsize - off)	/* don't write past the end */
1888			bytes = volsize - off;
1889
1890		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1891		error = dmu_tx_assign(tx, TXG_WAIT);
1892		if (error) {
1893			dmu_tx_abort(tx);
1894			break;
1895		}
1896		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1897		if (error == 0)
1898			zvol_log_write(zv, tx, off, bytes, sync);
1899		dmu_tx_commit(tx);
1900
1901		if (error)
1902			break;
1903	}
1904	zfs_range_unlock(rl);
1905	if (sync)
1906		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1907	return (error);
1908}
1909
1910#ifdef illumos
1911int
1912zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1913{
1914	struct uuid uuid = EFI_RESERVED;
1915	efi_gpe_t gpe = { 0 };
1916	uint32_t crc;
1917	dk_efi_t efi;
1918	int length;
1919	char *ptr;
1920
1921	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1922		return (SET_ERROR(EFAULT));
1923	ptr = (char *)(uintptr_t)efi.dki_data_64;
1924	length = efi.dki_length;
1925	/*
1926	 * Some clients may attempt to request a PMBR for the
1927	 * zvol.  Currently this interface will return EINVAL to
1928	 * such requests.  These requests could be supported by
1929	 * adding a check for lba == 0 and consing up an appropriate
1930	 * PMBR.
1931	 */
1932	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1933		return (SET_ERROR(EINVAL));
1934
1935	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1936	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1937	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1938
1939	if (efi.dki_lba == 1) {
1940		efi_gpt_t gpt = { 0 };
1941
1942		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1943		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1944		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1945		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1946		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1947		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1948		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1949		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1950		gpt.efi_gpt_SizeOfPartitionEntry =
1951		    LE_32(sizeof (efi_gpe_t));
1952		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1953		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1954		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1955		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1956		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1957		    flag))
1958			return (SET_ERROR(EFAULT));
1959		ptr += sizeof (gpt);
1960		length -= sizeof (gpt);
1961	}
1962	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1963	    length), flag))
1964		return (SET_ERROR(EFAULT));
1965	return (0);
1966}
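
/*
 * The label faked up above is minimal: LBA 1 holds a GPT header, the
 * one-entry partition array lives at LBA 2, and that single
 * EFI_RESERVED partition spans LBA 34 through (vs >> bs) - 1, i.e.
 * everything past the standard GPT header area.
 */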
1967
1968/*
1969 * BEGIN entry points to allow external callers access to the volume.
1970 */
1971/*
1972 * Return the volume parameters needed for access from an external caller.
1973 * These values are invariant as long as the volume is held open.
1974 */
1975int
1976zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1977    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1978    void **rl_hdl, void **bonus_hdl)
1979{
1980	zvol_state_t *zv;
1981
1982	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1983	if (zv == NULL)
1984		return (SET_ERROR(ENXIO));
1985	if (zv->zv_flags & ZVOL_DUMPIFIED)
1986		return (SET_ERROR(ENXIO));
1987
1988	ASSERT(blksize && max_xfer_len && minor_hdl &&
1989	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1990
1991	*blksize = zv->zv_volblocksize;
1992	*max_xfer_len = (uint64_t)zvol_maxphys;
1993	*minor_hdl = zv;
1994	*objset_hdl = zv->zv_objset;
1995	*zil_hdl = zv->zv_zilog;
1996	*rl_hdl = &zv->zv_znode;
1997	*bonus_hdl = zv->zv_dbuf;
1998	return (0);
1999}
2000
2001/*
2002 * Return the current volume size to an external caller.
2003 * The size can change while the volume is open.
2004 */
2005uint64_t
2006zvol_get_volume_size(void *minor_hdl)
2007{
2008	zvol_state_t *zv = minor_hdl;
2009
2010	return (zv->zv_volsize);
2011}
2012
2013/*
2014 * Return the current WCE setting to an external caller.
2015 * The WCE setting can change while the volume is open.
2016 */
2017int
2018zvol_get_volume_wce(void *minor_hdl)
2019{
2020	zvol_state_t *zv = minor_hdl;
2021
2022	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
2023}
2024
2025/*
2026 * Entry point for external callers to zvol_log_write
2027 */
2028void
2029zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
2030    boolean_t sync)
2031{
2032	zvol_state_t *zv = minor_hdl;
2033
2034	zvol_log_write(zv, tx, off, resid, sync);
2035}
2036/*
2037 * END entry points to allow external callers access to the volume.
2038 */
2039#endif	/* illumos */
2040
2041/*
2042 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
2043 */
2044static void
2045zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
2046    boolean_t sync)
2047{
2048	itx_t *itx;
2049	lr_truncate_t *lr;
2050	zilog_t *zilog = zv->zv_zilog;
2051
2052	if (zil_replaying(zilog, tx))
2053		return;
2054
2055	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
2056	lr = (lr_truncate_t *)&itx->itx_lr;
2057	lr->lr_foid = ZVOL_OBJ;
2058	lr->lr_offset = off;
2059	lr->lr_length = len;
2060
2061	itx->itx_sync = sync;
2062	zil_itx_assign(zilog, itx, tx);
2063}
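
/*
 * On FreeBSD this is invoked from the BIO_DELETE branch of
 * zvol_strategy() above, pairing the TX_TRUNCATE record with the
 * dmu_free_long_range() call that actually frees the range.
 */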
2064
2065#ifdef illumos
2066/*
2067 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
2068 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
2069 */
2070/*ARGSUSED*/
2071int
2072zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
2073{
2074	zvol_state_t *zv;
2075	struct dk_callback *dkc;
2076	int error = 0;
2077	rl_t *rl;
2078
2079	mutex_enter(&zfsdev_state_lock);
2080
2081	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
2082
2083	if (zv == NULL) {
2084		mutex_exit(&zfsdev_state_lock);
2085		return (SET_ERROR(ENXIO));
2086	}
2087	ASSERT(zv->zv_total_opens > 0);
2088
2089	switch (cmd) {
2090
2091	case DKIOCINFO:
2092	{
2093		struct dk_cinfo dki;
2094
2095		bzero(&dki, sizeof (dki));
2096		(void) strcpy(dki.dki_cname, "zvol");
2097		(void) strcpy(dki.dki_dname, "zvol");
2098		dki.dki_ctype = DKC_UNKNOWN;
2099		dki.dki_unit = getminor(dev);
2100		dki.dki_maxtransfer =
2101		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
2102		mutex_exit(&zfsdev_state_lock);
2103		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
2104			error = SET_ERROR(EFAULT);
2105		return (error);
2106	}
2107
2108	case DKIOCGMEDIAINFO:
2109	{
2110		struct dk_minfo dkm;
2111
2112		bzero(&dkm, sizeof (dkm));
2113		dkm.dki_lbsize = 1U << zv->zv_min_bs;
2114		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2115		dkm.dki_media_type = DK_UNKNOWN;
2116		mutex_exit(&zfsdev_state_lock);
2117		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
2118			error = SET_ERROR(EFAULT);
2119		return (error);
2120	}
2121
2122	case DKIOCGMEDIAINFOEXT:
2123	{
2124		struct dk_minfo_ext dkmext;
2125
2126		bzero(&dkmext, sizeof (dkmext));
2127		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
2128		dkmext.dki_pbsize = zv->zv_volblocksize;
2129		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2130		dkmext.dki_media_type = DK_UNKNOWN;
2131		mutex_exit(&zfsdev_state_lock);
2132		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
2133			error = SET_ERROR(EFAULT);
2134		return (error);
2135	}
2136
2137	case DKIOCGETEFI:
2138	{
2139		uint64_t vs = zv->zv_volsize;
2140		uint8_t bs = zv->zv_min_bs;
2141
2142		mutex_exit(&zfsdev_state_lock);
2143		error = zvol_getefi((void *)arg, flag, vs, bs);
2144		return (error);
2145	}
2146
2147	case DKIOCFLUSHWRITECACHE:
2148		dkc = (struct dk_callback *)arg;
2149		mutex_exit(&zfsdev_state_lock);
2150		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2151		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
2152			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
2153			error = 0;
2154		}
2155		return (error);
2156
2157	case DKIOCGETWCE:
2158	{
2159		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
2160		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
2161		    flag))
2162			error = SET_ERROR(EFAULT);
2163		break;
2164	}
2165	case DKIOCSETWCE:
2166	{
2167		int wce;
2168		if (ddi_copyin((void *)arg, &wce, sizeof (int),
2169		    flag)) {
2170			error = SET_ERROR(EFAULT);
2171			break;
2172		}
2173		if (wce) {
2174			zv->zv_flags |= ZVOL_WCE;
2175			mutex_exit(&zfsdev_state_lock);
2176		} else {
2177			zv->zv_flags &= ~ZVOL_WCE;
2178			mutex_exit(&zfsdev_state_lock);
2179			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2180		}
2181		return (0);
2182	}
2183
2184	case DKIOCGGEOM:
2185	case DKIOCGVTOC:
2186		/*
2187		 * Commands using these (like prtvtoc) expect ENOTSUP,
2188		 * since we're emulating an EFI label.
2189		 */
2190		error = SET_ERROR(ENOTSUP);
2191		break;
2192
2193	case DKIOCDUMPINIT:
2194		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2195		    RL_WRITER);
2196		error = zvol_dumpify(zv);
2197		zfs_range_unlock(rl);
2198		break;
2199
2200	case DKIOCDUMPFINI:
2201		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
2202			break;
2203		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2204		    RL_WRITER);
2205		error = zvol_dump_fini(zv);
2206		zfs_range_unlock(rl);
2207		break;
2208
2209	case DKIOCFREE:
2210	{
2211		dkioc_free_t df;
2212		dmu_tx_t *tx;
2213
2214		if (!zvol_unmap_enabled)
2215			break;
2216
2217		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
2218			error = SET_ERROR(EFAULT);
2219			break;
2220		}
2221
2222		/*
2223		 * Apply Postel's Law to length-checking.  If the caller
2224		 * overshoots, just free out to the end of the volume; if
2225		 * the range starts past the end, there is nothing to do.
2226		 */
2227		if (df.df_start >= zv->zv_volsize)
2228			break;	/* No need to do anything... */
2229
2230		mutex_exit(&zfsdev_state_lock);
2231
2232		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
2233		    RL_WRITER);
2234		tx = dmu_tx_create(zv->zv_objset);
2235		dmu_tx_mark_netfree(tx);
2236		error = dmu_tx_assign(tx, TXG_WAIT);
2237		if (error != 0) {
2238			dmu_tx_abort(tx);
2239		} else {
2240			zvol_log_truncate(zv, tx, df.df_start,
2241			    df.df_length, B_TRUE);
2242			dmu_tx_commit(tx);
2243			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2244			    df.df_start, df.df_length);
2245		}
2246
2247		zfs_range_unlock(rl);
2248
2249		if (error == 0) {
2250			/*
2251			 * If the write cache is disabled or the 'sync' property
2252			 * is set to 'always', treat this as a synchronous
2253			 * operation (i.e. commit to the ZIL).
2254			 */
2255			if (!(zv->zv_flags & ZVOL_WCE) ||
2256			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
2257				zil_commit(zv->zv_zilog, ZVOL_OBJ);
2258
2259			/*
2260			 * If the caller asked for a synchronous free
2261			 * (DF_WAIT_SYNC), don't return until the change is
2262			 * on stable storage.
2263			 */
2264			if (df.df_flags & DF_WAIT_SYNC) {
2265				txg_wait_synced(
2266				    dmu_objset_pool(zv->zv_objset), 0);
2267			}
2268		}
2269		return (error);
2270	}
2271
2272	default:
2273		error = SET_ERROR(ENOTTY);
2274		break;
2275
2276	}
2277	mutex_exit(&zfsdev_state_lock);
2278	return (error);
2279}
2280#endif	/* illumos */
2281
2282int
2283zvol_busy(void)
2284{
2285	return (zvol_minors != 0);
2286}
2287
2288void
2289zvol_init(void)
2290{
2291	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
2292	    1) == 0);
2293#ifdef illumos
2294	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
2295#else
2296	ZFS_LOG(1, "ZVOL Initialized.");
2297#endif
2298}
2299
2300void
2301zvol_fini(void)
2302{
2303#ifdef illumos
2304	mutex_destroy(&zfsdev_state_lock);
2305#endif
2306	ddi_soft_state_fini(&zfsdev_state);
2307	ZFS_LOG(1, "ZVOL Deinitialized.");
2308}
2309
2310#ifdef illumos
2311/*ARGSUSED*/
2312static int
2313zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2314{
2315	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2316
2317	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2318		return (1);
2319	return (0);
2320}
2321
2322/*ARGSUSED*/
2323static void
2324zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2325{
2326	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2327
2328	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2329}
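
/*
 * The two functions above form a dsl_sync_task() check/sync pair: the
 * check function returning nonzero (feature already active) prevents
 * the sync function from running, so the feature refcount is bumped
 * only the first time a dump device activates it.
 */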
2330
2331static int
2332zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2333{
2334	dmu_tx_t *tx;
2335	int error;
2336	objset_t *os = zv->zv_objset;
2337	spa_t *spa = dmu_objset_spa(os);
2338	vdev_t *vd = spa->spa_root_vdev;
2339	nvlist_t *nv = NULL;
2340	uint64_t version = spa_version(spa);
2341	uint64_t checksum, compress, refresrv, vbs, dedup;
2342
2343	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2344	ASSERT(vd->vdev_ops == &vdev_root_ops);
2345
2346	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2347	    DMU_OBJECT_END);
2348	if (error != 0)
2349		return (error);
2350	/* wait for dmu_free_long_range to actually free the blocks */
2351	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2352
2353	/*
2354	 * If the pool on which the dump device is being initialized has more
2355	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2356	 * enabled.  If so, bump that feature's counter to indicate that the
2357	 * feature is active. We also check the vdev type to handle the
2358	 * following case:
2359	 *   # zpool create test raidz disk1 disk2 disk3
2360	 *   Here spa_root_vdev->vdev_children == 1 (the raidz vdev), but
2361	 *   the raidz vdev itself has 3 children.
2362	 */
2363	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
2364		if (!spa_feature_is_enabled(spa,
2365		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2366			return (SET_ERROR(ENOTSUP));
2367		(void) dsl_sync_task(spa_name(spa),
2368		    zfs_mvdev_dump_feature_check,
2369		    zfs_mvdev_dump_activate_feature_sync, NULL,
2370		    2, ZFS_SPACE_CHECK_RESERVED);
2371	}
2372
2373	if (!resize) {
2374		error = dsl_prop_get_integer(zv->zv_name,
2375		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2376		if (error == 0) {
2377			error = dsl_prop_get_integer(zv->zv_name,
2378			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
2379			    NULL);
2380		}
2381		if (error == 0) {
2382			error = dsl_prop_get_integer(zv->zv_name,
2383			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
2384			    &refresrv, NULL);
2385		}
2386		if (error == 0) {
2387			error = dsl_prop_get_integer(zv->zv_name,
2388			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
2389			    NULL);
2390		}
2391		if (version >= SPA_VERSION_DEDUP && error == 0) {
2392			error = dsl_prop_get_integer(zv->zv_name,
2393			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2394		}
2395	}
2396	if (error != 0)
2397		return (error);
2398
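	/*
	 * The property values captured above are recorded in the zvol's
	 * ZAP object below so that zvol_dump_fini() can restore them if
	 * the volume is ever un-dumpified.
	 */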
2399	tx = dmu_tx_create(os);
2400	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2401	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2402	error = dmu_tx_assign(tx, TXG_WAIT);
2403	if (error != 0) {
2404		dmu_tx_abort(tx);
2405		return (error);
2406	}
2407
2408	/*
2409	 * If we are resizing the dump device, we only need to update the
2410	 * refreservation to match the new volume size.  Otherwise, save
2411	 * off the zvol's original property values so that they can be
2412	 * restored if the zvol is ever un-dumpified.
2413	 */
2414	if (resize) {
2415		error = zap_update(os, ZVOL_ZAP_OBJ,
2416		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2417		    &zv->zv_volsize, tx);
2418	} else {
2419		error = zap_update(os, ZVOL_ZAP_OBJ,
2420		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2421		    &compress, tx);
2422		if (error == 0) {
2423			error = zap_update(os, ZVOL_ZAP_OBJ,
2424			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2425			    &checksum, tx);
2426		}
2427		if (error == 0) {
2428			error = zap_update(os, ZVOL_ZAP_OBJ,
2429			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2430			    &refresrv, tx);
2431		}
2432		if (error == 0) {
2433			error = zap_update(os, ZVOL_ZAP_OBJ,
2434			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2435			    &vbs, tx);
2436		}
2437		if (error == 0) {
2438			error = dmu_object_set_blocksize(
2439			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2440		}
2441		if (version >= SPA_VERSION_DEDUP && error == 0) {
2442			error = zap_update(os, ZVOL_ZAP_OBJ,
2443			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2444			    &dedup, tx);
2445		}
2446		if (error == 0)
2447			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2448	}
2449	dmu_tx_commit(tx);
2450
2451	/*
2452	 * We only need to update the zvol's properties if we are
2453	 * initializing the dump area for the first time.
2454	 */
2455	if (error == 0 && !resize) {
2456		/*
2457		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2458		 * function.  Otherwise, use the old default -- OFF.
2459		 */
2460		checksum = spa_feature_is_active(spa,
2461		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2462		    ZIO_CHECKSUM_OFF;
2463
2464		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2465		VERIFY(nvlist_add_uint64(nv,
2466		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2467		VERIFY(nvlist_add_uint64(nv,
2468		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2469		    ZIO_COMPRESS_OFF) == 0);
2470		VERIFY(nvlist_add_uint64(nv,
2471		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2472		    checksum) == 0);
2473		if (version >= SPA_VERSION_DEDUP) {
2474			VERIFY(nvlist_add_uint64(nv,
2475			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2476			    ZIO_CHECKSUM_OFF) == 0);
2477		}
2478
2479		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2480		    nv, NULL);
2481		nvlist_free(nv);
2482	}
2483
2484	/* Allocate the space for the dump */
2485	if (error == 0)
2486		error = zvol_prealloc(zv);
2487	return (error);
2488}
2489
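/*
 * Mark the volume as a dump device.  The ZVOL_DUMPSIZE ZAP entry
 * records the volume size at dumpify time; if a later DKIOCDUMPINIT
 * finds a mismatch, zvol_dump_init() is re-run in resize mode.
 */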
2490static int
2491zvol_dumpify(zvol_state_t *zv)
2492{
2493	int error = 0;
2494	uint64_t dumpsize = 0;
2495	dmu_tx_t *tx;
2496	objset_t *os = zv->zv_objset;
2497
2498	if (zv->zv_flags & ZVOL_RDONLY)
2499		return (SET_ERROR(EROFS));
2500
2501	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2502	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2503		boolean_t resize = (dumpsize > 0);
2504
2505		if ((error = zvol_dump_init(zv, resize)) != 0) {
2506			(void) zvol_dump_fini(zv);
2507			return (error);
2508		}
2509	}
2510
2511	/*
2512	 * Build up our LBA mapping.
2513	 */
2514	error = zvol_get_lbas(zv);
2515	if (error) {
2516		(void) zvol_dump_fini(zv);
2517		return (error);
2518	}
2519
2520	tx = dmu_tx_create(os);
2521	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2522	error = dmu_tx_assign(tx, TXG_WAIT);
2523	if (error) {
2524		dmu_tx_abort(tx);
2525		(void) zvol_dump_fini(zv);
2526		return (error);
2527	}
2528
2529	zv->zv_flags |= ZVOL_DUMPIFIED;
2530	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2531	    &zv->zv_volsize, tx);
2532	dmu_tx_commit(tx);
2533
2534	if (error) {
2535		(void) zvol_dump_fini(zv);
2536		return (error);
2537	}
2538
2539	txg_wait_synced(dmu_objset_pool(os), 0);
2540	return (0);
2541}
2542
2543static int
2544zvol_dump_fini(zvol_state_t *zv)
2545{
2546	dmu_tx_t *tx;
2547	objset_t *os = zv->zv_objset;
2548	nvlist_t *nv;
2549	int error = 0;
2550	uint64_t checksum, compress, refresrv, vbs, dedup;
2551	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2552
2553	/*
2554	 * Attempt to restore the zvol to its pre-dumpified state.
2555	 * This is best-effort: not all of these properties may have been
2556	 * saved during the dumpify process (e.g. if zvol_dump_init()
2557	 * failed partway through).
2558	 */
2559
2560	tx = dmu_tx_create(os);
2561	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2562	error = dmu_tx_assign(tx, TXG_WAIT);
2563	if (error) {
2564		dmu_tx_abort(tx);
2565		return (error);
2566	}
2567	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2568	dmu_tx_commit(tx);
2569
2570	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2571	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2572	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2573	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2574	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2575	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2576	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2577	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2578
2579	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2580	(void) nvlist_add_uint64(nv,
2581	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2582	(void) nvlist_add_uint64(nv,
2583	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2584	(void) nvlist_add_uint64(nv,
2585	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2586	if (version >= SPA_VERSION_DEDUP &&
2587	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2588	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2589		(void) nvlist_add_uint64(nv,
2590		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2591	}
2592	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2593	    nv, NULL);
2594	nvlist_free(nv);
2595
2596	zvol_free_extents(zv);
2597	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2598	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2599	/* wait for dmu_free_long_range to actually free the blocks */
2600	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2601	tx = dmu_tx_create(os);
2602	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2603	error = dmu_tx_assign(tx, TXG_WAIT);
2604	if (error) {
2605		dmu_tx_abort(tx);
2606		return (error);
2607	}
2608	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2609		zv->zv_volblocksize = vbs;
2610	dmu_tx_commit(tx);
2611
2612	return (0);
2613}
2614#else	/* !illumos */
2615
2616static void
2617zvol_geom_run(zvol_state_t *zv)
2618{
2619	struct g_provider *pp;
2620
2621	pp = zv->zv_provider;
2622	g_error_provider(pp, 0);
2623
2624	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2625	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2626}
2627
2628static void
2629zvol_geom_destroy(zvol_state_t *zv)
2630{
2631	struct g_provider *pp;
2632
2633	g_topology_assert();
2634
2635	mtx_lock(&zv->zv_queue_mtx);
2636	zv->zv_state = 1;
2637	wakeup_one(&zv->zv_queue);
2638	while (zv->zv_state != 2)
2639		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2640	mtx_destroy(&zv->zv_queue_mtx);
2641
2642	pp = zv->zv_provider;
2643	zv->zv_provider = NULL;
2644	pp->private = NULL;
2645	g_wither_geom(pp->geom, ENXIO);
2646}
2647
2648static int
2649zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2650{
2651	int count, error, flags;
2652
2653	g_topology_assert();
2654
2655	/*
2656	 * To keep things simple we expect either an open or a close, but
2657	 * not both at the same time.
2658	 */
2659	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2660	    (acr <= 0 && acw <= 0 && ace <= 0),
2661	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2662	    pp->name, acr, acw, ace));
2663
2664	if (pp->private == NULL) {
2665		if (acr <= 0 && acw <= 0 && ace <= 0)
2666			return (0);
2667		return (pp->error);
2668	}
2669
2670	/*
2671	 * We don't pass the FEXCL flag to zvol_open()/zvol_close() if ace != 0,
2672	 * because GEOM already handles exclusivity, and handles it a bit
2673	 * differently: GEOM allows multiple read/exclusive consumers, while
2674	 * ZFS allows only one exclusive consumer, whether reader or writer.
2675	 * I prefer the way GEOM works, so I'll leave it to GEOM to decide
2676	 * what to do.
2677	 */
2678
2679	count = acr + acw + ace;
2680	if (count == 0)
2681		return (0);
2682
2683	flags = 0;
2684	if (acr != 0 || ace != 0)
2685		flags |= FREAD;
2686	if (acw != 0)
2687		flags |= FWRITE;
2688
2689	g_topology_unlock();
2690	if (count > 0)
2691		error = zvol_open(pp, flags, count);
2692	else
2693		error = zvol_close(pp, flags, -count);
2694	g_topology_lock();
2695	return (error);
2696}
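
#if 0
/*
 * Illustrative sketch (not compiled): GEOM consumers reach
 * zvol_geom_access() above through g_access() deltas.  "cp" is an
 * assumed consumer already attached to a zvol provider; it is not
 * defined in this file.
 */
	error = g_access(cp, 1, 1, 0);	/* +1r +1w -> zvol_open(FREAD|FWRITE) */
	if (error == 0) {
		/* ... issue bios against the provider ... */
		(void) g_access(cp, -1, -1, 0);	/* counts hit 0 -> zvol_close() */
	}
#endif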
2697
2698static void
2699zvol_geom_start(struct bio *bp)
2700{
2701	zvol_state_t *zv;
2702	boolean_t first;
2703
2704	zv = bp->bio_to->private;
2705	ASSERT(zv != NULL);
2706	switch (bp->bio_cmd) {
2707	case BIO_FLUSH:
2708		if (!THREAD_CAN_SLEEP())
2709			goto enqueue;
2710		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2711		g_io_deliver(bp, 0);
2712		break;
2713	case BIO_READ:
2714	case BIO_WRITE:
2715	case BIO_DELETE:
2716		if (!THREAD_CAN_SLEEP())
2717			goto enqueue;
2718		zvol_strategy(bp);
2719		break;
2720	case BIO_GETATTR: {
2721		spa_t *spa = dmu_objset_spa(zv->zv_objset);
2722		uint64_t refd, avail, usedobjs, availobjs, val;
2723
2724		if (g_handleattr_int(bp, "GEOM::candelete", 1))
2725			return;
2726		if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
2727			dmu_objset_space(zv->zv_objset, &refd, &avail,
2728			    &usedobjs, &availobjs);
2729			if (g_handleattr_off_t(bp, "blocksavail",
2730			    avail / DEV_BSIZE))
2731				return;
2732		} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
2733			dmu_objset_space(zv->zv_objset, &refd, &avail,
2734			    &usedobjs, &availobjs);
2735			if (g_handleattr_off_t(bp, "blocksused",
2736			    refd / DEV_BSIZE))
2737				return;
2738		} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
2739			avail = metaslab_class_get_space(spa_normal_class(spa));
2740			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
2741			if (g_handleattr_off_t(bp, "poolblocksavail",
2742			    avail / DEV_BSIZE))
2743				return;
2744		} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
2745			refd = metaslab_class_get_alloc(spa_normal_class(spa));
2746			if (g_handleattr_off_t(bp, "poolblocksused",
2747			    refd / DEV_BSIZE))
2748				return;
2749		}
2750		/* FALLTHROUGH */
2751	}
2752	default:
2753		g_io_deliver(bp, EOPNOTSUPP);
2754		break;
2755	}
2756	return;
2757
2758enqueue:
2759	mtx_lock(&zv->zv_queue_mtx);
2760	first = (bioq_first(&zv->zv_queue) == NULL);
2761	bioq_insert_tail(&zv->zv_queue, bp);
2762	mtx_unlock(&zv->zv_queue_mtx);
2763	if (first)
2764		wakeup_one(&zv->zv_queue);
2765}
2766
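/*
 * Per-volume worker thread, started by zvol_geom_run().  It services
 * bios that zvol_geom_start() queued because the originating thread
 * was not allowed to sleep.
 */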
2767static void
2768zvol_geom_worker(void *arg)
2769{
2770	zvol_state_t *zv;
2771	struct bio *bp;
2772
2773	thread_lock(curthread);
2774	sched_prio(curthread, PRIBIO);
2775	thread_unlock(curthread);
2776
2777	zv = arg;
2778	for (;;) {
2779		mtx_lock(&zv->zv_queue_mtx);
2780		bp = bioq_takefirst(&zv->zv_queue);
2781		if (bp == NULL) {
2782			if (zv->zv_state == 1) {
2783				zv->zv_state = 2;
2784				wakeup(&zv->zv_state);
2785				mtx_unlock(&zv->zv_queue_mtx);
2786				kthread_exit();
2787			}
2788			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2789			    "zvol:io", 0);
2790			continue;
2791		}
2792		mtx_unlock(&zv->zv_queue_mtx);
2793		switch (bp->bio_cmd) {
2794		case BIO_FLUSH:
2795			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2796			g_io_deliver(bp, 0);
2797			break;
2798		case BIO_READ:
2799		case BIO_WRITE:
2800		case BIO_DELETE:
2801			zvol_strategy(bp);
2802			break;
2803		default:
2804			g_io_deliver(bp, EOPNOTSUPP);
2805			break;
2806		}
2807	}
2808}
2809
2810extern boolean_t dataset_name_hidden(const char *name);
2811
2812static int
2813zvol_create_snapshots(objset_t *os, const char *name)
2814{
2815	uint64_t cookie, obj;
2816	char *sname;
2817	int error, len;
2818
2819	cookie = obj = 0;
2820	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2821
2822#if 0
2823	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2824	    DS_FIND_SNAPSHOTS);
2825#endif
2826
2827	for (;;) {
2828		len = snprintf(sname, MAXPATHLEN, "%s@", name);
2829		if (len >= MAXPATHLEN) {
2830			/* Our caller owns the hold on "os"; don't drop it here. */
2831			error = ENAMETOOLONG;
2832			break;
2833		}
2834
2835		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2836		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2837		    sname + len, &obj, &cookie, NULL);
2838		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2839		if (error != 0) {
2840			if (error == ENOENT)
2841				error = 0;
2842			break;
2843		}
2844
2845		error = zvol_create_minor(sname);
2846		if (error != 0 && error != EEXIST) {
2847			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2848			    sname, error);
2849			break;
2850		}
2851	}
2852
2853	kmem_free(sname, MAXPATHLEN);
2854	return (error);
2855}
2856
2857int
2858zvol_create_minors(const char *name)
2859{
2860	uint64_t cookie;
2861	objset_t *os;
2862	char *osname, *p;
2863	int error, len;
2864
2865	if (dataset_name_hidden(name))
2866		return (0);
2867
2868	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2869		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2870		    name, error);
2871		return (error);
2872	}
2873	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2874		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
2875		dsl_pool_rele(dmu_objset_pool(os), FTAG);
2876		error = zvol_create_minor(name);
2877		if (error == 0 || error == EEXIST) {
2878			error = zvol_create_snapshots(os, name);
2879		} else {
2880			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2881			    name, error);
2882		}
2883		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
2884		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
2885		return (error);
2886	}
2887	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2888		dmu_objset_rele(os, FTAG);
2889		return (0);
2890	}
2891
2892	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2893	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2894		dmu_objset_rele(os, FTAG);
2895		kmem_free(osname, MAXPATHLEN);
2896		return (ENOENT);
2897	}
2898	p = osname + strlen(osname);
2899	len = MAXPATHLEN - (p - osname);
2900
2901#if 0
2902	/* Prefetch the datasets. */
2903	cookie = 0;
2904	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2905		if (!dataset_name_hidden(osname))
2906			(void) dmu_objset_prefetch(osname, NULL);
2907	}
2908#endif
2909
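	/*
	 * Drop the hold on "os" around each recursive call, since the
	 * recursion takes holds of its own, and reacquire it before
	 * asking for the next child.
	 */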
2910	cookie = 0;
2911	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2912	    &cookie) == 0) {
2913		dmu_objset_rele(os, FTAG);
2914		(void)zvol_create_minors(osname);
2915		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2916			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2917			    name, error);
2918			return (error);
2919		}
2920	}
2921
2922	dmu_objset_rele(os, FTAG);
2923	kmem_free(osname, MAXPATHLEN);
2924	return (0);
2925}
2926
2927static void
2928zvol_rename_minor(zvol_state_t *zv, const char *newname)
2929{
2930	struct g_geom *gp;
2931	struct g_provider *pp;
2932	struct cdev *dev;
2933
2934	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2935
2936	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
2937		g_topology_lock();
2938		pp = zv->zv_provider;
2939		ASSERT(pp != NULL);
2940		gp = pp->geom;
2941		ASSERT(gp != NULL);
2942
2943		zv->zv_provider = NULL;
2944		g_wither_provider(pp, ENXIO);
2945
2946		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2947		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
2948		pp->sectorsize = DEV_BSIZE;
2949		pp->mediasize = zv->zv_volsize;
2950		pp->private = zv;
2951		zv->zv_provider = pp;
2952		g_error_provider(pp, 0);
2953		g_topology_unlock();
2954	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
2955		struct make_dev_args args;
2956
2957		if ((dev = zv->zv_dev) != NULL) {
2958			zv->zv_dev = NULL;
2959			destroy_dev(dev);
2960			if (zv->zv_total_opens > 0) {
2961				zv->zv_flags &= ~ZVOL_EXCL;
2962				zv->zv_total_opens = 0;
2963				zvol_last_close(zv);
2964			}
2965		}
2966
2967		make_dev_args_init(&args);
2968		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
2969		args.mda_devsw = &zvol_cdevsw;
2970		args.mda_cr = NULL;
2971		args.mda_uid = UID_ROOT;
2972		args.mda_gid = GID_OPERATOR;
2973		args.mda_mode = 0640;
2974		args.mda_si_drv2 = zv;
2975		if (make_dev_s(&args, &zv->zv_dev,
2976		    "%s/%s", ZVOL_DRIVER, newname) == 0)
2977			zv->zv_dev->si_iosize_max = MAXPHYS;
2978	}
2979	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2980}
2981
2982void
2983zvol_rename_minors(const char *oldname, const char *newname)
2984{
2985	char name[MAXPATHLEN];
2986	struct g_provider *pp;
2987	struct g_geom *gp;
2988	size_t oldnamelen, newnamelen;
2989	zvol_state_t *zv;
2990	char *namebuf;
2991	boolean_t locked = B_FALSE;
2992
2993	oldnamelen = strlen(oldname);
2994	newnamelen = strlen(newname);
2995
2996	DROP_GIANT();
2997	/* See comment in zvol_open(). */
2998	if (!MUTEX_HELD(&zfsdev_state_lock)) {
2999		mutex_enter(&zfsdev_state_lock);
3000		locked = B_TRUE;
3001	}
3002
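	/*
	 * A rename may cover a whole subtree, so minors named under the
	 * old prefix are renamed as well, e.g. "pool/vol@snap" becomes
	 * "pool/newvol@snap" and "pool/dir/vol" becomes
	 * "pool/newdir/vol"; hence the '/' and '@' matching below.
	 */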
3003	LIST_FOREACH(zv, &all_zvols, zv_links) {
3004		if (strcmp(zv->zv_name, oldname) == 0) {
3005			zvol_rename_minor(zv, newname);
3006		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
3007		    (zv->zv_name[oldnamelen] == '/' ||
3008		     zv->zv_name[oldnamelen] == '@')) {
3009			snprintf(name, sizeof(name), "%s%c%s", newname,
3010			    zv->zv_name[oldnamelen],
3011			    zv->zv_name + oldnamelen + 1);
3012			zvol_rename_minor(zv, name);
3013		}
3014	}
3015
3016	if (locked)
3017		mutex_exit(&zfsdev_state_lock);
3018	PICKUP_GIANT();
3019}
3020
3021static int
3022zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
3023{
3024	zvol_state_t *zv = dev->si_drv2;
3025	int err = 0;
3026
3027	mutex_enter(&zfsdev_state_lock);
3028	if (zv->zv_total_opens == 0)
3029		err = zvol_first_open(zv);
3030	if (err) {
3031		mutex_exit(&zfsdev_state_lock);
3032		return (err);
3033	}
3034	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
3035		err = SET_ERROR(EROFS);
3036		goto out;
3037	}
3038	if (zv->zv_flags & ZVOL_EXCL) {
3039		err = SET_ERROR(EBUSY);
3040		goto out;
3041	}
3042#ifdef FEXCL
3043	if (flags & FEXCL) {
3044		if (zv->zv_total_opens != 0) {
3045			err = SET_ERROR(EBUSY);
3046			goto out;
3047		}
3048		zv->zv_flags |= ZVOL_EXCL;
3049	}
3050#endif
3051
3052	zv->zv_total_opens++;
3053	mutex_exit(&zfsdev_state_lock);
3054	return (err);
3055out:
3056	if (zv->zv_total_opens == 0)
3057		zvol_last_close(zv);
3058	mutex_exit(&zfsdev_state_lock);
3059	return (err);
3060}
3061
3062static int
3063zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3064{
3065	zvol_state_t *zv = dev->si_drv2;
3066
3067	mutex_enter(&zfsdev_state_lock);
3068	if (zv->zv_flags & ZVOL_EXCL) {
3069		ASSERT(zv->zv_total_opens == 1);
3070		zv->zv_flags &= ~ZVOL_EXCL;
3071	}
3072
3073	/*
3074	 * If the open count is zero, this is a spurious close.
3075	 * That indicates a bug in the kernel / DDI framework.
3076	 */
3077	ASSERT(zv->zv_total_opens != 0);
3078
3079	/*
3080	 * You may get multiple opens, but only one close.
3081	 */
3082	zv->zv_total_opens--;
3083
3084	if (zv->zv_total_opens == 0)
3085		zvol_last_close(zv);
3086
3087	mutex_exit(&zfsdev_state_lock);
3088	return (0);
3089}
3090
3091static int
3092zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
3093{
3094	zvol_state_t *zv;
3095	rl_t *rl;
3096	off_t offset, length, chunk;
3097	int i, error;
3098	u_int u;
3099
3100	zv = dev->si_drv2;
3101
3102	error = 0;
3103	KASSERT(zv->zv_total_opens > 0,
3104	    ("Device with zero access count in zvol_d_ioctl"));
3105
3106	i = IOCPARM_LEN(cmd);
3107	switch (cmd) {
3108	case DIOCGSECTORSIZE:
3109		*(u_int *)data = DEV_BSIZE;
3110		break;
3111	case DIOCGMEDIASIZE:
3112		*(off_t *)data = zv->zv_volsize;
3113		break;
3114	case DIOCGFLUSH:
3115		zil_commit(zv->zv_zilog, ZVOL_OBJ);
3116		break;
3117	case DIOCGDELETE:
3118		if (!zvol_unmap_enabled)
3119			break;
3120
3121		offset = ((off_t *)data)[0];
3122		length = ((off_t *)data)[1];
3123		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
3124		    offset < 0 || offset >= zv->zv_volsize ||
3125		    length <= 0) {
3126			printf("%s: offset=%jd length=%jd\n", __func__, offset,
3127			    length);
3128			error = EINVAL;
3129			break;
3130		}
3131
3132		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
3133		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
3134		error = dmu_tx_assign(tx, TXG_WAIT);
3135		if (error != 0) {
3136			dmu_tx_abort(tx);
3137		} else {
3138			zvol_log_truncate(zv, tx, offset, length, B_TRUE);
3139			dmu_tx_commit(tx);
3140			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
3141			    offset, length);
3142		}
3143		zfs_range_unlock(rl);
3144		if (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
3145			zil_commit(zv->zv_zilog, ZVOL_OBJ);
3146		break;
3147	case DIOCGSTRIPESIZE:
3148		*(off_t *)data = zv->zv_volblocksize;
3149		break;
3150	case DIOCGSTRIPEOFFSET:
3151		*(off_t *)data = 0;
3152		break;
3153	case DIOCGATTR: {
3154		spa_t *spa = dmu_objset_spa(zv->zv_objset);
3155		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
3156		uint64_t refd, avail, usedobjs, availobjs;
3157
3158		if (strcmp(arg->name, "GEOM::candelete") == 0)
3159			arg->value.i = 1;
3160		else if (strcmp(arg->name, "blocksavail") == 0) {
3161			dmu_objset_space(zv->zv_objset, &refd, &avail,
3162			    &usedobjs, &availobjs);
3163			arg->value.off = avail / DEV_BSIZE;
3164		} else if (strcmp(arg->name, "blocksused") == 0) {
3165			dmu_objset_space(zv->zv_objset, &refd, &avail,
3166			    &usedobjs, &availobjs);
3167			arg->value.off = refd / DEV_BSIZE;
3168		} else if (strcmp(arg->name, "poolblocksavail") == 0) {
3169			avail = metaslab_class_get_space(spa_normal_class(spa));
3170			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
3171			arg->value.off = avail / DEV_BSIZE;
3172		} else if (strcmp(arg->name, "poolblocksused") == 0) {
3173			refd = metaslab_class_get_alloc(spa_normal_class(spa));
3174			arg->value.off = refd / DEV_BSIZE;
3175		} else
3176			error = ENOIOCTL;
3177		break;
3178	}
3179	case FIOSEEKHOLE:
3180	case FIOSEEKDATA: {
3181		off_t *off = (off_t *)data;
3182		uint64_t noff;
3183		boolean_t hole;
3184
3185		hole = (cmd == FIOSEEKHOLE);
3186		noff = *off;
3187		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
3188		*off = noff;
3189		break;
3190	}
3191	default:
3192		error = ENOIOCTL;
3193	}
3194
3195	return (error);
3196}
3197#endif	/* illumos */
3198