/* zvol.c -- FreeBSD revision 308057 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * FreeBSD notes.
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
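 * Depending on the volmode property, each volume is exposed either as a
 * GEOM provider or as a plain character device; either way the node shows
 * up as /dev/zvol/<pool_name>/<dataset_name>.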
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <sys/filio.h>

#include <geom/geom.h>

#include "zfs_namecheck.h"

#ifndef illumos
struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

#endif
void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
#ifdef illumos
kmutex_t zfsdev_state_lock;
#else
/*
 * In FreeBSD we've replaced the upstream zfsdev_state_lock with the
 * spa_namespace_lock in the ZVOL code.
 */
#define	zfsdev_state_lock spa_namespace_lock
#endif
static uint32_t zvol_minors;

#ifndef illumos
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int	volmode = ZFS_VOLMODE_GEOM;
TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
    "Expose as GEOM providers (1), device files (2) or neither");

#endif
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
#ifndef illumos
	LIST_ENTRY(zvol_state)	zv_links;
#endif
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
#ifdef illumos
	minor_t		zv_minor;	/* minor number */
#else
	struct cdev	*zv_dev;	/* non-GEOM device */
	struct g_provider *zv_provider;	/* GEOM provider */
#endif
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
*/ 165 objset_t *zv_objset; /* objset handle */ 166#ifdef illumos 167 uint32_t zv_open_count[OTYPCNT]; /* open counts */ 168#endif 169 uint32_t zv_total_opens; /* total open count */ 170 zilog_t *zv_zilog; /* ZIL handle */ 171 list_t zv_extents; /* List of extents for dump */ 172 znode_t zv_znode; /* for range locking */ 173 dmu_buf_t *zv_dbuf; /* bonus handle */ 174#ifndef illumos 175 int zv_state; 176 int zv_volmode; /* Provide GEOM or cdev */ 177 struct bio_queue_head zv_queue; 178 struct mtx zv_queue_mtx; /* zv_queue mutex */ 179#endif 180} zvol_state_t; 181 182#ifndef illumos 183static LIST_HEAD(, zvol_state) all_zvols; 184#endif 185/* 186 * zvol specific flags 187 */ 188#define ZVOL_RDONLY 0x1 189#define ZVOL_DUMPIFIED 0x2 190#define ZVOL_EXCL 0x4 191#define ZVOL_WCE 0x8 192 193/* 194 * zvol maximum transfer in one DMU tx. 195 */ 196int zvol_maxphys = DMU_MAX_ACCESS/2; 197 198/* 199 * Toggle unmap functionality. 200 */ 201boolean_t zvol_unmap_enabled = B_TRUE; 202#ifndef illumos 203SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN, 204 &zvol_unmap_enabled, 0, 205 "Enable UNMAP functionality"); 206 207static d_open_t zvol_d_open; 208static d_close_t zvol_d_close; 209static d_read_t zvol_read; 210static d_write_t zvol_write; 211static d_ioctl_t zvol_d_ioctl; 212static d_strategy_t zvol_strategy; 213 214static struct cdevsw zvol_cdevsw = { 215 .d_version = D_VERSION, 216 .d_open = zvol_d_open, 217 .d_close = zvol_d_close, 218 .d_read = zvol_read, 219 .d_write = zvol_write, 220 .d_ioctl = zvol_d_ioctl, 221 .d_strategy = zvol_strategy, 222 .d_name = "zvol", 223 .d_flags = D_DISK | D_TRACKCLOSE, 224}; 225 226static void zvol_geom_run(zvol_state_t *zv); 227static void zvol_geom_destroy(zvol_state_t *zv); 228static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace); 229static void zvol_geom_start(struct bio *bp); 230static void zvol_geom_worker(void *arg); 231static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, 232 uint64_t len, boolean_t sync); 233#endif /* !illumos */ 234 235extern int zfs_set_prop_nvlist(const char *, zprop_source_t, 236 nvlist_t *, nvlist_t *); 237static int zvol_remove_zv(zvol_state_t *); 238static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio); 239static int zvol_dumpify(zvol_state_t *zv); 240static int zvol_dump_fini(zvol_state_t *zv); 241static int zvol_dump_init(zvol_state_t *zv, boolean_t resize); 242 243static void 244zvol_size_changed(zvol_state_t *zv, uint64_t volsize) 245{ 246#ifdef illumos 247 dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor); 248 249 zv->zv_volsize = volsize; 250 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 251 "Size", volsize) == DDI_SUCCESS); 252 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 253 "Nblocks", lbtodb(volsize)) == DDI_SUCCESS); 254 255 /* Notify specfs to invalidate the cached size */ 256 spec_size_invalidate(dev, VBLK); 257 spec_size_invalidate(dev, VCHR); 258#else /* !illumos */ 259 zv->zv_volsize = volsize; 260 if (zv->zv_volmode == ZFS_VOLMODE_GEOM) { 261 struct g_provider *pp; 262 263 pp = zv->zv_provider; 264 if (pp == NULL) 265 return; 266 g_topology_lock(); 267 g_resize_provider(pp, zv->zv_volsize); 268 g_topology_unlock(); 269 } 270#endif /* illumos */ 271} 272 273int 274zvol_check_volsize(uint64_t volsize, uint64_t blocksize) 275{ 276 if (volsize == 0) 277 return (SET_ERROR(EINVAL)); 278 279 if (volsize % blocksize != 0) 280 return (SET_ERROR(EINVAL)); 281 282#ifdef _ILP32 283 if (volsize - 1 > SPEC_MAXOFFSET_T) 284 return 
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
#ifdef illumos
	minor_t minor;
#endif
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

#ifdef illumos
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
#else
	LIST_FOREACH(zv, &all_zvols, zv_links) {
#endif
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
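	/*
	 * Walk every level-0 block of ZVOL_OBJ; zvol_map_block() records
	 * the DVAs as a list of physically contiguous extents.  The block
	 * count must account for the entire volume, otherwise the mapping
	 * is unusable and is thrown away.
	 */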
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
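 * Every other transaction type fails the replay with ENOTSUP via
 * zvol_replay_err().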
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

#ifdef illumos
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}
#endif	/* illumos */

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
#ifdef illumos
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
#else
	struct g_provider *pp;
	struct g_geom *gp;
	uint64_t volsize, mode;
#endif
	int error;

#ifndef illumos
	ZFS_LOG(1, "Creating ZVOL %s...", name);
#endif

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

#ifdef illumos
	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !illumos */

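	/*
	 * On FreeBSD there is no minor number to allocate: the zvol_state
	 * is hung off either a GEOM provider or a cdev, depending on the
	 * effective volmode, and linked onto the global all_zvols list.
	 */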
	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
	zv->zv_state = 0;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		kmem_free(zv, sizeof(*zv));
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}
	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
		mode = volmode;

	DROP_GIANT();
	zv->zv_volsize = volsize;
	zv->zv_volmode = mode;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
		gp->start = zvol_geom_start;
		gp->access = zvol_geom_access;
		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = zv->zv_volsize;
		pp->private = zv;

		zv->zv_provider = pp;
		bioq_init(&zv->zv_queue);
		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		struct make_dev_args args;

		make_dev_args_init(&args);
		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
		args.mda_devsw = &zvol_cdevsw;
		args.mda_cr = NULL;
		args.mda_uid = UID_ROOT;
		args.mda_gid = GID_OPERATOR;
		args.mda_mode = 0640;
		args.mda_si_drv2 = zv;
		error = make_dev_s(&args, &zv->zv_dev,
		    "%s/%s", ZVOL_DRIVER, name);
		if (error != 0) {
			kmem_free(zv, sizeof(*zv));
			dmu_objset_disown(os, FTAG);
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		zv->zv_dev->si_iosize_max = MAXPHYS;
	}
	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
#endif	/* illumos */

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
#ifdef illumos
	zv->zv_minor = minor;
#endif
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

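	/*
	 * If the pool is writeable, either discard (when ZIL replay is
	 * administratively disabled) or replay any outstanding log records
	 * before the volume is exposed.
	 */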
	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);
#ifndef illumos
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		zvol_geom_run(zv);
		g_topology_unlock();
	}
	PICKUP_GIANT();

	ZFS_LOG(1, "ZVOL %s created.", name);
#endif

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef illumos
	char nmbuf[20];
	minor_t minor = zv->zv_minor;
#endif

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

#ifdef illumos
	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);
#else
	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

	LIST_REMOVE(zv, zv_links);
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		zvol_geom_destroy(zv);
		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV)
		destroy_dev(zv->zv_dev);
#endif

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));
#ifdef illumos
	ddi_soft_state_free(zfsdev_state, minor);
#endif
	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

#ifdef illumos
int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

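	/*
	 * Preallocate in SPA_OLD_MAXBLOCKSIZE chunks, one tx per chunk;
	 * on failure, free everything written so far and bail out.
	 */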
	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
#endif	/* illumos */

static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
#ifdef illumos
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
#else	/* !illumos */
	zvol_state_t *zv, *tzv;
	size_t namelen;

	namelen = strlen(name);

	DROP_GIANT();
	mutex_enter(&zfsdev_state_lock);

	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		    strlen(zv->zv_name) > namelen && (zv->zv_name[namelen] == '/' ||
		    zv->zv_name[namelen] == '@'))) {
			(void) zvol_remove_zv(zv);
		}
	}

	mutex_exit(&zfsdev_state_lock);
	PICKUP_GIANT();
#endif	/* illumos */
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() to ensure that the devices'
	 * size(9P) is not visible by the dump subsystem.
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

#ifdef ZVOL_DUMP
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}
#endif	/* ZVOL_DUMP */

#ifdef illumos
	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif	/* illumos */
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

/*ARGSUSED*/
#ifdef illumos
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
#else
static int
zvol_open(struct g_provider *pp, int flag, int count)
#endif
{
	zvol_state_t *zv;
	int err = 0;
#ifdef illumos

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
#else	/* !illumos */
	if (tsd_get(zfs_geom_probe_vdev_key) != NULL) {
		/*
		 * if zfs_geom_probe_vdev_key is set, that means that zfs is
		 * attempting to probe geom providers while looking for a
		 * replacement for a missing VDEV.  In this case, the
		 * spa_namespace_lock will not be held, but it is still illegal
		 * to use a zvol as a vdev.  Deadlocks can result if another
		 * thread has spa_namespace_lock
		 */
		return (EOPNOTSUPP);
	}

	mutex_enter(&zfsdev_state_lock);

	zv = pp->private;
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0) {
		err = zvol_first_open(zv);
		if (err) {
			mutex_exit(&zfsdev_state_lock);
			return (err);
		}
		pp->mediasize = zv->zv_volsize;
		pp->stripeoffset = 0;
		pp->stripesize = zv->zv_volblocksize;
	}
#endif	/* illumos */
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
#ifdef FEXCL
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}
#endif

#ifdef illumos
	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);
#else
	zv->zv_total_opens += count;
	mutex_exit(&zfsdev_state_lock);
#endif

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
#ifdef illumos
	mutex_exit(&zfsdev_state_lock);
#else
	mutex_exit(&zfsdev_state_lock);
#endif
	return (err);
}

/*ARGSUSED*/
#ifdef illumos
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
#else	/* !illumos */
static int
zvol_close(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t locked = B_FALSE;

	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&zfsdev_state_lock)) {
		mutex_enter(&zfsdev_state_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&zfsdev_state_lock);
#endif	/* illumos */
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
#ifdef illumos
	ASSERT(zv->zv_open_count[otyp] != 0);
#endif
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
#ifdef illumos
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;
#else
	zv->zv_total_opens -= count;
#endif

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

#ifdef illumos
	mutex_exit(&zfsdev_state_lock);
#else
	if (locked)
		mutex_exit(&zfsdev_state_lock);
#endif
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
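 * This is the get-data callback registered with zil_open() in
 * zvol_first_open(); zil_commit() invokes it when it needs the payload
 * of a log record that was not copied at itx creation time.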
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

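		/*
		 * For WR_COPIED records the payload is copied into the
		 * itx right away; if that read fails, downgrade to
		 * WR_NEED_COPY so the copy is retried when the ZIL
		 * commits the itx.
		 */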
		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

#ifdef illumos
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
#else	/* !illumos */
void
zvol_strategy(struct bio *bp)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
#ifdef illumos
	boolean_t doread = bp->b_flags & B_READ;
#else
	boolean_t doread = 0;
#endif
	boolean_t is_dumpified;
	boolean_t sync;

#ifdef illumos
	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
#else	/* !illumos */
	if (bp->bio_to)
		zv = bp->bio_to->private;
	else
		zv = bp->bio_dev->si_drv2;

	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		error = SET_ERROR(EROFS);
		goto out;
	}

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		goto sync;
	case BIO_READ:
		doread = 1;
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		error = EOPNOTSUPP;
		goto out;
	}

	off = bp->bio_offset;
#endif	/* illumos */
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

#ifdef illumos
	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dumpified;
#else	/* !illumos */
	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		error = SET_ERROR(EIO);
		goto out;
	}

	is_dumpified = B_FALSE;
	sync = !doread && !is_dumpified &&
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
#endif	/* illumos */

	/*
	 * There must be no buffer changes when doing a dmu_sync()
	 * because we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

#ifndef illumos
	if (bp->bio_cmd == BIO_DELETE) {
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, off, resid, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    off, resid);
			resid = 0;
		}
		goto unlock;
	}
#endif
	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
#ifdef illumos
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
#else
		if (doread) {
#endif
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
#ifndef illumos
unlock:
#endif
	zfs_range_unlock(rl);

#ifdef illumos
	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
#else	/* !illumos */
	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length && off > volsize)
		error = EINVAL;

	if (sync) {
sync:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	}
out:
	if (bp->bio_to)
		g_io_deliver(bp, error);
	else
		biofinish(bp, NULL, error);
#endif	/* illumos */
}

#ifdef illumos
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else	/* !illumos */
int
zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

#ifdef illumos
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}
#endif

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

#ifdef illumos
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else	/* !illumos */
int
zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

#ifdef illumos
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

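	/*
	 * A write is synchronous when the caller asks for it (write cache
	 * disabled on illumos, IO_SYNC on FreeBSD) or when the dataset's
	 * sync property is set to 'always'; only then is the ZIL committed
	 * before returning.
	 */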
#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
#else
	sync = (ioflag & IO_SYNC) ||
#endif
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

#ifdef illumos
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */
#endif	/* illumos */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

#ifdef illumos
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
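 * Unless a case returns on its own, zfsdev_state_lock is held from entry
 * and dropped at the bottom of the switch.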
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer =
		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zfsdev_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

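	/*
	 * DKIOCGETWCE/DKIOCSETWCE read and toggle write-cache emulation;
	 * clearing WCE commits any writes that were treated as cached
	 * while it was set.
	 */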
	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (!zvol_unmap_enabled)
			break;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */

		mutex_exit(&zfsdev_state_lock);

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		dmu_tx_mark_netfree(tx);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller asked to wait (DF_WAIT_SYNC), don't
			 * return until the free has made it to stable
			 * storage.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		return (error);
	}
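
	/*
	 * Illustrative only (userland, fields per <sys/dkio.h>): free the
	 * first megabyte of the volume and wait for the free to be on disk:
	 *
	 *	dkioc_free_t df = { 0 };
	 *	df.df_flags = DF_WAIT_SYNC;
	 *	df.df_start = 0;
	 *	df.df_length = 1 << 20;
	 *	(void) ioctl(fd, DKIOCFREE, &df);
	 */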

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}
#endif	/* illumos */

int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
#ifdef illumos
	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
#else
	ZFS_LOG(1, "ZVOL Initialized.");
#endif
}

void
zvol_fini(void)
{
#ifdef illumos
	mutex_destroy(&zfsdev_state_lock);
#endif
	ddi_soft_state_fini(&zfsdev_state);
	ZFS_LOG(1, "ZVOL Deinitialized.");
}

#ifdef illumos
/*ARGSUSED*/
static int
zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
		return (1);
	return (0);
}

/*ARGSUSED*/
static void
zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error;
	objset_t *os = zv->zv_objset;
	spa_t *spa = dmu_objset_spa(os);
	vdev_t *vd = spa->spa_root_vdev;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(spa);
	uint64_t checksum, compress, refresrv, vbs, dedup;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	ASSERT(vd->vdev_ops == &vdev_root_ops);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	if (error != 0)
		return (error);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	/*
	 * If the pool on which the dump device is being initialized has more
	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
	 * enabled.  If so, bump that feature's counter to indicate that the
	 * feature is active.  We also check the vdev type to handle the
	 * following case:
	 *   # zpool create test raidz disk1 disk2 disk3
	 * Now we have spa_root_vdev->vdev_children == 1 (the raidz vdev),
	 * and the raidz vdev itself has 3 children.
	 */
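	/*
	 * dsl_sync_task() aborts without running the activate callback
	 * when the check callback returns nonzero, so the feature
	 * refcount is only bumped the first time a dump device is
	 * initialized on this pool.
	 */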
	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
		if (!spa_feature_is_enabled(spa,
		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
			return (SET_ERROR(ENOTSUP));
		(void) dsl_sync_task(spa_name(spa),
		    zfs_mvdev_dump_feature_check,
		    zfs_mvdev_dump_activate_feature_sync, NULL,
		    2, ZFS_SPACE_CHECK_RESERVED);
	}

	if (!resize) {
		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		if (error == 0) {
			error = dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
			    NULL);
		}
		if (error == 0) {
			error = dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
			    &refresrv, NULL);
		}
		if (error == 0) {
			error = dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
			    NULL);
		}
		if (version >= SPA_VERSION_DEDUP && error == 0) {
			error = dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}
	}
	if (error != 0)
		return (error);

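	/*
	 * Stash the current property values in the zvol's ZAP object (or,
	 * when resizing, just refresh the refreservation) so that
	 * zvol_dump_fini() can restore the pre-dump state later.
	 */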
	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvolsize.  Otherwise, we save off the original state of the
	 * zvol so that we can restore it if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		if (error == 0) {
			error = zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
			    &checksum, tx);
		}
		if (error == 0) {
			error = zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
			    &refresrv, tx);
		}
		if (error == 0) {
			error = zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
			    &vbs, tx);
		}
		if (error == 0) {
			error = dmu_object_set_blocksize(
			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
		}
		if (version >= SPA_VERSION_DEDUP && error == 0) {
			error = zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
	if (error == 0 && !resize) {
		/*
		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY
		 * checksum function.  Otherwise, use the old default -- OFF.
		 */
		checksum = spa_feature_is_active(spa,
		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ?
		    ZIO_CHECKSUM_NOPARITY : ZIO_CHECKSUM_OFF;

		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    checksum) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);
	}

	/* Allocate the space for the dump */
	if (error == 0)
		error = zvol_prealloc(zv);
	return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (SET_ERROR(EROFS));

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0);

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

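/*
 * Reverse of zvol_dumpify().  Reached via DKIOCDUMPFINI, and also used
 * above as the error-unwind path when dumpification fails partway
 * through.
 */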
static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}
#else	/* !illumos */

static void
zvol_geom_run(zvol_state_t *zv)
{
	struct g_provider *pp;

	pp = zv->zv_provider;
	g_error_provider(pp, 0);

	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
}

static void
zvol_geom_destroy(zvol_state_t *zv)
{
	struct g_provider *pp;

	g_topology_assert();

	mtx_lock(&zv->zv_queue_mtx);
	zv->zv_state = 1;
	wakeup_one(&zv->zv_queue);
	while (zv->zv_state != 2)
		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
	mtx_destroy(&zv->zv_queue_mtx);

	pp = zv->zv_provider;
	zv->zv_provider = NULL;
	pp->private = NULL;
	g_wither_geom(pp->geom, ENXIO);
}

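/*
 * GEOM ::access method.  acr, acw and ace are reference-count deltas
 * (each may be positive, negative or zero); their sum tells us whether
 * this call is a net open (> 0) or a net close (< 0).
 */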
static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
	int count, error, flags;

	g_topology_assert();

	/*
	 * To make it easier we expect either open or close, but not both
	 * at the same time.
	 */
	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
	    (acr <= 0 && acw <= 0 && ace <= 0),
	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
	    pp->name, acr, acw, ace));

	if (pp->private == NULL) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		return (pp->error);
	}

	/*
	 * We don't pass the FEXCL flag to zvol_open()/zvol_close() if
	 * ace != 0, because GEOM already handles that and handles it a bit
	 * differently.  GEOM allows for multiple read/exclusive consumers,
	 * while ZFS allows only one exclusive consumer, no matter whether
	 * it is a reader or a writer.  I prefer the way GEOM works, so I'll
	 * leave it to GEOM to decide what to do.
	 */

	count = acr + acw + ace;
	if (count == 0)
		return (0);

	flags = 0;
	if (acr != 0 || ace != 0)
		flags |= FREAD;
	if (acw != 0)
		flags |= FWRITE;

	g_topology_unlock();
	if (count > 0)
		error = zvol_open(pp, flags, count);
	else
		error = zvol_close(pp, flags, -count);
	g_topology_lock();
	return (error);
}

static void
zvol_geom_start(struct bio *bp)
{
	zvol_state_t *zv;
	boolean_t first;

	zv = bp->bio_to->private;
	ASSERT(zv != NULL);
	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		if (!THREAD_CAN_SLEEP())
			goto enqueue;
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		g_io_deliver(bp, 0);
		break;
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		if (!THREAD_CAN_SLEEP())
			goto enqueue;
		zvol_strategy(bp);
		break;
	case BIO_GETATTR: {
		spa_t *spa = dmu_objset_spa(zv->zv_objset);
		uint64_t refd, avail, usedobjs, availobjs, val;

		if (g_handleattr_int(bp, "GEOM::candelete", 1))
			return;
		if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
			dmu_objset_space(zv->zv_objset, &refd, &avail,
			    &usedobjs, &availobjs);
			if (g_handleattr_off_t(bp, "blocksavail",
			    avail / DEV_BSIZE))
				return;
		} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
			dmu_objset_space(zv->zv_objset, &refd, &avail,
			    &usedobjs, &availobjs);
			if (g_handleattr_off_t(bp, "blocksused",
			    refd / DEV_BSIZE))
				return;
		} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
			avail = metaslab_class_get_space(spa_normal_class(spa));
			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
			if (g_handleattr_off_t(bp, "poolblocksavail",
			    avail / DEV_BSIZE))
				return;
		} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
			refd = metaslab_class_get_alloc(spa_normal_class(spa));
			if (g_handleattr_off_t(bp, "poolblocksused",
			    refd / DEV_BSIZE))
				return;
		}
		/* FALLTHROUGH */
	}
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		break;
	}
	return;

enqueue:
	mtx_lock(&zv->zv_queue_mtx);
	first = (bioq_first(&zv->zv_queue) == NULL);
	bioq_insert_tail(&zv->zv_queue, bp);
	mtx_unlock(&zv->zv_queue_mtx);
	if (first)
		wakeup_one(&zv->zv_queue);
}

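/*
 * Per-volume worker thread, created by zvol_geom_run().
 * zvol_geom_start() may be entered from a context that is not allowed
 * to sleep; such bios are queued on zv_queue and serviced here instead.
 */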
static void
zvol_geom_worker(void *arg)
{
	zvol_state_t *zv;
	struct bio *bp;

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	zv = arg;
	for (;;) {
		mtx_lock(&zv->zv_queue_mtx);
		bp = bioq_takefirst(&zv->zv_queue);
		if (bp == NULL) {
			if (zv->zv_state == 1) {
				zv->zv_state = 2;
				wakeup(&zv->zv_state);
				mtx_unlock(&zv->zv_queue_mtx);
				kthread_exit();
			}
			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
			    "zvol:io", 0);
			continue;
		}
		mtx_unlock(&zv->zv_queue_mtx);
		switch (bp->bio_cmd) {
		case BIO_FLUSH:
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
			g_io_deliver(bp, 0);
			break;
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			zvol_strategy(bp);
			break;
		default:
			g_io_deliver(bp, EOPNOTSUPP);
			break;
		}
	}
}

extern boolean_t dataset_name_hidden(const char *name);

static int
zvol_create_snapshots(objset_t *os, const char *name)
{
	uint64_t cookie, obj;
	char *sname;
	int error, len;

	cookie = obj = 0;
	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

#if 0
	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
	    DS_FIND_SNAPSHOTS);
#endif

	for (;;) {
		len = snprintf(sname, MAXPATHLEN, "%s@", name);
		if (len >= MAXPATHLEN) {
			dmu_objset_rele(os, FTAG);
			error = ENAMETOOLONG;
			break;
		}

		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
		    sname + len, &obj, &cookie, NULL);
		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
		if (error != 0) {
			if (error == ENOENT)
				error = 0;
			break;
		}

		error = zvol_create_minor(sname);
		if (error != 0 && error != EEXIST) {
			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
			    sname, error);
			break;
		}
	}

	kmem_free(sname, MAXPATHLEN);
	return (error);
}

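/*
 * Create device nodes for the named dataset and, recursively, for its
 * children and snapshots.  The objset hold is dropped around each
 * recursive call (which takes holds of its own) and re-taken afterwards.
 */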
int
zvol_create_minors(const char *name)
{
	uint64_t cookie;
	objset_t *os;
	char *osname, *p;
	int error, len;

	if (dataset_name_hidden(name))
		return (0);

	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
		    name, error);
		return (error);
	}
	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
		dsl_pool_rele(dmu_objset_pool(os), FTAG);
		error = zvol_create_minor(name);
		if (error == 0 || error == EEXIST) {
			error = zvol_create_snapshots(os, name);
		} else {
			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
			    name, error);
		}
		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
		return (error);
	}
	if (dmu_objset_type(os) != DMU_OST_ZFS) {
		dmu_objset_rele(os, FTAG);
		return (0);
	}

	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
		dmu_objset_rele(os, FTAG);
		kmem_free(osname, MAXPATHLEN);
		return (ENOENT);
	}
	p = osname + strlen(osname);
	len = MAXPATHLEN - (p - osname);

#if 0
	/* Prefetch the datasets. */
	cookie = 0;
	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
		if (!dataset_name_hidden(osname))
			(void) dmu_objset_prefetch(osname, NULL);
	}
#endif

	cookie = 0;
	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
	    &cookie) == 0) {
		dmu_objset_rele(os, FTAG);
		(void) zvol_create_minors(osname);
		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
			    name, error);
			kmem_free(osname, MAXPATHLEN);
			return (error);
		}
	}

	dmu_objset_rele(os, FTAG);
	kmem_free(osname, MAXPATHLEN);
	return (0);
}

static void
zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct cdev *dev;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		pp = zv->zv_provider;
		ASSERT(pp != NULL);
		gp = pp->geom;
		ASSERT(gp != NULL);

		zv->zv_provider = NULL;
		g_wither_provider(pp, ENXIO);

		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = zv->zv_volsize;
		pp->private = zv;
		zv->zv_provider = pp;
		g_error_provider(pp, 0);
		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		struct make_dev_args args;

		dev = zv->zv_dev;
		ASSERT(dev != NULL);
		zv->zv_dev = NULL;
		destroy_dev(dev);
		if (zv->zv_total_opens > 0) {
			zv->zv_flags &= ~ZVOL_EXCL;
			zv->zv_total_opens = 0;
			zvol_last_close(zv);
		}

		make_dev_args_init(&args);
		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
		args.mda_devsw = &zvol_cdevsw;
		args.mda_cr = NULL;
		args.mda_uid = UID_ROOT;
		args.mda_gid = GID_OPERATOR;
		args.mda_mode = 0640;
		args.mda_si_drv2 = zv;
		if (make_dev_s(&args, &zv->zv_dev,
		    "%s/%s", ZVOL_DRIVER, newname) == 0)
			zv->zv_dev->si_iosize_max = MAXPHYS;
	}
	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
}

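/*
 * Rename every minor whose name is oldname itself, or begins with
 * "oldname/" or "oldname@" -- i.e. the dataset plus all of its children
 * and snapshots.
 */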
void
zvol_rename_minors(const char *oldname, const char *newname)
{
	char name[MAXPATHLEN];
	struct g_provider *pp;
	struct g_geom *gp;
	size_t oldnamelen, newnamelen;
	zvol_state_t *zv;
	char *namebuf;
	boolean_t locked = B_FALSE;

	oldnamelen = strlen(oldname);
	newnamelen = strlen(newname);

	DROP_GIANT();
	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&zfsdev_state_lock)) {
		mutex_enter(&zfsdev_state_lock);
		locked = B_TRUE;
	}

	LIST_FOREACH(zv, &all_zvols, zv_links) {
		if (strcmp(zv->zv_name, oldname) == 0) {
			zvol_rename_minor(zv, newname);
		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
		    (zv->zv_name[oldnamelen] == '/' ||
		    zv->zv_name[oldnamelen] == '@')) {
			snprintf(name, sizeof(name), "%s%c%s", newname,
			    zv->zv_name[oldnamelen],
			    zv->zv_name + oldnamelen + 1);
			zvol_rename_minor(zv, name);
		}
	}

	if (locked)
		mutex_exit(&zfsdev_state_lock);
	PICKUP_GIANT();
}

static int
zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	zvol_state_t *zv = dev->si_drv2;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);
	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
#ifdef FEXCL
	if (flags & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}
#endif

	zv->zv_total_opens++;
	mutex_exit(&zfsdev_state_lock);
	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}

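/*
 * cdev close entry point.  As the comment below notes, opens and closes
 * are not guaranteed to pair up one-to-one, so last-close work is keyed
 * off the open count reaching zero.
 */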
static int
zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	zvol_state_t *zv = dev->si_drv2;

	mutex_enter(&zfsdev_state_lock);
	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (0);
}

static int
zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	zvol_state_t *zv;
	rl_t *rl;
	off_t offset, length, chunk;
	int i, error;
	u_int u;

	zv = dev->si_drv2;

	error = 0;
	KASSERT(zv->zv_total_opens > 0,
	    ("Device with zero access count in zvol_d_ioctl"));

	i = IOCPARM_LEN(cmd);
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = DEV_BSIZE;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = zv->zv_volsize;
		break;
	case DIOCGFLUSH:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		break;
	case DIOCGDELETE:
		if (!zvol_unmap_enabled)
			break;

		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
		    offset < 0 || offset >= zv->zv_volsize ||
		    length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}

		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, offset, length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    offset, length);
		}
		zfs_range_unlock(rl);
		if (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = zv->zv_volblocksize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = 0;
		break;
	case DIOCGATTR: {
		spa_t *spa = dmu_objset_spa(zv->zv_objset);
		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
		uint64_t refd, avail, usedobjs, availobjs;

		if (strcmp(arg->name, "GEOM::candelete") == 0)
			arg->value.i = 1;
		else if (strcmp(arg->name, "blocksavail") == 0) {
			dmu_objset_space(zv->zv_objset, &refd, &avail,
			    &usedobjs, &availobjs);
			arg->value.off = avail / DEV_BSIZE;
		} else if (strcmp(arg->name, "blocksused") == 0) {
			dmu_objset_space(zv->zv_objset, &refd, &avail,
			    &usedobjs, &availobjs);
			arg->value.off = refd / DEV_BSIZE;
		} else if (strcmp(arg->name, "poolblocksavail") == 0) {
			avail = metaslab_class_get_space(spa_normal_class(spa));
			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
			arg->value.off = avail / DEV_BSIZE;
		} else if (strcmp(arg->name, "poolblocksused") == 0) {
			refd = metaslab_class_get_alloc(spa_normal_class(spa));
			arg->value.off = refd / DEV_BSIZE;
		} else
			error = ENOIOCTL;
		break;
	}
	case FIOSEEKHOLE:
	case FIOSEEKDATA: {
		off_t *off = (off_t *)data;
		uint64_t noff;
		boolean_t hole;

		hole = (cmd == FIOSEEKHOLE);
		noff = *off;
		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
		*off = noff;
		break;
	}
	default:
		error = ENOIOCTL;
	}

	return (error);
}
#endif	/* illumos */