zvol.c revision 308596
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * FreeBSD notes.
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <sys/filio.h>

#include <geom/geom.h>

#include "zfs_namecheck.h"

#ifndef illumos
struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

#endif
void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE	"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
#ifdef illumos
kmutex_t zfsdev_state_lock;
#else
/*
 * In FreeBSD we've replaced the upstream zfsdev_state_lock with the
 * spa_namespace_lock in the ZVOL code.
 */
#define	zfsdev_state_lock spa_namespace_lock
#endif
static uint32_t zvol_minors;

#ifndef illumos
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int	volmode = ZFS_VOLMODE_GEOM;
TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
    "Expose as GEOM providers (1), device files (2) or neither");

#endif
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
#ifndef illumos
	LIST_ENTRY(zvol_state)	zv_links;
#endif
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
#ifdef illumos
	minor_t		zv_minor;	/* minor number */
#else
	struct cdev	*zv_dev;	/* non-GEOM device */
	struct g_provider *zv_provider;	/* GEOM provider */
#endif
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
#ifdef illumos
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
#endif
	uint32_t	zv_total_opens;	/* total open count */
	uint32_t	zv_sync_cnt;	/* synchronous open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
#ifndef illumos
	int		zv_state;
	int		zv_volmode;	/* Provide GEOM or cdev */
	struct bio_queue_head zv_queue;
	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
#endif
} zvol_state_t;

#ifndef illumos
static LIST_HEAD(, zvol_state) all_zvols;
#endif
/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

/*
 * Toggle unmap functionality.
 */
boolean_t zvol_unmap_enabled = B_TRUE;
#ifndef illumos
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
    &zvol_unmap_enabled, 0,
    "Enable UNMAP functionality");

static d_open_t		zvol_d_open;
static d_close_t	zvol_d_close;
static d_read_t		zvol_read;
static d_write_t	zvol_write;
static d_ioctl_t	zvol_d_ioctl;
static d_strategy_t	zvol_strategy;

static struct cdevsw zvol_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	zvol_d_open,
	.d_close =	zvol_d_close,
	.d_read =	zvol_read,
	.d_write =	zvol_write,
	.d_ioctl =	zvol_d_ioctl,
	.d_strategy =	zvol_strategy,
	.d_name =	"zvol",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);
static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
    uint64_t len, boolean_t sync);
#endif	/* !illumos */

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
#ifdef illumos
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
#else	/* !illumos */
	zv->zv_volsize = volsize;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct g_provider *pp;

		pp = zv->zv_provider;
		if (pp == NULL)
			return;
		g_topology_lock();
		g_resize_provider(pp, zv->zv_volsize);
		g_topology_unlock();
	}
#endif	/* illumos */
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

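/*
 * Find the in-core state for a named volume, if a minor has been created
 * for it.  The caller must hold zfsdev_state_lock (on FreeBSD this is the
 * spa_namespace_lock, see above).
 */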
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
#ifdef illumos
	minor_t minor;
#endif
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

#ifdef illumos
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
#else
	LIST_FOREACH(zv, &all_zvols, zv_links) {
#endif
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

#ifdef illumos
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}
#endif	/* illumos */

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
#ifdef illumos
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
#else
	struct g_provider *pp;
	struct g_geom *gp;
	uint64_t volsize, mode;
#endif
	int error;

#ifndef illumos
	ZFS_LOG(1, "Creating ZVOL %s...", name);
#endif

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

#ifdef illumos
	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !illumos */

	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
	zv->zv_state = 0;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		kmem_free(zv, sizeof(*zv));
		dmu_objset_disown(os, zvol_tag);
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}
	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
		mode = volmode;

	DROP_GIANT();
	zv->zv_volsize = volsize;
	zv->zv_volmode = mode;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
		gp->start = zvol_geom_start;
		gp->access = zvol_geom_access;
		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = zv->zv_volsize;
		pp->private = zv;

		zv->zv_provider = pp;
		bioq_init(&zv->zv_queue);
		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		struct make_dev_args args;

		make_dev_args_init(&args);
		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
		args.mda_devsw = &zvol_cdevsw;
		args.mda_cr = NULL;
		args.mda_uid = UID_ROOT;
		args.mda_gid = GID_OPERATOR;
		args.mda_mode = 0640;
		args.mda_si_drv2 = zv;
		error = make_dev_s(&args, &zv->zv_dev,
		    "%s/%s", ZVOL_DRIVER, name);
		if (error != 0) {
			kmem_free(zv, sizeof(*zv));
			dmu_objset_disown(os, FTAG);
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		zv->zv_dev->si_iosize_max = MAXPHYS;
	}
	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
#endif	/* illumos */

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
#ifdef illumos
	zv->zv_minor = minor;
#endif
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);
#ifndef illumos
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		zvol_geom_run(zv);
		g_topology_unlock();
	}
	PICKUP_GIANT();

	ZFS_LOG(1, "ZVOL %s created.", name);
#endif

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef illumos
	char nmbuf[20];
	minor_t minor = zv->zv_minor;
#endif

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

#ifdef illumos
	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);
#else
	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

	LIST_REMOVE(zv, zv_links);
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		g_topology_lock();
		zvol_geom_destroy(zv);
		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		if (zv->zv_dev != NULL)
			destroy_dev(zv->zv_dev);
	}
#endif

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));
#ifdef illumos
	ddi_soft_state_free(zfsdev_state, minor);
#endif
	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

#ifdef illumos
int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
#endif	/* illumos */

static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
#ifdef illumos
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
#else	/* !illumos */
	zvol_state_t *zv, *tzv;
	size_t namelen;

	namelen = strlen(name);

	DROP_GIANT();
	mutex_enter(&zfsdev_state_lock);

	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		    strlen(zv->zv_name) > namelen && (zv->zv_name[namelen] == '/' ||
		    zv->zv_name[namelen] == '@'))) {
			(void) zvol_remove_zv(zv);
		}
	}

	mutex_exit(&zfsdev_state_lock);
	PICKUP_GIANT();
#endif	/* illumos */
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() to ensure that the devices'
	 * size(9P) is not visible by the dump subsystem.
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

#ifdef ZVOL_DUMP
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}
#endif	/* ZVOL_DUMP */

#ifdef illumos
	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif	/* illumos */
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

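/*
 * Open and close entry points for the volume.  The illumos variants are
 * called through the character/block device switch; the FreeBSD variants
 * take a GEOM provider and an open count (see the zvol_geom_access() and
 * zvol_cdevsw declarations above for how opens arrive on FreeBSD).
 */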
/*ARGSUSED*/
#ifdef illumos
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
#else
static int
zvol_open(struct g_provider *pp, int flag, int count)
#endif
{
	zvol_state_t *zv;
	int err = 0;
#ifdef illumos

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
#else	/* !illumos */
	if (tsd_get(zfs_geom_probe_vdev_key) != NULL) {
		/*
		 * if zfs_geom_probe_vdev_key is set, that means that zfs is
		 * attempting to probe geom providers while looking for a
		 * replacement for a missing VDEV.  In this case, the
		 * spa_namespace_lock will not be held, but it is still illegal
		 * to use a zvol as a vdev.  Deadlocks can result if another
		 * thread has spa_namespace_lock
		 */
		return (EOPNOTSUPP);
	}

	mutex_enter(&zfsdev_state_lock);

	zv = pp->private;
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0) {
		err = zvol_first_open(zv);
		if (err) {
			mutex_exit(&zfsdev_state_lock);
			return (err);
		}
		pp->mediasize = zv->zv_volsize;
		pp->stripeoffset = 0;
		pp->stripesize = zv->zv_volblocksize;
	}
#endif	/* illumos */
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
#ifdef FEXCL
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}
#endif

#ifdef illumos
	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);
#else
	zv->zv_total_opens += count;
	mutex_exit(&zfsdev_state_lock);
#endif

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
#ifdef illumos
	mutex_exit(&zfsdev_state_lock);
#else
	mutex_exit(&zfsdev_state_lock);
#endif
	return (err);
}

/*ARGSUSED*/
#ifdef illumos
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
#else	/* !illumos */
static int
zvol_close(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t locked = B_FALSE;

	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&zfsdev_state_lock)) {
		mutex_enter(&zfsdev_state_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&zfsdev_state_lock);
#endif	/* illumos */
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
#ifdef illumos
	ASSERT(zv->zv_open_count[otyp] != 0);
#endif
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
#ifdef illumos
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;
#else
	zv->zv_total_opens -= count;
#endif

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

#ifdef illumos
	mutex_exit(&zfsdev_state_lock);
#else
	if (locked)
		mutex_exit(&zfsdev_state_lock);
#endif
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

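		/*
		 * Pick how the payload will reach the log: WR_INDIRECT lets
		 * dmu_sync() write the block in place and logs only a block
		 * pointer, WR_COPIED copies the data into the itx right here,
		 * and WR_NEED_COPY defers the copy until the itx is committed.
		 */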
		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;

		if (!sync && (zv->zv_sync_cnt == 0))
			itx->itx_sync = B_FALSE;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

#ifdef illumos
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
#else	/* !illumos */
void
zvol_strategy(struct bio *bp)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
#ifdef illumos
	boolean_t doread = bp->b_flags & B_READ;
#else
	boolean_t doread = 0;
#endif
	boolean_t is_dumpified;
	boolean_t sync;

#ifdef illumos
	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
#else	/* !illumos */
	if (bp->bio_to)
		zv = bp->bio_to->private;
	else
		zv = bp->bio_dev->si_drv2;

	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		error = SET_ERROR(EROFS);
		goto out;
	}

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		goto sync;
	case BIO_READ:
		doread = 1;
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		error = EOPNOTSUPP;
		goto out;
	}

	off = bp->bio_offset;
#endif	/* illumos */
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

#ifdef illumos
	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dumpified;
#else	/* !illumos */
	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		error = SET_ERROR(EIO);
		goto out;
	}

	is_dumpified = B_FALSE;
	sync = !doread && !is_dumpified &&
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
#endif	/* illumos */

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

#ifndef illumos
	if (bp->bio_cmd == BIO_DELETE) {
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, off, resid, sync);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    off, resid);
			resid = 0;
		}
		goto unlock;
	}
#endif
	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
#ifdef illumos
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
#else
		if (doread) {
#endif
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
#ifndef illumos
unlock:
#endif
	zfs_range_unlock(rl);

#ifdef illumos
	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
#else	/* !illumos */
	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length && off > volsize)
		error = EINVAL;

	if (sync) {
sync:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	}
out:
	if (bp->bio_to)
		g_io_deliver(bp, error);
	else
		biofinish(bp, NULL, error);
#endif	/* illumos */
}

#ifdef illumos
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else	/* !illumos */
int
zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

#ifdef illumos
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}
#endif

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

#ifdef illumos
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
#else	/* !illumos */
int
zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
{
#endif	/* illumos */
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

#ifdef illumos
	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
#else
	zv = dev->si_drv2;
#endif

	volsize = zv->zv_volsize;
	/* uio_loffset == volsize isn't an error as it's required for EOF processing. */
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
		return (SET_ERROR(EIO));

#ifdef illumos
	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
#else
	sync = (ioflag & IO_SYNC) ||
#endif
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

#ifdef illumos
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */
#endif	/* illumos */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = (sync || zv->zv_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

#ifdef illumos
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer =
		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zfsdev_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
1 : 0; 2163 if (ddi_copyout(&wce, (void *)arg, sizeof (int), 2164 flag)) 2165 error = SET_ERROR(EFAULT); 2166 break; 2167 } 2168 case DKIOCSETWCE: 2169 { 2170 int wce; 2171 if (ddi_copyin((void *)arg, &wce, sizeof (int), 2172 flag)) { 2173 error = SET_ERROR(EFAULT); 2174 break; 2175 } 2176 if (wce) { 2177 zv->zv_flags |= ZVOL_WCE; 2178 mutex_exit(&zfsdev_state_lock); 2179 } else { 2180 zv->zv_flags &= ~ZVOL_WCE; 2181 mutex_exit(&zfsdev_state_lock); 2182 zil_commit(zv->zv_zilog, ZVOL_OBJ); 2183 } 2184 return (0); 2185 } 2186 2187 case DKIOCGGEOM: 2188 case DKIOCGVTOC: 2189 /* 2190 * commands using these (like prtvtoc) expect ENOTSUP 2191 * since we're emulating an EFI label 2192 */ 2193 error = SET_ERROR(ENOTSUP); 2194 break; 2195 2196 case DKIOCDUMPINIT: 2197 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize, 2198 RL_WRITER); 2199 error = zvol_dumpify(zv); 2200 zfs_range_unlock(rl); 2201 break; 2202 2203 case DKIOCDUMPFINI: 2204 if (!(zv->zv_flags & ZVOL_DUMPIFIED)) 2205 break; 2206 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize, 2207 RL_WRITER); 2208 error = zvol_dump_fini(zv); 2209 zfs_range_unlock(rl); 2210 break; 2211 2212 case DKIOCFREE: 2213 { 2214 dkioc_free_t df; 2215 dmu_tx_t *tx; 2216 2217 if (!zvol_unmap_enabled) 2218 break; 2219 2220 if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) { 2221 error = SET_ERROR(EFAULT); 2222 break; 2223 } 2224 2225 /* 2226 * Apply Postel's Law to length-checking. If they overshoot, 2227 * just blank out until the end, if there's a need to blank 2228 * out anything. 2229 */ 2230 if (df.df_start >= zv->zv_volsize) 2231 break; /* No need to do anything... */ 2232 2233 mutex_exit(&zfsdev_state_lock); 2234 2235 rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length, 2236 RL_WRITER); 2237 tx = dmu_tx_create(zv->zv_objset); 2238 dmu_tx_mark_netfree(tx); 2239 error = dmu_tx_assign(tx, TXG_WAIT); 2240 if (error != 0) { 2241 dmu_tx_abort(tx); 2242 } else { 2243 zvol_log_truncate(zv, tx, df.df_start, 2244 df.df_length, B_TRUE); 2245 dmu_tx_commit(tx); 2246 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 2247 df.df_start, df.df_length); 2248 } 2249 2250 zfs_range_unlock(rl); 2251 2252 if (error == 0) { 2253 /* 2254 * If the write-cache is disabled or 'sync' property 2255 * is set to 'always' then treat this as a synchronous 2256 * operation (i.e. commit to zil). 2257 */ 2258 if (!(zv->zv_flags & ZVOL_WCE) || 2259 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) 2260 zil_commit(zv->zv_zilog, ZVOL_OBJ); 2261 2262 /* 2263 * If the caller really wants synchronous writes, and 2264 * can't wait for them, don't return until the write 2265 * is done. 
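		 * DF_WAIT_SYNC means the caller needs the freed range on
		 * stable storage before we return, so wait for the
		 * transaction group carrying the free to sync out.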
2266 */ 2267 if (df.df_flags & DF_WAIT_SYNC) { 2268 txg_wait_synced( 2269 dmu_objset_pool(zv->zv_objset), 0); 2270 } 2271 } 2272 return (error); 2273 } 2274 2275 default: 2276 error = SET_ERROR(ENOTTY); 2277 break; 2278 2279 } 2280 mutex_exit(&zfsdev_state_lock); 2281 return (error); 2282} 2283#endif /* illumos */ 2284 2285int 2286zvol_busy(void) 2287{ 2288 return (zvol_minors != 0); 2289} 2290 2291void 2292zvol_init(void) 2293{ 2294 VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t), 2295 1) == 0); 2296#ifdef illumos 2297 mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL); 2298#else 2299 ZFS_LOG(1, "ZVOL Initialized."); 2300#endif 2301} 2302 2303void 2304zvol_fini(void) 2305{ 2306#ifdef illumos 2307 mutex_destroy(&zfsdev_state_lock); 2308#endif 2309 ddi_soft_state_fini(&zfsdev_state); 2310 ZFS_LOG(1, "ZVOL Deinitialized."); 2311} 2312 2313#ifdef illumos 2314/*ARGSUSED*/ 2315static int 2316zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx) 2317{ 2318 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 2319 2320 if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP)) 2321 return (1); 2322 return (0); 2323} 2324 2325/*ARGSUSED*/ 2326static void 2327zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx) 2328{ 2329 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 2330 2331 spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx); 2332} 2333 2334static int 2335zvol_dump_init(zvol_state_t *zv, boolean_t resize) 2336{ 2337 dmu_tx_t *tx; 2338 int error; 2339 objset_t *os = zv->zv_objset; 2340 spa_t *spa = dmu_objset_spa(os); 2341 vdev_t *vd = spa->spa_root_vdev; 2342 nvlist_t *nv = NULL; 2343 uint64_t version = spa_version(spa); 2344 uint64_t checksum, compress, refresrv, vbs, dedup; 2345 2346 ASSERT(MUTEX_HELD(&zfsdev_state_lock)); 2347 ASSERT(vd->vdev_ops == &vdev_root_ops); 2348 2349 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0, 2350 DMU_OBJECT_END); 2351 if (error != 0) 2352 return (error); 2353 /* wait for dmu_free_long_range to actually free the blocks */ 2354 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0); 2355 2356 /* 2357 * If the pool on which the dump device is being initialized has more 2358 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is 2359 * enabled. If so, bump that feature's counter to indicate that the 2360 * feature is active. We also check the vdev type to handle the 2361 * following case: 2362 * # zpool create test raidz disk1 disk2 disk3 2363 * Now have spa_root_vdev->vdev_children == 1 (the raidz vdev), 2364 * the raidz vdev itself has 3 children. 
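	 * A single raidz top-level vdev still spreads the dump across
	 * several leaf vdevs, so it needs MULTI_VDEV_CRASH_DUMP support
	 * just like a pool with more than one top-level vdev.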
2365 */ 2366 if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) { 2367 if (!spa_feature_is_enabled(spa, 2368 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP)) 2369 return (SET_ERROR(ENOTSUP)); 2370 (void) dsl_sync_task(spa_name(spa), 2371 zfs_mvdev_dump_feature_check, 2372 zfs_mvdev_dump_activate_feature_sync, NULL, 2373 2, ZFS_SPACE_CHECK_RESERVED); 2374 } 2375 2376 if (!resize) { 2377 error = dsl_prop_get_integer(zv->zv_name, 2378 zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL); 2379 if (error == 0) { 2380 error = dsl_prop_get_integer(zv->zv_name, 2381 zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, 2382 NULL); 2383 } 2384 if (error == 0) { 2385 error = dsl_prop_get_integer(zv->zv_name, 2386 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 2387 &refresrv, NULL); 2388 } 2389 if (error == 0) { 2390 error = dsl_prop_get_integer(zv->zv_name, 2391 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, 2392 NULL); 2393 } 2394 if (version >= SPA_VERSION_DEDUP && error == 0) { 2395 error = dsl_prop_get_integer(zv->zv_name, 2396 zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL); 2397 } 2398 } 2399 if (error != 0) 2400 return (error); 2401 2402 tx = dmu_tx_create(os); 2403 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); 2404 dmu_tx_hold_bonus(tx, ZVOL_OBJ); 2405 error = dmu_tx_assign(tx, TXG_WAIT); 2406 if (error != 0) { 2407 dmu_tx_abort(tx); 2408 return (error); 2409 } 2410 2411 /* 2412 * If we are resizing the dump device then we only need to 2413 * update the refreservation to match the newly updated 2414 * zvolsize. Otherwise, we save off the original state of the 2415 * zvol so that we can restore them if the zvol is ever undumpified. 2416 */ 2417 if (resize) { 2418 error = zap_update(os, ZVOL_ZAP_OBJ, 2419 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, 2420 &zv->zv_volsize, tx); 2421 } else { 2422 error = zap_update(os, ZVOL_ZAP_OBJ, 2423 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, 2424 &compress, tx); 2425 if (error == 0) { 2426 error = zap_update(os, ZVOL_ZAP_OBJ, 2427 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, 2428 &checksum, tx); 2429 } 2430 if (error == 0) { 2431 error = zap_update(os, ZVOL_ZAP_OBJ, 2432 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, 2433 &refresrv, tx); 2434 } 2435 if (error == 0) { 2436 error = zap_update(os, ZVOL_ZAP_OBJ, 2437 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, 2438 &vbs, tx); 2439 } 2440 if (error == 0) { 2441 error = dmu_object_set_blocksize( 2442 os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx); 2443 } 2444 if (version >= SPA_VERSION_DEDUP && error == 0) { 2445 error = zap_update(os, ZVOL_ZAP_OBJ, 2446 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, 2447 &dedup, tx); 2448 } 2449 if (error == 0) 2450 zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE; 2451 } 2452 dmu_tx_commit(tx); 2453 2454 /* 2455 * We only need update the zvol's property if we are initializing 2456 * the dump area for the first time. 2457 */ 2458 if (error == 0 && !resize) { 2459 /* 2460 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum 2461 * function. Otherwise, use the old default -- OFF. 2462 */ 2463 checksum = spa_feature_is_active(spa, 2464 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? 
ZIO_CHECKSUM_NOPARITY : 2465 ZIO_CHECKSUM_OFF; 2466 2467 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2468 VERIFY(nvlist_add_uint64(nv, 2469 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0); 2470 VERIFY(nvlist_add_uint64(nv, 2471 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 2472 ZIO_COMPRESS_OFF) == 0); 2473 VERIFY(nvlist_add_uint64(nv, 2474 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 2475 checksum) == 0); 2476 if (version >= SPA_VERSION_DEDUP) { 2477 VERIFY(nvlist_add_uint64(nv, 2478 zfs_prop_to_name(ZFS_PROP_DEDUP), 2479 ZIO_CHECKSUM_OFF) == 0); 2480 } 2481 2482 error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL, 2483 nv, NULL); 2484 nvlist_free(nv); 2485 } 2486 2487 /* Allocate the space for the dump */ 2488 if (error == 0) 2489 error = zvol_prealloc(zv); 2490 return (error); 2491} 2492 2493static int 2494zvol_dumpify(zvol_state_t *zv) 2495{ 2496 int error = 0; 2497 uint64_t dumpsize = 0; 2498 dmu_tx_t *tx; 2499 objset_t *os = zv->zv_objset; 2500 2501 if (zv->zv_flags & ZVOL_RDONLY) 2502 return (SET_ERROR(EROFS)); 2503 2504 if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 2505 8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) { 2506 boolean_t resize = (dumpsize > 0); 2507 2508 if ((error = zvol_dump_init(zv, resize)) != 0) { 2509 (void) zvol_dump_fini(zv); 2510 return (error); 2511 } 2512 } 2513 2514 /* 2515 * Build up our lba mapping. 2516 */ 2517 error = zvol_get_lbas(zv); 2518 if (error) { 2519 (void) zvol_dump_fini(zv); 2520 return (error); 2521 } 2522 2523 tx = dmu_tx_create(os); 2524 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); 2525 error = dmu_tx_assign(tx, TXG_WAIT); 2526 if (error) { 2527 dmu_tx_abort(tx); 2528 (void) zvol_dump_fini(zv); 2529 return (error); 2530 } 2531 2532 zv->zv_flags |= ZVOL_DUMPIFIED; 2533 error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1, 2534 &zv->zv_volsize, tx); 2535 dmu_tx_commit(tx); 2536 2537 if (error) { 2538 (void) zvol_dump_fini(zv); 2539 return (error); 2540 } 2541 2542 txg_wait_synced(dmu_objset_pool(os), 0); 2543 return (0); 2544} 2545 2546static int 2547zvol_dump_fini(zvol_state_t *zv) 2548{ 2549 dmu_tx_t *tx; 2550 objset_t *os = zv->zv_objset; 2551 nvlist_t *nv; 2552 int error = 0; 2553 uint64_t checksum, compress, refresrv, vbs, dedup; 2554 uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset)); 2555 2556 /* 2557 * Attempt to restore the zvol back to its pre-dumpified state. 2558 * This is a best-effort attempt as it's possible that not all 2559 * of these properties were initialized during the dumpify process 2560 * (i.e. error during zvol_dump_init). 
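	 * The pre-dumpify values saved in ZVOL_ZAP_OBJ by zvol_dump_init()
	 * are read back below and reapplied with zfs_set_prop_nvlist();
	 * the zap_lookup() errors are deliberately ignored.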
2561 */ 2562 2563 tx = dmu_tx_create(os); 2564 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); 2565 error = dmu_tx_assign(tx, TXG_WAIT); 2566 if (error) { 2567 dmu_tx_abort(tx); 2568 return (error); 2569 } 2570 (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx); 2571 dmu_tx_commit(tx); 2572 2573 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2574 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum); 2575 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2576 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress); 2577 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2578 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv); 2579 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2580 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs); 2581 2582 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2583 (void) nvlist_add_uint64(nv, 2584 zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum); 2585 (void) nvlist_add_uint64(nv, 2586 zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress); 2587 (void) nvlist_add_uint64(nv, 2588 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv); 2589 if (version >= SPA_VERSION_DEDUP && 2590 zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2591 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) { 2592 (void) nvlist_add_uint64(nv, 2593 zfs_prop_to_name(ZFS_PROP_DEDUP), dedup); 2594 } 2595 (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL, 2596 nv, NULL); 2597 nvlist_free(nv); 2598 2599 zvol_free_extents(zv); 2600 zv->zv_flags &= ~ZVOL_DUMPIFIED; 2601 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END); 2602 /* wait for dmu_free_long_range to actually free the blocks */ 2603 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0); 2604 tx = dmu_tx_create(os); 2605 dmu_tx_hold_bonus(tx, ZVOL_OBJ); 2606 error = dmu_tx_assign(tx, TXG_WAIT); 2607 if (error) { 2608 dmu_tx_abort(tx); 2609 return (error); 2610 } 2611 if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0) 2612 zv->zv_volblocksize = vbs; 2613 dmu_tx_commit(tx); 2614 2615 return (0); 2616} 2617#else /* !illumos */ 2618 2619static void 2620zvol_geom_run(zvol_state_t *zv) 2621{ 2622 struct g_provider *pp; 2623 2624 pp = zv->zv_provider; 2625 g_error_provider(pp, 0); 2626 2627 kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0, 2628 "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER)); 2629} 2630 2631static void 2632zvol_geom_destroy(zvol_state_t *zv) 2633{ 2634 struct g_provider *pp; 2635 2636 g_topology_assert(); 2637 2638 mtx_lock(&zv->zv_queue_mtx); 2639 zv->zv_state = 1; 2640 wakeup_one(&zv->zv_queue); 2641 while (zv->zv_state != 2) 2642 msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0); 2643 mtx_destroy(&zv->zv_queue_mtx); 2644 2645 pp = zv->zv_provider; 2646 zv->zv_provider = NULL; 2647 pp->private = NULL; 2648 g_wither_geom(pp->geom, ENXIO); 2649} 2650 2651static int 2652zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace) 2653{ 2654 int count, error, flags; 2655 2656 g_topology_assert(); 2657 2658 /* 2659 * To make it easier we expect either open or close, but not both 2660 * at the same time. 
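	 * GEOM hands us deltas for the read, write and exclusive reference
	 * counts, so a mixed request would carry both positive and negative
	 * deltas; the KASSERT below rejects that combination.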
2661 */ 2662 KASSERT((acr >= 0 && acw >= 0 && ace >= 0) || 2663 (acr <= 0 && acw <= 0 && ace <= 0), 2664 ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).", 2665 pp->name, acr, acw, ace)); 2666 2667 if (pp->private == NULL) { 2668 if (acr <= 0 && acw <= 0 && ace <= 0) 2669 return (0); 2670 return (pp->error); 2671 } 2672 2673 /* 2674 * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0, 2675 * because GEOM already handles that and handles it a bit differently. 2676 * GEOM allows for multiple read/exclusive consumers and ZFS allows 2677 * only one exclusive consumer, no matter if it is reader or writer. 2678 * I like better the way GEOM works so I'll leave it for GEOM to 2679 * decide what to do. 2680 */ 2681 2682 count = acr + acw + ace; 2683 if (count == 0) 2684 return (0); 2685 2686 flags = 0; 2687 if (acr != 0 || ace != 0) 2688 flags |= FREAD; 2689 if (acw != 0) 2690 flags |= FWRITE; 2691 2692 g_topology_unlock(); 2693 if (count > 0) 2694 error = zvol_open(pp, flags, count); 2695 else 2696 error = zvol_close(pp, flags, -count); 2697 g_topology_lock(); 2698 return (error); 2699} 2700 2701static void 2702zvol_geom_start(struct bio *bp) 2703{ 2704 zvol_state_t *zv; 2705 boolean_t first; 2706 2707 zv = bp->bio_to->private; 2708 ASSERT(zv != NULL); 2709 switch (bp->bio_cmd) { 2710 case BIO_FLUSH: 2711 if (!THREAD_CAN_SLEEP()) 2712 goto enqueue; 2713 zil_commit(zv->zv_zilog, ZVOL_OBJ); 2714 g_io_deliver(bp, 0); 2715 break; 2716 case BIO_READ: 2717 case BIO_WRITE: 2718 case BIO_DELETE: 2719 if (!THREAD_CAN_SLEEP()) 2720 goto enqueue; 2721 zvol_strategy(bp); 2722 break; 2723 case BIO_GETATTR: { 2724 spa_t *spa = dmu_objset_spa(zv->zv_objset); 2725 uint64_t refd, avail, usedobjs, availobjs, val; 2726 2727 if (g_handleattr_int(bp, "GEOM::candelete", 1)) 2728 return; 2729 if (strcmp(bp->bio_attribute, "blocksavail") == 0) { 2730 dmu_objset_space(zv->zv_objset, &refd, &avail, 2731 &usedobjs, &availobjs); 2732 if (g_handleattr_off_t(bp, "blocksavail", 2733 avail / DEV_BSIZE)) 2734 return; 2735 } else if (strcmp(bp->bio_attribute, "blocksused") == 0) { 2736 dmu_objset_space(zv->zv_objset, &refd, &avail, 2737 &usedobjs, &availobjs); 2738 if (g_handleattr_off_t(bp, "blocksused", 2739 refd / DEV_BSIZE)) 2740 return; 2741 } else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) { 2742 avail = metaslab_class_get_space(spa_normal_class(spa)); 2743 avail -= metaslab_class_get_alloc(spa_normal_class(spa)); 2744 if (g_handleattr_off_t(bp, "poolblocksavail", 2745 avail / DEV_BSIZE)) 2746 return; 2747 } else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) { 2748 refd = metaslab_class_get_alloc(spa_normal_class(spa)); 2749 if (g_handleattr_off_t(bp, "poolblocksused", 2750 refd / DEV_BSIZE)) 2751 return; 2752 } 2753 /* FALLTHROUGH */ 2754 } 2755 default: 2756 g_io_deliver(bp, EOPNOTSUPP); 2757 break; 2758 } 2759 return; 2760 2761enqueue: 2762 mtx_lock(&zv->zv_queue_mtx); 2763 first = (bioq_first(&zv->zv_queue) == NULL); 2764 bioq_insert_tail(&zv->zv_queue, bp); 2765 mtx_unlock(&zv->zv_queue_mtx); 2766 if (first) 2767 wakeup_one(&zv->zv_queue); 2768} 2769 2770static void 2771zvol_geom_worker(void *arg) 2772{ 2773 zvol_state_t *zv; 2774 struct bio *bp; 2775 2776 thread_lock(curthread); 2777 sched_prio(curthread, PRIBIO); 2778 thread_unlock(curthread); 2779 2780 zv = arg; 2781 for (;;) { 2782 mtx_lock(&zv->zv_queue_mtx); 2783 bp = bioq_takefirst(&zv->zv_queue); 2784 if (bp == NULL) { 2785 if (zv->zv_state == 1) { 2786 zv->zv_state = 2; 2787 wakeup(&zv->zv_state); 2788 
mtx_unlock(&zv->zv_queue_mtx); 2789 kthread_exit(); 2790 } 2791 msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP, 2792 "zvol:io", 0); 2793 continue; 2794 } 2795 mtx_unlock(&zv->zv_queue_mtx); 2796 switch (bp->bio_cmd) { 2797 case BIO_FLUSH: 2798 zil_commit(zv->zv_zilog, ZVOL_OBJ); 2799 g_io_deliver(bp, 0); 2800 break; 2801 case BIO_READ: 2802 case BIO_WRITE: 2803 case BIO_DELETE: 2804 zvol_strategy(bp); 2805 break; 2806 default: 2807 g_io_deliver(bp, EOPNOTSUPP); 2808 break; 2809 } 2810 } 2811} 2812 2813extern boolean_t dataset_name_hidden(const char *name); 2814 2815static int 2816zvol_create_snapshots(objset_t *os, const char *name) 2817{ 2818 uint64_t cookie, obj; 2819 char *sname; 2820 int error, len; 2821 2822 cookie = obj = 0; 2823 sname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2824 2825#if 0 2826 (void) dmu_objset_find(name, dmu_objset_prefetch, NULL, 2827 DS_FIND_SNAPSHOTS); 2828#endif 2829 2830 for (;;) { 2831 len = snprintf(sname, MAXPATHLEN, "%s@", name); 2832 if (len >= MAXPATHLEN) { 2833 dmu_objset_rele(os, FTAG); 2834 error = ENAMETOOLONG; 2835 break; 2836 } 2837 2838 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 2839 error = dmu_snapshot_list_next(os, MAXPATHLEN - len, 2840 sname + len, &obj, &cookie, NULL); 2841 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 2842 if (error != 0) { 2843 if (error == ENOENT) 2844 error = 0; 2845 break; 2846 } 2847 2848 error = zvol_create_minor(sname); 2849 if (error != 0 && error != EEXIST) { 2850 printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n", 2851 sname, error); 2852 break; 2853 } 2854 } 2855 2856 kmem_free(sname, MAXPATHLEN); 2857 return (error); 2858} 2859 2860int 2861zvol_create_minors(const char *name) 2862{ 2863 uint64_t cookie; 2864 objset_t *os; 2865 char *osname, *p; 2866 int error, len; 2867 2868 if (dataset_name_hidden(name)) 2869 return (0); 2870 2871 if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) { 2872 printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n", 2873 name, error); 2874 return (error); 2875 } 2876 if (dmu_objset_type(os) == DMU_OST_ZVOL) { 2877 dsl_dataset_long_hold(os->os_dsl_dataset, FTAG); 2878 dsl_pool_rele(dmu_objset_pool(os), FTAG); 2879 error = zvol_create_minor(name); 2880 if (error == 0 || error == EEXIST) { 2881 error = zvol_create_snapshots(os, name); 2882 } else { 2883 printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n", 2884 name, error); 2885 } 2886 dsl_dataset_long_rele(os->os_dsl_dataset, FTAG); 2887 dsl_dataset_rele(os->os_dsl_dataset, FTAG); 2888 return (error); 2889 } 2890 if (dmu_objset_type(os) != DMU_OST_ZFS) { 2891 dmu_objset_rele(os, FTAG); 2892 return (0); 2893 } 2894 2895 osname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2896 if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) { 2897 dmu_objset_rele(os, FTAG); 2898 kmem_free(osname, MAXPATHLEN); 2899 return (ENOENT); 2900 } 2901 p = osname + strlen(osname); 2902 len = MAXPATHLEN - (p - osname); 2903 2904#if 0 2905 /* Prefetch the datasets. 
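	 * (Compiled out: this would walk the child datasets with
	 * dmu_dir_list_next() and call dmu_objset_prefetch() on those
	 * that are not hidden.)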
*/ 2906 cookie = 0; 2907 while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) { 2908 if (!dataset_name_hidden(osname)) 2909 (void) dmu_objset_prefetch(osname, NULL); 2910 } 2911#endif 2912 2913 cookie = 0; 2914 while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL, 2915 &cookie) == 0) { 2916 dmu_objset_rele(os, FTAG); 2917 (void)zvol_create_minors(osname); 2918 if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) { 2919 printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n", 2920 name, error); 2921 return (error); 2922 } 2923 } 2924 2925 dmu_objset_rele(os, FTAG); 2926 kmem_free(osname, MAXPATHLEN); 2927 return (0); 2928} 2929 2930static void 2931zvol_rename_minor(zvol_state_t *zv, const char *newname) 2932{ 2933 struct g_geom *gp; 2934 struct g_provider *pp; 2935 struct cdev *dev; 2936 2937 ASSERT(MUTEX_HELD(&zfsdev_state_lock)); 2938 2939 if (zv->zv_volmode == ZFS_VOLMODE_GEOM) { 2940 g_topology_lock(); 2941 pp = zv->zv_provider; 2942 ASSERT(pp != NULL); 2943 gp = pp->geom; 2944 ASSERT(gp != NULL); 2945 2946 zv->zv_provider = NULL; 2947 g_wither_provider(pp, ENXIO); 2948 2949 pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname); 2950 pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND; 2951 pp->sectorsize = DEV_BSIZE; 2952 pp->mediasize = zv->zv_volsize; 2953 pp->private = zv; 2954 zv->zv_provider = pp; 2955 g_error_provider(pp, 0); 2956 g_topology_unlock(); 2957 } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) { 2958 struct make_dev_args args; 2959 2960 if ((dev = zv->zv_dev) != NULL) { 2961 zv->zv_dev = NULL; 2962 destroy_dev(dev); 2963 if (zv->zv_total_opens > 0) { 2964 zv->zv_flags &= ~ZVOL_EXCL; 2965 zv->zv_total_opens = 0; 2966 zvol_last_close(zv); 2967 } 2968 } 2969 2970 make_dev_args_init(&args); 2971 args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK; 2972 args.mda_devsw = &zvol_cdevsw; 2973 args.mda_cr = NULL; 2974 args.mda_uid = UID_ROOT; 2975 args.mda_gid = GID_OPERATOR; 2976 args.mda_mode = 0640; 2977 args.mda_si_drv2 = zv; 2978 if (make_dev_s(&args, &zv->zv_dev, 2979 "%s/%s", ZVOL_DRIVER, newname) == 0) 2980 zv->zv_dev->si_iosize_max = MAXPHYS; 2981 } 2982 strlcpy(zv->zv_name, newname, sizeof(zv->zv_name)); 2983} 2984 2985void 2986zvol_rename_minors(const char *oldname, const char *newname) 2987{ 2988 char name[MAXPATHLEN]; 2989 struct g_provider *pp; 2990 struct g_geom *gp; 2991 size_t oldnamelen, newnamelen; 2992 zvol_state_t *zv; 2993 char *namebuf; 2994 boolean_t locked = B_FALSE; 2995 2996 oldnamelen = strlen(oldname); 2997 newnamelen = strlen(newname); 2998 2999 DROP_GIANT(); 3000 /* See comment in zvol_open(). 
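	 * zfsdev_state_lock may already be held by our caller, so only
	 * take it here when it is not, and remember whether we did.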
*/ 3001 if (!MUTEX_HELD(&zfsdev_state_lock)) { 3002 mutex_enter(&zfsdev_state_lock); 3003 locked = B_TRUE; 3004 } 3005 3006 LIST_FOREACH(zv, &all_zvols, zv_links) { 3007 if (strcmp(zv->zv_name, oldname) == 0) { 3008 zvol_rename_minor(zv, newname); 3009 } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 && 3010 (zv->zv_name[oldnamelen] == '/' || 3011 zv->zv_name[oldnamelen] == '@')) { 3012 snprintf(name, sizeof(name), "%s%c%s", newname, 3013 zv->zv_name[oldnamelen], 3014 zv->zv_name + oldnamelen + 1); 3015 zvol_rename_minor(zv, name); 3016 } 3017 } 3018 3019 if (locked) 3020 mutex_exit(&zfsdev_state_lock); 3021 PICKUP_GIANT(); 3022} 3023 3024static int 3025zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td) 3026{ 3027 zvol_state_t *zv = dev->si_drv2; 3028 int err = 0; 3029 3030 mutex_enter(&zfsdev_state_lock); 3031 if (zv->zv_total_opens == 0) 3032 err = zvol_first_open(zv); 3033 if (err) { 3034 mutex_exit(&zfsdev_state_lock); 3035 return (err); 3036 } 3037 if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) { 3038 err = SET_ERROR(EROFS); 3039 goto out; 3040 } 3041 if (zv->zv_flags & ZVOL_EXCL) { 3042 err = SET_ERROR(EBUSY); 3043 goto out; 3044 } 3045#ifdef FEXCL 3046 if (flags & FEXCL) { 3047 if (zv->zv_total_opens != 0) { 3048 err = SET_ERROR(EBUSY); 3049 goto out; 3050 } 3051 zv->zv_flags |= ZVOL_EXCL; 3052 } 3053#endif 3054 3055 zv->zv_total_opens++; 3056 if (flags & (FSYNC | FDSYNC)) { 3057 zv->zv_sync_cnt++; 3058 if (zv->zv_sync_cnt == 1) 3059 zil_async_to_sync(zv->zv_zilog, ZVOL_OBJ); 3060 } 3061 mutex_exit(&zfsdev_state_lock); 3062 return (err); 3063out: 3064 if (zv->zv_total_opens == 0) 3065 zvol_last_close(zv); 3066 mutex_exit(&zfsdev_state_lock); 3067 return (err); 3068} 3069 3070static int 3071zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td) 3072{ 3073 zvol_state_t *zv = dev->si_drv2; 3074 3075 mutex_enter(&zfsdev_state_lock); 3076 if (zv->zv_flags & ZVOL_EXCL) { 3077 ASSERT(zv->zv_total_opens == 1); 3078 zv->zv_flags &= ~ZVOL_EXCL; 3079 } 3080 3081 /* 3082 * If the open count is zero, this is a spurious close. 3083 * That indicates a bug in the kernel / DDI framework. 3084 */ 3085 ASSERT(zv->zv_total_opens != 0); 3086 3087 /* 3088 * You may get multiple opens, but only one close. 
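	 * Drop the open count (and the sync count for FSYNC/FDSYNC opens);
	 * the last close releases the underlying state via zvol_last_close().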
3089 */ 3090 zv->zv_total_opens--; 3091 if (flags & (FSYNC | FDSYNC)) 3092 zv->zv_sync_cnt--; 3093 3094 if (zv->zv_total_opens == 0) 3095 zvol_last_close(zv); 3096 3097 mutex_exit(&zfsdev_state_lock); 3098 return (0); 3099} 3100 3101static int 3102zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) 3103{ 3104 zvol_state_t *zv; 3105 rl_t *rl; 3106 off_t offset, length; 3107 int i, error; 3108 boolean_t sync; 3109 3110 zv = dev->si_drv2; 3111 3112 error = 0; 3113 KASSERT(zv->zv_total_opens > 0, 3114 ("Device with zero access count in zvol_d_ioctl")); 3115 3116 i = IOCPARM_LEN(cmd); 3117 switch (cmd) { 3118 case DIOCGSECTORSIZE: 3119 *(u_int *)data = DEV_BSIZE; 3120 break; 3121 case DIOCGMEDIASIZE: 3122 *(off_t *)data = zv->zv_volsize; 3123 break; 3124 case DIOCGFLUSH: 3125 zil_commit(zv->zv_zilog, ZVOL_OBJ); 3126 break; 3127 case DIOCGDELETE: 3128 if (!zvol_unmap_enabled) 3129 break; 3130 3131 offset = ((off_t *)data)[0]; 3132 length = ((off_t *)data)[1]; 3133 if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 || 3134 offset < 0 || offset >= zv->zv_volsize || 3135 length <= 0) { 3136 printf("%s: offset=%jd length=%jd\n", __func__, offset, 3137 length); 3138 error = EINVAL; 3139 break; 3140 } 3141 3142 rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER); 3143 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); 3144 error = dmu_tx_assign(tx, TXG_WAIT); 3145 if (error != 0) { 3146 sync = FALSE; 3147 dmu_tx_abort(tx); 3148 } else { 3149 sync = (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS); 3150 zvol_log_truncate(zv, tx, offset, length, sync); 3151 dmu_tx_commit(tx); 3152 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 3153 offset, length); 3154 } 3155 zfs_range_unlock(rl); 3156 if (sync) 3157 zil_commit(zv->zv_zilog, ZVOL_OBJ); 3158 break; 3159 case DIOCGSTRIPESIZE: 3160 *(off_t *)data = zv->zv_volblocksize; 3161 break; 3162 case DIOCGSTRIPEOFFSET: 3163 *(off_t *)data = 0; 3164 break; 3165 case DIOCGATTR: { 3166 spa_t *spa = dmu_objset_spa(zv->zv_objset); 3167 struct diocgattr_arg *arg = (struct diocgattr_arg *)data; 3168 uint64_t refd, avail, usedobjs, availobjs; 3169 3170 if (strcmp(arg->name, "GEOM::candelete") == 0) 3171 arg->value.i = 1; 3172 else if (strcmp(arg->name, "blocksavail") == 0) { 3173 dmu_objset_space(zv->zv_objset, &refd, &avail, 3174 &usedobjs, &availobjs); 3175 arg->value.off = avail / DEV_BSIZE; 3176 } else if (strcmp(arg->name, "blocksused") == 0) { 3177 dmu_objset_space(zv->zv_objset, &refd, &avail, 3178 &usedobjs, &availobjs); 3179 arg->value.off = refd / DEV_BSIZE; 3180 } else if (strcmp(arg->name, "poolblocksavail") == 0) { 3181 avail = metaslab_class_get_space(spa_normal_class(spa)); 3182 avail -= metaslab_class_get_alloc(spa_normal_class(spa)); 3183 arg->value.off = avail / DEV_BSIZE; 3184 } else if (strcmp(arg->name, "poolblocksused") == 0) { 3185 refd = metaslab_class_get_alloc(spa_normal_class(spa)); 3186 arg->value.off = refd / DEV_BSIZE; 3187 } else 3188 error = ENOIOCTL; 3189 break; 3190 } 3191 case FIOSEEKHOLE: 3192 case FIOSEEKDATA: { 3193 off_t *off = (off_t *)data; 3194 uint64_t noff; 3195 boolean_t hole; 3196 3197 hole = (cmd == FIOSEEKHOLE); 3198 noff = *off; 3199 error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff); 3200 *off = noff; 3201 break; 3202 } 3203 default: 3204 error = ENOIOCTL; 3205 } 3206 3207 return (error); 3208} 3209#endif /* illumos */ 3210