/* zfs_ctldir.c revision 302724 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
 */

/*
 * ZFS control directory (a.k.a. ".zfs")
 *
 * This directory provides a common location for all ZFS meta-objects.
 * Currently, this is only the 'snapshot' directory, but this may expand in the
 * future.  The elements are built using the GFS primitives, as the hierarchy
 * does not actually exist on disk.
 *
 * For 'snapshot', we don't want to have all snapshots always mounted, because
 * this would take up a huge amount of space in /etc/mnttab.  We have three
 * types of objects:
 *
 *	ctldir ------> snapshotdir -------> snapshot
 *                                             |
 *                                             |
 *                                             V
 *                                         mounted fs
 *
 * The 'snapshot' node contains just enough information to lookup '..' and act
 * as a mountpoint for the snapshot.  Whenever we lookup a specific snapshot, we
 * perform an automount of the underlying filesystem and return the
 * corresponding vnode.
 *
 * All mounts are handled automatically by the kernel, but unmounts are
 * (currently) handled from user land.  The main reason is that there is no
 * reliable way to auto-unmount the filesystem when it's "no longer in use".
 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
 * unmounts any snapshots within the snapshot directory.
 *
 * The '.zfs', '.zfs/snapshot', and all directories created under
 * '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
 *
 * File systems mounted ontop of the GFS nodes '.zfs/snapshot/<snapname>'
 * (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
 * However, vnodes within these mounted on file systems have their v_vfsp
 * fields set to the head filesystem to make NFS happy (see
 * zfsctl_snapdir_lookup()).  We VFS_HOLD the head filesystem's vfs_t
 * so that it cannot be freed until all snapshots have been unmounted.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/namei.h>
#include <sys/gfs.h>
#include <sys/stat.h>
#include <sys/dmu.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>
#include <sys/sunddi.h>

#include "zfs_namecheck.h"

/*
 * Private data for every GFS node under '.zfs'.  zc_id doubles as the
 * node's inode number (see the ".zfs inode namespace" comment below).
 */
typedef struct zfsctl_node {
	gfs_dir_t	zc_gfs_private;	/* must be first: GFS directory state */
	uint64_t	zc_id;		/* object/inode id of this node */
	timestruc_t	zc_cmtime;	/* ctime and mtime, always the same */
} zfsctl_node_t;

/*
 * Private data for the '.zfs/snapshot' directory: a plain zfsctl_node_t
 * plus an AVL tree of currently known (mounted or being-mounted) snapshots,
 * protected by sd_lock.
 */
typedef struct zfsctl_snapdir {
	zfsctl_node_t	sd_node;	/* must be first */
	kmutex_t	sd_lock;	/* protects sd_snaps */
	avl_tree_t	sd_snaps;	/* zfs_snapentry_t, ordered by name */
} zfsctl_snapdir_t;

/* One entry in sd_snaps: a snapshot name and its mountpoint vnode. */
typedef struct {
	char		*se_name;	/* snapshot component name (owned) */
	vnode_t		*se_root;	/* GFS vnode the snapshot mounts on */
	avl_node_t	se_node;	/* linkage in sd_snaps */
} zfs_snapentry_t;

/*
 * AVL comparator for sd_snaps: order entries by snapshot name.  The strcmp()
 * result is clamped to {-1, 0, 1} as required by the AVL interface.
 */
static int
snapentry_compare(const void *a, const void *b)
{
	const zfs_snapentry_t *sa = a;
	const zfs_snapentry_t *sb = b;
	int ret = strcmp(sa->se_name, sb->se_name);

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

#ifdef illumos
vnodeops_t *zfsctl_ops_root;
vnodeops_t *zfsctl_ops_snapdir;
vnodeops_t *zfsctl_ops_snapshot;
vnodeops_t *zfsctl_ops_shares;
vnodeops_t *zfsctl_ops_shares_dir;

static const fs_operation_def_t zfsctl_tops_root[];
static const fs_operation_def_t zfsctl_tops_snapdir[];
static const fs_operation_def_t zfsctl_tops_snapshot[];
static const fs_operation_def_t zfsctl_tops_shares[];
#else
/* FreeBSD vop vectors for each flavor of .zfs node. */
static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;
static struct vop_vector zfsctl_ops_shares;
static struct vop_vector zfsctl_ops_shares_dir;
#endif

static vnode_t *zfsctl_mknode_snapdir(vnode_t *);
static vnode_t *zfsctl_mknode_shares(vnode_t *);
static vnode_t *zfsctl_snapshot_mknode(vnode_t *, uint64_t objset);
static int zfsctl_unmount_snap(zfs_snapentry_t *, int, cred_t *);

#ifdef illumos
static gfs_opsvec_t zfsctl_opsvec[] = {
	{ ".zfs", zfsctl_tops_root, &zfsctl_ops_root },
	{ ".zfs/snapshot", zfsctl_tops_snapdir, &zfsctl_ops_snapdir },
	{ ".zfs/snapshot/vnode", zfsctl_tops_snapshot, &zfsctl_ops_snapshot },
	{ ".zfs/shares", zfsctl_tops_shares, &zfsctl_ops_shares_dir },
	{ ".zfs/shares/vnode", zfsctl_tops_shares, &zfsctl_ops_shares },
	{ NULL }
};
#endif

/*
 * Root directory elements.  We only have two entries
 * snapshot and shares.
 */
static gfs_dirent_t zfsctl_root_entries[] = {
	{ "snapshot", zfsctl_mknode_snapdir, GFS_CACHE_VNODE },
	{ "shares", zfsctl_mknode_shares, GFS_CACHE_VNODE },
	{ NULL }
};

/* include . and .. in the calculation */
#define	NROOT_ENTRIES	((sizeof (zfsctl_root_entries) / \
    sizeof (gfs_dirent_t)) + 1)


/*
 * Initialize the various GFS pieces we'll need to create and manipulate .zfs
 * directories.  This is called from the ZFS init routine, and initializes the
 * vnode ops vectors that we'll be using.
 */
void
zfsctl_init(void)
{
#ifdef illumos
	VERIFY(gfs_make_opsvec(zfsctl_opsvec) == 0);
#endif
}

/*
 * Tear down the vnode ops vectors built by zfsctl_init().  On FreeBSD the
 * vop vectors are static, so there is nothing to do.
 */
void
zfsctl_fini(void)
{
#ifdef illumos
	/*
	 * Remove vfsctl vnode ops
	 */
	if (zfsctl_ops_root)
		vn_freevnodeops(zfsctl_ops_root);
	if (zfsctl_ops_snapdir)
		vn_freevnodeops(zfsctl_ops_snapdir);
	if (zfsctl_ops_snapshot)
		vn_freevnodeops(zfsctl_ops_snapshot);
	if (zfsctl_ops_shares)
		vn_freevnodeops(zfsctl_ops_shares);
	if (zfsctl_ops_shares_dir)
		vn_freevnodeops(zfsctl_ops_shares_dir);

	zfsctl_ops_root = NULL;
	zfsctl_ops_snapdir = NULL;
	zfsctl_ops_snapshot = NULL;
	zfsctl_ops_shares = NULL;
	zfsctl_ops_shares_dir = NULL;
#endif /* illumos */
}

/*
 * Return B_TRUE if the vnode is one of the pseudo-filesystem nodes managed
 * by this file (i.e. it uses one of the zfsctl vop vectors).
 */
boolean_t
zfsctl_is_node(vnode_t *vp)
{
	return (vn_matchops(vp, zfsctl_ops_root) ||
	    vn_matchops(vp, zfsctl_ops_snapdir) ||
	    vn_matchops(vp, zfsctl_ops_snapshot) ||
	    vn_matchops(vp, zfsctl_ops_shares) ||
	    vn_matchops(vp, zfsctl_ops_shares_dir));

}

/*
 * Return the inode number associated with the 'snapshot' or
 * 'shares' directory.
 */
/* ARGSUSED */
static ino64_t
zfsctl_root_inode_cb(vnode_t *vp, int index)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;

	/* Index corresponds to position in zfsctl_root_entries[]. */
	ASSERT(index < 2);

	if (index == 0)
		return (ZFSCTL_INO_SNAPDIR);

	return (zfsvfs->z_shares_dir);
}

/*
 * Create the '.zfs' directory.  This directory is cached as part of the VFS
 * structure.  This results in a hold on the vfs_t.  The code in zfs_umount()
 * therefore checks against a vfs_count of 2 instead of 1.  This reference
 * is removed when the ctldir is destroyed in the unmount.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	vnode_t *vp, *rvp;
	zfsctl_node_t *zcp;
	uint64_t crtime[2];

	ASSERT(zfsvfs->z_ctldir == NULL);

	vp = gfs_root_create(sizeof (zfsctl_node_t), zfsvfs->z_vfs,
	    &zfsctl_ops_root, ZFSCTL_INO_ROOT, zfsctl_root_entries,
	    zfsctl_root_inode_cb, MAXNAMELEN, NULL, NULL);
	zcp = vp->v_data;
	zcp->zc_id = ZFSCTL_INO_ROOT;

	/* Inherit c/mtime from the dataset root's creation time. */
	VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
	VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    &crtime, sizeof (crtime)));
	ZFS_TIME_DECODE(&zcp->zc_cmtime, crtime);
	VN_URELE(rvp);

	/*
	 * We're only faking the fact that we have a root of a filesystem for
	 * the sake of the GFS interfaces.  Undo the flag manipulation it did
	 * for us.
	 */
	vp->v_vflag &= ~VV_ROOT;

	zfsvfs->z_ctldir = vp;

	VOP_UNLOCK(vp, 0);
}

/*
 * Destroy the '.zfs' directory.  Only called when the filesystem is unmounted.
 * There might still be more references if we were force unmounted, but only
 * new zfs_inactive() calls can occur and they don't reference .zfs
 */
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
	VN_RELE(zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
}

/*
 * Given a root znode, retrieve the associated .zfs directory.
 * Add a hold to the vnode and return it.
 */
vnode_t *
zfsctl_root(znode_t *zp)
{
	ASSERT(zfs_has_ctldir(zp));
	VN_HOLD(zp->z_zfsvfs->z_ctldir);
	return (zp->z_zfsvfs->z_ctldir);
}

/*
 * Common open routine.  Disallow any write access.
 */
/* ARGSUSED */
static int
zfsctl_common_open(struct vop_open_args *ap)
{
	int flags = ap->a_mode;

	if (flags & FWRITE)
		return (SET_ERROR(EACCES));

	return (0);
}

/*
 * Common close routine.  Nothing to do here.
 */
/* ARGSUSED */
static int
zfsctl_common_close(struct vop_close_args *ap)
{
	return (0);
}

/*
 * Common access routine.  Disallow writes.
 */
/* ARGSUSED */
static int
zfsctl_common_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		accmode_t a_accmode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	accmode_t accmode = ap->a_accmode;

#ifdef TODO
	if (flags & V_ACE_MASK) {
		if (accmode & ACE_ALL_WRITE_PERMS)
			return (SET_ERROR(EACCES));
	} else {
#endif
	if (accmode & VWRITE)
		return (SET_ERROR(EACCES));
#ifdef TODO
	}
#endif

	return (0);
}

/*
 * Common getattr function.  Fill in basic information for a read-only,
 * purely virtual directory: mode r-xr-xr-x, owned by root, atime = now.
 * Callers fill in va_nodeid, va_nlink, va_size and the c/mtimes first.
 */
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
	timestruc_t now;

	vap->va_uid = 0;
	vap->va_gid = 0;
	vap->va_rdev = 0;
	/*
	 * We are a purely virtual object, so we have no
	 * blocksize or allocated blocks.
	 */
	vap->va_blksize = 0;
	vap->va_nblocks = 0;
	vap->va_seq = 0;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
	    S_IROTH | S_IXOTH;
	vap->va_type = VDIR;
	/*
	 * We live in the now (for atime).
	 */
	gethrestime(&now);
	vap->va_atime = now;
	/* FreeBSD: Reset chflags(2) flags. */
	vap->va_flags = 0;
}

/*
 * Common VOP_FID for .zfs nodes: a short-form zfid built from zc_id with a
 * generation number of 0.
 */
/*ARGSUSED*/
static int
zfsctl_common_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{
	vnode_t		*vp = ap->a_vp;
	fid_t		*fidp = (void *)ap->a_fid;
	zfsvfs_t	*zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t	*zcp = vp->v_data;
	uint64_t	object = zcp->zc_id;
	zfid_short_t	*zfid;
	int		i;

	ZFS_ENTER(zfsvfs);

#ifdef illumos
	if (fidp->fid_len < SHORT_FID_LEN) {
		fidp->fid_len = SHORT_FID_LEN;
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOSPC));
	}
#endif

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = SHORT_FID_LEN;

	/* Encode the object id little-endian, one byte at a time. */
	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* .zfs znodes always have a generation number of 0 */
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = 0;

	ZFS_EXIT(zfsvfs);
	return (0);
}

/*
 * VOP_FID for '.zfs/shares': delegate to the real shares znode's fid, since
 * that directory is backed by an on-disk object.
 */
/*ARGSUSED*/
static int
zfsctl_shares_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{
	vnode_t		*vp = ap->a_vp;
	fid_t		*fidp = (void *)ap->a_fid;
	zfsvfs_t	*zfsvfs = vp->v_vfsp->vfs_data;
	znode_t		*dzp;
	int		error;

	ZFS_ENTER(zfsvfs);

	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOTSUP));
	}

	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		error = VOP_FID(ZTOV(dzp), fidp);
		VN_RELE(ZTOV(dzp));
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Common VOP_RECLAIM: detach v_data so the generic vnode can be reused.
 * NOTE(review): v_data is not freed here; presumably the GFS layer owns
 * that memory — confirm against gfs_vop_reclaim().
 */
static int
zfsctl_common_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);
	VI_LOCK(vp);
	vp->v_data = NULL;
	VI_UNLOCK(vp);
	return (0);
}

/*
 * .zfs inode namespace
 *
 * We need to generate unique inode numbers for all files and directories
 * within the .zfs pseudo-filesystem.  We use the following scheme:
 *
 *	ENTRY			ZFSCTL_INODE
 *	.zfs			1
 *	.zfs/snapshot		2
 *	.zfs/snapshot/<snap>	objectid(snap)
 */

#define	ZFSCTL_INO_SNAP(id)	(id)

/*
 * Get root directory attributes.
 */
/* ARGSUSED */
static int
zfsctl_root_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t *zcp = vp->v_data;

	ZFS_ENTER(zfsvfs);
	vap->va_nodeid = ZFSCTL_INO_ROOT;
	vap->va_nlink = vap->va_size = NROOT_ENTRIES;
	vap->va_mtime = vap->va_ctime = zcp->zc_cmtime;
	vap->va_birthtime = vap->va_ctime;

	zfsctl_common_getattr(vp, vap);
	ZFS_EXIT(zfsvfs);

	return (0);
}

/*
 * Special case the handling of "..".
 */
/* ARGSUSED */
int
zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);

	if (strcmp(nm, "..") == 0) {
		/* '..' of .zfs is the filesystem root; return it unlocked. */
		err = VFS_ROOT(dvp->v_vfsp, LK_EXCLUSIVE, vpp);
		if (err == 0)
			VOP_UNLOCK(*vpp, 0);
	} else {
		err = gfs_vop_lookup(dvp, nm, vpp, pnp, flags, rdir,
		    cr, ct, direntflags, realpnp);
	}

	ZFS_EXIT(zfsvfs);

	return (err);
}

#ifdef illumos
static int
zfsctl_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
	/*
	 * We only care about ACL_ENABLED so that libsec can
	 * display ACL correctly and not default to POSIX draft.
	 */
	if (cmd == _PC_ACL_ENABLED) {
		*valp = _ACL_ACE_ENABLED;
		return (0);
	}

	return (fs_pathconf(vp, cmd, valp, cr, ct));
}
#endif	/* illumos */

#ifdef illumos
static const fs_operation_def_t zfsctl_tops_root[] = {
	{ VOPNAME_OPEN,		{ .vop_open = zfsctl_common_open }	},
	{ VOPNAME_CLOSE,	{ .vop_close = zfsctl_common_close }	},
	{ VOPNAME_IOCTL,	{ .error = fs_inval }			},
	{ VOPNAME_GETATTR,	{ .vop_getattr = zfsctl_root_getattr }	},
	{ VOPNAME_ACCESS,	{ .vop_access = zfsctl_common_access }	},
	{ VOPNAME_READDIR,	{ .vop_readdir = gfs_vop_readdir }	},
	{ VOPNAME_LOOKUP,	{ .vop_lookup = zfsctl_root_lookup }	},
	{ VOPNAME_SEEK,		{ .vop_seek = fs_seek }			},
	{ VOPNAME_INACTIVE,	{ .vop_inactive = gfs_vop_inactive }	},
	{ VOPNAME_PATHCONF,	{ .vop_pathconf = zfsctl_pathconf }	},
	{ VOPNAME_FID,		{ .vop_fid = zfsctl_common_fid }	},
	{ NULL }
};
#endif	/* illumos */

/*
 * Special case the handling of "..".
 */
/* ARGSUSED */
int
zfsctl_freebsd_root_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	cred_t *cr = ap->a_cnp->cn_cred;
	int flags = ap->a_cnp->cn_flags;
	int nameiop = ap->a_cnp->cn_nameiop;
	char nm[NAME_MAX + 1];
	int err;
	int ltype;

	/* .zfs is read-only: refuse create/rename of the last component. */
	if ((flags & ISLASTCN) && (nameiop == RENAME || nameiop == CREATE))
		return (EOPNOTSUPP);

	ASSERT(ap->a_cnp->cn_namelen < sizeof(nm));
	strlcpy(nm, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
	err = zfsctl_root_lookup(dvp, nm, vpp, NULL, 0, NULL, cr,
	    NULL, NULL, NULL);
	if (err == 0 && (nm[0] != '.' || nm[1] != '\0')) {
		/*
		 * Lock the result.  For '..' we must drop the parent's lock
		 * first to respect the child-before-parent lock order, then
		 * reacquire it at its original level.
		 */
		ltype = VOP_ISLOCKED(dvp);
		if (flags & ISDOTDOT) {
			VN_HOLD(*vpp);
			VOP_UNLOCK(dvp, 0);
		}
		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
		if (flags & ISDOTDOT) {
			VN_RELE(*vpp);
			vn_lock(dvp, ltype| LK_RETRY);
		}
	}

	return (err);
}

static struct vop_vector zfsctl_ops_root = {
	.vop_default =	&default_vnodeops,
	.vop_open =	zfsctl_common_open,
	.vop_close =	zfsctl_common_close,
	.vop_ioctl =	VOP_EINVAL,
	.vop_getattr =	zfsctl_root_getattr,
	.vop_access =	zfsctl_common_access,
	.vop_readdir =	gfs_vop_readdir,
	.vop_lookup =	zfsctl_freebsd_root_lookup,
	.vop_inactive =	VOP_NULL,
	.vop_reclaim =	gfs_vop_reclaim,
#ifdef TODO
	.vop_pathconf =	zfsctl_pathconf,
#endif
	.vop_fid =	zfsctl_common_fid,
};

/*
 * Gets the full dataset name that corresponds to the given snapshot name
 * Example:
 *	zfsctl_snapshot_zname("snap1") -> "mypool/myfs@snap1"
 */
static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;

	if (zfs_component_namecheck(name, NULL, NULL) != 0)
		return
		    (SET_ERROR(EILSEQ));
	dmu_objset_name(os, zname);
	/* Ensure "dataset" + "@" + "name" fits in the caller's buffer. */
	if (strlen(zname) + 1 + strlen(name) >= len)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strcat(zname, "@");
	(void) strcat(zname, name);
	return (0);
}

/*
 * Unmount the snapshot whose mountpoint vnode is sep->se_root.
 * Caller holds sd_lock.  On illumos this also frees the snap entry;
 * on FreeBSD dounmount() drives the cleanup asynchronously, so the
 * entry is left for the reclaim path to free.
 */
static int
zfsctl_unmount_snap(zfs_snapentry_t *sep, int fflags, cred_t *cr)
{
	vnode_t *svp = sep->se_root;
	int error;

	ASSERT(vn_ismntpt(svp));

	/* this will be dropped by dounmount() */
	if ((error = vn_vfswlock(svp)) != 0)
		return (error);

#ifdef illumos
	VN_HOLD(svp);
	error = dounmount(vn_mountedvfs(svp), fflags, cr);
	if (error) {
		VN_RELE(svp);
		return (error);
	}

	/*
	 * We can't use VN_RELE(), as that will try to invoke
	 * zfsctl_snapdir_inactive(), which would cause us to destroy
	 * the sd_lock mutex held by our caller.
	 */
	ASSERT(svp->v_count == 1);
	gfs_vop_reclaim(svp, cr, NULL);

	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	kmem_free(sep, sizeof (zfs_snapentry_t));

	return (0);
#else
	vfs_ref(vn_mountedvfs(svp));
	return (dounmount(vn_mountedvfs(svp), fflags, curthread));
#endif
}

#ifdef illumos
/*
 * Rename an in-memory snapshot entry and patch the mounted snapshot's
 * mountpoint/resource strings so they reflect the new name.
 * Caller holds sd_lock.
 */
static void
zfsctl_rename_snap(zfsctl_snapdir_t *sdp, zfs_snapentry_t *sep, const char *nm)
{
	avl_index_t where;
	vfs_t *vfsp;
	refstr_t *pathref;
	char newpath[MAXNAMELEN];
	char *tail;

	ASSERT(MUTEX_HELD(&sdp->sd_lock));
	ASSERT(sep != NULL);

	vfsp = vn_mountedvfs(sep->se_root);
	ASSERT(vfsp != NULL);

	vfs_lock_wait(vfsp);

	/*
	 * Change the name in the AVL tree.
	 */
	avl_remove(&sdp->sd_snaps, sep);
	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	VERIFY(avl_find(&sdp->sd_snaps, sep, &where) == NULL);
	avl_insert(&sdp->sd_snaps, sep, where);

	/*
	 * Change the current mountpoint info:
	 *	- update the tail of the mntpoint path
	 *	- update the tail of the resource path
	 */
	pathref = vfs_getmntpoint(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '/')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setmntpoint(vfsp, newpath, 0);

	pathref = vfs_getresource(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '@')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setresource(vfsp, newpath, 0);

	vfs_unlock(vfsp);
}
#endif	/* illumos */

#ifdef illumos
/*
 * VOP_RENAME for '.zfs/snapshot': rename a snapshot.  Only renames within
 * the snapdir are allowed, and the caller must pass the rename permission
 * checks for both the source and destination snapshot names.
 */
/*ARGSUSED*/
static int
zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
    cred_t *cr, caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = sdvp->v_data;
	zfs_snapentry_t search, *sep;
	zfsvfs_t *zfsvfs;
	avl_index_t where;
	char from[MAXNAMELEN], to[MAXNAMELEN];
	char real[MAXNAMELEN], fsname[MAXNAMELEN];
	int err;

	zfsvfs = sdvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			snm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	/*
	 * NOTE(review): zfsvfs->z_os is read here after ZFS_EXIT() above;
	 * presumably the objset cannot go away while the snapdir vnode is
	 * held — confirm.
	 */
	dmu_objset_name(zfsvfs->z_os, fsname);

	err = zfsctl_snapshot_zname(sdvp, snm, MAXNAMELEN, from);
	if (err == 0)
		err = zfsctl_snapshot_zname(tdvp, tnm, MAXNAMELEN, to);
	if (err == 0)
		err = zfs_secpolicy_rename_perms(from, to, cr);
	if (err != 0)
		return (err);

	/*
	 * Cannot move snapshots out of the snapdir.
	 */
	if (sdvp != tdvp)
		return (SET_ERROR(EINVAL));

	if (strcmp(snm, tnm) == 0)
		return (0);

	mutex_enter(&sdp->sd_lock);

	search.se_name = (char *)snm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) == NULL) {
		mutex_exit(&sdp->sd_lock);
		return (SET_ERROR(ENOENT));
	}

	err = dsl_dataset_rename_snapshot(fsname, snm, tnm, 0);
	if (err == 0)
		zfsctl_rename_snap(sdp, sep, tnm);

	mutex_exit(&sdp->sd_lock);

	return (err);
}
#endif	/* illumos */

#ifdef illumos
/*
 * VOP_RMDIR for '.zfs/snapshot': destroy a snapshot.  The snapshot is
 * force-unmounted first; if the destroy then fails the entry is re-added
 * to the AVL tree (the snapshot itself stays unmounted).
 */
/* ARGSUSED */
static int
zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	zfs_snapentry_t *sep;
	zfs_snapentry_t search;
	zfsvfs_t *zfsvfs;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	int err;

	zfsvfs = dvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {

		err = dmu_snapshot_realname(zfsvfs->z_os, name, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			name = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	err = zfsctl_snapshot_zname(dvp, name, MAXNAMELEN, snapname);
	if (err == 0)
		err = zfs_secpolicy_destroy_perms(snapname, cr);
	if (err != 0)
		return (err);

	mutex_enter(&sdp->sd_lock);

	search.se_name = name;
	sep = avl_find(&sdp->sd_snaps, &search, NULL);
	if (sep) {
		avl_remove(&sdp->sd_snaps, sep);
		err = zfsctl_unmount_snap(sep, MS_FORCE, cr);
		if (err != 0)
			avl_add(&sdp->sd_snaps, sep);
		else
			err = dsl_destroy_snapshot(snapname, B_FALSE);
	} else {
		err = SET_ERROR(ENOENT);
	}

	mutex_exit(&sdp->sd_lock);

	return (err);
}
#endif	/* illumos */

/*
 * This creates a snapshot under '.zfs/snapshot'.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp,
    cred_t *cr, caller_context_t *cc, int flags, vsecattr_t *vsecp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	char name[MAXNAMELEN];
	int err;
	static enum symfollow follow = NO_FOLLOW;
	static enum uio_seg seg = UIO_SYSSPACE;

	if (zfs_component_namecheck(dirname, NULL, NULL) != 0)
		return (SET_ERROR(EILSEQ));

	dmu_objset_name(zfsvfs->z_os, name);

	*vpp = NULL;

	err = zfs_secpolicy_snapshot_perms(name, cr);
	if (err != 0)
		return (err);

	/*
	 * NOTE(review): err is always 0 at this point (the failure case
	 * returned above), so this guard is redundant but harmless.
	 */
	if (err == 0) {
		err = dmu_objset_snapshot_one(name, dirname);
		if (err != 0)
			return (err);
		/* Look the new snapshot up to trigger its automount. */
		err = lookupnameat(dirname, seg, follow, NULL, vpp, dvp);
	}

	return (err);
}

/*
 * FreeBSD VOP_MKDIR wrapper for zfsctl_snapdir_mkdir().
 */
static int
zfsctl_freebsd_snapdir_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{

	ASSERT(ap->a_cnp->cn_flags & SAVENAME);

	return (zfsctl_snapdir_mkdir(ap->a_dvp, ap->a_cnp->cn_nameptr, NULL,
	    ap->a_vpp, ap->a_cnp->cn_cred, NULL, 0, NULL));
}

/*
 * Lookup entry point for the 'snapshot' directory.  Try to open the
 * snapshot if it exist, creating the pseudo filesystem vnode as necessary.
 * Perform a mount of the associated dataset on top of the vnode.
 */
/* ARGSUSED */
int
zfsctl_snapdir_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	char nm[NAME_MAX + 1];
	zfsctl_snapdir_t *sdp = dvp->v_data;
	objset_t *snap;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	char *mountpoint;
	zfs_snapentry_t *sep, search;
	size_t mountpoint_len;
	avl_index_t where;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;
	/* flags is always 0 on FreeBSD; the illumos flag checks are vestigial */
	int ltype, flags = 0;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (SET_ERROR(EINVAL));
	ASSERT(ap->a_cnp->cn_namelen < sizeof(nm));
	strlcpy(nm, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);

	ASSERT(dvp->v_type == VDIR);

	*vpp = NULL;

	/*
	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs).  We need to
	 * add some flag to domount() to tell it not to do this lookup.
	 */
	if (MUTEX_HELD(&sdp->sd_lock))
		return (SET_ERROR(ENOENT));

	ZFS_ENTER(zfsvfs);
	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    MAXNAMELEN, &conflict);
		if (err == 0) {
			strlcpy(nm, real, sizeof(nm));
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
#if 0
		if (realpnp)
			(void) strlcpy(realpnp->pn_buf, nm,
			    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;
#endif
	}

	/*
	 * Fast path: the snapshot already has an entry.  traverse() walks
	 * down to the root of whatever is mounted on it; if the vnode comes
	 * back unchanged the snapshot was unmounted behind our backs and we
	 * remount it.
	 */
	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		*vpp = sep->se_root;
		VN_HOLD(*vpp);
		err = traverse(vpp, LK_EXCLUSIVE | LK_RETRY);
		if (err != 0) {
			VN_RELE(*vpp);
			*vpp = NULL;
		} else if (*vpp == sep->se_root) {
			/*
			 * The snapshot was unmounted behind our backs,
			 * try to remount it.
			 */
			VERIFY(zfsctl_snapshot_zname(dvp, nm,
			    MAXNAMELEN, snapname) == 0);
			goto domount;
		} else {
			/*
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			 */
			(*vpp)->v_flag &= ~VROOT;
		}
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/*
	 * The requested snapshot is not currently mounted, look it up.
	 */
	err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname);
	if (err != 0) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		/*
		 * handle "ls *" or "?" in a graceful manner,
		 * forcing EILSEQ to ENOENT.
		 * Since shell ultimately passes "*" or "?" as name to lookup
		 */
		return (err == EILSEQ ? ENOENT : err);
	}
	if (dmu_objset_hold(snapname, FTAG, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);
#ifdef illumos
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOENT));
#else	/* !illumos */
		/* Translate errors and add SAVENAME when needed. */
		if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
			err = EJUSTRETURN;
			cnp->cn_flags |= SAVENAME;
		} else {
			err = SET_ERROR(ENOENT);
		}
		ZFS_EXIT(zfsvfs);
		return (err);
#endif	/* illumos */
	}

	/* Create the snap entry and its mountpoint vnode, then mount. */
	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap));
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_rele(snap, FTAG);
domount:
	mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
	    strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len,
	    "%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
	    dvp->v_vfsp->mnt_stat.f_mntonname, nm);
	err = mount_snapshot(curthread, vpp, "zfs", mountpoint, snapname, 0);
	kmem_free(mountpoint, mountpoint_len);
	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
	}
	mutex_exit(&sdp->sd_lock);
	ZFS_EXIT(zfsvfs);

#ifdef illumos
	/*
	 * If we had an error, drop our hold on the vnode and
	 * zfsctl_snapshot_inactive() will clean up.
	 */
	if (err != 0) {
		VN_RELE(*vpp);
		*vpp = NULL;
	}
#else
	if (err != 0)
		*vpp = NULL;
#endif
	return (err);
}

/*
 * Lookup in '.zfs/shares': forward to the real on-disk shares directory.
 */
/* ARGSUSED */
int
zfsctl_shares_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	char nm[NAME_MAX + 1];
	znode_t *dzp;
	int error;

	ZFS_ENTER(zfsvfs);

	ASSERT(cnp->cn_namelen < sizeof(nm));
	strlcpy(nm, cnp->cn_nameptr, cnp->cn_namelen + 1);

	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOTSUP));
	}
	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		error = VOP_LOOKUP(ZTOV(dzp), vpp, cnp);
		VN_RELE(ZTOV(dzp));
	}

	ZFS_EXIT(zfsvfs);

	return (error);
}

/*
 * GFS readdir callback for '.zfs/snapshot': return the next snapshot name
 * after the cookie in *offp.  ENOENT from the DMU means end-of-directory.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_readdir_cb(vnode_t *vp, void *dp, int *eofp,
    offset_t *offp, offset_t *nextp, void *data, int flags)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	char snapname[MAXNAMELEN];
	uint64_t id, cookie;
	boolean_t case_conflict;
	int error;

	ZFS_ENTER(zfsvfs);

	cookie = *offp;
	dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
	error = dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN, snapname, &id,
	    &cookie, &case_conflict);
	dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
	if (error) {
		ZFS_EXIT(zfsvfs);
		if (error == ENOENT) {
			*eofp = 1;
			return (0);
		}
		return (error);
	}

	if (flags & V_RDDIR_ENTFLAGS) {
		edirent_t *eodp = dp;

		(void) strcpy(eodp->ed_name, snapname);
		eodp->ed_ino = ZFSCTL_INO_SNAP(id);
		eodp->ed_eflags = case_conflict ? ED_CASE_CONFLICT : 0;
	} else {
		struct dirent64 *odp = dp;

		(void) strcpy(odp->d_name, snapname);
		odp->d_ino = ZFSCTL_INO_SNAP(id);
	}
	*nextp = cookie;

	ZFS_EXIT(zfsvfs);

	return (0);
}

/*
 * VOP_READDIR for '.zfs/shares': forward to the real shares directory.
 */
/* ARGSUSED */
static int
zfsctl_shares_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		int *a_ncookies;
		u_long **a_cookies;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	uio_t *uiop = ap->a_uio;
	cred_t *cr = ap->a_cred;
	int *eofp = ap->a_eofflag;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	znode_t *dzp;
	int error;

	ZFS_ENTER(zfsvfs);

	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOTSUP));
	}
	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		vn_lock(ZTOV(dzp), LK_SHARED | LK_RETRY);
		error = VOP_READDIR(ZTOV(dzp), uiop, cr, eofp,
		    ap->a_ncookies, ap->a_cookies);
		VN_URELE(ZTOV(dzp));
	} else {
		*eofp = 1;
		error = SET_ERROR(ENOENT);
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * pvp is the '.zfs' directory (zfsctl_node_t).
 *
 * Creates vp, which is '.zfs/snapshot' (zfsctl_snapdir_t).
 *
 * This function is the callback to create a GFS vnode for '.zfs/snapshot'
 * when a lookup is performed on .zfs for "snapshot".
 */
vnode_t *
zfsctl_mknode_snapdir(vnode_t *pvp)
{
	vnode_t *vp;
	zfsctl_snapdir_t *sdp;

	vp = gfs_dir_create(sizeof (zfsctl_snapdir_t), pvp, pvp->v_vfsp,
	    &zfsctl_ops_snapdir, NULL, NULL, MAXNAMELEN,
	    zfsctl_snapdir_readdir_cb, NULL);
	sdp = vp->v_data;
	sdp->sd_node.zc_id = ZFSCTL_INO_SNAPDIR;
	/* Inherit create/modify time from the parent '.zfs' node. */
	sdp->sd_node.zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime;
	mutex_init(&sdp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	/* The AVL tree tracks the mounted snapshot entries by name. */
	avl_create(&sdp->sd_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	/* gfs_dir_create() returns the vnode locked; callers want it unlocked. */
	VOP_UNLOCK(vp, 0);
	return (vp);
}

/*
 * Create the GFS vnode for '.zfs/shares' under the '.zfs' directory pvp.
 */
vnode_t *
zfsctl_mknode_shares(vnode_t *pvp)
{
	vnode_t *vp;
	zfsctl_node_t *sdp;

	vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp, pvp->v_vfsp,
	    &zfsctl_ops_shares, NULL, NULL, MAXNAMELEN,
	    NULL, NULL);
	sdp = vp->v_data;
	sdp->zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime;
	VOP_UNLOCK(vp, 0);
	return (vp);

}

/*
 * Get attributes of '.zfs/shares' by delegating VOP_GETATTR() to the
 * real directory named by z_shares_dir; ENOTSUP if it does not exist.
 */
/* ARGSUSED */
static int
zfsctl_shares_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	vattr_t *vap = ap->a_vap;
	cred_t *cr = ap->a_cred;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	znode_t *dzp;
	int error;

	ZFS_ENTER(zfsvfs);
	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOTSUP));
	}
	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		vn_lock(ZTOV(dzp), LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(ZTOV(dzp), vap, cr);
		VN_URELE(ZTOV(dzp));
	}
	ZFS_EXIT(zfsvfs);
	return (error);


}

/*
 * Get attributes of the synthetic '.zfs/snapshot' directory.  Link and
 * size counts are derived from the number of known snapshot entries
 * plus '.' and '..'; timestamps come from the snapshot-dir change time
 * maintained by the DMU.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	vattr_t *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_snapdir_t *sdp = vp->v_data;

	ZFS_ENTER(zfsvfs);
	zfsctl_common_getattr(vp, vap);
	vap->va_nodeid = gfs_file_inode(vp);
	/* +2 accounts for the '.' and '..' entries. */
	vap->va_nlink = vap->va_size = avl_numnodes(&sdp->sd_snaps) + 2;
	vap->va_ctime = vap->va_mtime = dmu_objset_snap_cmtime(zfsvfs->z_os);
	vap->va_birthtime = vap->va_ctime;
	ZFS_EXIT(zfsvfs);

	return (0);
}

/*
 * Tear down '.zfs/snapshot': free any remaining snapshot entries, then
 * release the GFS directory and the snapdir structure itself.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	zfsctl_snapdir_t *sdp = vp->v_data;
	zfs_snapentry_t *sep;

	/*
	 * On forced unmount we have to free snapshots from here.
	 */
	mutex_enter(&sdp->sd_lock);
	while ((sep = avl_first(&sdp->sd_snaps)) != NULL) {
		avl_remove(&sdp->sd_snaps, sep);
		kmem_free(sep->se_name, strlen(sep->se_name) + 1);
		kmem_free(sep, sizeof (zfs_snapentry_t));
	}
	mutex_exit(&sdp->sd_lock);
	gfs_dir_inactive(vp);
	ASSERT(avl_numnodes(&sdp->sd_snaps) == 0);
	mutex_destroy(&sdp->sd_lock);
	avl_destroy(&sdp->sd_snaps);
	kmem_free(sdp, sizeof (zfsctl_snapdir_t));

	return (0);
}

#ifdef illumos
/* illumos vnode-operation table for '.zfs/snapshot'. */
static const fs_operation_def_t zfsctl_tops_snapdir[] = {
	{ VOPNAME_OPEN,		{ .vop_open = zfsctl_common_open }	},
	{ VOPNAME_CLOSE,	{ .vop_close = zfsctl_common_close }	},
	{ VOPNAME_IOCTL,	{ .error = fs_inval }			},
	{ VOPNAME_GETATTR,	{ .vop_getattr = zfsctl_snapdir_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = zfsctl_common_access }	},
	{ VOPNAME_RENAME,	{ .vop_rename = zfsctl_snapdir_rename }	},
	{ VOPNAME_RMDIR,	{ .vop_rmdir = zfsctl_snapdir_remove }	},
	{ VOPNAME_MKDIR,	{ .vop_mkdir = zfsctl_snapdir_mkdir }	},
	{ VOPNAME_READDIR,	{ .vop_readdir = gfs_vop_readdir }	},
	{ VOPNAME_LOOKUP,	{ .vop_lookup = zfsctl_snapdir_lookup }	},
	{ VOPNAME_SEEK,		{ .vop_seek = fs_seek }			},
	{ VOPNAME_INACTIVE,	{ .vop_inactive = zfsctl_snapdir_inactive } },
	{ VOPNAME_FID,		{ .vop_fid = zfsctl_common_fid }	},
	{ NULL }
};

/* illumos vnode-operation table for '.zfs/shares'. */
static const fs_operation_def_t zfsctl_tops_shares[] = {
	{ VOPNAME_OPEN,		{ .vop_open = zfsctl_common_open }	},
	{ VOPNAME_CLOSE,	{ .vop_close = zfsctl_common_close }	},
	{ VOPNAME_IOCTL,	{ .error = fs_inval }			},
	{ VOPNAME_GETATTR,	{ .vop_getattr = zfsctl_shares_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = zfsctl_common_access }	},
	{ VOPNAME_READDIR,	{ .vop_readdir = zfsctl_shares_readdir } },
	{ VOPNAME_LOOKUP,	{ .vop_lookup = zfsctl_shares_lookup }	},
	{ VOPNAME_SEEK,		{ .vop_seek = fs_seek }			},
	{ VOPNAME_INACTIVE,	{ .vop_inactive = gfs_vop_inactive }	},
	{ VOPNAME_FID,		{ .vop_fid = zfsctl_shares_fid }	},
	{ NULL }
};
#else	/* !illumos */
/* FreeBSD vop_vector for '.zfs/snapshot'. */
static struct vop_vector zfsctl_ops_snapdir = {
	.vop_default =	&default_vnodeops,
	.vop_open =	zfsctl_common_open,
	.vop_close =	zfsctl_common_close,
	.vop_ioctl =	VOP_EINVAL,
	.vop_getattr =	zfsctl_snapdir_getattr,
	.vop_access =	zfsctl_common_access,
	.vop_mkdir =	zfsctl_freebsd_snapdir_mkdir,
	.vop_readdir =	gfs_vop_readdir,
	.vop_lookup =	zfsctl_snapdir_lookup,
	.vop_inactive =	zfsctl_snapdir_inactive,
	.vop_reclaim =	zfsctl_common_reclaim,
	.vop_fid =	zfsctl_common_fid,
};

/* FreeBSD vop_vector for '.zfs/shares'. */
static struct vop_vector zfsctl_ops_shares = {
	.vop_default =	&default_vnodeops,
	.vop_open =	zfsctl_common_open,
	.vop_close =	zfsctl_common_close,
	.vop_ioctl =	VOP_EINVAL,
	.vop_getattr =	zfsctl_shares_getattr,
	.vop_access =	zfsctl_common_access,
	.vop_readdir =	zfsctl_shares_readdir,
	.vop_lookup =	zfsctl_shares_lookup,
	.vop_inactive =	VOP_NULL,
	.vop_reclaim =	gfs_vop_reclaim,
	.vop_fid =	zfsctl_shares_fid,
};
#endif	/* illumos */

/*
 * pvp is the GFS vnode '.zfs/snapshot'.
 *
 * This creates a GFS node under '.zfs/snapshot' representing each
 * snapshot.  This newly created GFS node is what we mount snapshot
 * vfs_t's ontop of.
 */
static vnode_t *
zfsctl_snapshot_mknode(vnode_t *pvp, uint64_t objset)
{
	vnode_t *vp;
	zfsctl_node_t *zcp;

	vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp, pvp->v_vfsp,
	    &zfsctl_ops_snapshot, NULL, NULL, MAXNAMELEN, NULL, NULL);
	zcp = vp->v_data;
	zcp->zc_id = objset;
	VOP_UNLOCK(vp, 0);

	return (vp);
}

/*
 * Reclaim a snapshot mount-point vnode: remove its entry from the
 * parent snapdir's AVL tree (if the snapdir still exists) and then
 * dispose of the GFS node.
 *
 * NOTE(review): FreeBSD-specific; the argument structure is declared as
 * vop_inactive_args but used as the reclaim hook (see zfsctl_ops_snapshot).
 */
static int
zfsctl_snapshot_reclaim(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	cred_t *cr = ap->a_td->td_ucred;
	struct vop_reclaim_args iap;
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep, *next;
	int locked;
	vnode_t *dvp;

	VERIFY(gfs_dir_lookup(vp, "..", &dvp, cr, 0, NULL, NULL) == 0);
	sdp = dvp->v_data;
	VOP_UNLOCK(dvp, 0);
	/* this may already have been unmounted */
	if (sdp == NULL) {
		VN_RELE(dvp);
		return (0);
	}
	if (!(locked = MUTEX_HELD(&sdp->sd_lock)))
		mutex_enter(&sdp->sd_lock);

	ASSERT(!vn_ismntpt(vp));

	/* Find and remove the entry whose root vnode is vp. */
	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		next = AVL_NEXT(&sdp->sd_snaps, sep);

		if (sep->se_root == vp) {
			avl_remove(&sdp->sd_snaps, sep);
			kmem_free(sep->se_name, strlen(sep->se_name) + 1);
			kmem_free(sep, sizeof (zfs_snapentry_t));
			break;
		}
		sep = next;
	}
	ASSERT(sep != NULL);

	if (!locked)
		mutex_exit(&sdp->sd_lock);
	VN_RELE(dvp);

	/*
	 * Dispose of the vnode for the snapshot mount point.
	 * This is safe to do because once this entry has been removed
	 * from the AVL tree, it can't be found again, so cannot become
	 * "active".  If we lookup the same name again we will end up
	 * creating a new vnode.
	 */
	iap.a_vp = vp;
	gfs_vop_reclaim(&iap);
	return (0);

}

/*
 * Take a hold on *vpp and descend through the mount covering it,
 * replacing *vpp with the mounted filesystem's root (locked with
 * lktype).  Returns ENOENT if nothing is mounted there.
 */
static int
zfsctl_traverse_begin(vnode_t **vpp, int lktype)
{

	VN_HOLD(*vpp);
	/* Snapshot should be already mounted, but just in case. */
	if (vn_mountedvfs(*vpp) == NULL)
		return (ENOENT);
	return (traverse(vpp, lktype));
}

/*
 * Undo zfsctl_traverse_begin(): on success the vnode is held and
 * locked (vput), on failure only held (VN_RELE).
 */
static void
zfsctl_traverse_end(vnode_t *vp, int err)
{

	if (err == 0)
		vput(vp);
	else
		VN_RELE(vp);
}

/*
 * Get attributes of a snapshot mount point by forwarding to the root
 * vnode of the filesystem mounted on top of it.
 */
static int
zfsctl_snapshot_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	int err;

	err = zfsctl_traverse_begin(&vp, LK_SHARED | LK_RETRY);
	if (err == 0)
		err = VOP_GETATTR(vp, ap->a_vap, ap->a_cred);
	zfsctl_traverse_end(vp, err);
	return (err);
}

/*
 * Produce an NFS file handle for a snapshot mount point, again by
 * forwarding to the mounted filesystem's root vnode.
 */
static int
zfsctl_snapshot_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	int err;

	err = zfsctl_traverse_begin(&vp, LK_SHARED | LK_RETRY);
	if (err == 0)
		err = VOP_VPTOFH(vp, (void *)ap->a_fid);
	zfsctl_traverse_end(vp, err);
	return (err);
}

/*
 * Lookup on a snapshot mount-point vnode.  Only '..' is meaningful
 * (it resolves to '.zfs/snapshot'); anything else returns ENOENT.
 * The lock juggling re-acquires dvp's lock state after locking *vpp
 * to respect the parent-before-child lock order.
 */
static int
zfsctl_snapshot_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	cred_t *cr = ap->a_cnp->cn_cred;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int error;

	if (cnp->cn_namelen != 2 || cnp->cn_nameptr[0] != '.' ||
	    cnp->cn_nameptr[1] != '.') {
		return (ENOENT);
	}

	ASSERT(dvp->v_type == VDIR);
	ASSERT(zfsvfs->z_ctldir != NULL);

	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", vpp,
	    NULL, 0, NULL, cr, NULL, NULL, NULL);
	if (error == 0) {
		int ltype = VOP_ISLOCKED(dvp);
		VN_HOLD(*vpp);
		VOP_UNLOCK(dvp, 0);
		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
		VN_RELE(*vpp);
		vn_lock(dvp, ltype | LK_RETRY);
	}

	return (error);
}

/*
 * vptocnp for snapshot mount points: map the vnode back to its name by
 * scanning the snapdir's AVL tree, copying the snapshot name into the
 * caller's pathname buffer and returning the snapdir as the parent.
 */
static int
zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
{
	zfsvfs_t *zfsvfs = ap->a_vp->v_vfsp->vfs_data;
	vnode_t *dvp, *vp;
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
	    NULL, 0, NULL, kcred, NULL, NULL, NULL);
	if (error != 0)
		return (error);
	sdp = dvp->v_data;

	mutex_enter(&sdp->sd_lock);
	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		vp = sep->se_root;
		if (vp == ap->a_vp)
			break;
		sep = AVL_NEXT(&sdp->sd_snaps, sep);
	}
	if (sep == NULL) {
		mutex_exit(&sdp->sd_lock);
		error = ENOENT;
	} else {
		size_t len;

		/* The name is written backwards from the end of the buffer. */
		len = strlen(sep->se_name);
		*ap->a_buflen -= len;
		bcopy(sep->se_name, ap->a_buf + *ap->a_buflen, len);
		mutex_exit(&sdp->sd_lock);
		vref(dvp);
		*ap->a_vpp = dvp;
	}
	VN_RELE(dvp);

	return (error);
}

/*
 * These VP's should never see the light of day.  They should always
 * be covered.
1659 */ 1660static struct vop_vector zfsctl_ops_snapshot = { 1661 .vop_default = &default_vnodeops, 1662 .vop_inactive = VOP_NULL, 1663 .vop_lookup = zfsctl_snapshot_lookup, 1664 .vop_reclaim = zfsctl_snapshot_reclaim, 1665 .vop_getattr = zfsctl_snapshot_getattr, 1666 .vop_fid = zfsctl_snapshot_fid, 1667 .vop_vptocnp = zfsctl_snapshot_vptocnp, 1668}; 1669 1670int 1671zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp) 1672{ 1673 zfsvfs_t *zfsvfs = vfsp->vfs_data; 1674 vnode_t *dvp, *vp; 1675 zfsctl_snapdir_t *sdp; 1676 zfsctl_node_t *zcp; 1677 zfs_snapentry_t *sep; 1678 int error; 1679 1680 ASSERT(zfsvfs->z_ctldir != NULL); 1681 error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp, 1682 NULL, 0, NULL, kcred, NULL, NULL, NULL); 1683 if (error != 0) 1684 return (error); 1685 sdp = dvp->v_data; 1686 1687 mutex_enter(&sdp->sd_lock); 1688 sep = avl_first(&sdp->sd_snaps); 1689 while (sep != NULL) { 1690 vp = sep->se_root; 1691 zcp = vp->v_data; 1692 if (zcp->zc_id == objsetid) 1693 break; 1694 1695 sep = AVL_NEXT(&sdp->sd_snaps, sep); 1696 } 1697 1698 if (sep != NULL) { 1699 VN_HOLD(vp); 1700 /* 1701 * Return the mounted root rather than the covered mount point. 1702 * Takes the GFS vnode at .zfs/snapshot/<snapshot objsetid> 1703 * and returns the ZFS vnode mounted on top of the GFS node. 1704 * This ZFS vnode is the root of the vfs for objset 'objsetid'. 1705 */ 1706 error = traverse(&vp, LK_SHARED | LK_RETRY); 1707 if (error == 0) { 1708 if (vp == sep->se_root) 1709 error = SET_ERROR(EINVAL); 1710 else 1711 *zfsvfsp = VTOZ(vp)->z_zfsvfs; 1712 } 1713 mutex_exit(&sdp->sd_lock); 1714 if (error == 0) 1715 VN_URELE(vp); 1716 else 1717 VN_RELE(vp); 1718 } else { 1719 error = SET_ERROR(EINVAL); 1720 mutex_exit(&sdp->sd_lock); 1721 } 1722 1723 VN_RELE(dvp); 1724 1725 return (error); 1726} 1727 1728/* 1729 * Unmount any snapshots for the given filesystem. 
This is called from 1730 * zfs_umount() - if we have a ctldir, then go through and unmount all the 1731 * snapshots. 1732 */ 1733int 1734zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr) 1735{ 1736 zfsvfs_t *zfsvfs = vfsp->vfs_data; 1737 vnode_t *dvp; 1738 zfsctl_snapdir_t *sdp; 1739 zfs_snapentry_t *sep, *next; 1740 int error; 1741 1742 ASSERT(zfsvfs->z_ctldir != NULL); 1743 error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp, 1744 NULL, 0, NULL, cr, NULL, NULL, NULL); 1745 if (error != 0) 1746 return (error); 1747 sdp = dvp->v_data; 1748 1749 mutex_enter(&sdp->sd_lock); 1750 1751 sep = avl_first(&sdp->sd_snaps); 1752 while (sep != NULL) { 1753 next = AVL_NEXT(&sdp->sd_snaps, sep); 1754 1755 /* 1756 * If this snapshot is not mounted, then it must 1757 * have just been unmounted by somebody else, and 1758 * will be cleaned up by zfsctl_snapdir_inactive(). 1759 */ 1760 if (vn_ismntpt(sep->se_root)) { 1761 error = zfsctl_unmount_snap(sep, fflags, cr); 1762 if (error) { 1763 avl_index_t where; 1764 1765 /* 1766 * Before reinserting snapshot to the tree, 1767 * check if it was actually removed. For example 1768 * when snapshot mount point is busy, we will 1769 * have an error here, but there will be no need 1770 * to reinsert snapshot. 1771 */ 1772 if (avl_find(&sdp->sd_snaps, sep, &where) == NULL) 1773 avl_insert(&sdp->sd_snaps, sep, where); 1774 break; 1775 } 1776 } 1777 sep = next; 1778 } 1779 1780 mutex_exit(&sdp->sd_lock); 1781 VN_RELE(dvp); 1782 1783 return (error); 1784} 1785