ztest.c revision 268649
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired. If backwards compatibility
 *     testing is enabled ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
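 *
 * For example (illustrative values only, not a tuned recommendation), an
 * overnight run with extra logging might look like:
 *
 *     ztest -T 86400 -VV -f /var/tmp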
 *
 * Use the -k option to set the desired frequency of kills.
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process. This allows shared
 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file. The information stored in this file
 * must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <dlfcn.h>
#include <ctype.h>
#include <math.h>
#include <errno.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>

static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;

typedef struct ztest_shared_hdr {
    uint64_t zh_hdr_size;
    uint64_t zh_opts_size;
    uint64_t zh_size;
    uint64_t zh_stats_size;
    uint64_t zh_stats_count;
    uint64_t zh_ds_size;
    uint64_t zh_ds_count;
} ztest_shared_hdr_t;

static ztest_shared_hdr_t *ztest_shared_hdr;

typedef struct ztest_shared_opts {
    char zo_pool[MAXNAMELEN];
    char zo_dir[MAXNAMELEN];
    char zo_alt_ztest[MAXNAMELEN];
    char zo_alt_libpath[MAXNAMELEN];
    uint64_t zo_vdevs;
    uint64_t zo_vdevtime;
    size_t zo_vdev_size;
    int zo_ashift;
    int zo_mirrors;
    int zo_raidz;
    int zo_raidz_parity;
    int zo_datasets;
    int zo_threads;
    uint64_t zo_passtime;
    uint64_t zo_killrate;
    int zo_verbose;
    int zo_init;
    uint64_t zo_time;
    uint64_t zo_maxloops;
    uint64_t zo_metaslab_gang_bang;
} ztest_shared_opts_t;

static const ztest_shared_opts_t ztest_opts_defaults = {
    .zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
    .zo_dir = { '/', 't', 'm', 'p', '\0' },
    .zo_alt_ztest = { '\0' },
    .zo_alt_libpath = { '\0' },
    .zo_vdevs = 5,
    .zo_ashift = SPA_MINBLOCKSHIFT,
    .zo_mirrors = 2,
    .zo_raidz = 4,
    .zo_raidz_parity = 1,
    .zo_vdev_size = SPA_MINDEVSIZE,
    .zo_datasets = 7,
    .zo_threads = 23,
    .zo_passtime = 60,        /* 60 seconds */
    .zo_killrate = 70,        /* 70% kill rate */
    .zo_verbose = 0,
    .zo_init = 1,
    .zo_time = 300,           /* 5 minutes */
    .zo_maxloops = 50,        /* max loops during spa_freeze() */
    .zo_metaslab_gang_bang = 32 << 10
};

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
extern uint64_t zfs_deadman_synctime_ms;

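/*
 * ztest_shared_opts points at the options block in the shared mmap'ed
 * region described above; ztest_opts is this process's private working
 * copy that process_options() fills in.
 */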
static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;

typedef struct ztest_shared_ds {
    uint64_t zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])

#define BT_MAGIC 0x123456789abcdefULL
#define MAXFAULTS() \
    (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)

enum ztest_io_type {
    ZTEST_IO_WRITE_TAG,
    ZTEST_IO_WRITE_PATTERN,
    ZTEST_IO_WRITE_ZEROES,
    ZTEST_IO_TRUNCATE,
    ZTEST_IO_SETATTR,
    ZTEST_IO_REWRITE,
    ZTEST_IO_TYPES
};

typedef struct ztest_block_tag {
    uint64_t bt_magic;
    uint64_t bt_objset;
    uint64_t bt_object;
    uint64_t bt_offset;
    uint64_t bt_gen;
    uint64_t bt_txg;
    uint64_t bt_crtxg;
} ztest_block_tag_t;

typedef struct bufwad {
    uint64_t bw_index;
    uint64_t bw_txg;
    uint64_t bw_data;
} bufwad_t;

/*
 * XXX -- fix zfs range locks to be generic so we can use them here.
 */
typedef enum {
    RL_READER,
    RL_WRITER,
    RL_APPEND
} rl_type_t;

typedef struct rll {
    void *rll_writer;
    int rll_readers;
    mutex_t rll_lock;
    cond_t rll_cv;
} rll_t;

typedef struct rl {
    uint64_t rl_object;
    uint64_t rl_offset;
    uint64_t rl_size;
    rll_t *rl_lock;
} rl_t;

#define ZTEST_RANGE_LOCKS  64
#define ZTEST_OBJECT_LOCKS 64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
    uint64_t od_dir;
    uint64_t od_object;
    dmu_object_type_t od_type;
    dmu_object_type_t od_crtype;
    uint64_t od_blocksize;
    uint64_t od_crblocksize;
    uint64_t od_gen;
    uint64_t od_crgen;
    char od_name[MAXNAMELEN];
} ztest_od_t;

/*
 * Per-dataset state.
 */
typedef struct ztest_ds {
    ztest_shared_ds_t *zd_shared;
    objset_t *zd_os;
    rwlock_t zd_zilog_lock;
    zilog_t *zd_zilog;
    ztest_od_t *zd_od;        /* debugging aid */
    char zd_name[MAXNAMELEN];
    mutex_t zd_dirobj_lock;
    rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
    rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;

/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
    ztest_func_t *zi_func;    /* test function */
    uint64_t zi_iters;        /* iterations per execution */
    uint64_t *zi_interval;    /* execute every <interval> seconds */
} ztest_info_t;

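/*
 * Per-test-function call statistics.  These are kept in the shared
 * mmap'ed file (note zh_stats_size/zh_stats_count in ztest_shared_hdr_t)
 * so the counts and timings accumulated by the child survive its SIGKILL
 * and can be examined by the parent.
 */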
typedef struct ztest_shared_callstate {
    uint64_t zc_count;    /* per-pass count */
    uint64_t zc_time;     /* per-pass time */
    uint64_t zc_next;     /* next time to call this function */
} ztest_shared_callstate_t;

static ztest_shared_callstate_t *ztest_shared_callstate;
#define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])

/*
 * Note: these aren't static because we want dladdr() to work.
 */
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;

uint64_t zopt_always = 0ULL * NANOSEC;          /* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;  /* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;           /* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;      /* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;         /* every 60 seconds */

ztest_info_t ztest_info[] = {
    { ztest_dmu_read_write,                 1,      &zopt_always    },
    { ztest_dmu_write_parallel,             10,     &zopt_always    },
    { ztest_dmu_object_alloc_free,          1,      &zopt_always    },
    { ztest_dmu_commit_callbacks,           1,      &zopt_always    },
    { ztest_zap,                            30,     &zopt_always    },
    { ztest_zap_parallel,                   100,    &zopt_always    },
    { ztest_split_pool,                     1,      &zopt_always    },
    { ztest_zil_commit,                     1,      &zopt_incessant },
    { ztest_zil_remount,                    1,      &zopt_sometimes },
    { ztest_dmu_read_write_zcopy,           1,      &zopt_often     },
    { ztest_dmu_objset_create_destroy,      1,      &zopt_often     },
    { ztest_dsl_prop_get_set,               1,      &zopt_often     },
    { ztest_spa_prop_get_set,               1,      &zopt_sometimes },
#if 0
    { ztest_dmu_prealloc,                   1,      &zopt_sometimes },
#endif
    { ztest_fzap,                           1,      &zopt_sometimes },
    { ztest_dmu_snapshot_create_destroy,    1,      &zopt_sometimes },
    { ztest_spa_create_destroy,             1,      &zopt_sometimes },
    { ztest_fault_inject,                   1,      &zopt_sometimes },
    { ztest_ddt_repair,                     1,      &zopt_sometimes },
    { ztest_dmu_snapshot_hold,              1,      &zopt_sometimes },
    { ztest_reguid,                         1,      &zopt_rarely    },
    { ztest_spa_rename,                     1,      &zopt_rarely    },
    { ztest_scrub,                          1,      &zopt_rarely    },
    { ztest_spa_upgrade,                    1,      &zopt_rarely    },
    { ztest_dsl_dataset_promote_busy,       1,      &zopt_rarely    },
    { ztest_vdev_attach_detach,             1,      &zopt_sometimes },
    { ztest_vdev_LUN_growth,                1,      &zopt_rarely    },
    { ztest_vdev_add_remove,                1,      &ztest_opts.zo_vdevtime },
    { ztest_vdev_aux_add_remove,            1,      &ztest_opts.zo_vdevtime },
};

#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))

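/*
 * The ztest_info[] table above is the master list of tests.  Each entry
 * runs zi_iters times per invocation, and zi_interval points at a pacing
 * value in nanoseconds: zopt_always (0) makes a test eligible on every
 * pass, while vdev add/remove is paced by zo_vdevtime, which
 * process_options() computes so that additions are spread evenly across
 * the total run time.
 */
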
/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
    mutex_t zcl_callbacks_lock;
    list_t zcl_callbacks;
} ztest_cb_list_t;

/*
 * Stuff we need to share writably between parent and child.
 */
typedef struct ztest_shared {
    boolean_t zs_do_init;
    hrtime_t zs_proc_start;
    hrtime_t zs_proc_stop;
    hrtime_t zs_thread_start;
    hrtime_t zs_thread_stop;
    hrtime_t zs_thread_kill;
    uint64_t zs_enospc_count;
    uint64_t zs_vdev_next_leaf;
    uint64_t zs_vdev_aux;
    uint64_t zs_alloc;
    uint64_t zs_space;
    uint64_t zs_splits;
    uint64_t zs_mirrors;
    uint64_t zs_metaslab_sz;
    uint64_t zs_metaslab_df_alloc_threshold;
    uint64_t zs_guid;
} ztest_shared_t;

#define ID_PARALLEL -1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;

static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;

static mutex_t ztest_vdev_lock;

/*
 * The ztest_name_lock protects the pool and dataset namespace used by
 * the individual tests. To modify the namespace, consumers must grab
 * this lock as writer. Grabbing the lock as reader will ensure that the
 * namespace does not change while the lock is held.
 */
static rwlock_t ztest_name_lock;

static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;

enum ztest_object {
    ZTEST_META_DNODE = 0,
    ZTEST_DIROBJ,
    ZTEST_OBJECTS
};

static void usage(boolean_t) __NORETURN;

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init(void)
{
    return ("default,verbose");    /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
    return ("fail,contents");      /* $UMEM_LOGGING setting */
}

#define FATAL_MSG_SZ 1024

char *fatal_msg;

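/*
 * Format a fatal error message (appending strerror(errno) when do_perror
 * is set), print it to stderr, remember it in fatal_msg to ease debugging,
 * then either dump core via abort() -- the default, since ztest_dump_core
 * starts out B_TRUE -- or exit with status 3.
 */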
static void
fatal(int do_perror, char *message, ...)
{
    va_list args;
    int save_errno = errno;
    char buf[FATAL_MSG_SZ];

    (void) fflush(stdout);

    va_start(args, message);
    (void) sprintf(buf, "ztest: ");
    /* LINTED */
    (void) vsprintf(buf + strlen(buf), message, args);
    va_end(args);
    if (do_perror) {
        (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
            ": %s", strerror(save_errno));
    }
    (void) fprintf(stderr, "%s\n", buf);
    fatal_msg = buf;    /* to ease debugging */
    if (ztest_dump_core)
        abort();
    exit(3);
}

static int
str2shift(const char *buf)
{
    const char *ends = "BKMGTPEZ";
    int i;

    if (buf[0] == '\0')
        return (0);
    for (i = 0; i < strlen(ends); i++) {
        if (toupper(buf[0]) == ends[i])
            break;
    }
    if (i == strlen(ends)) {
        (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
            buf);
        usage(B_FALSE);
    }
    if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
        return (10*i);
    }
    (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
    usage(B_FALSE);
    /* NOTREACHED */
}

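/*
 * Convert a human-readable number, with an optional binary suffix parsed
 * by str2shift() above, into a uint64_t: e.g. "128k" becomes 128 << 10,
 * and "1.5g" takes the strtod() path to yield (uint64_t)(1.5 * 2^30).
 * Values that overflow 64 bits are rejected via usage().
 */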
static uint64_t
nicenumtoull(const char *buf)
{
    char *end;
    uint64_t val;

    val = strtoull(buf, &end, 0);
    if (end == buf) {
        (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
        usage(B_FALSE);
    } else if (end[0] == '.') {
        double fval = strtod(buf, &end);
        fval *= pow(2, str2shift(end));
        if (fval > UINT64_MAX) {
            (void) fprintf(stderr, "ztest: value too large: %s\n",
                buf);
            usage(B_FALSE);
        }
        val = (uint64_t)fval;
    } else {
        int shift = str2shift(end);
        if (shift >= 64 || (val << shift) >> shift != val) {
            (void) fprintf(stderr, "ztest: value too large: %s\n",
                buf);
            usage(B_FALSE);
        }
        val <<= shift;
    }
    return (val);
}

static void
usage(boolean_t requested)
{
    const ztest_shared_opts_t *zo = &ztest_opts_defaults;

    char nice_vdev_size[10];
    char nice_gang_bang[10];
    FILE *fp = requested ? stdout : stderr;

    nicenum(zo->zo_vdev_size, nice_vdev_size);
    nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);

    (void) fprintf(fp, "Usage: %s\n"
        "\t[-v vdevs (default: %llu)]\n"
        "\t[-s size_of_each_vdev (default: %s)]\n"
        "\t[-a alignment_shift (default: %d)] use 0 for random\n"
        "\t[-m mirror_copies (default: %d)]\n"
        "\t[-r raidz_disks (default: %d)]\n"
        "\t[-R raidz_parity (default: %d)]\n"
        "\t[-d datasets (default: %d)]\n"
        "\t[-t threads (default: %d)]\n"
        "\t[-g gang_block_threshold (default: %s)]\n"
        "\t[-i init_count (default: %d)] initialize pool i times\n"
        "\t[-k kill_percentage (default: %llu%%)]\n"
        "\t[-p pool_name (default: %s)]\n"
        "\t[-f dir (default: %s)] file directory for vdev files\n"
        "\t[-V] verbose (use multiple times for ever more blather)\n"
        "\t[-E] use existing pool instead of creating new one\n"
        "\t[-T time (default: %llu sec)] total run time\n"
        "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
        "\t[-P passtime (default: %llu sec)] time per pass\n"
        "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
        "\t[-h] (print help)\n"
        "",
        zo->zo_pool,
        (u_longlong_t)zo->zo_vdevs,             /* -v */
        nice_vdev_size,                         /* -s */
        zo->zo_ashift,                          /* -a */
        zo->zo_mirrors,                         /* -m */
        zo->zo_raidz,                           /* -r */
        zo->zo_raidz_parity,                    /* -R */
        zo->zo_datasets,                        /* -d */
        zo->zo_threads,                         /* -t */
        nice_gang_bang,                         /* -g */
        zo->zo_init,                            /* -i */
        (u_longlong_t)zo->zo_killrate,          /* -k */
        zo->zo_pool,                            /* -p */
        zo->zo_dir,                             /* -f */
        (u_longlong_t)zo->zo_time,              /* -T */
        (u_longlong_t)zo->zo_maxloops,          /* -F */
        (u_longlong_t)zo->zo_passtime);
    exit(requested ? 0 : 1);
}

static void
process_options(int argc, char **argv)
{
    char *path;
    ztest_shared_opts_t *zo = &ztest_opts;

    int opt;
    uint64_t value;
    char altdir[MAXNAMELEN] = { 0 };

    bcopy(&ztest_opts_defaults, zo, sizeof (*zo));

    while ((opt = getopt(argc, argv,
        "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
        value = 0;
        switch (opt) {
        case 'v':
        case 's':
        case 'a':
        case 'm':
        case 'r':
        case 'R':
        case 'd':
        case 't':
        case 'g':
        case 'i':
        case 'k':
        case 'T':
        case 'P':
        case 'F':
            value = nicenumtoull(optarg);
        }
        switch (opt) {
        case 'v':
            zo->zo_vdevs = value;
            break;
        case 's':
            zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
            break;
        case 'a':
            zo->zo_ashift = value;
            break;
        case 'm':
            zo->zo_mirrors = value;
            break;
        case 'r':
            zo->zo_raidz = MAX(1, value);
            break;
        case 'R':
            zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
            break;
        case 'd':
            zo->zo_datasets = MAX(1, value);
            break;
        case 't':
            zo->zo_threads = MAX(1, value);
            break;
        case 'g':
            zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
                value);
            break;
        case 'i':
            zo->zo_init = value;
            break;
        case 'k':
            zo->zo_killrate = value;
            break;
        case 'p':
            (void) strlcpy(zo->zo_pool, optarg,
                sizeof (zo->zo_pool));
            break;
        case 'f':
            path = realpath(optarg, NULL);
            if (path == NULL) {
                (void) fprintf(stderr, "error: %s: %s\n",
                    optarg, strerror(errno));
                usage(B_FALSE);
            } else {
                (void) strlcpy(zo->zo_dir, path,
                    sizeof (zo->zo_dir));
            }
            break;
        case 'V':
            zo->zo_verbose++;
            break;
        case 'E':
            zo->zo_init = 0;
            break;
        case 'T':
            zo->zo_time = value;
            break;
        case 'P':
            zo->zo_passtime = MAX(1, value);
            break;
        case 'F':
            zo->zo_maxloops = MAX(1, value);
            break;
        case 'B':
            (void) strlcpy(altdir, optarg, sizeof (altdir));
            break;
        case 'h':
            usage(B_TRUE);
            break;
        case '?':
        default:
            usage(B_FALSE);
            break;
        }
    }

    zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);

    zo->zo_vdevtime =
        (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
        UINT64_MAX >> 2);

    if (strlen(altdir) > 0) {
        char *cmd;
        char *realaltdir;
        char *bin;
        char *ztest;
        char *isa;
        int isalen;

        cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
        realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

        VERIFY(NULL != realpath(getexecname(), cmd));
        if (0 != access(altdir, F_OK)) {
            ztest_dump_core = B_FALSE;
            fatal(B_TRUE, "invalid alternate ztest path: %s",
                altdir);
        }
        VERIFY(NULL != realpath(altdir, realaltdir));

        /*
         * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
         * We want to extract <isa> to determine if we should use
         * 32 or 64 bit binaries.
         */
        bin = strstr(cmd, "/usr/bin/");
        ztest = strstr(bin, "/ztest");
        isa = bin + 9;
        isalen = ztest - isa;
        (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
            "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
        (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
            "%s/usr/lib/%.*s", realaltdir, isalen, isa);

        if (0 != access(zo->zo_alt_ztest, X_OK)) {
            ztest_dump_core = B_FALSE;
            fatal(B_TRUE, "invalid alternate ztest: %s",
                zo->zo_alt_ztest);
        } else if (0 != access(zo->zo_alt_libpath, X_OK)) {
            ztest_dump_core = B_FALSE;
            fatal(B_TRUE, "invalid alternate lib directory %s",
                zo->zo_alt_libpath);
        }

        umem_free(cmd, MAXPATHLEN);
        umem_free(realaltdir, MAXPATHLEN);
    }
}

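/*
 * Simulate pulling the power cord (design note (5) above): record the
 * final alloc/space numbers in shared memory, force a config sync, and
 * SIGKILL ourselves so that nothing gets a chance to shut down cleanly.
 */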
static void
ztest_kill(ztest_shared_t *zs)
{
    zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
    zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));

    /*
     * Before we kill off ztest, make sure that the config is updated.
     * See comment above spa_config_sync().
     */
    mutex_enter(&spa_namespace_lock);
    spa_config_sync(ztest_spa, B_FALSE, B_FALSE);
    mutex_exit(&spa_namespace_lock);

    zfs_dbgmsg_print(FTAG);
    (void) kill(getpid(), SIGKILL);
}

static uint64_t
ztest_random(uint64_t range)
{
    uint64_t r;

    ASSERT3S(ztest_fd_rand, >=, 0);

    if (range == 0)
        return (0);

    if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
        fatal(1, "short read from /dev/urandom");

    return (r % range);
}

/* ARGSUSED */
static void
ztest_record_enospc(const char *s)
{
    ztest_shared->zs_enospc_count++;
}

static uint64_t
ztest_get_ashift(void)
{
    if (ztest_opts.zo_ashift == 0)
        return (SPA_MINBLOCKSHIFT + ztest_random(3));
    return (ztest_opts.zo_ashift);
}

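/*
 * Build the nvlist for a single file vdev.  When no explicit path is
 * given, leaf vdevs are named using ztest_dev_template
 * ("<dir>/<pool>.<leaf#>a") and aux vdevs using ztest_aux_template
 * ("<dir>/<pool>.<aux>.<#>"); a nonzero size creates the backing file
 * and truncates it to that length.
 */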
static nvlist_t *
make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
{
    char pathbuf[MAXPATHLEN];
    uint64_t vdev;
    nvlist_t *file;

    if (ashift == 0)
        ashift = ztest_get_ashift();

    if (path == NULL) {
        path = pathbuf;

        if (aux != NULL) {
            vdev = ztest_shared->zs_vdev_aux;
            (void) snprintf(path, sizeof (pathbuf),
                ztest_aux_template, ztest_opts.zo_dir,
                pool == NULL ? ztest_opts.zo_pool : pool,
                aux, vdev);
        } else {
            vdev = ztest_shared->zs_vdev_next_leaf++;
            (void) snprintf(path, sizeof (pathbuf),
                ztest_dev_template, ztest_opts.zo_dir,
                pool == NULL ? ztest_opts.zo_pool : pool, vdev);
        }
    }

    if (size != 0) {
        int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
        if (fd == -1)
            fatal(1, "can't open %s", path);
        if (ftruncate(fd, size) != 0)
            fatal(1, "can't ftruncate %s", path);
        (void) close(fd);
    }

    VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
    VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
    VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
    VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);

    return (file);
}

static nvlist_t *
make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
    uint64_t ashift, int r)
{
    nvlist_t *raidz, **child;
    int c;

    if (r < 2)
        return (make_vdev_file(path, aux, pool, size, ashift));
    child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);

    for (c = 0; c < r; c++)
        child[c] = make_vdev_file(path, aux, pool, size, ashift);

    VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
    VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
        VDEV_TYPE_RAIDZ) == 0);
    VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
        ztest_opts.zo_raidz_parity) == 0);
    VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
        child, r) == 0);

    for (c = 0; c < r; c++)
        nvlist_free(child[c]);

    umem_free(child, r * sizeof (nvlist_t *));

    return (raidz);
}

static nvlist_t *
make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
    uint64_t ashift, int r, int m)
{
    nvlist_t *mirror, **child;
    int c;

    if (m < 1)
        return (make_vdev_raidz(path, aux, pool, size, ashift, r));

    child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);

    for (c = 0; c < m; c++)
        child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);

    VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
    VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
        VDEV_TYPE_MIRROR) == 0);
    VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
        child, m) == 0);

    for (c = 0; c < m; c++)
        nvlist_free(child[c]);

    umem_free(child, m * sizeof (nvlist_t *));

    return (mirror);
}

static nvlist_t *
make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
    int log, int r, int m, int t)
{
    nvlist_t *root, **child;
    int c;

    ASSERT(t > 0);

    child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);

    for (c = 0; c < t; c++) {
        child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
            r, m);
        VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
            log) == 0);
    }

    VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
    VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
    VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
        child, t) == 0);

    for (c = 0; c < t; c++)
        nvlist_free(child[c]);

    umem_free(child, t * sizeof (nvlist_t *));

    return (root);
}

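/*
 * Taken together, the make_vdev_*() functions above construct a complete
 * vdev tree: a root with t top-level vdevs, each an m-way mirror of
 * r-disk raidz (either level collapses to a plain file vdev when m < 1
 * or r < 2).  With the defaults (zo_mirrors = 2, zo_raidz = 4,
 * zo_raidz_parity = 1), every top-level vdev is a 2-way mirror of a
 * 4-disk raidz1.
 */
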
/*
 * Find a random spa version.  Returns a random spa version in the
 * range [initial_version, SPA_VERSION_FEATURES].
 */
static uint64_t
ztest_random_spa_version(uint64_t initial_version)
{
    uint64_t version = initial_version;

    if (version <= SPA_VERSION_BEFORE_FEATURES) {
        version = version +
            ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
    }

    if (version > SPA_VERSION_BEFORE_FEATURES)
        version = SPA_VERSION_FEATURES;

    ASSERT(SPA_VERSION_IS_SUPPORTED(version));
    return (version);
}

static int
ztest_random_blocksize(void)
{
    return (1 << (SPA_MINBLOCKSHIFT +
        ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
}

static int
ztest_random_ibshift(void)
{
    return (DN_MIN_INDBLKSHIFT +
        ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}

static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
    uint64_t top;
    vdev_t *rvd = spa->spa_root_vdev;
    vdev_t *tvd;

    ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

    do {
        top = ztest_random(rvd->vdev_children);
        tvd = rvd->vdev_child[top];
    } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
        tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);

    return (top);
}

static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
    uint64_t value;

    do {
        value = zfs_prop_random_value(prop, ztest_random(-1ULL));
    } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);

    return (value);
}

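/*
 * The property setters below follow ztest's usual error discipline:
 * ENOSPC is expected under load, so it is counted via
 * ztest_record_enospc() and returned to the caller; any other error
 * trips an assertion.
 */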
static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
    boolean_t inherit)
{
    const char *propname = zfs_prop_to_name(prop);
    const char *valname;
    char setpoint[MAXPATHLEN];
    uint64_t curval;
    int error;

    error = dsl_prop_set_int(osname, propname,
        (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);

    if (error == ENOSPC) {
        ztest_record_enospc(FTAG);
        return (error);
    }
    ASSERT0(error);

    VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));

    if (ztest_opts.zo_verbose >= 6) {
        VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
        (void) printf("%s %s = %s at '%s'\n",
            osname, propname, valname, setpoint);
    }

    return (error);
}

static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
    spa_t *spa = ztest_spa;
    nvlist_t *props = NULL;
    int error;

    VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
    VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);

    error = spa_prop_set(spa, props);

    nvlist_free(props);

    if (error == ENOSPC) {
        ztest_record_enospc(FTAG);
        return (error);
    }
    ASSERT0(error);

    return (error);
}

static void
ztest_rll_init(rll_t *rll)
{
    rll->rll_writer = NULL;
    rll->rll_readers = 0;
    VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
    VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
}

static void
ztest_rll_destroy(rll_t *rll)
{
    ASSERT(rll->rll_writer == NULL);
    ASSERT(rll->rll_readers == 0);
    VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
    VERIFY(cond_destroy(&rll->rll_cv) == 0);
}

static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
    VERIFY(mutex_lock(&rll->rll_lock) == 0);

    if (type == RL_READER) {
        while (rll->rll_writer != NULL)
            (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
        rll->rll_readers++;
    } else {
        while (rll->rll_writer != NULL || rll->rll_readers)
            (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
        rll->rll_writer = curthread;
    }

    VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

static void
ztest_rll_unlock(rll_t *rll)
{
    VERIFY(mutex_lock(&rll->rll_lock) == 0);

    if (rll->rll_writer) {
        ASSERT(rll->rll_readers == 0);
        rll->rll_writer = NULL;
    } else {
        ASSERT(rll->rll_readers != 0);
        ASSERT(rll->rll_writer == NULL);
        rll->rll_readers--;
    }

    if (rll->rll_writer == NULL && rll->rll_readers == 0)
        VERIFY(cond_broadcast(&rll->rll_cv) == 0);

    VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

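/*
 * Object and range "locks" hash into fixed arrays of rll_t in the
 * per-dataset state -- a coarse stand-in for real range locks (see the
 * XXX note by rl_type_t above): two ranges that hash to the same slot
 * serialize even when they do not overlap.
 */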
static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
    rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

    ztest_rll_lock(rll, type);
}

static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
    rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

    ztest_rll_unlock(rll);
}

static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
    uint64_t size, rl_type_t type)
{
    uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
    rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
    rl_t *rl;

    rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
    rl->rl_object = object;
    rl->rl_offset = offset;
    rl->rl_size = size;
    rl->rl_lock = rll;

    ztest_rll_lock(rll, type);

    return (rl);
}

static void
ztest_range_unlock(rl_t *rl)
{
    rll_t *rll = rl->rl_lock;

    ztest_rll_unlock(rll);

    umem_free(rl, sizeof (*rl));
}

static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
    zd->zd_os = os;
    zd->zd_zilog = dmu_objset_zil(os);
    zd->zd_shared = szd;
    dmu_objset_name(os, zd->zd_name);

    if (zd->zd_shared != NULL)
        zd->zd_shared->zd_seq = 0;

    VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
    VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);

    for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
        ztest_rll_init(&zd->zd_object_lock[l]);

    for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
        ztest_rll_init(&zd->zd_range_lock[l]);
}

static void
ztest_zd_fini(ztest_ds_t *zd)
{
    VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);

    for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
        ztest_rll_destroy(&zd->zd_object_lock[l]);

    for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
        ztest_rll_destroy(&zd->zd_range_lock[l]);
}

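/*
 * TXG_MIGHTWAIT picks TXG_NOWAIT roughly one time in ten, so that
 * ztest_tx_assign() regularly exercises the ERESTART/dmu_tx_wait()
 * path as well as the common TXG_WAIT path.
 */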
#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)

static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
    uint64_t txg;
    int error;

    /*
     * Attempt to assign tx to some transaction group.
     */
    error = dmu_tx_assign(tx, txg_how);
    if (error) {
        if (error == ERESTART) {
            ASSERT(txg_how == TXG_NOWAIT);
            dmu_tx_wait(tx);
        } else {
            ASSERT3U(error, ==, ENOSPC);
            ztest_record_enospc(tag);
        }
        dmu_tx_abort(tx);
        return (0);
    }
    txg = dmu_tx_get_txg(tx);
    ASSERT(txg != 0);
    return (txg);
}

static void
ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
{
    uint64_t *ip = buf;
    uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);

    while (ip < ip_end)
        *ip++ = value;
}

static boolean_t
ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
{
    uint64_t *ip = buf;
    uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
    uint64_t diff = 0;

    while (ip < ip_end)
        diff |= (value - *ip++);

    return (diff == 0);
}

static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
    bt->bt_magic = BT_MAGIC;
    bt->bt_objset = dmu_objset_id(os);
    bt->bt_object = object;
    bt->bt_offset = offset;
    bt->bt_gen = gen;
    bt->bt_txg = txg;
    bt->bt_crtxg = crtxg;
}

static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
    ASSERT3U(bt->bt_magic, ==, BT_MAGIC);
    ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os));
    ASSERT3U(bt->bt_object, ==, object);
    ASSERT3U(bt->bt_offset, ==, offset);
    ASSERT3U(bt->bt_gen, <=, gen);
    ASSERT3U(bt->bt_txg, <=, txg);
    ASSERT3U(bt->bt_crtxg, ==, crtxg);
}

static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
    dmu_object_info_t doi;
    ztest_block_tag_t *bt;

    dmu_object_info_from_db(db, &doi);
    ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
    ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
    bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));

    return (bt);
}

/*
 * ZIL logging ops
 */

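/*
 * ztest operates on raw DMU objects rather than znodes, so it overloads
 * lr_create_t fields it has no other use for (lr_mode, lr_uid, lr_gid,
 * lr_rdev, lr_crtime[1]) to carry the object creation parameters through
 * the log record.
 */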
#define lrz_type        lr_mode
#define lrz_blocksize   lr_uid
#define lrz_ibshift     lr_gid
#define lrz_bonustype   lr_rdev
#define lrz_bonuslen    lr_crtime[1]

static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
    char *name = (void *)(lr + 1);    /* name follows lr */
    size_t namesize = strlen(name) + 1;
    itx_t *itx;

    if (zil_replaying(zd->zd_zilog, tx))
        return;

    itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
    bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
        sizeof (*lr) + namesize - sizeof (lr_t));

    zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
    char *name = (void *)(lr + 1);    /* name follows lr */
    size_t namesize = strlen(name) + 1;
    itx_t *itx;

    if (zil_replaying(zd->zd_zilog, tx))
        return;

    itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
    bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
        sizeof (*lr) + namesize - sizeof (lr_t));

    itx->itx_oid = object;
    zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
    itx_t *itx;
    itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);

    if (zil_replaying(zd->zd_zilog, tx))
        return;

    if (lr->lr_length > ZIL_MAX_LOG_DATA)
        write_state = WR_INDIRECT;

    itx = zil_itx_create(TX_WRITE,
        sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));

    if (write_state == WR_COPIED &&
        dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
        ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
        zil_itx_destroy(itx);
        itx = zil_itx_create(TX_WRITE, sizeof (*lr));
        write_state = WR_NEED_COPY;
    }
    itx->itx_private = zd;
    itx->itx_wr_state = write_state;
    itx->itx_sync = (ztest_random(8) == 0);
    itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);

    bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
        sizeof (*lr) - sizeof (lr_t));

    zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
    itx_t *itx;

    if (zil_replaying(zd->zd_zilog, tx))
        return;

    itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
    bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
        sizeof (*lr) - sizeof (lr_t));

    itx->itx_sync = B_FALSE;
    zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
    itx_t *itx;

    if (zil_replaying(zd->zd_zilog, tx))
        return;

    itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
    bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
        sizeof (*lr) - sizeof (lr_t));

    itx->itx_sync = B_FALSE;
    zil_itx_assign(zd->zd_zilog, itx, tx);
}

/*
 * ZIL replay ops
 */
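/*
 * Note that these replay functions do double duty: besides being
 * installed in ztest_replay_vector[] for zil_replay(), they are also
 * called directly (with byteswap B_FALSE) by ztest_create(),
 * ztest_remove(), ztest_write(), ztest_truncate() and ztest_setattr()
 * to perform the operations in the first place.
 */
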
static int
ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
{
    char *name = (void *)(lr + 1);    /* name follows lr */
    objset_t *os = zd->zd_os;
    ztest_block_tag_t *bbt;
    dmu_buf_t *db;
    dmu_tx_t *tx;
    uint64_t txg;
    int error = 0;

    if (byteswap)
        byteswap_uint64_array(lr, sizeof (*lr));

    ASSERT(lr->lr_doid == ZTEST_DIROBJ);
    ASSERT(name[0] != '\0');

    tx = dmu_tx_create(os);

    dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);

    if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
        dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
    } else {
        dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
    }

    txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
    if (txg == 0)
        return (ENOSPC);

    ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);

    if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
        if (lr->lr_foid == 0) {
            lr->lr_foid = zap_create(os,
                lr->lrz_type, lr->lrz_bonustype,
                lr->lrz_bonuslen, tx);
        } else {
            error = zap_create_claim(os, lr->lr_foid,
                lr->lrz_type, lr->lrz_bonustype,
                lr->lrz_bonuslen, tx);
        }
    } else {
        if (lr->lr_foid == 0) {
            lr->lr_foid = dmu_object_alloc(os,
                lr->lrz_type, 0, lr->lrz_bonustype,
                lr->lrz_bonuslen, tx);
        } else {
            error = dmu_object_claim(os, lr->lr_foid,
                lr->lrz_type, 0, lr->lrz_bonustype,
                lr->lrz_bonuslen, tx);
        }
    }

    if (error) {
        ASSERT3U(error, ==, EEXIST);
        ASSERT(zd->zd_zilog->zl_replay);
        dmu_tx_commit(tx);
        return (error);
    }

    ASSERT(lr->lr_foid != 0);

    if (lr->lrz_type != DMU_OT_ZAP_OTHER)
        VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
            lr->lrz_blocksize, lr->lrz_ibshift, tx));

    VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
    bbt = ztest_bt_bonus(db);
    dmu_buf_will_dirty(db, tx);
    ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
    dmu_buf_rele(db, FTAG);

    VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
        &lr->lr_foid, tx));

    (void) ztest_log_create(zd, tx, lr);

    dmu_tx_commit(tx);

    return (0);
}

static int
ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
{
    char *name = (void *)(lr + 1);    /* name follows lr */
    objset_t *os = zd->zd_os;
    dmu_object_info_t doi;
    dmu_tx_t *tx;
    uint64_t object, txg;

    if (byteswap)
        byteswap_uint64_array(lr, sizeof (*lr));

    ASSERT(lr->lr_doid == ZTEST_DIROBJ);
    ASSERT(name[0] != '\0');

    VERIFY3U(0, ==,
        zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
    ASSERT(object != 0);

    ztest_object_lock(zd, object, RL_WRITER);

    VERIFY3U(0, ==, dmu_object_info(os, object, &doi));

    tx = dmu_tx_create(os);

    dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
    dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);

    txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
    if (txg == 0) {
        ztest_object_unlock(zd, object);
        return (ENOSPC);
    }

    if (doi.doi_type == DMU_OT_ZAP_OTHER) {
        VERIFY3U(0, ==, zap_destroy(os, object, tx));
    } else {
        VERIFY3U(0, ==, dmu_object_free(os, object, tx));
    }

    VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));

    (void) ztest_log_remove(zd, tx, lr, object);

    dmu_tx_commit(tx);

    ztest_object_unlock(zd, object);

    return (0);
}

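/*
 * For a dmu_sync()ed block the log record carries no payload (lrc_reclen
 * is exactly sizeof (lr_write_t)), so replay must rewrite the entire
 * block named by lr_blkptr rather than just the logged byte range.
 */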
static int
ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
{
    objset_t *os = zd->zd_os;
    void *data = lr + 1;    /* data follows lr */
    uint64_t offset, length;
    ztest_block_tag_t *bt = data;
    ztest_block_tag_t *bbt;
    uint64_t gen, txg, lrtxg, crtxg;
    dmu_object_info_t doi;
    dmu_tx_t *tx;
    dmu_buf_t *db;
    arc_buf_t *abuf = NULL;
    rl_t *rl;

    if (byteswap)
        byteswap_uint64_array(lr, sizeof (*lr));

    offset = lr->lr_offset;
    length = lr->lr_length;

    /* If it's a dmu_sync() block, write the whole block */
    if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
        uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
        if (length < blocksize) {
            offset -= offset % blocksize;
            length = blocksize;
        }
    }

    if (bt->bt_magic == BSWAP_64(BT_MAGIC))
        byteswap_uint64_array(bt, sizeof (*bt));

    if (bt->bt_magic != BT_MAGIC)
        bt = NULL;

    ztest_object_lock(zd, lr->lr_foid, RL_READER);
    rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);

    VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

    dmu_object_info_from_db(db, &doi);

    bbt = ztest_bt_bonus(db);
    ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
    gen = bbt->bt_gen;
    crtxg = bbt->bt_crtxg;
    lrtxg = lr->lr_common.lrc_txg;

    tx = dmu_tx_create(os);

    dmu_tx_hold_write(tx, lr->lr_foid, offset, length);

    if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
        P2PHASE(offset, length) == 0)
        abuf = dmu_request_arcbuf(db, length);

    txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
    if (txg == 0) {
        if (abuf != NULL)
            dmu_return_arcbuf(abuf);
        dmu_buf_rele(db, FTAG);
        ztest_range_unlock(rl);
        ztest_object_unlock(zd, lr->lr_foid);
        return (ENOSPC);
    }

    if (bt != NULL) {
        /*
         * Usually, verify the old data before writing new data --
         * but not always, because we also want to verify correct
         * behavior when the data was not recently read into cache.
         */
        ASSERT(offset % doi.doi_data_block_size == 0);
        if (ztest_random(4) != 0) {
            int prefetch = ztest_random(2) ?
                DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
            ztest_block_tag_t rbt;

            VERIFY(dmu_read(os, lr->lr_foid, offset,
                sizeof (rbt), &rbt, prefetch) == 0);
            if (rbt.bt_magic == BT_MAGIC) {
                ztest_bt_verify(&rbt, os, lr->lr_foid,
                    offset, gen, txg, crtxg);
            }
        }

        /*
         * Writes can appear to be newer than the bonus buffer because
         * the ztest_get_data() callback does a dmu_read() of the
         * open-context data, which may be different than the data
         * as it was when the write was generated.
         */
        if (zd->zd_zilog->zl_replay) {
            ztest_bt_verify(bt, os, lr->lr_foid, offset,
                MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
                bt->bt_crtxg);
        }

        /*
         * Set the bt's gen/txg to the bonus buffer's gen/txg
         * so that all of the usual ASSERTs will work.
         */
        ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
    }

    if (abuf == NULL) {
        dmu_write(os, lr->lr_foid, offset, length, data, tx);
    } else {
        bcopy(data, abuf->b_data, length);
        dmu_assign_arcbuf(db, offset, abuf, tx);
    }

    (void) ztest_log_write(zd, tx, lr);

    dmu_buf_rele(db, FTAG);

    dmu_tx_commit(tx);

    ztest_range_unlock(rl);
    ztest_object_unlock(zd, lr->lr_foid);

    return (0);
}

static int
ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
{
    objset_t *os = zd->zd_os;
    dmu_tx_t *tx;
    uint64_t txg;
    rl_t *rl;

    if (byteswap)
        byteswap_uint64_array(lr, sizeof (*lr));

    ztest_object_lock(zd, lr->lr_foid, RL_READER);
    rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
        RL_WRITER);

    tx = dmu_tx_create(os);

    dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);

    txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
    if (txg == 0) {
        ztest_range_unlock(rl);
        ztest_object_unlock(zd, lr->lr_foid);
        return (ENOSPC);
    }

    VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
        lr->lr_length, tx) == 0);

    (void) ztest_log_truncate(zd, tx, lr);

    dmu_tx_commit(tx);

    ztest_range_unlock(rl);
    ztest_object_unlock(zd, lr->lr_foid);

    return (0);
}

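/*
 * Note that this function reuses lr_mode as a generation number: outside
 * of replay it sets lr_mode to bt_gen + 1 and picks a random bonus size,
 * then verifies the old bonus block tag before stamping the new one.
 */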
static int
ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
{
    objset_t *os = zd->zd_os;
    dmu_tx_t *tx;
    dmu_buf_t *db;
    ztest_block_tag_t *bbt;
    uint64_t txg, lrtxg, crtxg;

    if (byteswap)
        byteswap_uint64_array(lr, sizeof (*lr));

    ztest_object_lock(zd, lr->lr_foid, RL_WRITER);

    VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

    tx = dmu_tx_create(os);
    dmu_tx_hold_bonus(tx, lr->lr_foid);

    txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
    if (txg == 0) {
        dmu_buf_rele(db, FTAG);
        ztest_object_unlock(zd, lr->lr_foid);
        return (ENOSPC);
    }

    bbt = ztest_bt_bonus(db);
    ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
    crtxg = bbt->bt_crtxg;
    lrtxg = lr->lr_common.lrc_txg;

    if (zd->zd_zilog->zl_replay) {
        ASSERT(lr->lr_size != 0);
        ASSERT(lr->lr_mode != 0);
        ASSERT(lrtxg != 0);
    } else {
        /*
         * Randomly change the size and increment the generation.
         */
        lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
            sizeof (*bbt);
        lr->lr_mode = bbt->bt_gen + 1;
        ASSERT(lrtxg == 0);
    }

    /*
     * Verify that the current bonus buffer is not newer than our txg.
     */
    ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
        MAX(txg, lrtxg), crtxg);

    dmu_buf_will_dirty(db, tx);

    ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
    ASSERT3U(lr->lr_size, <=, db->db_size);
    VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
    bbt = ztest_bt_bonus(db);

    ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);

    dmu_buf_rele(db, FTAG);

    (void) ztest_log_setattr(zd, tx, lr);

    dmu_tx_commit(tx);

    ztest_object_unlock(zd, lr->lr_foid);

    return (0);
}

zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
    NULL,                       /* 0 no such transaction type */
    ztest_replay_create,        /* TX_CREATE */
    NULL,                       /* TX_MKDIR */
    NULL,                       /* TX_MKXATTR */
    NULL,                       /* TX_SYMLINK */
    ztest_replay_remove,        /* TX_REMOVE */
    NULL,                       /* TX_RMDIR */
    NULL,                       /* TX_LINK */
    NULL,                       /* TX_RENAME */
    ztest_replay_write,         /* TX_WRITE */
    ztest_replay_truncate,      /* TX_TRUNCATE */
    ztest_replay_setattr,       /* TX_SETATTR */
    NULL,                       /* TX_ACL */
    NULL,                       /* TX_CREATE_ACL */
    NULL,                       /* TX_CREATE_ATTR */
    NULL,                       /* TX_CREATE_ACL_ATTR */
    NULL,                       /* TX_MKDIR_ACL */
    NULL,                       /* TX_MKDIR_ATTR */
    NULL,                       /* TX_MKDIR_ACL_ATTR */
    NULL,                       /* TX_WRITE2 */
};

/*
 * ZIL get_data callbacks
 */
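/*
 * ztest_get_data() is the callback handed to zil_open(); zil_commit()
 * invokes it to obtain data that was not copied into the log record:
 * with buf != NULL it dmu_read()s the range into the caller's buffer,
 * otherwise it dmu_sync()s the dirty block and fills in lr_blkptr.
 * ztest_get_done() releases the locks and holds in either case.
 */
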
static void
ztest_get_done(zgd_t *zgd, int error)
{
    ztest_ds_t *zd = zgd->zgd_private;
    uint64_t object = zgd->zgd_rl->rl_object;

    if (zgd->zgd_db)
        dmu_buf_rele(zgd->zgd_db, zgd);

    ztest_range_unlock(zgd->zgd_rl);
    ztest_object_unlock(zd, object);

    if (error == 0 && zgd->zgd_bp)
        zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

    umem_free(zgd, sizeof (*zgd));
}

static int
ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
    ztest_ds_t *zd = arg;
    objset_t *os = zd->zd_os;
    uint64_t object = lr->lr_foid;
    uint64_t offset = lr->lr_offset;
    uint64_t size = lr->lr_length;
    blkptr_t *bp = &lr->lr_blkptr;
    uint64_t txg = lr->lr_common.lrc_txg;
    uint64_t crtxg;
    dmu_object_info_t doi;
    dmu_buf_t *db;
    zgd_t *zgd;
    int error;

    ztest_object_lock(zd, object, RL_READER);
    error = dmu_bonus_hold(os, object, FTAG, &db);
    if (error) {
        ztest_object_unlock(zd, object);
        return (error);
    }

    crtxg = ztest_bt_bonus(db)->bt_crtxg;

    if (crtxg == 0 || crtxg > txg) {
        dmu_buf_rele(db, FTAG);
        ztest_object_unlock(zd, object);
        return (ENOENT);
    }

    dmu_object_info_from_db(db, &doi);
    dmu_buf_rele(db, FTAG);
    db = NULL;

    zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
    zgd->zgd_zilog = zd->zd_zilog;
    zgd->zgd_private = zd;

    if (buf != NULL) {    /* immediate write */
        zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
            RL_READER);

        error = dmu_read(os, object, offset, size, buf,
            DMU_READ_NO_PREFETCH);
        ASSERT(error == 0);
    } else {
        size = doi.doi_data_block_size;
        if (ISP2(size)) {
            offset = P2ALIGN(offset, size);
        } else {
            ASSERT(offset < size);
            offset = 0;
        }

        zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
            RL_READER);

        error = dmu_buf_hold(os, object, offset, zgd, &db,
            DMU_READ_NO_PREFETCH);

        if (error == 0) {
            blkptr_t *obp = dmu_buf_get_blkptr(db);
            if (obp) {
                ASSERT(BP_IS_HOLE(bp));
                *bp = *obp;
            }

            zgd->zgd_db = db;
            zgd->zgd_bp = bp;

            ASSERT(db->db_offset == offset);
            ASSERT(db->db_size == size);

            error = dmu_sync(zio, lr->lr_common.lrc_txg,
                ztest_get_done, zgd);

            if (error == 0)
                return (0);
        }
    }

    ztest_get_done(zgd, error);

    return (error);
}

static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
    char *lr;
    size_t namesize = name ? strlen(name) + 1 : 0;

    lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);

    if (name)
        bcopy(name, lr + lrsize, namesize);

    return (lr);
}

void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
    size_t namesize = name ? strlen(name) + 1 : 0;

    umem_free(lr, lrsize + namesize);
}

/*
 * Lookup a bunch of objects.  Returns the number of objects not found.
 */
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
    int missing = 0;
    int error;

    ASSERT(_mutex_held(&zd->zd_dirobj_lock));

    for (int i = 0; i < count; i++, od++) {
        od->od_object = 0;
        error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
            sizeof (uint64_t), 1, &od->od_object);
        if (error) {
            ASSERT(error == ENOENT);
            ASSERT(od->od_object == 0);
            missing++;
        } else {
            dmu_buf_t *db;
            ztest_block_tag_t *bbt;
            dmu_object_info_t doi;

            ASSERT(od->od_object != 0);
            ASSERT(missing == 0);    /* there should be no gaps */

            ztest_object_lock(zd, od->od_object, RL_READER);
            VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
                od->od_object, FTAG, &db));
            dmu_object_info_from_db(db, &doi);
            bbt = ztest_bt_bonus(db);
            ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
            od->od_type = doi.doi_type;
            od->od_blocksize = doi.doi_data_block_size;
            od->od_gen = bbt->bt_gen;
            dmu_buf_rele(db, FTAG);
            ztest_object_unlock(zd, od->od_object);
        }
    }

    return (missing);
}

static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
    int missing = 0;

    ASSERT(_mutex_held(&zd->zd_dirobj_lock));

    for (int i = 0; i < count; i++, od++) {
        if (missing) {
            od->od_object = 0;
            missing++;
            continue;
        }

        lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

        lr->lr_doid = od->od_dir;
        lr->lr_foid = 0;    /* 0 to allocate, > 0 to claim */
        lr->lrz_type = od->od_crtype;
        lr->lrz_blocksize = od->od_crblocksize;
        lr->lrz_ibshift = ztest_random_ibshift();
        lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
        lr->lrz_bonuslen = dmu_bonus_max();
        lr->lr_gen = od->od_crgen;
        lr->lr_crtime[0] = time(NULL);

        if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
            ASSERT(missing == 0);
            od->od_object = 0;
            missing++;
        } else {
            od->od_object = lr->lr_foid;
            od->od_type = od->od_crtype;
            od->od_blocksize = od->od_crblocksize;
            od->od_gen = od->od_crgen;
            ASSERT(od->od_object != 0);
        }

        ztest_lr_free(lr, sizeof (*lr), od->od_name);
    }

    return (missing);
}

2165 */ 2166 io_type = ztest_random(ZTEST_IO_TYPES); 2167 if (ztest_random(2) == 0) 2168 io_type = ZTEST_IO_WRITE_TAG; 2169 2170 (void) rw_rdlock(&zd->zd_zilog_lock); 2171 2172 switch (io_type) { 2173 2174 case ZTEST_IO_WRITE_TAG: 2175 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); 2176 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); 2177 break; 2178 2179 case ZTEST_IO_WRITE_PATTERN: 2180 (void) memset(data, 'a' + (object + offset) % 5, blocksize); 2181 if (ztest_random(2) == 0) { 2182 /* 2183 * Induce fletcher2 collisions to ensure that 2184 * zio_ddt_collision() detects and resolves them 2185 * when using fletcher2-verify for deduplication. 2186 */ 2187 ((uint64_t *)data)[0] ^= 1ULL << 63; 2188 ((uint64_t *)data)[4] ^= 1ULL << 63; 2189 } 2190 (void) ztest_write(zd, object, offset, blocksize, data); 2191 break; 2192 2193 case ZTEST_IO_WRITE_ZEROES: 2194 bzero(data, blocksize); 2195 (void) ztest_write(zd, object, offset, blocksize, data); 2196 break; 2197 2198 case ZTEST_IO_TRUNCATE: 2199 (void) ztest_truncate(zd, object, offset, blocksize); 2200 break; 2201 2202 case ZTEST_IO_SETATTR: 2203 (void) ztest_setattr(zd, object); 2204 break; 2205 2206 case ZTEST_IO_REWRITE: 2207 (void) rw_rdlock(&ztest_name_lock); 2208 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2209 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa), 2210 B_FALSE); 2211 VERIFY(err == 0 || err == ENOSPC); 2212 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2213 ZFS_PROP_COMPRESSION, 2214 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), 2215 B_FALSE); 2216 VERIFY(err == 0 || err == ENOSPC); 2217 (void) rw_unlock(&ztest_name_lock); 2218 2219 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data, 2220 DMU_READ_NO_PREFETCH)); 2221 2222 (void) ztest_write(zd, object, offset, blocksize, data); 2223 break; 2224 } 2225 2226 (void) rw_unlock(&zd->zd_zilog_lock); 2227 2228 umem_free(data, blocksize); 2229} 2230 2231/* 2232 * Initialize an object description template. 2233 */ 2234static void 2235ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, 2236 dmu_object_type_t type, uint64_t blocksize, uint64_t gen) 2237{ 2238 od->od_dir = ZTEST_DIROBJ; 2239 od->od_object = 0; 2240 2241 od->od_crtype = type; 2242 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); 2243 od->od_crgen = gen; 2244 2245 od->od_type = DMU_OT_NONE; 2246 od->od_blocksize = 0; 2247 od->od_gen = 0; 2248 2249 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", 2250 tag, (int64_t)id, index); 2251} 2252 2253/* 2254 * Lookup or create the objects for a test using the od template. 2255 * If the objects do not all exist, or if 'remove' is specified, 2256 * remove any existing objects and create new ones. Otherwise, 2257 * use the existing objects. 
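 *
 * A minimal usage sketch, following the pattern the tests below use:
 *
 *	ztest_od_t od;
 *
 *	ztest_od_init(&od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
 *	if (ztest_object_init(zd, &od, sizeof (od), B_FALSE) != 0)
 *		return;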
2258 */ 2259static int 2260ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) 2261{ 2262 int count = size / sizeof (*od); 2263 int rv = 0; 2264 2265 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); 2266 if ((ztest_lookup(zd, od, count) != 0 || remove) && 2267 (ztest_remove(zd, od, count) != 0 || 2268 ztest_create(zd, od, count) != 0)) 2269 rv = -1; 2270 zd->zd_od = od; 2271 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2272 2273 return (rv); 2274} 2275 2276/* ARGSUSED */ 2277void 2278ztest_zil_commit(ztest_ds_t *zd, uint64_t id) 2279{ 2280 zilog_t *zilog = zd->zd_zilog; 2281 2282 (void) rw_rdlock(&zd->zd_zilog_lock); 2283 2284 zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); 2285 2286 /* 2287 * Remember the committed values in zd, which is in parent/child 2288 * shared memory. If we die, the next iteration of ztest_run() 2289 * will verify that the log really does contain this record. 2290 */ 2291 mutex_enter(&zilog->zl_lock); 2292 ASSERT(zd->zd_shared != NULL); 2293 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq); 2294 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq; 2295 mutex_exit(&zilog->zl_lock); 2296 2297 (void) rw_unlock(&zd->zd_zilog_lock); 2298} 2299 2300/* 2301 * This function is designed to simulate the operations that occur during a 2302 * mount/unmount operation. We hold the dataset across these operations in an 2303 * attempt to expose any implicit assumptions about ZIL management. 2304 */ 2305/* ARGSUSED */ 2306void 2307ztest_zil_remount(ztest_ds_t *zd, uint64_t id) 2308{ 2309 objset_t *os = zd->zd_os; 2310 2311 /* 2312 * We grab the zd_dirobj_lock to ensure that no other thread is 2313 * updating the zil (i.e. adding in-memory log records) and the 2314 * zd_zilog_lock to block any I/O. 2315 */ 2316 VERIFY0(mutex_lock(&zd->zd_dirobj_lock)); 2317 (void) rw_wrlock(&zd->zd_zilog_lock); 2318 2319 /* zfsvfs_teardown() */ 2320 zil_close(zd->zd_zilog); 2321 2322 /* zfsvfs_setup() */ 2323 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); 2324 zil_replay(os, zd, ztest_replay_vector); 2325 2326 (void) rw_unlock(&zd->zd_zilog_lock); 2327 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2328} 2329 2330/* 2331 * Verify that we can't destroy an active pool, create an existing pool, 2332 * or create a pool with a bad vdev spec. 2333 */ 2334/* ARGSUSED */ 2335void 2336ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) 2337{ 2338 ztest_shared_opts_t *zo = &ztest_opts; 2339 spa_t *spa; 2340 nvlist_t *nvroot; 2341 2342 /* 2343 * Attempt to create using a bad file. 2344 */ 2345 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2346 VERIFY3U(ENOENT, ==, 2347 spa_create("ztest_bad_file", nvroot, NULL, NULL)); 2348 nvlist_free(nvroot); 2349 2350 /* 2351 * Attempt to create using a bad mirror. 2352 */ 2353 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1); 2354 VERIFY3U(ENOENT, ==, 2355 spa_create("ztest_bad_mirror", nvroot, NULL, NULL)); 2356 nvlist_free(nvroot); 2357 2358 /* 2359 * Attempt to create an existing pool. It shouldn't matter 2360 * what's in the nvroot; we should fail with EEXIST. 
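 * We read-hold ztest_name_lock across these checks so that no
 * concurrent test can rename or destroy the pool name out from
 * under us.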
2361 */ 2362 (void) rw_rdlock(&ztest_name_lock); 2363 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2364 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL)); 2365 nvlist_free(nvroot); 2366 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG)); 2367 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool)); 2368 spa_close(spa, FTAG); 2369 2370 (void) rw_unlock(&ztest_name_lock); 2371} 2372 2373/* ARGSUSED */ 2374void 2375ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id) 2376{ 2377 spa_t *spa; 2378 uint64_t initial_version = SPA_VERSION_INITIAL; 2379 uint64_t version, newversion; 2380 nvlist_t *nvroot, *props; 2381 char *name; 2382 2383 VERIFY0(mutex_lock(&ztest_vdev_lock)); 2384 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool); 2385 2386 /* 2387 * Clean up from previous runs. 2388 */ 2389 (void) spa_destroy(name); 2390 2391 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0, 2392 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1); 2393 2394 /* 2395 * If we're configuring a RAIDZ device, then make sure that the 2396 * initial version is capable of supporting that feature. 2397 */ 2398 switch (ztest_opts.zo_raidz_parity) { 2399 case 0: 2400 case 1: 2401 initial_version = SPA_VERSION_INITIAL; 2402 break; 2403 case 2: 2404 initial_version = SPA_VERSION_RAIDZ2; 2405 break; 2406 case 3: 2407 initial_version = SPA_VERSION_RAIDZ3; 2408 break; 2409 } 2410 2411 /* 2412 * Create a pool with a spa version that can be upgraded. Pick 2413 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES. 2414 */ 2415 do { 2416 version = ztest_random_spa_version(initial_version); 2417 } while (version > SPA_VERSION_BEFORE_FEATURES); 2418 2419 props = fnvlist_alloc(); 2420 fnvlist_add_uint64(props, 2421 zpool_prop_to_name(ZPOOL_PROP_VERSION), version); 2422 VERIFY0(spa_create(name, nvroot, props, NULL)); 2423 fnvlist_free(nvroot); 2424 fnvlist_free(props); 2425 2426 VERIFY0(spa_open(name, &spa, FTAG)); 2427 VERIFY3U(spa_version(spa), ==, version); 2428 newversion = ztest_random_spa_version(version + 1); 2429 2430 if (ztest_opts.zo_verbose >= 4) { 2431 (void) printf("upgrading spa version from %llu to %llu\n", 2432 (u_longlong_t)version, (u_longlong_t)newversion); 2433 } 2434 2435 spa_upgrade(spa, newversion); 2436 VERIFY3U(spa_version(spa), >, version); 2437 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config, 2438 zpool_prop_to_name(ZPOOL_PROP_VERSION))); 2439 spa_close(spa, FTAG); 2440 2441 strfree(name); 2442 VERIFY0(mutex_unlock(&ztest_vdev_lock)); 2443} 2444 2445static vdev_t * 2446vdev_lookup_by_path(vdev_t *vd, const char *path) 2447{ 2448 vdev_t *mvd; 2449 2450 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) 2451 return (vd); 2452 2453 for (int c = 0; c < vd->vdev_children; c++) 2454 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != 2455 NULL) 2456 return (mvd); 2457 2458 return (NULL); 2459} 2460 2461/* 2462 * Find the first available hole which can be used as a top-level vdev. 2463 */ 2464int 2465find_vdev_hole(spa_t *spa) 2466{ 2467 vdev_t *rvd = spa->spa_root_vdev; 2468 int c; 2469 2470 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); 2471 2472 for (c = 0; c < rvd->vdev_children; c++) { 2473 vdev_t *cvd = rvd->vdev_child[c]; 2474 2475 if (cvd->vdev_ishole) 2476 break; 2477 } 2478 return (c); 2479} 2480 2481/* 2482 * Verify that vdev_add() works as expected.
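 *
 * (Each top-level vdev contributes MAX(zs_mirrors + zs_splits, 1) *
 * zo_raidz leaves; that per-top-level count is what 'leaves' below
 * holds, and it converts the first hole index into the next unused
 * device number.)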
2483 */ 2484/* ARGSUSED */ 2485void 2486ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) 2487{ 2488 ztest_shared_t *zs = ztest_shared; 2489 spa_t *spa = ztest_spa; 2490 uint64_t leaves; 2491 uint64_t guid; 2492 nvlist_t *nvroot; 2493 int error; 2494 2495 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2496 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz; 2497 2498 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2499 2500 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; 2501 2502 /* 2503 * If we have slogs then remove them 1/4 of the time. 2504 */ 2505 if (spa_has_slogs(spa) && ztest_random(4) == 0) { 2506 /* 2507 * Grab the guid from the head of the log class rotor. 2508 */ 2509 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; 2510 2511 spa_config_exit(spa, SCL_VDEV, FTAG); 2512 2513 /* 2514 * We have to grab the zs_name_lock as writer to 2515 * prevent a race between removing a slog (dmu_objset_find) 2516 * and destroying a dataset. Removing the slog will 2517 * grab a reference on the dataset which may cause 2518 * dmu_objset_destroy() to fail with EBUSY thus 2519 * leaving the dataset in an inconsistent state. 2520 */ 2521 VERIFY(rw_wrlock(&ztest_name_lock) == 0); 2522 error = spa_vdev_remove(spa, guid, B_FALSE); 2523 VERIFY(rw_unlock(&ztest_name_lock) == 0); 2524 2525 if (error && error != EEXIST) 2526 fatal(0, "spa_vdev_remove() = %d", error); 2527 } else { 2528 spa_config_exit(spa, SCL_VDEV, FTAG); 2529 2530 /* 2531 * Make 1/4 of the devices be log devices. 2532 */ 2533 nvroot = make_vdev_root(NULL, NULL, NULL, 2534 ztest_opts.zo_vdev_size, 0, 2535 ztest_random(4) == 0, ztest_opts.zo_raidz, 2536 zs->zs_mirrors, 1); 2537 2538 error = spa_vdev_add(spa, nvroot); 2539 nvlist_free(nvroot); 2540 2541 if (error == ENOSPC) 2542 ztest_record_enospc("spa_vdev_add"); 2543 else if (error != 0) 2544 fatal(0, "spa_vdev_add() = %d", error); 2545 } 2546 2547 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2548} 2549 2550/* 2551 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 2552 */ 2553/* ARGSUSED */ 2554void 2555ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2556{ 2557 ztest_shared_t *zs = ztest_shared; 2558 spa_t *spa = ztest_spa; 2559 vdev_t *rvd = spa->spa_root_vdev; 2560 spa_aux_vdev_t *sav; 2561 char *aux; 2562 uint64_t guid = 0; 2563 int error; 2564 2565 if (ztest_random(2) == 0) { 2566 sav = &spa->spa_spares; 2567 aux = ZPOOL_CONFIG_SPARES; 2568 } else { 2569 sav = &spa->spa_l2cache; 2570 aux = ZPOOL_CONFIG_L2CACHE; 2571 } 2572 2573 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2574 2575 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2576 2577 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2578 /* 2579 * Pick a random device to remove. 2580 */ 2581 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2582 } else { 2583 /* 2584 * Find an unused device we can add. 2585 */ 2586 zs->zs_vdev_aux = 0; 2587 for (;;) { 2588 char path[MAXPATHLEN]; 2589 int c; 2590 (void) snprintf(path, sizeof (path), ztest_aux_template, 2591 ztest_opts.zo_dir, ztest_opts.zo_pool, aux, 2592 zs->zs_vdev_aux); 2593 for (c = 0; c < sav->sav_count; c++) 2594 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2595 path) == 0) 2596 break; 2597 if (c == sav->sav_count && 2598 vdev_lookup_by_path(rvd, path) == NULL) 2599 break; 2600 zs->zs_vdev_aux++; 2601 } 2602 } 2603 2604 spa_config_exit(spa, SCL_VDEV, FTAG); 2605 2606 if (guid == 0) { 2607 /* 2608 * Add a new device. 
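 * It is sized at 5/4 of the standard vdev size, presumably so an
 * aux device is never too small to stand in for a main-pool leaf.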
2609 */ 2610 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL, 2611 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2612 error = spa_vdev_add(spa, nvroot); 2613 if (error != 0) 2614 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2615 nvlist_free(nvroot); 2616 } else { 2617 /* 2618 * Remove an existing device. Sometimes, dirty its 2619 * vdev state first to make sure we handle removal 2620 * of devices that have pending state changes. 2621 */ 2622 if (ztest_random(2) == 0) 2623 (void) vdev_online(spa, guid, 0, NULL); 2624 2625 error = spa_vdev_remove(spa, guid, B_FALSE); 2626 if (error != 0 && error != EBUSY) 2627 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2628 } 2629 2630 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2631} 2632 2633/* 2634 * split a pool if it has mirror tlvdevs 2635 */ 2636/* ARGSUSED */ 2637void 2638ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2639{ 2640 ztest_shared_t *zs = ztest_shared; 2641 spa_t *spa = ztest_spa; 2642 vdev_t *rvd = spa->spa_root_vdev; 2643 nvlist_t *tree, **child, *config, *split, **schild; 2644 uint_t c, children, schildren = 0, lastlogid = 0; 2645 int error = 0; 2646 2647 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2648 2649 /* ensure we have a useable config; mirrors of raidz aren't supported */ 2650 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) { 2651 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2652 return; 2653 } 2654 2655 /* clean up the old pool, if any */ 2656 (void) spa_destroy("splitp"); 2657 2658 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2659 2660 /* generate a config from the existing config */ 2661 mutex_enter(&spa->spa_props_lock); 2662 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2663 &tree) == 0); 2664 mutex_exit(&spa->spa_props_lock); 2665 2666 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2667 &children) == 0); 2668 2669 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2670 for (c = 0; c < children; c++) { 2671 vdev_t *tvd = rvd->vdev_child[c]; 2672 nvlist_t **mchild; 2673 uint_t mchildren; 2674 2675 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2676 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2677 0) == 0); 2678 VERIFY(nvlist_add_string(schild[schildren], 2679 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2680 VERIFY(nvlist_add_uint64(schild[schildren], 2681 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2682 if (lastlogid == 0) 2683 lastlogid = schildren; 2684 ++schildren; 2685 continue; 2686 } 2687 lastlogid = 0; 2688 VERIFY(nvlist_lookup_nvlist_array(child[c], 2689 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2690 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2691 } 2692 2693 /* OK, create a config that can be used to split */ 2694 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2695 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2696 VDEV_TYPE_ROOT) == 0); 2697 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2698 lastlogid != 0 ? 
lastlogid : schildren) == 0); 2699 2700 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2701 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2702 2703 for (c = 0; c < schildren; c++) 2704 nvlist_free(schild[c]); 2705 free(schild); 2706 nvlist_free(split); 2707 2708 spa_config_exit(spa, SCL_VDEV, FTAG); 2709 2710 (void) rw_wrlock(&ztest_name_lock); 2711 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2712 (void) rw_unlock(&ztest_name_lock); 2713 2714 nvlist_free(config); 2715 2716 if (error == 0) { 2717 (void) printf("successful split - results:\n"); 2718 mutex_enter(&spa_namespace_lock); 2719 show_pool_stats(spa); 2720 show_pool_stats(spa_lookup("splitp")); 2721 mutex_exit(&spa_namespace_lock); 2722 ++zs->zs_splits; 2723 --zs->zs_mirrors; 2724 } 2725 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2726 2727} 2728 2729/* 2730 * Verify that we can attach and detach devices. 2731 */ 2732/* ARGSUSED */ 2733void 2734ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2735{ 2736 ztest_shared_t *zs = ztest_shared; 2737 spa_t *spa = ztest_spa; 2738 spa_aux_vdev_t *sav = &spa->spa_spares; 2739 vdev_t *rvd = spa->spa_root_vdev; 2740 vdev_t *oldvd, *newvd, *pvd; 2741 nvlist_t *root; 2742 uint64_t leaves; 2743 uint64_t leaf, top; 2744 uint64_t ashift = ztest_get_ashift(); 2745 uint64_t oldguid, pguid; 2746 uint64_t oldsize, newsize; 2747 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2748 int replacing; 2749 int oldvd_has_siblings = B_FALSE; 2750 int newvd_is_spare = B_FALSE; 2751 int oldvd_is_log; 2752 int error, expected_error; 2753 2754 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2755 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 2756 2757 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2758 2759 /* 2760 * Decide whether to do an attach or a replace. 2761 */ 2762 replacing = ztest_random(2); 2763 2764 /* 2765 * Pick a random top-level vdev. 2766 */ 2767 top = ztest_random_vdev_top(spa, B_TRUE); 2768 2769 /* 2770 * Pick a random leaf within it. 2771 */ 2772 leaf = ztest_random(leaves); 2773 2774 /* 2775 * Locate this vdev. 2776 */ 2777 oldvd = rvd->vdev_child[top]; 2778 if (zs->zs_mirrors >= 1) { 2779 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2780 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2781 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz]; 2782 } 2783 if (ztest_opts.zo_raidz > 1) { 2784 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2785 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz); 2786 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz]; 2787 } 2788 2789 /* 2790 * If we're already doing an attach or replace, oldvd may be a 2791 * mirror vdev -- in which case, pick a random child. 2792 */ 2793 while (oldvd->vdev_children != 0) { 2794 oldvd_has_siblings = B_TRUE; 2795 ASSERT(oldvd->vdev_children >= 2); 2796 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2797 } 2798 2799 oldguid = oldvd->vdev_guid; 2800 oldsize = vdev_get_min_asize(oldvd); 2801 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2802 (void) strcpy(oldpath, oldvd->vdev_path); 2803 pvd = oldvd->vdev_parent; 2804 pguid = pvd->vdev_guid; 2805 2806 /* 2807 * If oldvd has siblings, then half of the time, detach it. 
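 * (We drop our SCL_VDEV reader hold first, since spa_vdev_detach()
 * acquires the config locks itself, as writer.)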
2808 */ 2809 if (oldvd_has_siblings && ztest_random(2) == 0) { 2810 spa_config_exit(spa, SCL_VDEV, FTAG); 2811 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2812 if (error != 0 && error != ENODEV && error != EBUSY && 2813 error != ENOTSUP) 2814 fatal(0, "detach (%s) returned %d", oldpath, error); 2815 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2816 return; 2817 } 2818 2819 /* 2820 * For the new vdev, choose with equal probability between the two 2821 * standard paths (ending in either 'a' or 'b') or a random hot spare. 2822 */ 2823 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2824 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2825 newvd_is_spare = B_TRUE; 2826 (void) strcpy(newpath, newvd->vdev_path); 2827 } else { 2828 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2829 ztest_opts.zo_dir, ztest_opts.zo_pool, 2830 top * leaves + leaf); 2831 if (ztest_random(2) == 0) 2832 newpath[strlen(newpath) - 1] = 'b'; 2833 newvd = vdev_lookup_by_path(rvd, newpath); 2834 } 2835 2836 if (newvd) { 2837 newsize = vdev_get_min_asize(newvd); 2838 } else { 2839 /* 2840 * Make newsize a little bigger or smaller than oldsize. 2841 * If it's smaller, the attach should fail. 2842 * If it's larger, and we're doing a replace, 2843 * we should get dynamic LUN growth when we're done. 2844 */ 2845 newsize = 10 * oldsize / (9 + ztest_random(3)); 2846 } 2847 2848 /* 2849 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2850 * unless it's a replace; in that case any non-replacing parent is OK. 2851 * 2852 * If newvd is already part of the pool, it should fail with EBUSY. 2853 * 2854 * If newvd is too small, it should fail with EOVERFLOW. 2855 */ 2856 if (pvd->vdev_ops != &vdev_mirror_ops && 2857 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2858 pvd->vdev_ops == &vdev_replacing_ops || 2859 pvd->vdev_ops == &vdev_spare_ops)) 2860 expected_error = ENOTSUP; 2861 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2862 expected_error = ENOTSUP; 2863 else if (newvd == oldvd) 2864 expected_error = replacing ? 0 : EBUSY; 2865 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 2866 expected_error = EBUSY; 2867 else if (newsize < oldsize) 2868 expected_error = EOVERFLOW; 2869 else if (ashift > oldvd->vdev_top->vdev_ashift) 2870 expected_error = EDOM; 2871 else 2872 expected_error = 0; 2873 2874 spa_config_exit(spa, SCL_VDEV, FTAG); 2875 2876 /* 2877 * Build the nvlist describing newpath. 2878 */ 2879 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0, 2880 ashift, 0, 0, 0, 1); 2881 2882 error = spa_vdev_attach(spa, oldguid, root, replacing); 2883 2884 nvlist_free(root); 2885 2886 /* 2887 * If our parent was the replacing vdev, but the replace completed, 2888 * then instead of failing with ENOTSUP we may either succeed, 2889 * fail with ENODEV, or fail with EOVERFLOW. 2890 */ 2891 if (expected_error == ENOTSUP && 2892 (error == 0 || error == ENODEV || error == EOVERFLOW)) 2893 expected_error = error; 2894 2895 /* 2896 * If someone grew the LUN, the replacement may be too small. 
2897 */ 2898 if (error == EOVERFLOW || error == EBUSY) 2899 expected_error = error; 2900 2901 /* XXX workaround 6690467 */ 2902 if (error != expected_error && expected_error != EBUSY) { 2903 fatal(0, "attach (%s %llu, %s %llu, %d) " 2904 "returned %d, expected %d", 2905 oldpath, oldsize, newpath, 2906 newsize, replacing, error, expected_error); 2907 } 2908 2909 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2910} 2911 2912/* 2913 * Callback function which expands the physical size of the vdev. 2914 */ 2915vdev_t * 2916grow_vdev(vdev_t *vd, void *arg) 2917{ 2918 spa_t *spa = vd->vdev_spa; 2919 size_t *newsize = arg; 2920 size_t fsize; 2921 int fd; 2922 2923 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2924 ASSERT(vd->vdev_ops->vdev_op_leaf); 2925 2926 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 2927 return (vd); 2928 2929 fsize = lseek(fd, 0, SEEK_END); 2930 (void) ftruncate(fd, *newsize); 2931 2932 if (ztest_opts.zo_verbose >= 6) { 2933 (void) printf("%s grew from %lu to %lu bytes\n", 2934 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 2935 } 2936 (void) close(fd); 2937 return (NULL); 2938} 2939 2940/* 2941 * Callback function which expands a given vdev by calling vdev_online(). 2942 */ 2943/* ARGSUSED */ 2944vdev_t * 2945online_vdev(vdev_t *vd, void *arg) 2946{ 2947 spa_t *spa = vd->vdev_spa; 2948 vdev_t *tvd = vd->vdev_top; 2949 uint64_t guid = vd->vdev_guid; 2950 uint64_t generation = spa->spa_config_generation + 1; 2951 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 2952 int error; 2953 2954 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2955 ASSERT(vd->vdev_ops->vdev_op_leaf); 2956 2957 /* Calling vdev_online will initialize the new metaslabs */ 2958 spa_config_exit(spa, SCL_STATE, spa); 2959 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 2960 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2961 2962 /* 2963 * If vdev_online returned an error or the underlying vdev_open 2964 * failed then we abort the expand. The only way to know that 2965 * vdev_open fails is by checking the returned newstate. 2966 */ 2967 if (error || newstate != VDEV_STATE_HEALTHY) { 2968 if (ztest_opts.zo_verbose >= 5) { 2969 (void) printf("Unable to expand vdev, state %llu, " 2970 "error %d\n", (u_longlong_t)newstate, error); 2971 } 2972 return (vd); 2973 } 2974 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 2975 2976 /* 2977 * Since we dropped the lock we need to ensure that we're 2978 * still talking to the original vdev. It's possible this 2979 * vdev may have been detached/replaced while we were 2980 * trying to online it. 2981 */ 2982 if (generation != spa->spa_config_generation) { 2983 if (ztest_opts.zo_verbose >= 5) { 2984 (void) printf("vdev configuration has changed, " 2985 "guid %llu, state %llu, expected gen %llu, " 2986 "got gen %llu\n", 2987 (u_longlong_t)guid, 2988 (u_longlong_t)tvd->vdev_state, 2989 (u_longlong_t)generation, 2990 (u_longlong_t)spa->spa_config_generation); 2991 } 2992 return (vd); 2993 } 2994 return (NULL); 2995} 2996 2997/* 2998 * Traverse the vdev tree calling the supplied function. 2999 * We continue to walk the tree until we either have walked all 3000 * children or we receive a non-NULL return from the callback. 3001 * If a NULL callback is passed, then we just return back the first 3002 * leaf vdev we encounter. 
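 *
 * Example, following the pattern ztest_vdev_LUN_growth() uses below:
 *
 *	vdev_t *leaf = vdev_walk_tree(tvd, NULL, NULL);
 *
 *	if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL)
 *		(some callback aborted the walk early)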
3003 */ 3004vdev_t * 3005vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 3006{ 3007 if (vd->vdev_ops->vdev_op_leaf) { 3008 if (func == NULL) 3009 return (vd); 3010 else 3011 return (func(vd, arg)); 3012 } 3013 3014 for (uint_t c = 0; c < vd->vdev_children; c++) { 3015 vdev_t *cvd = vd->vdev_child[c]; 3016 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 3017 return (cvd); 3018 } 3019 return (NULL); 3020} 3021 3022/* 3023 * Verify that dynamic LUN growth works as expected. 3024 */ 3025/* ARGSUSED */ 3026void 3027ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 3028{ 3029 spa_t *spa = ztest_spa; 3030 vdev_t *vd, *tvd; 3031 metaslab_class_t *mc; 3032 metaslab_group_t *mg; 3033 size_t psize, newsize; 3034 uint64_t top; 3035 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 3036 3037 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 3038 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3039 3040 top = ztest_random_vdev_top(spa, B_TRUE); 3041 3042 tvd = spa->spa_root_vdev->vdev_child[top]; 3043 mg = tvd->vdev_mg; 3044 mc = mg->mg_class; 3045 old_ms_count = tvd->vdev_ms_count; 3046 old_class_space = metaslab_class_get_space(mc); 3047 3048 /* 3049 * Determine the size of the first leaf vdev associated with 3050 * our top-level device. 3051 */ 3052 vd = vdev_walk_tree(tvd, NULL, NULL); 3053 ASSERT3P(vd, !=, NULL); 3054 ASSERT(vd->vdev_ops->vdev_op_leaf); 3055 3056 psize = vd->vdev_psize; 3057 3058 /* 3059 * We only try to expand the vdev if it's healthy, less than 4x its 3060 * original size, and it has a valid psize. 3061 */ 3062 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 3063 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 3064 spa_config_exit(spa, SCL_STATE, spa); 3065 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3066 return; 3067 } 3068 ASSERT(psize > 0); 3069 newsize = psize + psize / 8; 3070 ASSERT3U(newsize, >, psize); 3071 3072 if (ztest_opts.zo_verbose >= 6) { 3073 (void) printf("Expanding LUN %s from %lu to %lu\n", 3074 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 3075 } 3076 3077 /* 3078 * Growing the vdev is a two step process: 3079 * 1). expand the physical size (i.e. relabel) 3080 * 2). online the vdev to create the new metaslabs 3081 */ 3082 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 3083 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 3084 tvd->vdev_state != VDEV_STATE_HEALTHY) { 3085 if (ztest_opts.zo_verbose >= 5) { 3086 (void) printf("Could not expand LUN because " 3087 "the vdev configuration changed.\n"); 3088 } 3089 spa_config_exit(spa, SCL_STATE, spa); 3090 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3091 return; 3092 } 3093 3094 spa_config_exit(spa, SCL_STATE, spa); 3095 3096 /* 3097 * Expanding the LUN will update the config asynchronously, 3098 * thus we must wait for the async thread to complete any 3099 * pending tasks before proceeding. 
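 * The loop below polls spa_async_thread and spa_async_tasks under
 * spa_async_lock, forcing a txg sync between checks so that any
 * tasks queued during a sync still get a chance to be dispatched.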
3100 */ 3101 for (;;) { 3102 boolean_t done; 3103 mutex_enter(&spa->spa_async_lock); 3104 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 3105 mutex_exit(&spa->spa_async_lock); 3106 if (done) 3107 break; 3108 txg_wait_synced(spa_get_dsl(spa), 0); 3109 (void) poll(NULL, 0, 100); 3110 } 3111 3112 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3113 3114 tvd = spa->spa_root_vdev->vdev_child[top]; 3115 new_ms_count = tvd->vdev_ms_count; 3116 new_class_space = metaslab_class_get_space(mc); 3117 3118 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 3119 if (ztest_opts.zo_verbose >= 5) { 3120 (void) printf("Could not verify LUN expansion due to " 3121 "intervening vdev offline or remove.\n"); 3122 } 3123 spa_config_exit(spa, SCL_STATE, spa); 3124 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3125 return; 3126 } 3127 3128 /* 3129 * Make sure we were able to grow the vdev. 3130 */ 3131 if (new_ms_count <= old_ms_count) 3132 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n", 3133 old_ms_count, new_ms_count); 3134 3135 /* 3136 * Make sure we were able to grow the pool. 3137 */ 3138 if (new_class_space <= old_class_space) 3139 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n", 3140 old_class_space, new_class_space); 3141 3142 if (ztest_opts.zo_verbose >= 5) { 3143 char oldnumbuf[6], newnumbuf[6]; 3144 3145 nicenum(old_class_space, oldnumbuf); 3146 nicenum(new_class_space, newnumbuf); 3147 (void) printf("%s grew from %s to %s\n", 3148 spa->spa_name, oldnumbuf, newnumbuf); 3149 } 3150 3151 spa_config_exit(spa, SCL_STATE, spa); 3152 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3153} 3154 3155/* 3156 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 3157 */ 3158/* ARGSUSED */ 3159static void 3160ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 3161{ 3162 /* 3163 * Create the objects common to all ztest datasets. 3164 */ 3165 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 3166 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 3167} 3168 3169static int 3170ztest_dataset_create(char *dsname) 3171{ 3172 uint64_t zilset = ztest_random(100); 3173 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 3174 ztest_objset_create_cb, NULL); 3175 3176 if (err || zilset < 80) 3177 return (err); 3178 3179 if (ztest_opts.zo_verbose >= 6) 3180 (void) printf("Setting dataset %s to sync always\n", dsname); 3181 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 3182 ZFS_SYNC_ALWAYS, B_FALSE)); 3183} 3184 3185/* ARGSUSED */ 3186static int 3187ztest_objset_destroy_cb(const char *name, void *arg) 3188{ 3189 objset_t *os; 3190 dmu_object_info_t doi; 3191 int error; 3192 3193 /* 3194 * Verify that the dataset contains a directory object. 3195 */ 3196 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os)); 3197 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 3198 if (error != ENOENT) { 3199 /* We could have crashed in the middle of destroying it */ 3200 ASSERT0(error); 3201 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 3202 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 3203 } 3204 dmu_objset_disown(os, FTAG); 3205 3206 /* 3207 * Destroy the dataset. 
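 * Snapshot names contain '@' and go through dsl_destroy_snapshot();
 * filesystem heads go through dsl_destroy_head().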
3208 */ 3209 if (strchr(name, '@') != NULL) { 3210 VERIFY0(dsl_destroy_snapshot(name, B_FALSE)); 3211 } else { 3212 VERIFY0(dsl_destroy_head(name)); 3213 } 3214 return (0); 3215} 3216 3217static boolean_t 3218ztest_snapshot_create(char *osname, uint64_t id) 3219{ 3220 char snapname[MAXNAMELEN]; 3221 int error; 3222 3223 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id); 3224 3225 error = dmu_objset_snapshot_one(osname, snapname); 3226 if (error == ENOSPC) { 3227 ztest_record_enospc(FTAG); 3228 return (B_FALSE); 3229 } 3230 if (error != 0 && error != EEXIST) { 3231 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname, 3232 snapname, error); 3233 } 3234 return (B_TRUE); 3235} 3236 3237static boolean_t 3238ztest_snapshot_destroy(char *osname, uint64_t id) 3239{ 3240 char snapname[MAXNAMELEN]; 3241 int error; 3242 3243 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 3244 (u_longlong_t)id); 3245 3246 error = dsl_destroy_snapshot(snapname, B_FALSE); 3247 if (error != 0 && error != ENOENT) 3248 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 3249 return (B_TRUE); 3250} 3251 3252/* ARGSUSED */ 3253void 3254ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 3255{ 3256 ztest_ds_t zdtmp; 3257 int iters; 3258 int error; 3259 objset_t *os, *os2; 3260 char name[MAXNAMELEN]; 3261 zilog_t *zilog; 3262 3263 (void) rw_rdlock(&ztest_name_lock); 3264 3265 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu", 3266 ztest_opts.zo_pool, (u_longlong_t)id); 3267 3268 /* 3269 * If this dataset exists from a previous run, process its replay log 3270 * half of the time. If we don't replay it, then dmu_objset_destroy() 3271 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 3272 */ 3273 if (ztest_random(2) == 0 && 3274 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 3275 ztest_zd_init(&zdtmp, NULL, os); 3276 zil_replay(os, &zdtmp, ztest_replay_vector); 3277 ztest_zd_fini(&zdtmp); 3278 dmu_objset_disown(os, FTAG); 3279 } 3280 3281 /* 3282 * There may be an old instance of the dataset we're about to 3283 * create lying around from a previous run. If so, destroy it 3284 * and all of its snapshots. 3285 */ 3286 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 3287 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 3288 3289 /* 3290 * Verify that the destroyed dataset is no longer in the namespace. 3291 */ 3292 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, 3293 FTAG, &os)); 3294 3295 /* 3296 * Verify that we can create a new dataset. 3297 */ 3298 error = ztest_dataset_create(name); 3299 if (error) { 3300 if (error == ENOSPC) { 3301 ztest_record_enospc(FTAG); 3302 (void) rw_unlock(&ztest_name_lock); 3303 return; 3304 } 3305 fatal(0, "dmu_objset_create(%s) = %d", name, error); 3306 } 3307 3308 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 3309 3310 ztest_zd_init(&zdtmp, NULL, os); 3311 3312 /* 3313 * Open the intent log for it. 3314 */ 3315 zilog = zil_open(os, ztest_get_data); 3316 3317 /* 3318 * Put some objects in there, do a little I/O to them, 3319 * and randomly take a couple of snapshots along the way. 3320 */ 3321 iters = ztest_random(5); 3322 for (int i = 0; i < iters; i++) { 3323 ztest_dmu_object_alloc_free(&zdtmp, id); 3324 if (ztest_random(iters) == 0) 3325 (void) ztest_snapshot_create(name, i); 3326 } 3327 3328 /* 3329 * Verify that we cannot create an existing dataset. 
3330 */ 3331 VERIFY3U(EEXIST, ==, 3332 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 3333 3334 /* 3335 * Verify that we can hold an objset that is also owned. 3336 */ 3337 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 3338 dmu_objset_rele(os2, FTAG); 3339 3340 /* 3341 * Verify that we cannot own an objset that is already owned. 3342 */ 3343 VERIFY3U(EBUSY, ==, 3344 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3345 3346 zil_close(zilog); 3347 dmu_objset_disown(os, FTAG); 3348 ztest_zd_fini(&zdtmp); 3349 3350 (void) rw_unlock(&ztest_name_lock); 3351} 3352 3353/* 3354 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3355 */ 3356void 3357ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3358{ 3359 (void) rw_rdlock(&ztest_name_lock); 3360 (void) ztest_snapshot_destroy(zd->zd_name, id); 3361 (void) ztest_snapshot_create(zd->zd_name, id); 3362 (void) rw_unlock(&ztest_name_lock); 3363} 3364 3365/* 3366 * Cleanup non-standard snapshots and clones. 3367 */ 3368void 3369ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3370{ 3371 char snap1name[MAXNAMELEN]; 3372 char clone1name[MAXNAMELEN]; 3373 char snap2name[MAXNAMELEN]; 3374 char clone2name[MAXNAMELEN]; 3375 char snap3name[MAXNAMELEN]; 3376 int error; 3377 3378 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3379 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3380 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3381 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3382 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3383 3384 error = dsl_destroy_head(clone2name); 3385 if (error && error != ENOENT) 3386 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error); 3387 error = dsl_destroy_snapshot(snap3name, B_FALSE); 3388 if (error && error != ENOENT) 3389 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error); 3390 error = dsl_destroy_snapshot(snap2name, B_FALSE); 3391 if (error && error != ENOENT) 3392 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error); 3393 error = dsl_destroy_head(clone1name); 3394 if (error && error != ENOENT) 3395 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error); 3396 error = dsl_destroy_snapshot(snap1name, B_FALSE); 3397 if (error && error != ENOENT) 3398 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error); 3399} 3400 3401/* 3402 * Verify dsl_dataset_promote handles EBUSY 3403 */ 3404void 3405ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3406{ 3407 objset_t *os; 3408 char snap1name[MAXNAMELEN]; 3409 char clone1name[MAXNAMELEN]; 3410 char snap2name[MAXNAMELEN]; 3411 char clone2name[MAXNAMELEN]; 3412 char snap3name[MAXNAMELEN]; 3413 char *osname = zd->zd_name; 3414 int error; 3415 3416 (void) rw_rdlock(&ztest_name_lock); 3417 3418 ztest_dsl_dataset_cleanup(osname, id); 3419 3420 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3421 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3422 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3423 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3424 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3425 3426 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1); 3427 if (error && error != EEXIST) { 3428 if (error == ENOSPC) { 3429 ztest_record_enospc(FTAG); 3430 goto out; 3431 } 3432 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); 3433 } 3434 3435 error = 
dmu_objset_clone(clone1name, snap1name); 3436 if (error) { 3437 if (error == ENOSPC) { 3438 ztest_record_enospc(FTAG); 3439 goto out; 3440 } 3441 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error); 3442 } 3443 3444 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1); 3445 if (error && error != EEXIST) { 3446 if (error == ENOSPC) { 3447 ztest_record_enospc(FTAG); 3448 goto out; 3449 } 3450 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error); 3451 } 3452 3453 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1); 3454 if (error && error != EEXIST) { 3455 if (error == ENOSPC) { 3456 ztest_record_enospc(FTAG); 3457 goto out; 3458 } 3459 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 3460 } 3461 3462 error = dmu_objset_clone(clone2name, snap3name); 3463 if (error) { 3464 if (error == ENOSPC) { 3465 ztest_record_enospc(FTAG); 3466 goto out; 3467 } 3468 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error); 3469 } 3470 3471 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os); 3472 if (error) 3473 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error); 3474 error = dsl_dataset_promote(clone2name, NULL); 3475 if (error == ENOSPC) { 3476 dmu_objset_disown(os, FTAG); 3477 ztest_record_enospc(FTAG); 3478 goto out; 3479 } 3480 if (error != EBUSY) 3481 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3482 error); 3483 dmu_objset_disown(os, FTAG); 3484 3485out: 3486 ztest_dsl_dataset_cleanup(osname, id); 3487 3488 (void) rw_unlock(&ztest_name_lock); 3489} 3490 3491/* 3492 * Verify that dmu_object_{alloc,free} work as expected. 3493 */ 3494void 3495ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3496{ 3497 ztest_od_t od[4]; 3498 int batchsize = sizeof (od) / sizeof (od[0]); 3499 3500 for (int b = 0; b < batchsize; b++) 3501 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3502 3503 /* 3504 * Destroy the previous batch of objects, create a new batch, 3505 * and do some I/O on the new objects. 3506 */ 3507 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3508 return; 3509 3510 while (ztest_random(4 * batchsize) != 0) 3511 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3512 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3513} 3514 3515/* 3516 * Verify that dmu_{read,write} work as expected. 3517 */ 3518void 3519ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3520{ 3521 objset_t *os = zd->zd_os; 3522 ztest_od_t od[2]; 3523 dmu_tx_t *tx; 3524 int i, freeit, error; 3525 uint64_t n, s, txg; 3526 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3527 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3528 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3529 uint64_t regions = 997; 3530 uint64_t stride = 123456789ULL; 3531 uint64_t width = 40; 3532 int free_percent = 5; 3533 3534 /* 3535 * This test uses two objects, packobj and bigobj, that are always 3536 * updated together (i.e. in the same tx) so that their contents are 3537 * in sync and can be compared. Their contents relate to each other 3538 * in a simple way: packobj is a dense array of 'bufwad' structures, 3539 * while bigobj is a sparse array of the same bufwads. Specifically, 3540 * for any index n, there are three bufwads that should be identical: 3541 * 3542 * packobj, at offset n * sizeof (bufwad_t) 3543 * bigobj, at the head of the nth chunk 3544 * bigobj, at the tail of the nth chunk 3545 * 3546 * The chunk size is arbitrary. 
It doesn't have to be a power of two, 3547 * and it doesn't have any relation to the object blocksize. 3548 * The only requirement is that it can hold at least two bufwads. 3549 * 3550 * Normally, we write the bufwad to each of these locations. 3551 * However, free_percent of the time we instead write zeroes to 3552 * packobj and perform a dmu_free_range() on bigobj. By comparing 3553 * bigobj to packobj, we can verify that the DMU is correctly 3554 * tracking which parts of an object are allocated and free, 3555 * and that the contents of the allocated blocks are correct. 3556 */ 3557 3558 /* 3559 * Read the directory info. If it's the first time, set things up. 3560 */ 3561 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3562 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3563 3564 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3565 return; 3566 3567 bigobj = od[0].od_object; 3568 packobj = od[1].od_object; 3569 chunksize = od[0].od_gen; 3570 ASSERT(chunksize == od[1].od_gen); 3571 3572 /* 3573 * Prefetch a random chunk of the big object. 3574 * Our aim here is to get some async reads in flight 3575 * for blocks that we may free below; the DMU should 3576 * handle this race correctly. 3577 */ 3578 n = ztest_random(regions) * stride + ztest_random(width); 3579 s = 1 + ztest_random(2 * width - 1); 3580 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize); 3581 3582 /* 3583 * Pick a random index and compute the offsets into packobj and bigobj. 3584 */ 3585 n = ztest_random(regions) * stride + ztest_random(width); 3586 s = 1 + ztest_random(width - 1); 3587 3588 packoff = n * sizeof (bufwad_t); 3589 packsize = s * sizeof (bufwad_t); 3590 3591 bigoff = n * chunksize; 3592 bigsize = s * chunksize; 3593 3594 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3595 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3596 3597 /* 3598 * free_percent of the time, free a range of bigobj rather than 3599 * overwriting it. 3600 */ 3601 freeit = (ztest_random(100) < free_percent); 3602 3603 /* 3604 * Read the current contents of our objects. 3605 */ 3606 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3607 DMU_READ_PREFETCH); 3608 ASSERT0(error); 3609 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3610 DMU_READ_PREFETCH); 3611 ASSERT0(error); 3612 3613 /* 3614 * Get a tx for the mods to both packobj and bigobj. 3615 */ 3616 tx = dmu_tx_create(os); 3617 3618 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3619 3620 if (freeit) 3621 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3622 else 3623 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3624 3625 /* This accounts for setting the checksum/compression. 
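The dmu_object_set_*() calls below dirty bigobj's dnode, which is why this tx also takes a bonus hold on bigobj.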
*/ 3626 dmu_tx_hold_bonus(tx, bigobj); 3627 3628 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3629 if (txg == 0) { 3630 umem_free(packbuf, packsize); 3631 umem_free(bigbuf, bigsize); 3632 return; 3633 } 3634 3635 enum zio_checksum cksum; 3636 do { 3637 cksum = (enum zio_checksum) 3638 ztest_random_dsl_prop(ZFS_PROP_CHECKSUM); 3639 } while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS); 3640 dmu_object_set_checksum(os, bigobj, cksum, tx); 3641 3642 enum zio_compress comp; 3643 do { 3644 comp = (enum zio_compress) 3645 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION); 3646 } while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS); 3647 dmu_object_set_compress(os, bigobj, comp, tx); 3648 3649 /* 3650 * For each index from n to n + s, verify that the existing bufwad 3651 * in packobj matches the bufwads at the head and tail of the 3652 * corresponding chunk in bigobj. Then update all three bufwads 3653 * with the new values we want to write out. 3654 */ 3655 for (i = 0; i < s; i++) { 3656 /* LINTED */ 3657 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3658 /* LINTED */ 3659 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3660 /* LINTED */ 3661 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3662 3663 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3664 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3665 3666 if (pack->bw_txg > txg) 3667 fatal(0, "future leak: got %llx, open txg is %llx", 3668 pack->bw_txg, txg); 3669 3670 if (pack->bw_data != 0 && pack->bw_index != n + i) 3671 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3672 pack->bw_index, n, i); 3673 3674 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3675 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3676 3677 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3678 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3679 3680 if (freeit) { 3681 bzero(pack, sizeof (bufwad_t)); 3682 } else { 3683 pack->bw_index = n + i; 3684 pack->bw_txg = txg; 3685 pack->bw_data = 1 + ztest_random(-2ULL); 3686 } 3687 *bigH = *pack; 3688 *bigT = *pack; 3689 } 3690 3691 /* 3692 * We've verified all the old bufwads, and made new ones. 3693 * Now write them out. 3694 */ 3695 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3696 3697 if (freeit) { 3698 if (ztest_opts.zo_verbose >= 7) { 3699 (void) printf("freeing offset %llx size %llx" 3700 " txg %llx\n", 3701 (u_longlong_t)bigoff, 3702 (u_longlong_t)bigsize, 3703 (u_longlong_t)txg); 3704 } 3705 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3706 } else { 3707 if (ztest_opts.zo_verbose >= 7) { 3708 (void) printf("writing offset %llx size %llx" 3709 " txg %llx\n", 3710 (u_longlong_t)bigoff, 3711 (u_longlong_t)bigsize, 3712 (u_longlong_t)txg); 3713 } 3714 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3715 } 3716 3717 dmu_tx_commit(tx); 3718 3719 /* 3720 * Sanity check the stuff we just wrote. 
3721 */ 3722 { 3723 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3724 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3725 3726 VERIFY(0 == dmu_read(os, packobj, packoff, 3727 packsize, packcheck, DMU_READ_PREFETCH)); 3728 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3729 bigsize, bigcheck, DMU_READ_PREFETCH)); 3730 3731 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3732 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3733 3734 umem_free(packcheck, packsize); 3735 umem_free(bigcheck, bigsize); 3736 } 3737 3738 umem_free(packbuf, packsize); 3739 umem_free(bigbuf, bigsize); 3740} 3741 3742void 3743compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3744 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3745{ 3746 uint64_t i; 3747 bufwad_t *pack; 3748 bufwad_t *bigH; 3749 bufwad_t *bigT; 3750 3751 /* 3752 * For each index from n to n + s, verify that the existing bufwad 3753 * in packobj matches the bufwads at the head and tail of the 3754 * corresponding chunk in bigobj. Then update all three bufwads 3755 * with the new values we want to write out. 3756 */ 3757 for (i = 0; i < s; i++) { 3758 /* LINTED */ 3759 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3760 /* LINTED */ 3761 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3762 /* LINTED */ 3763 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3764 3765 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3766 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3767 3768 if (pack->bw_txg > txg) 3769 fatal(0, "future leak: got %llx, open txg is %llx", 3770 pack->bw_txg, txg); 3771 3772 if (pack->bw_data != 0 && pack->bw_index != n + i) 3773 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3774 pack->bw_index, n, i); 3775 3776 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3777 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3778 3779 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3780 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3781 3782 pack->bw_index = n + i; 3783 pack->bw_txg = txg; 3784 pack->bw_data = 1 + ztest_random(-2ULL); 3785 3786 *bigH = *pack; 3787 *bigT = *pack; 3788 } 3789} 3790 3791void 3792ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3793{ 3794 objset_t *os = zd->zd_os; 3795 ztest_od_t od[2]; 3796 dmu_tx_t *tx; 3797 uint64_t i; 3798 int error; 3799 uint64_t n, s, txg; 3800 bufwad_t *packbuf, *bigbuf; 3801 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3802 uint64_t blocksize = ztest_random_blocksize(); 3803 uint64_t chunksize = blocksize; 3804 uint64_t regions = 997; 3805 uint64_t stride = 123456789ULL; 3806 uint64_t width = 9; 3807 dmu_buf_t *bonus_db; 3808 arc_buf_t **bigbuf_arcbufs; 3809 dmu_object_info_t doi; 3810 3811 /* 3812 * This test uses two objects, packobj and bigobj, that are always 3813 * updated together (i.e. in the same tx) so that their contents are 3814 * in sync and can be compared. Their contents relate to each other 3815 * in a simple way: packobj is a dense array of 'bufwad' structures, 3816 * while bigobj is a sparse array of the same bufwads. Specifically, 3817 * for any index n, there are three bufwads that should be identical: 3818 * 3819 * packobj, at offset n * sizeof (bufwad_t) 3820 * bigobj, at the head of the nth chunk 3821 * bigobj, at the tail of the nth chunk 3822 * 3823 * The chunk size is set equal to bigobj block size so that 3824 * dmu_assign_arcbuf() can be tested for object updates. 3825 */ 3826 3827 /* 3828 * Read the directory info. If it's the first time, set things up. 
3829 */ 3830 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3831 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3832 3833 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3834 return; 3835 3836 bigobj = od[0].od_object; 3837 packobj = od[1].od_object; 3838 blocksize = od[0].od_blocksize; 3839 chunksize = blocksize; 3840 ASSERT(chunksize == od[1].od_gen); 3841 3842 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 3843 VERIFY(ISP2(doi.doi_data_block_size)); 3844 VERIFY(chunksize == doi.doi_data_block_size); 3845 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 3846 3847 /* 3848 * Pick a random index and compute the offsets into packobj and bigobj. 3849 */ 3850 n = ztest_random(regions) * stride + ztest_random(width); 3851 s = 1 + ztest_random(width - 1); 3852 3853 packoff = n * sizeof (bufwad_t); 3854 packsize = s * sizeof (bufwad_t); 3855 3856 bigoff = n * chunksize; 3857 bigsize = s * chunksize; 3858 3859 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 3860 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 3861 3862 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 3863 3864 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 3865 3866 /* 3867 * Iteration 0 test zcopy for DB_UNCACHED dbufs. 3868 * Iteration 1 test zcopy to already referenced dbufs. 3869 * Iteration 2 test zcopy to dirty dbuf in the same txg. 3870 * Iteration 3 test zcopy to dbuf dirty in previous txg. 3871 * Iteration 4 test zcopy when dbuf is no longer dirty. 3872 * Iteration 5 test zcopy when it can't be done. 3873 * Iteration 6 one more zcopy write. 3874 */ 3875 for (i = 0; i < 7; i++) { 3876 uint64_t j; 3877 uint64_t off; 3878 3879 /* 3880 * In iteration 5 (i == 5) use arcbufs 3881 * that don't match bigobj blksz to test 3882 * dmu_assign_arcbuf() when it can't directly 3883 * assign an arcbuf to a dbuf. 3884 */ 3885 for (j = 0; j < s; j++) { 3886 if (i != 5) { 3887 bigbuf_arcbufs[j] = 3888 dmu_request_arcbuf(bonus_db, chunksize); 3889 } else { 3890 bigbuf_arcbufs[2 * j] = 3891 dmu_request_arcbuf(bonus_db, chunksize / 2); 3892 bigbuf_arcbufs[2 * j + 1] = 3893 dmu_request_arcbuf(bonus_db, chunksize / 2); 3894 } 3895 } 3896 3897 /* 3898 * Get a tx for the mods to both packobj and bigobj. 3899 */ 3900 tx = dmu_tx_create(os); 3901 3902 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3903 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3904 3905 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3906 if (txg == 0) { 3907 umem_free(packbuf, packsize); 3908 umem_free(bigbuf, bigsize); 3909 for (j = 0; j < s; j++) { 3910 if (i != 5) { 3911 dmu_return_arcbuf(bigbuf_arcbufs[j]); 3912 } else { 3913 dmu_return_arcbuf( 3914 bigbuf_arcbufs[2 * j]); 3915 dmu_return_arcbuf( 3916 bigbuf_arcbufs[2 * j + 1]); 3917 } 3918 } 3919 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3920 dmu_buf_rele(bonus_db, FTAG); 3921 return; 3922 } 3923 3924 /* 3925 * 50% of the time don't read objects in the 1st iteration to 3926 * test dmu_assign_arcbuf() for the case when there're no 3927 * existing dbufs for the specified offsets. 3928 */ 3929 if (i != 0 || ztest_random(2) != 0) { 3930 error = dmu_read(os, packobj, packoff, 3931 packsize, packbuf, DMU_READ_PREFETCH); 3932 ASSERT0(error); 3933 error = dmu_read(os, bigobj, bigoff, bigsize, 3934 bigbuf, DMU_READ_PREFETCH); 3935 ASSERT0(error); 3936 } 3937 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 3938 n, chunksize, txg); 3939 3940 /* 3941 * We've verified all the old bufwads, and made new ones. 
* Now write them out. 3943 */ 3944 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3945 if (ztest_opts.zo_verbose >= 7) { 3946 (void) printf("writing offset %llx size %llx" 3947 " txg %llx\n", 3948 (u_longlong_t)bigoff, 3949 (u_longlong_t)bigsize, 3950 (u_longlong_t)txg); 3951 } 3952 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 3953 dmu_buf_t *dbt; 3954 if (i != 5) { 3955 bcopy((caddr_t)bigbuf + (off - bigoff), 3956 bigbuf_arcbufs[j]->b_data, chunksize); 3957 } else { 3958 bcopy((caddr_t)bigbuf + (off - bigoff), 3959 bigbuf_arcbufs[2 * j]->b_data, 3960 chunksize / 2); 3961 bcopy((caddr_t)bigbuf + (off - bigoff) + 3962 chunksize / 2, 3963 bigbuf_arcbufs[2 * j + 1]->b_data, 3964 chunksize / 2); 3965 } 3966 3967 if (i == 1) { 3968 VERIFY(dmu_buf_hold(os, bigobj, off, 3969 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 3970 } 3971 if (i != 5) { 3972 dmu_assign_arcbuf(bonus_db, off, 3973 bigbuf_arcbufs[j], tx); 3974 } else { 3975 dmu_assign_arcbuf(bonus_db, off, 3976 bigbuf_arcbufs[2 * j], tx); 3977 dmu_assign_arcbuf(bonus_db, 3978 off + chunksize / 2, 3979 bigbuf_arcbufs[2 * j + 1], tx); 3980 } 3981 if (i == 1) { 3982 dmu_buf_rele(dbt, FTAG); 3983 } 3984 } 3985 dmu_tx_commit(tx); 3986 3987 /* 3988 * Sanity check the stuff we just wrote. 3989 */ 3990 { 3991 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3992 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3993 3994 VERIFY(0 == dmu_read(os, packobj, packoff, 3995 packsize, packcheck, DMU_READ_PREFETCH)); 3996 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3997 bigsize, bigcheck, DMU_READ_PREFETCH)); 3998 3999 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 4000 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 4001 4002 umem_free(packcheck, packsize); 4003 umem_free(bigcheck, bigsize); 4004 } 4005 if (i == 2) { 4006 txg_wait_open(dmu_objset_pool(os), 0); 4007 } else if (i == 3) { 4008 txg_wait_synced(dmu_objset_pool(os), 0); 4009 } 4010 } 4011 4012 dmu_buf_rele(bonus_db, FTAG); 4013 umem_free(packbuf, packsize); 4014 umem_free(bigbuf, bigsize); 4015 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 4016} 4017 4018/* ARGSUSED */ 4019void 4020ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 4021{ 4022 ztest_od_t od[1]; 4023 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 4024 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4025 4026 /* 4027 * Have multiple threads write to large offsets in an object 4028 * to verify that parallel writes to an object -- even to the 4029 * same blocks within the object -- don't cause any trouble.
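 * (The base offset above is a random power of two of at least
 * 1ULL << 43, so this traffic lands in a sparse region of the object,
 * far from anything the other tests write.)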
4030 */
4031	ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4032
4033	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4034		return;
4035
4036	while (ztest_random(10) != 0)
4037		ztest_io(zd, od[0].od_object, offset);
4038}
4039
4040void
4041ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
4042{
4043	ztest_od_t od[1];
4044	uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
4045	    (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
4046	uint64_t count = ztest_random(20) + 1;
4047	uint64_t blocksize = ztest_random_blocksize();
4048	void *data;
4049
4050	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4051
4052	if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4053		return;
4054
4055	if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
4056		return;
4057
4058	ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
4059
4060	data = umem_zalloc(blocksize, UMEM_NOFAIL);
4061
4062	while (ztest_random(count) != 0) {
4063		uint64_t randoff = offset + (ztest_random(count) * blocksize);
4064		if (ztest_write(zd, od[0].od_object, randoff, blocksize,
4065		    data) != 0)
4066			break;
4067		while (ztest_random(4) != 0)
4068			ztest_io(zd, od[0].od_object, randoff);
4069	}
4070
4071	umem_free(data, blocksize);
4072}
4073
4074/*
4075 * Verify that zap_{create,destroy,add,remove,update} work as expected.
4076 */
4077#define	ZTEST_ZAP_MIN_INTS	1
4078#define	ZTEST_ZAP_MAX_INTS	4
4079#define	ZTEST_ZAP_MAX_PROPS	1000
4080
4081void
4082ztest_zap(ztest_ds_t *zd, uint64_t id)
4083{
4084	objset_t *os = zd->zd_os;
4085	ztest_od_t od[1];
4086	uint64_t object;
4087	uint64_t txg, last_txg;
4088	uint64_t value[ZTEST_ZAP_MAX_INTS];
4089	uint64_t zl_ints, zl_intsize, prop;
4090	int i, ints;
4091	dmu_tx_t *tx;
4092	char propname[100], txgname[100];
4093	int error;
4094	char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
4095
4096	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4097
4098	if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4099		return;
4100
4101	object = od[0].od_object;
4102
4103	/*
4104	 * Generate a known hash collision, and verify that
4105	 * we can look up and remove both entries.
4106	 */
4107	tx = dmu_tx_create(os);
4108	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4109	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4110	if (txg == 0)
4111		return;
4112	for (i = 0; i < 2; i++) {
4113		value[i] = i;
4114		VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
4115		    1, &value[i], tx));
4116	}
4117	for (i = 0; i < 2; i++) {
4118		VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
4119		    sizeof (uint64_t), 1, &value[i], tx));
4120		VERIFY3U(0, ==,
4121		    zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
4122		ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4123		ASSERT3U(zl_ints, ==, 1);
4124	}
4125	for (i = 0; i < 2; i++) {
4126		VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
4127	}
4128	dmu_tx_commit(tx);
4129
4130	/*
4131	 * Generate a bunch of random entries.
4132	 */
4133	ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
4134
4135	prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4136	(void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4137	(void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4138	bzero(value, sizeof (value));
4139	last_txg = 0;
4140
4141	/*
4142	 * If these zap entries already exist, validate their contents.
4143 */ 4144 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4145 if (error == 0) { 4146 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4147 ASSERT3U(zl_ints, ==, 1); 4148 4149 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 4150 zl_ints, &last_txg) == 0); 4151 4152 VERIFY(zap_length(os, object, propname, &zl_intsize, 4153 &zl_ints) == 0); 4154 4155 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4156 ASSERT3U(zl_ints, ==, ints); 4157 4158 VERIFY(zap_lookup(os, object, propname, zl_intsize, 4159 zl_ints, value) == 0); 4160 4161 for (i = 0; i < ints; i++) { 4162 ASSERT3U(value[i], ==, last_txg + object + i); 4163 } 4164 } else { 4165 ASSERT3U(error, ==, ENOENT); 4166 } 4167 4168 /* 4169 * Atomically update two entries in our zap object. 4170 * The first is named txg_%llu, and contains the txg 4171 * in which the property was last updated. The second 4172 * is named prop_%llu, and the nth element of its value 4173 * should be txg + object + n. 4174 */ 4175 tx = dmu_tx_create(os); 4176 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4177 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4178 if (txg == 0) 4179 return; 4180 4181 if (last_txg > txg) 4182 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 4183 4184 for (i = 0; i < ints; i++) 4185 value[i] = txg + object + i; 4186 4187 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 4188 1, &txg, tx)); 4189 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 4190 ints, value, tx)); 4191 4192 dmu_tx_commit(tx); 4193 4194 /* 4195 * Remove a random pair of entries. 4196 */ 4197 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4198 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4199 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4200 4201 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4202 4203 if (error == ENOENT) 4204 return; 4205 4206 ASSERT0(error); 4207 4208 tx = dmu_tx_create(os); 4209 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4210 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4211 if (txg == 0) 4212 return; 4213 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 4214 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 4215 dmu_tx_commit(tx); 4216} 4217 4218/* 4219 * Testcase to test the upgrading of a microzap to fatzap. 4220 */ 4221void 4222ztest_fzap(ztest_ds_t *zd, uint64_t id) 4223{ 4224 objset_t *os = zd->zd_os; 4225 ztest_od_t od[1]; 4226 uint64_t object, txg; 4227 4228 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4229 4230 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4231 return; 4232 4233 object = od[0].od_object; 4234 4235 /* 4236 * Add entries to this ZAP and make sure it spills over 4237 * and gets upgraded to a fatzap. Also, since we are adding 4238 * 2050 entries we should see ptrtbl growth and leaf-block split. 
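	 *
	 * (A microzap occupies a single block of 64-byte entries and can
	 * only hold short names mapping to single uint64_t values, so a
	 * few thousand inserts reliably forces the fatzap conversion.)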
4239 */ 4240 for (int i = 0; i < 2050; i++) { 4241 char name[MAXNAMELEN]; 4242 uint64_t value = i; 4243 dmu_tx_t *tx; 4244 int error; 4245 4246 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 4247 id, value); 4248 4249 tx = dmu_tx_create(os); 4250 dmu_tx_hold_zap(tx, object, B_TRUE, name); 4251 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4252 if (txg == 0) 4253 return; 4254 error = zap_add(os, object, name, sizeof (uint64_t), 1, 4255 &value, tx); 4256 ASSERT(error == 0 || error == EEXIST); 4257 dmu_tx_commit(tx); 4258 } 4259} 4260 4261/* ARGSUSED */ 4262void 4263ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 4264{ 4265 objset_t *os = zd->zd_os; 4266 ztest_od_t od[1]; 4267 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 4268 dmu_tx_t *tx; 4269 int i, namelen, error; 4270 int micro = ztest_random(2); 4271 char name[20], string_value[20]; 4272 void *data; 4273 4274 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 4275 4276 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4277 return; 4278 4279 object = od[0].od_object; 4280 4281 /* 4282 * Generate a random name of the form 'xxx.....' where each 4283 * x is a random printable character and the dots are dots. 4284 * There are 94 such characters, and the name length goes from 4285 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 4286 */ 4287 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 4288 4289 for (i = 0; i < 3; i++) 4290 name[i] = '!' + ztest_random('~' - '!' + 1); 4291 for (; i < namelen - 1; i++) 4292 name[i] = '.'; 4293 name[i] = '\0'; 4294 4295 if ((namelen & 1) || micro) { 4296 wsize = sizeof (txg); 4297 wc = 1; 4298 data = &txg; 4299 } else { 4300 wsize = 1; 4301 wc = namelen; 4302 data = string_value; 4303 } 4304 4305 count = -1ULL; 4306 VERIFY0(zap_count(os, object, &count)); 4307 ASSERT(count != -1ULL); 4308 4309 /* 4310 * Select an operation: length, lookup, add, update, remove. 4311 */ 4312 i = ztest_random(5); 4313 4314 if (i >= 2) { 4315 tx = dmu_tx_create(os); 4316 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4317 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4318 if (txg == 0) 4319 return; 4320 bcopy(name, string_value, namelen); 4321 } else { 4322 tx = NULL; 4323 txg = 0; 4324 bzero(string_value, namelen); 4325 } 4326 4327 switch (i) { 4328 4329 case 0: 4330 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 4331 if (error == 0) { 4332 ASSERT3U(wsize, ==, zl_wsize); 4333 ASSERT3U(wc, ==, zl_wc); 4334 } else { 4335 ASSERT3U(error, ==, ENOENT); 4336 } 4337 break; 4338 4339 case 1: 4340 error = zap_lookup(os, object, name, wsize, wc, data); 4341 if (error == 0) { 4342 if (data == string_value && 4343 bcmp(name, data, namelen) != 0) 4344 fatal(0, "name '%s' != val '%s' len %d", 4345 name, data, namelen); 4346 } else { 4347 ASSERT3U(error, ==, ENOENT); 4348 } 4349 break; 4350 4351 case 2: 4352 error = zap_add(os, object, name, wsize, wc, data, tx); 4353 ASSERT(error == 0 || error == EEXIST); 4354 break; 4355 4356 case 3: 4357 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4358 break; 4359 4360 case 4: 4361 error = zap_remove(os, object, name, tx); 4362 ASSERT(error == 0 || error == ENOENT); 4363 break; 4364 } 4365 4366 if (tx != NULL) 4367 dmu_tx_commit(tx); 4368} 4369 4370/* 4371 * Commit callback data. 
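 *
 * One of these is allocated per registered callback; it records the
 * txg the callback was registered in so ztest_commit_callback() can
 * verify that the callback never fires before that txg is synced.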
4372 */
4373	typedef struct ztest_cb_data {
4374		list_node_t	zcd_node;
4375		uint64_t	zcd_txg;
4376		int		zcd_expected_err;
4377		boolean_t	zcd_added;
4378		boolean_t	zcd_called;
4379		spa_t		*zcd_spa;
4380	} ztest_cb_data_t;
4381
4382/* This is the actual commit callback function */
4383static void
4384ztest_commit_callback(void *arg, int error)
4385{
4386	ztest_cb_data_t *data = arg;
4387	uint64_t synced_txg;
4388
4389	VERIFY(data != NULL);
4390	VERIFY3S(data->zcd_expected_err, ==, error);
4391	VERIFY(!data->zcd_called);
4392
4393	synced_txg = spa_last_synced_txg(data->zcd_spa);
4394	if (data->zcd_txg > synced_txg)
4395		fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4396		    ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4397		    synced_txg);
4398
4399	data->zcd_called = B_TRUE;
4400
4401	if (error == ECANCELED) {
4402		ASSERT0(data->zcd_txg);
4403		ASSERT(!data->zcd_added);
4404
4405		/*
4406		 * The private callback data should be destroyed here, but
4407		 * since we are going to check the zcd_called field after
4408		 * dmu_tx_abort(), we will destroy it there.
4409		 */
4410		return;
4411	}
4412
4413	/* Was this callback added to the global callback list? */
4414	if (!data->zcd_added)
4415		goto out;
4416
4417	ASSERT3U(data->zcd_txg, !=, 0);
4418
4419	/* Remove our callback from the list */
4420	(void) mutex_lock(&zcl.zcl_callbacks_lock);
4421	list_remove(&zcl.zcl_callbacks, data);
4422	(void) mutex_unlock(&zcl.zcl_callbacks_lock);
4423
4424out:
4425	umem_free(data, sizeof (ztest_cb_data_t));
4426}
4427
4428/* Allocate and initialize callback data structure */
4429static ztest_cb_data_t *
4430ztest_create_cb_data(objset_t *os, uint64_t txg)
4431{
4432	ztest_cb_data_t *cb_data;
4433
4434	cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4435
4436	cb_data->zcd_txg = txg;
4437	cb_data->zcd_spa = dmu_objset_spa(os);
4438
4439	return (cb_data);
4440}
4441
4442/*
4443 * If a number of txgs equal to this threshold have been created after a commit
4444 * callback has been registered but not called, then we assume there is an
4445 * implementation bug.
4446 */
4447#define	ZTEST_COMMIT_CALLBACK_THRESH	(TXG_CONCURRENT_STATES + 2)
4448
4449/*
4450 * Commit callback test.
4451 */
4452void
4453ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4454{
4455	objset_t *os = zd->zd_os;
4456	ztest_od_t od[1];
4457	dmu_tx_t *tx;
4458	ztest_cb_data_t *cb_data[3], *tmp_cb;
4459	uint64_t old_txg, txg;
4460	int i, error = 0;
4461
4462	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4463
4464	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4465		return;
4466
4467	tx = dmu_tx_create(os);
4468
4469	cb_data[0] = ztest_create_cb_data(os, 0);
4470	dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4471
4472	dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
4473
4474	/* Every once in a while, abort the transaction on purpose */
4475	if (ztest_random(100) == 0)
4476		error = -1;
4477
4478	if (!error)
4479		error = dmu_tx_assign(tx, TXG_NOWAIT);
4480
4481	txg = error ? 0 : dmu_tx_get_txg(tx);
4482
4483	cb_data[0]->zcd_txg = txg;
4484	cb_data[1] = ztest_create_cb_data(os, txg);
4485	dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
4486
4487	if (error) {
4488		/*
4489		 * It's not a strict requirement to call the registered
4490		 * callbacks from inside dmu_tx_abort(), but that's what
4491		 * happens in the current implementation, so we check
4492		 * for that.
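		 *
		 * The pattern under test, in miniature (names here are
		 * illustrative):
		 *
		 *	tx = dmu_tx_create(os);
		 *	dmu_tx_callback_register(tx, my_cb, my_arg);
		 *	if (dmu_tx_assign(tx, TXG_NOWAIT) != 0)
		 *		dmu_tx_abort(tx);
		 *	else
		 *		dmu_tx_commit(tx);
		 *
		 * On abort, my_cb fires immediately with ECANCELED; on
		 * commit, it fires once the transaction's txg has synced.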
4493		 */
4494		for (i = 0; i < 2; i++) {
4495			cb_data[i]->zcd_expected_err = ECANCELED;
4496			VERIFY(!cb_data[i]->zcd_called);
4497		}
4498
4499		dmu_tx_abort(tx);
4500
4501		for (i = 0; i < 2; i++) {
4502			VERIFY(cb_data[i]->zcd_called);
4503			umem_free(cb_data[i], sizeof (ztest_cb_data_t));
4504		}
4505
4506		return;
4507	}
4508
4509	cb_data[2] = ztest_create_cb_data(os, txg);
4510	dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4511
4512	/*
4513	 * Read existing data to make sure there isn't a future leak.
4514	 */
4515	VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
4516	    &old_txg, DMU_READ_PREFETCH));
4517
4518	if (old_txg > txg)
4519		fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4520		    old_txg, txg);
4521
4522	dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
4523
4524	(void) mutex_lock(&zcl.zcl_callbacks_lock);
4525
4526	/*
4527	 * Since commit callbacks don't have any ordering requirement and since
4528	 * it is theoretically possible for a commit callback to be called
4529	 * after an arbitrary amount of time has elapsed since its txg has been
4530	 * synced, it is difficult to reliably determine whether a commit
4531	 * callback hasn't been called due to high load or due to a flawed
4532	 * implementation.
4533	 *
4534	 * In practice, we will assume that if a commit callback hasn't been
4535	 * called after a certain number of txgs, then most likely there's an
4536	 * implementation bug.
4537	 */
4538	tmp_cb = list_head(&zcl.zcl_callbacks);
4539	if (tmp_cb != NULL &&
4540	    (txg - ZTEST_COMMIT_CALLBACK_THRESH) > tmp_cb->zcd_txg) {
4541		fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4542		    PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4543	}
4544
4545	/*
4546	 * Let's find the place to insert our callbacks.
4547	 *
4548	 * Even though the list is ordered by txg, it is possible for the
4549	 * insertion point to not be the end because our txg may already be
4550	 * quiescing at this point and other callbacks in the open txg
4551	 * (from other objsets) may have sneaked in.
4552 */ 4553 tmp_cb = list_tail(&zcl.zcl_callbacks); 4554 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 4555 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 4556 4557 /* Add the 3 callbacks to the list */ 4558 for (i = 0; i < 3; i++) { 4559 if (tmp_cb == NULL) 4560 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 4561 else 4562 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 4563 cb_data[i]); 4564 4565 cb_data[i]->zcd_added = B_TRUE; 4566 VERIFY(!cb_data[i]->zcd_called); 4567 4568 tmp_cb = cb_data[i]; 4569 } 4570 4571 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4572 4573 dmu_tx_commit(tx); 4574} 4575 4576/* ARGSUSED */ 4577void 4578ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 4579{ 4580 zfs_prop_t proplist[] = { 4581 ZFS_PROP_CHECKSUM, 4582 ZFS_PROP_COMPRESSION, 4583 ZFS_PROP_COPIES, 4584 ZFS_PROP_DEDUP 4585 }; 4586 4587 (void) rw_rdlock(&ztest_name_lock); 4588 4589 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) 4590 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 4591 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 4592 4593 (void) rw_unlock(&ztest_name_lock); 4594} 4595 4596/* ARGSUSED */ 4597void 4598ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 4599{ 4600 nvlist_t *props = NULL; 4601 4602 (void) rw_rdlock(&ztest_name_lock); 4603 4604 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO, 4605 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); 4606 4607 VERIFY0(spa_prop_get(ztest_spa, &props)); 4608 4609 if (ztest_opts.zo_verbose >= 6) 4610 dump_nvlist(props, 4); 4611 4612 nvlist_free(props); 4613 4614 (void) rw_unlock(&ztest_name_lock); 4615} 4616 4617static int 4618user_release_one(const char *snapname, const char *holdname) 4619{ 4620 nvlist_t *snaps, *holds; 4621 int error; 4622 4623 snaps = fnvlist_alloc(); 4624 holds = fnvlist_alloc(); 4625 fnvlist_add_boolean(holds, holdname); 4626 fnvlist_add_nvlist(snaps, snapname, holds); 4627 fnvlist_free(holds); 4628 error = dsl_dataset_user_release(snaps, NULL); 4629 fnvlist_free(snaps); 4630 return (error); 4631} 4632 4633/* 4634 * Test snapshot hold/release and deferred destroy. 4635 */ 4636void 4637ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 4638{ 4639 int error; 4640 objset_t *os = zd->zd_os; 4641 objset_t *origin; 4642 char snapname[100]; 4643 char fullname[100]; 4644 char clonename[100]; 4645 char tag[100]; 4646 char osname[MAXNAMELEN]; 4647 nvlist_t *holds; 4648 4649 (void) rw_rdlock(&ztest_name_lock); 4650 4651 dmu_objset_name(os, osname); 4652 4653 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id); 4654 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname); 4655 (void) snprintf(clonename, sizeof (clonename), 4656 "%s/ch1_%llu", osname, id); 4657 (void) snprintf(tag, sizeof (tag), "tag_%llu", id); 4658 4659 /* 4660 * Clean up from any previous run. 4661 */ 4662 error = dsl_destroy_head(clonename); 4663 if (error != ENOENT) 4664 ASSERT0(error); 4665 error = user_release_one(fullname, tag); 4666 if (error != ESRCH && error != ENOENT) 4667 ASSERT0(error); 4668 error = dsl_destroy_snapshot(fullname, B_FALSE); 4669 if (error != ENOENT) 4670 ASSERT0(error); 4671 4672 /* 4673 * Create snapshot, clone it, mark snap for deferred destroy, 4674 * destroy clone, verify snap was also destroyed. 
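	 *
	 * ("Deferred destroy" marks the snapshot so that it is destroyed
	 * automatically once the last clone or user hold goes away,
	 * rather than failing with EBUSY.)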
4675 */ 4676 error = dmu_objset_snapshot_one(osname, snapname); 4677 if (error) { 4678 if (error == ENOSPC) { 4679 ztest_record_enospc("dmu_objset_snapshot"); 4680 goto out; 4681 } 4682 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4683 } 4684 4685 error = dmu_objset_clone(clonename, fullname); 4686 if (error) { 4687 if (error == ENOSPC) { 4688 ztest_record_enospc("dmu_objset_clone"); 4689 goto out; 4690 } 4691 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error); 4692 } 4693 4694 error = dsl_destroy_snapshot(fullname, B_TRUE); 4695 if (error) { 4696 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4697 fullname, error); 4698 } 4699 4700 error = dsl_destroy_head(clonename); 4701 if (error) 4702 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error); 4703 4704 error = dmu_objset_hold(fullname, FTAG, &origin); 4705 if (error != ENOENT) 4706 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4707 4708 /* 4709 * Create snapshot, add temporary hold, verify that we can't 4710 * destroy a held snapshot, mark for deferred destroy, 4711 * release hold, verify snapshot was destroyed. 4712 */ 4713 error = dmu_objset_snapshot_one(osname, snapname); 4714 if (error) { 4715 if (error == ENOSPC) { 4716 ztest_record_enospc("dmu_objset_snapshot"); 4717 goto out; 4718 } 4719 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4720 } 4721 4722 holds = fnvlist_alloc(); 4723 fnvlist_add_string(holds, fullname, tag); 4724 error = dsl_dataset_user_hold(holds, 0, NULL); 4725 fnvlist_free(holds); 4726 4727 if (error == ENOSPC) { 4728 ztest_record_enospc("dsl_dataset_user_hold"); 4729 goto out; 4730 } else if (error) { 4731 fatal(0, "dsl_dataset_user_hold(%s, %s) = %u", 4732 fullname, tag, error); 4733 } 4734 4735 error = dsl_destroy_snapshot(fullname, B_FALSE); 4736 if (error != EBUSY) { 4737 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d", 4738 fullname, error); 4739 } 4740 4741 error = dsl_destroy_snapshot(fullname, B_TRUE); 4742 if (error) { 4743 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4744 fullname, error); 4745 } 4746 4747 error = user_release_one(fullname, tag); 4748 if (error) 4749 fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error); 4750 4751 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT); 4752 4753out: 4754 (void) rw_unlock(&ztest_name_lock); 4755} 4756 4757/* 4758 * Inject random faults into the on-disk data. 4759 */ 4760/* ARGSUSED */ 4761void 4762ztest_fault_inject(ztest_ds_t *zd, uint64_t id) 4763{ 4764 ztest_shared_t *zs = ztest_shared; 4765 spa_t *spa = ztest_spa; 4766 int fd; 4767 uint64_t offset; 4768 uint64_t leaves; 4769 uint64_t bad = 0x1990c0ffeedecadeULL; 4770 uint64_t top, leaf; 4771 char path0[MAXPATHLEN]; 4772 char pathrand[MAXPATHLEN]; 4773 size_t fsize; 4774 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */ 4775 int iters = 1000; 4776 int maxfaults; 4777 int mirror_save; 4778 vdev_t *vd0 = NULL; 4779 uint64_t guid0 = 0; 4780 boolean_t islog = B_FALSE; 4781 4782 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4783 maxfaults = MAXFAULTS(); 4784 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 4785 mirror_save = zs->zs_mirrors; 4786 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4787 4788 ASSERT(leaves >= 1); 4789 4790 /* 4791 * Grab the name lock as reader. There are some operations 4792 * which don't like to have their vdevs changed while 4793 * they are in progress (i.e. spa_change_guid). Those 4794 * operations will have grabbed the name lock as writer. 
4795 */ 4796 (void) rw_rdlock(&ztest_name_lock); 4797 4798 /* 4799 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd. 4800 */ 4801 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4802 4803 if (ztest_random(2) == 0) { 4804 /* 4805 * Inject errors on a normal data device or slog device. 4806 */ 4807 top = ztest_random_vdev_top(spa, B_TRUE); 4808 leaf = ztest_random(leaves) + zs->zs_splits; 4809 4810 /* 4811 * Generate paths to the first leaf in this top-level vdev, 4812 * and to the random leaf we selected. We'll induce transient 4813 * write failures and random online/offline activity on leaf 0, 4814 * and we'll write random garbage to the randomly chosen leaf. 4815 */ 4816 (void) snprintf(path0, sizeof (path0), ztest_dev_template, 4817 ztest_opts.zo_dir, ztest_opts.zo_pool, 4818 top * leaves + zs->zs_splits); 4819 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, 4820 ztest_opts.zo_dir, ztest_opts.zo_pool, 4821 top * leaves + leaf); 4822 4823 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); 4824 if (vd0 != NULL && vd0->vdev_top->vdev_islog) 4825 islog = B_TRUE; 4826 4827 /* 4828 * If the top-level vdev needs to be resilvered 4829 * then we only allow faults on the device that is 4830 * resilvering. 4831 */ 4832 if (vd0 != NULL && maxfaults != 1 && 4833 (!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) || 4834 vd0->vdev_resilver_txg != 0)) { 4835 /* 4836 * Make vd0 explicitly claim to be unreadable, 4837 * or unwriteable, or reach behind its back 4838 * and close the underlying fd. We can do this if 4839 * maxfaults == 0 because we'll fail and reexecute, 4840 * and we can do it if maxfaults >= 2 because we'll 4841 * have enough redundancy. If maxfaults == 1, the 4842 * combination of this with injection of random data 4843 * corruption below exceeds the pool's fault tolerance. 4844 */ 4845 vdev_file_t *vf = vd0->vdev_tsd; 4846 4847 if (vf != NULL && ztest_random(3) == 0) { 4848 (void) close(vf->vf_vnode->v_fd); 4849 vf->vf_vnode->v_fd = -1; 4850 } else if (ztest_random(2) == 0) { 4851 vd0->vdev_cant_read = B_TRUE; 4852 } else { 4853 vd0->vdev_cant_write = B_TRUE; 4854 } 4855 guid0 = vd0->vdev_guid; 4856 } 4857 } else { 4858 /* 4859 * Inject errors on an l2cache device. 4860 */ 4861 spa_aux_vdev_t *sav = &spa->spa_l2cache; 4862 4863 if (sav->sav_count == 0) { 4864 spa_config_exit(spa, SCL_STATE, FTAG); 4865 (void) rw_unlock(&ztest_name_lock); 4866 return; 4867 } 4868 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; 4869 guid0 = vd0->vdev_guid; 4870 (void) strcpy(path0, vd0->vdev_path); 4871 (void) strcpy(pathrand, vd0->vdev_path); 4872 4873 leaf = 0; 4874 leaves = 1; 4875 maxfaults = INT_MAX; /* no limit on cache devices */ 4876 } 4877 4878 spa_config_exit(spa, SCL_STATE, FTAG); 4879 (void) rw_unlock(&ztest_name_lock); 4880 4881 /* 4882 * If we can tolerate two or more faults, or we're dealing 4883 * with a slog, randomly online/offline vd0. 4884 */ 4885 if ((maxfaults >= 2 || islog) && guid0 != 0) { 4886 if (ztest_random(10) < 6) { 4887 int flags = (ztest_random(2) == 0 ? 4888 ZFS_OFFLINE_TEMPORARY : 0); 4889 4890 /* 4891 * We have to grab the zs_name_lock as writer to 4892 * prevent a race between offlining a slog and 4893 * destroying a dataset. Offlining the slog will 4894 * grab a reference on the dataset which may cause 4895 * dmu_objset_destroy() to fail with EBUSY thus 4896 * leaving the dataset in an inconsistent state. 
4897		 */
4898		if (islog)
4899			(void) rw_wrlock(&ztest_name_lock);
4900
4901		VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
4902
4903		if (islog)
4904			(void) rw_unlock(&ztest_name_lock);
4905		} else {
4906			/*
4907			 * Ideally we would like to be able to randomly
4908			 * call vdev_[on|off]line without holding locks
4909			 * to force unpredictable failures but the side
4910			 * effects of vdev_[on|off]line prevent us from
4911			 * doing so. We grab the ztest_vdev_lock here to
4912			 * prevent a race between injection testing and
4913			 * aux_vdev removal.
4914			 */
4915			VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4916			(void) vdev_online(spa, guid0, 0, NULL);
4917			VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4918		}
4919	}
4920
4921	if (maxfaults == 0)
4922		return;
4923
4924	/*
4925	 * We have at least single-fault tolerance, so inject data corruption.
4926	 */
4927	fd = open(pathrand, O_RDWR);
4928
4929	if (fd == -1)	/* we hit a gap in the device namespace */
4930		return;
4931
4932	fsize = lseek(fd, 0, SEEK_END);
4933
4934	while (--iters != 0) {
		/*
		 * Pick an 8-byte-aligned offset in a region that is unique
		 * to this leaf, so that repeated faults against different
		 * leaves never land on every copy of the same block.
		 */
4935		offset = ztest_random(fsize / (leaves << bshift)) *
4936		    (leaves << bshift) + (leaf << bshift) +
4937		    (ztest_random(1ULL << (bshift - 1)) & -8ULL);
4938
4939		if (offset >= fsize)
4940			continue;
4941
4942		VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4943		if (mirror_save != zs->zs_mirrors) {
4944			VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4945			(void) close(fd);
4946			return;
4947		}
4948
4949		if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
4950			fatal(1, "can't inject bad word at 0x%llx in %s",
4951			    offset, pathrand);
4952
4953		VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4954
4955		if (ztest_opts.zo_verbose >= 7)
4956			(void) printf("injected bad word into %s,"
4957			    " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
4958	}
4959
4960	(void) close(fd);
4961}
4962
4963/*
4964 * Verify that DDT repair works as expected.
4965 */
4966void
4967ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
4968{
4969	ztest_shared_t *zs = ztest_shared;
4970	spa_t *spa = ztest_spa;
4971	objset_t *os = zd->zd_os;
4972	ztest_od_t od[1];
4973	uint64_t object, blocksize, txg, pattern, psize;
4974	enum zio_checksum checksum = spa_dedup_checksum(spa);
4975	dmu_buf_t *db;
4976	dmu_tx_t *tx;
4977	void *buf;
4978	blkptr_t blk;
4979	int copies = 2 * ZIO_DEDUPDITTO_MIN;
4980
4981	blocksize = ztest_random_blocksize();
4982	blocksize = MIN(blocksize, 2048);	/* because we write so many */
4983
4984	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4985
4986	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4987		return;
4988
4989	/*
4990	 * Take the name lock as writer to prevent anyone else from changing
4991	 * the pool and dataset properties we need to maintain during this test.
4992	 */
4993	(void) rw_wrlock(&ztest_name_lock);
4994
4995	if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
4996	    B_FALSE) != 0 ||
4997	    ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
4998	    B_FALSE) != 0) {
4999		(void) rw_unlock(&ztest_name_lock);
5000		return;
5001	}
5002
5003	object = od[0].od_object;
5004	blocksize = od[0].od_blocksize;
5005	pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
5006
5007	ASSERT(object != 0);
5008
5009	tx = dmu_tx_create(os);
5010	dmu_tx_hold_write(tx, object, 0, copies * blocksize);
5011	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
5012	if (txg == 0) {
5013		(void) rw_unlock(&ztest_name_lock);
5014		return;
5015	}
5016
5017	/*
5018	 * Write all the copies of our block.
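	 *
	 * (With dedup enabled these writes are all identical, so the DDT
	 * refcount climbs past the dedupditto threshold and the pool
	 * stores extra ditto copies; the repair below depends on them.)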
5019 */ 5020 for (int i = 0; i < copies; i++) { 5021 uint64_t offset = i * blocksize; 5022 int error = dmu_buf_hold(os, object, offset, FTAG, &db, 5023 DMU_READ_NO_PREFETCH); 5024 if (error != 0) { 5025 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u", 5026 os, (long long)object, (long long) offset, error); 5027 } 5028 ASSERT(db->db_offset == offset); 5029 ASSERT(db->db_size == blocksize); 5030 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) || 5031 ztest_pattern_match(db->db_data, db->db_size, 0ULL)); 5032 dmu_buf_will_fill(db, tx); 5033 ztest_pattern_set(db->db_data, db->db_size, pattern); 5034 dmu_buf_rele(db, FTAG); 5035 } 5036 5037 dmu_tx_commit(tx); 5038 txg_wait_synced(spa_get_dsl(spa), txg); 5039 5040 /* 5041 * Find out what block we got. 5042 */ 5043 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db, 5044 DMU_READ_NO_PREFETCH)); 5045 blk = *((dmu_buf_impl_t *)db)->db_blkptr; 5046 dmu_buf_rele(db, FTAG); 5047 5048 /* 5049 * Damage the block. Dedup-ditto will save us when we read it later. 5050 */ 5051 psize = BP_GET_PSIZE(&blk); 5052 buf = zio_buf_alloc(psize); 5053 ztest_pattern_set(buf, psize, ~pattern); 5054 5055 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk, 5056 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, 5057 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL)); 5058 5059 zio_buf_free(buf, psize); 5060 5061 (void) rw_unlock(&ztest_name_lock); 5062} 5063 5064/* 5065 * Scrub the pool. 5066 */ 5067/* ARGSUSED */ 5068void 5069ztest_scrub(ztest_ds_t *zd, uint64_t id) 5070{ 5071 spa_t *spa = ztest_spa; 5072 5073 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5074 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */ 5075 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5076} 5077 5078/* 5079 * Change the guid for the pool. 5080 */ 5081/* ARGSUSED */ 5082void 5083ztest_reguid(ztest_ds_t *zd, uint64_t id) 5084{ 5085 spa_t *spa = ztest_spa; 5086 uint64_t orig, load; 5087 int error; 5088 5089 orig = spa_guid(spa); 5090 load = spa_load_guid(spa); 5091 5092 (void) rw_wrlock(&ztest_name_lock); 5093 error = spa_change_guid(spa); 5094 (void) rw_unlock(&ztest_name_lock); 5095 5096 if (error != 0) 5097 return; 5098 5099 if (ztest_opts.zo_verbose >= 4) { 5100 (void) printf("Changed guid old %llu -> %llu\n", 5101 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); 5102 } 5103 5104 VERIFY3U(orig, !=, spa_guid(spa)); 5105 VERIFY3U(load, ==, spa_load_guid(spa)); 5106} 5107 5108/* 5109 * Rename the pool to a different name and then rename it back. 5110 */ 5111/* ARGSUSED */ 5112void 5113ztest_spa_rename(ztest_ds_t *zd, uint64_t id) 5114{ 5115 char *oldname, *newname; 5116 spa_t *spa; 5117 5118 (void) rw_wrlock(&ztest_name_lock); 5119 5120 oldname = ztest_opts.zo_pool; 5121 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); 5122 (void) strcpy(newname, oldname); 5123 (void) strcat(newname, "_tmp"); 5124 5125 /* 5126 * Do the rename 5127 */ 5128 VERIFY3U(0, ==, spa_rename(oldname, newname)); 5129 5130 /* 5131 * Try to open it under the old name, which shouldn't exist 5132 */ 5133 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5134 5135 /* 5136 * Open it under the new name and make sure it's still the same spa_t. 
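	 * (spa_rename() only changes the name under which the pool is
	 * hashed in the namespace; the spa_t itself is preserved, so the
	 * pointer we get back should be identical.)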
5137 */ 5138 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5139 5140 ASSERT(spa == ztest_spa); 5141 spa_close(spa, FTAG); 5142 5143 /* 5144 * Rename it back to the original 5145 */ 5146 VERIFY3U(0, ==, spa_rename(newname, oldname)); 5147 5148 /* 5149 * Make sure it can still be opened 5150 */ 5151 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5152 5153 ASSERT(spa == ztest_spa); 5154 spa_close(spa, FTAG); 5155 5156 umem_free(newname, strlen(newname) + 1); 5157 5158 (void) rw_unlock(&ztest_name_lock); 5159} 5160 5161/* 5162 * Verify pool integrity by running zdb. 5163 */ 5164static void 5165ztest_run_zdb(char *pool) 5166{ 5167 int status; 5168 char zdb[MAXPATHLEN + MAXNAMELEN + 20]; 5169 char zbuf[1024]; 5170 char *bin; 5171 char *ztest; 5172 char *isa; 5173 int isalen; 5174 FILE *fp; 5175 5176 strlcpy(zdb, "/usr/bin/ztest", sizeof(zdb)); 5177 5178 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ 5179 bin = strstr(zdb, "/usr/bin/"); 5180 ztest = strstr(bin, "/ztest"); 5181 isa = bin + 8; 5182 isalen = ztest - isa; 5183 isa = strdup(isa); 5184 /* LINTED */ 5185 (void) sprintf(bin, 5186 "/usr/sbin%.*s/zdb -bcc%s%s -d -U %s %s", 5187 isalen, 5188 isa, 5189 ztest_opts.zo_verbose >= 3 ? "s" : "", 5190 ztest_opts.zo_verbose >= 4 ? "v" : "", 5191 spa_config_path, 5192 pool); 5193 free(isa); 5194 5195 if (ztest_opts.zo_verbose >= 5) 5196 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 5197 5198 fp = popen(zdb, "r"); 5199 assert(fp != NULL); 5200 5201 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 5202 if (ztest_opts.zo_verbose >= 3) 5203 (void) printf("%s", zbuf); 5204 5205 status = pclose(fp); 5206 5207 if (status == 0) 5208 return; 5209 5210 ztest_dump_core = 0; 5211 if (WIFEXITED(status)) 5212 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 5213 else 5214 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 5215} 5216 5217static void 5218ztest_walk_pool_directory(char *header) 5219{ 5220 spa_t *spa = NULL; 5221 5222 if (ztest_opts.zo_verbose >= 6) 5223 (void) printf("%s\n", header); 5224 5225 mutex_enter(&spa_namespace_lock); 5226 while ((spa = spa_next(spa)) != NULL) 5227 if (ztest_opts.zo_verbose >= 6) 5228 (void) printf("\t%s\n", spa_name(spa)); 5229 mutex_exit(&spa_namespace_lock); 5230} 5231 5232static void 5233ztest_spa_import_export(char *oldname, char *newname) 5234{ 5235 nvlist_t *config, *newconfig; 5236 uint64_t pool_guid; 5237 spa_t *spa; 5238 int error; 5239 5240 if (ztest_opts.zo_verbose >= 4) { 5241 (void) printf("import/export: old = %s, new = %s\n", 5242 oldname, newname); 5243 } 5244 5245 /* 5246 * Clean up from previous runs. 5247 */ 5248 (void) spa_destroy(newname); 5249 5250 /* 5251 * Get the pool's configuration and guid. 5252 */ 5253 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5254 5255 /* 5256 * Kick off a scrub to tickle scrub/export races. 5257 */ 5258 if (ztest_random(2) == 0) 5259 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5260 5261 pool_guid = spa_guid(spa); 5262 spa_close(spa, FTAG); 5263 5264 ztest_walk_pool_directory("pools before export"); 5265 5266 /* 5267 * Export it. 5268 */ 5269 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 5270 5271 ztest_walk_pool_directory("pools after export"); 5272 5273 /* 5274 * Try to import it. 5275 */ 5276 newconfig = spa_tryimport(config); 5277 ASSERT(newconfig != NULL); 5278 nvlist_free(newconfig); 5279 5280 /* 5281 * Import it under the new name. 
5282 */
5283	error = spa_import(newname, config, NULL, 0);
5284	if (error != 0) {
5285		dump_nvlist(config, 0);
5286		fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
5287		    oldname, newname, error);
5288	}
5289
5290	ztest_walk_pool_directory("pools after import");
5291
5292	/*
5293	 * Try to import it again -- should fail with EEXIST.
5294	 */
5295	VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5296
5297	/*
5298	 * Try to import it under a different name -- should fail with EEXIST.
5299	 */
5300	VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5301
5302	/*
5303	 * Verify that the pool is no longer visible under the old name.
5304	 */
5305	VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5306
5307	/*
5308	 * Verify that we can open and close the pool using the new name.
5309	 */
5310	VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5311	ASSERT(pool_guid == spa_guid(spa));
5312	spa_close(spa, FTAG);
5313
5314	nvlist_free(config);
5315}
5316
5317static void
5318ztest_resume(spa_t *spa)
5319{
5320	if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5321		(void) printf("resuming from suspended state\n");
5322	spa_vdev_state_enter(spa, SCL_NONE);
5323	vdev_clear(spa, NULL);
5324	(void) spa_vdev_state_exit(spa, NULL, 0);
5325	(void) zio_resume(spa);
5326}
5327
5328static void *
5329ztest_resume_thread(void *arg)
5330{
5331	spa_t *spa = arg;
5332
5333	while (!ztest_exiting) {
5334		if (spa_suspended(spa))
5335			ztest_resume(spa);
5336		(void) poll(NULL, 0, 100);
5337	}
5338	return (NULL);
5339}
5340
5341static void *
5342ztest_deadman_thread(void *arg)
5343{
5344	ztest_shared_t *zs = arg;
5345	spa_t *spa = ztest_spa;
5346	hrtime_t delta, total = 0;
5347
5348	for (;;) {
5349		delta = zs->zs_thread_stop - zs->zs_thread_start +
5350		    MSEC2NSEC(zfs_deadman_synctime_ms);
5351
5352		(void) poll(NULL, 0, (int)NSEC2MSEC(delta));
5353
5354		/*
5355		 * If the pool is suspended then fail immediately. Otherwise,
5356		 * check to see if the pool is making any progress. If
5357		 * vdev_deadman() discovers that there haven't been any recent
5358		 * I/Os then it will end up aborting the tests.
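		 *
		 * (The sleep above is one full pass time plus the deadman
		 * synctime, so a healthy run is never interrupted.)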
5359 */ 5360 if (spa_suspended(spa) || spa->spa_root_vdev == NULL) { 5361 fatal(0, "aborting test after %llu seconds because " 5362 "pool has transitioned to a suspended state.", 5363 zfs_deadman_synctime_ms / 1000); 5364 return (NULL); 5365 } 5366 vdev_deadman(spa->spa_root_vdev); 5367 5368 total += zfs_deadman_synctime_ms/1000; 5369 (void) printf("ztest has been running for %lld seconds\n", 5370 total); 5371 } 5372} 5373 5374static void 5375ztest_execute(int test, ztest_info_t *zi, uint64_t id) 5376{ 5377 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 5378 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 5379 hrtime_t functime = gethrtime(); 5380 5381 for (int i = 0; i < zi->zi_iters; i++) 5382 zi->zi_func(zd, id); 5383 5384 functime = gethrtime() - functime; 5385 5386 atomic_add_64(&zc->zc_count, 1); 5387 atomic_add_64(&zc->zc_time, functime); 5388 5389 if (ztest_opts.zo_verbose >= 4) { 5390 Dl_info dli; 5391 (void) dladdr((void *)zi->zi_func, &dli); 5392 (void) printf("%6.2f sec in %s\n", 5393 (double)functime / NANOSEC, dli.dli_sname); 5394 } 5395} 5396 5397static void * 5398ztest_thread(void *arg) 5399{ 5400 int rand; 5401 uint64_t id = (uintptr_t)arg; 5402 ztest_shared_t *zs = ztest_shared; 5403 uint64_t call_next; 5404 hrtime_t now; 5405 ztest_info_t *zi; 5406 ztest_shared_callstate_t *zc; 5407 5408 while ((now = gethrtime()) < zs->zs_thread_stop) { 5409 /* 5410 * See if it's time to force a crash. 5411 */ 5412 if (now > zs->zs_thread_kill) 5413 ztest_kill(zs); 5414 5415 /* 5416 * If we're getting ENOSPC with some regularity, stop. 5417 */ 5418 if (zs->zs_enospc_count > 10) 5419 break; 5420 5421 /* 5422 * Pick a random function to execute. 5423 */ 5424 rand = ztest_random(ZTEST_FUNCS); 5425 zi = &ztest_info[rand]; 5426 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 5427 call_next = zc->zc_next; 5428 5429 if (now >= call_next && 5430 atomic_cas_64(&zc->zc_next, call_next, call_next + 5431 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 5432 ztest_execute(rand, zi, id); 5433 } 5434 } 5435 5436 return (NULL); 5437} 5438 5439static void 5440ztest_dataset_name(char *dsname, char *pool, int d) 5441{ 5442 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d); 5443} 5444 5445static void 5446ztest_dataset_destroy(int d) 5447{ 5448 char name[MAXNAMELEN]; 5449 5450 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5451 5452 if (ztest_opts.zo_verbose >= 3) 5453 (void) printf("Destroying %s to free up space\n", name); 5454 5455 /* 5456 * Cleanup any non-standard clones and snapshots. In general, 5457 * ztest thread t operates on dataset (t % zopt_datasets), 5458 * so there may be more than one thing to clean up. 5459 */ 5460 for (int t = d; t < ztest_opts.zo_threads; 5461 t += ztest_opts.zo_datasets) { 5462 ztest_dsl_dataset_cleanup(name, t); 5463 } 5464 5465 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 5466 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 5467} 5468 5469static void 5470ztest_dataset_dirobj_verify(ztest_ds_t *zd) 5471{ 5472 uint64_t usedobjs, dirobjs, scratch; 5473 5474 /* 5475 * ZTEST_DIROBJ is the object directory for the entire dataset. 5476 * Therefore, the number of objects in use should equal the 5477 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 5478 * If not, we have an object leak. 5479 * 5480 * Note that we can only check this in ztest_dataset_open(), 5481 * when the open-context and syncing-context values agree. 
5482 * That's because zap_count() returns the open-context value, 5483 * while dmu_objset_space() returns the rootbp fill count. 5484 */ 5485 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 5486 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 5487 ASSERT3U(dirobjs + 1, ==, usedobjs); 5488} 5489 5490static int 5491ztest_dataset_open(int d) 5492{ 5493 ztest_ds_t *zd = &ztest_ds[d]; 5494 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 5495 objset_t *os; 5496 zilog_t *zilog; 5497 char name[MAXNAMELEN]; 5498 int error; 5499 5500 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5501 5502 (void) rw_rdlock(&ztest_name_lock); 5503 5504 error = ztest_dataset_create(name); 5505 if (error == ENOSPC) { 5506 (void) rw_unlock(&ztest_name_lock); 5507 ztest_record_enospc(FTAG); 5508 return (error); 5509 } 5510 ASSERT(error == 0 || error == EEXIST); 5511 5512 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os)); 5513 (void) rw_unlock(&ztest_name_lock); 5514 5515 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 5516 5517 zilog = zd->zd_zilog; 5518 5519 if (zilog->zl_header->zh_claim_lr_seq != 0 && 5520 zilog->zl_header->zh_claim_lr_seq < committed_seq) 5521 fatal(0, "missing log records: claimed %llu < committed %llu", 5522 zilog->zl_header->zh_claim_lr_seq, committed_seq); 5523 5524 ztest_dataset_dirobj_verify(zd); 5525 5526 zil_replay(os, zd, ztest_replay_vector); 5527 5528 ztest_dataset_dirobj_verify(zd); 5529 5530 if (ztest_opts.zo_verbose >= 6) 5531 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n", 5532 zd->zd_name, 5533 (u_longlong_t)zilog->zl_parse_blk_count, 5534 (u_longlong_t)zilog->zl_parse_lr_count, 5535 (u_longlong_t)zilog->zl_replaying_seq); 5536 5537 zilog = zil_open(os, ztest_get_data); 5538 5539 if (zilog->zl_replaying_seq != 0 && 5540 zilog->zl_replaying_seq < committed_seq) 5541 fatal(0, "missing log records: replayed %llu < committed %llu", 5542 zilog->zl_replaying_seq, committed_seq); 5543 5544 return (0); 5545} 5546 5547static void 5548ztest_dataset_close(int d) 5549{ 5550 ztest_ds_t *zd = &ztest_ds[d]; 5551 5552 zil_close(zd->zd_zilog); 5553 dmu_objset_disown(zd->zd_os, zd); 5554 5555 ztest_zd_fini(zd); 5556} 5557 5558/* 5559 * Kick off threads to run tests on all datasets in parallel. 5560 */ 5561static void 5562ztest_run(ztest_shared_t *zs) 5563{ 5564 thread_t *tid; 5565 spa_t *spa; 5566 objset_t *os; 5567 thread_t resume_tid; 5568 int error; 5569 5570 ztest_exiting = B_FALSE; 5571 5572 /* 5573 * Initialize parent/child shared state. 5574 */ 5575 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5576 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5577 5578 zs->zs_thread_start = gethrtime(); 5579 zs->zs_thread_stop = 5580 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 5581 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 5582 zs->zs_thread_kill = zs->zs_thread_stop; 5583 if (ztest_random(100) < ztest_opts.zo_killrate) { 5584 zs->zs_thread_kill -= 5585 ztest_random(ztest_opts.zo_passtime * NANOSEC); 5586 } 5587 5588 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL); 5589 5590 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 5591 offsetof(ztest_cb_data_t, zcd_node)); 5592 5593 /* 5594 * Open our pool. 
5595 */
5596	kernel_init(FREAD | FWRITE);
5597	VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5598	spa->spa_debug = B_TRUE;
5599	ztest_spa = spa;
5600
5601	VERIFY0(dmu_objset_own(ztest_opts.zo_pool,
5602	    DMU_OST_ANY, B_TRUE, FTAG, &os));
5603	zs->zs_guid = dmu_objset_fsid_guid(os);
5604	dmu_objset_disown(os, FTAG);
5605
5606	spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5607
5608	/*
5609	 * We don't expect the pool to suspend unless maxfaults == 0,
5610	 * in which case ztest_fault_inject() temporarily takes away
5611	 * the only valid replica.
5612	 */
5613	if (MAXFAULTS() == 0)
5614		spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5615	else
5616		spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5617
5618	/*
5619	 * Create a thread to periodically resume suspended I/O.
5620	 */
5621	VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5622	    &resume_tid) == 0);
5623
5624	/*
5625	 * Create a deadman thread to abort() if we hang.
5626	 */
5627	VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
5628	    NULL) == 0);
5629
5630	/*
5631	 * Verify that we can safely inquire about any object,
5632	 * whether it's allocated or not. To make it interesting,
5633	 * we probe a window of +/- 5 around each power of two.
5634	 * This hits all edge cases, including zero and the max.
5635	 */
5636	for (int t = 0; t < 64; t++) {
5637		for (int d = -5; d <= 5; d++) {
5638			error = dmu_object_info(spa->spa_meta_objset,
5639			    (1ULL << t) + d, NULL);
5640			ASSERT(error == 0 || error == ENOENT ||
5641			    error == EINVAL);
5642		}
5643	}
5644
5645	/*
5646	 * If we got any ENOSPC errors on the previous run, destroy something.
5647	 */
5648	if (zs->zs_enospc_count != 0) {
5649		int d = ztest_random(ztest_opts.zo_datasets);
5650		ztest_dataset_destroy(d);
5651	}
5652	zs->zs_enospc_count = 0;
5653
5654	tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
5655	    UMEM_NOFAIL);
5656
5657	if (ztest_opts.zo_verbose >= 4)
5658		(void) printf("starting main threads...\n");
5659
5660	/*
5661	 * Kick off all the tests that run in parallel.
5662	 */
5663	for (int t = 0; t < ztest_opts.zo_threads; t++) {
5664		if (t < ztest_opts.zo_datasets &&
5665		    ztest_dataset_open(t) != 0)
5666			return;
5667		VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
5668		    THR_BOUND, &tid[t]) == 0);
5669	}
5670
5671	/*
5672	 * Wait for all of the tests to complete. We go in reverse order
5673	 * so we don't close datasets while threads are still using them.
5674	 */
5675	for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5676		VERIFY(thr_join(tid[t], NULL, NULL) == 0);
5677		if (t < ztest_opts.zo_datasets)
5678			ztest_dataset_close(t);
5679	}
5680
5681	txg_wait_synced(spa_get_dsl(spa), 0);
5682
5683	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5684	zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5685	zfs_dbgmsg_print(FTAG);
5686
5687	umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
5688
5689	/* Kill the resume thread */
5690	ztest_exiting = B_TRUE;
5691	VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
5692	ztest_resume(spa);
5693
5694	/*
5695	 * Right before closing the pool, kick off a bunch of async I/O;
5696	 * spa_close() should wait for it to complete.
5697	 */
5698	for (uint64_t object = 1; object < 50; object++)
5699		dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5700
5701	spa_close(spa, FTAG);
5702
5703	/*
5704	 * Verify that we can loop over all pools.
5705 */ 5706 mutex_enter(&spa_namespace_lock); 5707 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 5708 if (ztest_opts.zo_verbose > 3) 5709 (void) printf("spa_next: found %s\n", spa_name(spa)); 5710 mutex_exit(&spa_namespace_lock); 5711 5712 /* 5713 * Verify that we can export the pool and reimport it under a 5714 * different name. 5715 */ 5716 if (ztest_random(2) == 0) { 5717 char name[MAXNAMELEN]; 5718 (void) snprintf(name, MAXNAMELEN, "%s_import", 5719 ztest_opts.zo_pool); 5720 ztest_spa_import_export(ztest_opts.zo_pool, name); 5721 ztest_spa_import_export(name, ztest_opts.zo_pool); 5722 } 5723 5724 kernel_fini(); 5725 5726 list_destroy(&zcl.zcl_callbacks); 5727 5728 (void) _mutex_destroy(&zcl.zcl_callbacks_lock); 5729 5730 (void) rwlock_destroy(&ztest_name_lock); 5731 (void) _mutex_destroy(&ztest_vdev_lock); 5732} 5733 5734static void 5735ztest_freeze(void) 5736{ 5737 ztest_ds_t *zd = &ztest_ds[0]; 5738 spa_t *spa; 5739 int numloops = 0; 5740 5741 if (ztest_opts.zo_verbose >= 3) 5742 (void) printf("testing spa_freeze()...\n"); 5743 5744 kernel_init(FREAD | FWRITE); 5745 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5746 VERIFY3U(0, ==, ztest_dataset_open(0)); 5747 spa->spa_debug = B_TRUE; 5748 ztest_spa = spa; 5749 5750 /* 5751 * Force the first log block to be transactionally allocated. 5752 * We have to do this before we freeze the pool -- otherwise 5753 * the log chain won't be anchored. 5754 */ 5755 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 5756 ztest_dmu_object_alloc_free(zd, 0); 5757 zil_commit(zd->zd_zilog, 0); 5758 } 5759 5760 txg_wait_synced(spa_get_dsl(spa), 0); 5761 5762 /* 5763 * Freeze the pool. This stops spa_sync() from doing anything, 5764 * so that the only way to record changes from now on is the ZIL. 5765 */ 5766 spa_freeze(spa); 5767 5768 /* 5769 * Run tests that generate log records but don't alter the pool config 5770 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 5771 * We do a txg_wait_synced() after each iteration to force the txg 5772 * to increase well beyond the last synced value in the uberblock. 5773 * The ZIL should be OK with that. 5774 */ 5775 while (ztest_random(10) != 0 && 5776 numloops++ < ztest_opts.zo_maxloops) { 5777 ztest_dmu_write_parallel(zd, 0); 5778 ztest_dmu_object_alloc_free(zd, 0); 5779 txg_wait_synced(spa_get_dsl(spa), 0); 5780 } 5781 5782 /* 5783 * Commit all of the changes we just generated. 5784 */ 5785 zil_commit(zd->zd_zilog, 0); 5786 txg_wait_synced(spa_get_dsl(spa), 0); 5787 5788 /* 5789 * Close our dataset and close the pool. 5790 */ 5791 ztest_dataset_close(0); 5792 spa_close(spa, FTAG); 5793 kernel_fini(); 5794 5795 /* 5796 * Open and close the pool and dataset to induce log replay. 
5797 */ 5798 kernel_init(FREAD | FWRITE); 5799 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5800 ASSERT(spa_freeze_txg(spa) == UINT64_MAX); 5801 VERIFY3U(0, ==, ztest_dataset_open(0)); 5802 ztest_dataset_close(0); 5803 5804 spa->spa_debug = B_TRUE; 5805 ztest_spa = spa; 5806 txg_wait_synced(spa_get_dsl(spa), 0); 5807 ztest_reguid(NULL, 0); 5808 5809 spa_close(spa, FTAG); 5810 kernel_fini(); 5811} 5812 5813void 5814print_time(hrtime_t t, char *timebuf) 5815{ 5816 hrtime_t s = t / NANOSEC; 5817 hrtime_t m = s / 60; 5818 hrtime_t h = m / 60; 5819 hrtime_t d = h / 24; 5820 5821 s -= m * 60; 5822 m -= h * 60; 5823 h -= d * 24; 5824 5825 timebuf[0] = '\0'; 5826 5827 if (d) 5828 (void) sprintf(timebuf, 5829 "%llud%02lluh%02llum%02llus", d, h, m, s); 5830 else if (h) 5831 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s); 5832 else if (m) 5833 (void) sprintf(timebuf, "%llum%02llus", m, s); 5834 else 5835 (void) sprintf(timebuf, "%llus", s); 5836} 5837 5838static nvlist_t * 5839make_random_props() 5840{ 5841 nvlist_t *props; 5842 5843 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 5844 if (ztest_random(2) == 0) 5845 return (props); 5846 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0); 5847 5848 return (props); 5849} 5850 5851/* 5852 * Create a storage pool with the given name and initial vdev size. 5853 * Then test spa_freeze() functionality. 5854 */ 5855static void 5856ztest_init(ztest_shared_t *zs) 5857{ 5858 spa_t *spa; 5859 nvlist_t *nvroot, *props; 5860 5861 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5862 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5863 5864 kernel_init(FREAD | FWRITE); 5865 5866 /* 5867 * Create the storage pool. 5868 */ 5869 (void) spa_destroy(ztest_opts.zo_pool); 5870 ztest_shared->zs_vdev_next_leaf = 0; 5871 zs->zs_splits = 0; 5872 zs->zs_mirrors = ztest_opts.zo_mirrors; 5873 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0, 5874 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1); 5875 props = make_random_props(); 5876 for (int i = 0; i < SPA_FEATURES; i++) { 5877 char buf[1024]; 5878 (void) snprintf(buf, sizeof (buf), "feature@%s", 5879 spa_feature_table[i].fi_uname); 5880 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0)); 5881 } 5882 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL)); 5883 nvlist_free(nvroot); 5884 5885 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5886 zs->zs_metaslab_sz = 5887 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 5888 5889 spa_close(spa, FTAG); 5890 5891 kernel_fini(); 5892 5893 ztest_run_zdb(ztest_opts.zo_pool); 5894 5895 ztest_freeze(); 5896 5897 ztest_run_zdb(ztest_opts.zo_pool); 5898 5899 (void) rwlock_destroy(&ztest_name_lock); 5900 (void) _mutex_destroy(&ztest_vdev_lock); 5901} 5902 5903static void 5904setup_data_fd(void) 5905{ 5906 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX"; 5907 5908 ztest_fd_data = mkstemp(ztest_name_data); 5909 ASSERT3S(ztest_fd_data, >=, 0); 5910 (void) unlink(ztest_name_data); 5911} 5912 5913 5914static int 5915shared_data_size(ztest_shared_hdr_t *hdr) 5916{ 5917 int size; 5918 5919 size = hdr->zh_hdr_size; 5920 size += hdr->zh_opts_size; 5921 size += hdr->zh_size; 5922 size += hdr->zh_stats_size * hdr->zh_stats_count; 5923 size += hdr->zh_ds_size * hdr->zh_ds_count; 5924 5925 return (size); 5926} 5927 5928static void 5929setup_hdr(void) 5930{ 5931 int size; 5932 ztest_shared_hdr_t *hdr; 5933 5934 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5935 
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 5936 ASSERT(hdr != MAP_FAILED); 5937 5938 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t))); 5939 5940 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t); 5941 hdr->zh_opts_size = sizeof (ztest_shared_opts_t); 5942 hdr->zh_size = sizeof (ztest_shared_t); 5943 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t); 5944 hdr->zh_stats_count = ZTEST_FUNCS; 5945 hdr->zh_ds_size = sizeof (ztest_shared_ds_t); 5946 hdr->zh_ds_count = ztest_opts.zo_datasets; 5947 5948 size = shared_data_size(hdr); 5949 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size)); 5950 5951 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5952} 5953 5954static void 5955setup_data(void) 5956{ 5957 int size, offset; 5958 ztest_shared_hdr_t *hdr; 5959 uint8_t *buf; 5960 5961 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5962 PROT_READ, MAP_SHARED, ztest_fd_data, 0); 5963 ASSERT(hdr != MAP_FAILED); 5964 5965 size = shared_data_size(hdr); 5966 5967 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5968 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()), 5969 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 5970 ASSERT(hdr != MAP_FAILED); 5971 buf = (uint8_t *)hdr; 5972 5973 offset = hdr->zh_hdr_size; 5974 ztest_shared_opts = (void *)&buf[offset]; 5975 offset += hdr->zh_opts_size; 5976 ztest_shared = (void *)&buf[offset]; 5977 offset += hdr->zh_size; 5978 ztest_shared_callstate = (void *)&buf[offset]; 5979 offset += hdr->zh_stats_size * hdr->zh_stats_count; 5980 ztest_shared_ds = (void *)&buf[offset]; 5981} 5982 5983static boolean_t 5984exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp) 5985{ 5986 pid_t pid; 5987 int status; 5988 char *cmdbuf = NULL; 5989 5990 pid = fork(); 5991 5992 if (cmd == NULL) { 5993 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 5994 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN); 5995 cmd = cmdbuf; 5996 } 5997 5998 if (pid == -1) 5999 fatal(1, "fork failed"); 6000 6001 if (pid == 0) { /* child */ 6002 char *emptyargv[2] = { cmd, NULL }; 6003 char fd_data_str[12]; 6004 6005 struct rlimit rl = { 1024, 1024 }; 6006 (void) setrlimit(RLIMIT_NOFILE, &rl); 6007 6008 (void) close(ztest_fd_rand); 6009 VERIFY3U(11, >=, 6010 snprintf(fd_data_str, 12, "%d", ztest_fd_data)); 6011 VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1)); 6012 6013 (void) enable_extended_FILE_stdio(-1, -1); 6014 if (libpath != NULL) 6015 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1)); 6016#ifdef illumos 6017 (void) execv(cmd, emptyargv); 6018#else 6019 (void) execvp(cmd, emptyargv); 6020#endif 6021 ztest_dump_core = B_FALSE; 6022 fatal(B_TRUE, "exec failed: %s", cmd); 6023 } 6024 6025 if (cmdbuf != NULL) { 6026 umem_free(cmdbuf, MAXPATHLEN); 6027 cmd = NULL; 6028 } 6029 6030 while (waitpid(pid, &status, 0) != pid) 6031 continue; 6032 if (statusp != NULL) 6033 *statusp = status; 6034 6035 if (WIFEXITED(status)) { 6036 if (WEXITSTATUS(status) != 0) { 6037 (void) fprintf(stderr, "child exited with code %d\n", 6038 WEXITSTATUS(status)); 6039 exit(2); 6040 } 6041 return (B_FALSE); 6042 } else if (WIFSIGNALED(status)) { 6043 if (!ignorekill || WTERMSIG(status) != SIGKILL) { 6044 (void) fprintf(stderr, "child died with signal %d\n", 6045 WTERMSIG(status)); 6046 exit(3); 6047 } 6048 return (B_TRUE); 6049 } else { 6050 (void) fprintf(stderr, "something strange happened to child\n"); 6051 exit(4); 6052 /* NOTREACHED */ 6053 } 6054} 6055 6056static void 
static void
ztest_run_init(void)
{
	ztest_shared_t *zs = ztest_shared;

	ASSERT(ztest_opts.zo_init != 0);

	/*
	 * Blow away any existing copy of zpool.cache.
	 */
	(void) remove(spa_config_path);

	/*
	 * Create and initialize our storage pool.
	 */
	for (int i = 1; i <= ztest_opts.zo_init; i++) {
		bzero(zs, sizeof (ztest_shared_t));
		if (ztest_opts.zo_verbose >= 3 &&
		    ztest_opts.zo_init != 1) {
			(void) printf("ztest_init(), pass %d\n", i);
		}
		ztest_init(zs);
	}
}

int
main(int argc, char **argv)
{
	int kills = 0;
	int iters = 0;
	int older = 0;
	int newer = 0;
	ztest_shared_t *zs;
	ztest_info_t *zi;
	ztest_shared_callstate_t *zc;
	char timebuf[100];
	char numbuf[6];
	spa_t *spa;
	char *cmd;
	boolean_t hasalt;
	char *fd_data_str = getenv("ZTEST_FD_DATA");

	(void) setvbuf(stdout, NULL, _IOLBF, 0);

	dprintf_setup(&argc, argv);
	zfs_deadman_synctime_ms = 300000;

	ztest_fd_rand = open("/dev/urandom", O_RDONLY);
	ASSERT3S(ztest_fd_rand, >=, 0);

	/*
	 * ZTEST_FD_DATA distinguishes the two roles of this binary:
	 * absent, we are the top-level parent and must create the shared
	 * mapping; present, we are a child and attach to the mapping
	 * inherited through that descriptor.
	 */
	if (!fd_data_str) {
		process_options(argc, argv);

		setup_data_fd();
		setup_hdr();
		setup_data();
		bcopy(&ztest_opts, ztest_shared_opts,
		    sizeof (*ztest_shared_opts));
	} else {
		ztest_fd_data = atoi(fd_data_str);
		setup_data();
		bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
	}
	ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);

	/* Override location of zpool.cache */
	VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache",
	    ztest_opts.zo_dir), !=, -1);

	ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
	    UMEM_NOFAIL);
	zs = ztest_shared;

	if (fd_data_str) {	/* child: do the work, then exit */
		metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
		metaslab_df_alloc_threshold =
		    zs->zs_metaslab_df_alloc_threshold;

		if (zs->zs_do_init)
			ztest_run_init();
		else
			ztest_run(zs);
		exit(0);
	}

	hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);

	if (ztest_opts.zo_verbose >= 1) {
		(void) printf("%llu vdevs, %d datasets, %d threads,"
		    " %llu seconds...\n",
		    (u_longlong_t)ztest_opts.zo_vdevs,
		    ztest_opts.zo_datasets,
		    ztest_opts.zo_threads,
		    (u_longlong_t)ztest_opts.zo_time);
	}

	cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
	(void) strlcpy(cmd, getexecname(), MAXNAMELEN);

	/* Run the initialization pass in a child of its own. */
	zs->zs_do_init = B_TRUE;
	if (hasalt) {
		if (ztest_opts.zo_verbose >= 1) {
			(void) printf("Executing older ztest for "
			    "initialization: %s\n", ztest_opts.zo_alt_ztest);
		}
		VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
		    ztest_opts.zo_alt_libpath, B_FALSE, NULL));
	} else {
		VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
	}
	zs->zs_do_init = B_FALSE;

	zs->zs_proc_start = gethrtime();
	zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;

	for (int f = 0; f < ZTEST_FUNCS; f++) {
		zi = &ztest_info[f];
		zc = ZTEST_GET_SHARED_CALLSTATE(f);
		if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
			zc->zc_next = UINT64_MAX;
		else
			zc->zc_next = zs->zs_proc_start +
			    ztest_random(2 * zi->zi_interval[0] + 1);
	}
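	/*
	 * The loop above staggers each function's first deadline to a
	 * random point within twice its nominal interval, so the workload
	 * doesn't start in lockstep.  For example, with zi_interval[0]
	 * equal to 60 seconds (a value chosen here purely to illustrate):
	 *
	 *	zc_next = zs_proc_start + ztest_random(2 * 60s + 1)
	 *
	 * i.e. somewhere in the first 120 seconds of the run.  A function
	 * whose interval exceeds the total run time never fires at all,
	 * because its deadline is pushed out to UINT64_MAX.
	 */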
	/*
	 * Run the tests in a loop.  These tests include fault injection
	 * to verify that self-healing data works, and forced crashes
	 * to verify that we never lose on-disk consistency.
	 */
	while (gethrtime() < zs->zs_proc_stop) {
		int status;
		boolean_t killed;

		/*
		 * Initialize the workload counters for each function.
		 */
		for (int f = 0; f < ZTEST_FUNCS; f++) {
			zc = ZTEST_GET_SHARED_CALLSTATE(f);
			zc->zc_count = 0;
			zc->zc_time = 0;
		}

		/* Set the allocation switch size */
		zs->zs_metaslab_df_alloc_threshold =
		    ztest_random(zs->zs_metaslab_sz / 4) + 1;

		/*
		 * When an alternate ztest is configured, pick the newer
		 * or older binary with equal probability.
		 */
		if (!hasalt || ztest_random(2) == 0) {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing newer ztest: %s\n",
				    cmd);
			}
			newer++;
			killed = exec_child(cmd, NULL, B_TRUE, &status);
		} else {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing older ztest: %s\n",
				    ztest_opts.zo_alt_ztest);
			}
			older++;
			killed = exec_child(ztest_opts.zo_alt_ztest,
			    ztest_opts.zo_alt_libpath, B_TRUE, &status);
		}

		if (killed)
			kills++;
		iters++;

		if (ztest_opts.zo_verbose >= 1) {
			hrtime_t now = gethrtime();

			now = MIN(now, zs->zs_proc_stop);
			print_time(zs->zs_proc_stop - now, timebuf);
			nicenum(zs->zs_space, numbuf);

			(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
			    "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
			    iters,
			    WIFEXITED(status) ? "Complete" : "SIGKILL",
			    (u_longlong_t)zs->zs_enospc_count,
			    100.0 * zs->zs_alloc / zs->zs_space,
			    numbuf,
			    100.0 * (now - zs->zs_proc_start) /
			    (ztest_opts.zo_time * NANOSEC), timebuf);
		}

		if (ztest_opts.zo_verbose >= 2) {
			(void) printf("\nWorkload summary:\n\n");
			(void) printf("%7s %9s %s\n",
			    "Calls", "Time", "Function");
			(void) printf("%7s %9s %s\n",
			    "-----", "----", "--------");
			for (int f = 0; f < ZTEST_FUNCS; f++) {
				Dl_info dli;

				zi = &ztest_info[f];
				zc = ZTEST_GET_SHARED_CALLSTATE(f);
				print_time(zc->zc_time, timebuf);
				(void) dladdr((void *)zi->zi_func, &dli);
				(void) printf("%7llu %9s %s\n",
				    (u_longlong_t)zc->zc_count, timebuf,
				    dli.dli_sname);
			}
			(void) printf("\n");
		}

		/*
		 * It's possible that we killed a child during a rename test,
		 * in which case we'll have a 'ztest_tmp' pool lying around
		 * instead of 'ztest'.  Do a blind rename in case this happened.
		 */
		kernel_init(FREAD);
		if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
			spa_close(spa, FTAG);
		} else {
			char tmpname[MAXNAMELEN];
			kernel_fini();
			kernel_init(FREAD | FWRITE);
			(void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
			    ztest_opts.zo_pool);
			(void) spa_rename(tmpname, ztest_opts.zo_pool);
		}
		kernel_fini();

		ztest_run_zdb(ztest_opts.zo_pool);
	}

	if (ztest_opts.zo_verbose >= 1) {
		if (hasalt) {
			(void) printf("%d runs of older ztest: %s\n", older,
			    ztest_opts.zo_alt_ztest);
			(void) printf("%d runs of newer ztest: %s\n", newer,
			    cmd);
		}
		(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
		    kills, iters - kills, (100.0 * kills) / MAX(1, iters));
	}

	umem_free(cmd, MAXNAMELEN);

	return (0);
}