ztest.c revision 268656
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved. 26 * Copyright (c) 2013 Steven Hartland. All rights reserved. 27 */ 28 29/* 30 * The objective of this program is to provide a DMU/ZAP/SPA stress test 31 * that runs entirely in userland, is easy to use, and easy to extend. 32 * 33 * The overall design of the ztest program is as follows: 34 * 35 * (1) For each major functional area (e.g. adding vdevs to a pool, 36 * creating and destroying datasets, reading and writing objects, etc) 37 * we have a simple routine to test that functionality. These 38 * individual routines do not have to do anything "stressful". 39 * 40 * (2) We turn these simple functionality tests into a stress test by 41 * running them all in parallel, with as many threads as desired, 42 * and spread across as many datasets, objects, and vdevs as desired. 43 * 44 * (3) While all this is happening, we inject faults into the pool to 45 * verify that self-healing data really works. 46 * 47 * (4) Every time we open a dataset, we change its checksum and compression 48 * functions. Thus even individual objects vary from block to block 49 * in which checksum they use and whether they're compressed. 50 * 51 * (5) To verify that we never lose on-disk consistency after a crash, 52 * we run the entire test in a child of the main process. 53 * At random times, the child self-immolates with a SIGKILL. 54 * This is the software equivalent of pulling the power cord. 55 * The parent then runs the test again, using the existing 56 * storage pool, as many times as desired. If backwards compatibility 57 * testing is enabled ztest will sometimes run the "older" version 58 * of ztest after a SIGKILL. 59 * 60 * (6) To verify that we don't have future leaks or temporal incursions, 61 * many of the functional tests record the transaction group number 62 * as part of their data. When reading old data, they verify that 63 * the transaction group number is less than the current, open txg. 64 * If you add a new test, please do this if applicable. 65 * 66 * When run with no arguments, ztest runs for about five minutes and 67 * produces no output if successful. To get a little bit of information, 68 * specify -V. To get more information, specify -VV, and so on. 69 * 70 * To turn this into an overnight stress test, use -T to specify run time. 71 * 72 * You can ask more more vdevs [-v], datasets [-d], or threads [-t] 73 * to increase the pool capacity, fanout, and overall stress level. 74 * 75 * Use the -k option to set the desired frequency of kills. 76 * 77 * When ztest invokes itself it passes all relevant information through a 78 * temporary file which is mmap-ed in the child process. This allows shared 79 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always 80 * stored at offset 0 of this file and contains information on the size and 81 * number of shared structures in the file. The information stored in this file 82 * must remain backwards compatible with older versions of ztest so that 83 * ztest can invoke them during backwards compatibility testing (-B). 84 */ 85 86#include <sys/zfs_context.h> 87#include <sys/spa.h> 88#include <sys/dmu.h> 89#include <sys/txg.h> 90#include <sys/dbuf.h> 91#include <sys/zap.h> 92#include <sys/dmu_objset.h> 93#include <sys/poll.h> 94#include <sys/stat.h> 95#include <sys/time.h> 96#include <sys/wait.h> 97#include <sys/mman.h> 98#include <sys/resource.h> 99#include <sys/zio.h> 100#include <sys/zil.h> 101#include <sys/zil_impl.h> 102#include <sys/vdev_impl.h> 103#include <sys/vdev_file.h> 104#include <sys/spa_impl.h> 105#include <sys/metaslab_impl.h> 106#include <sys/dsl_prop.h> 107#include <sys/dsl_dataset.h> 108#include <sys/dsl_destroy.h> 109#include <sys/dsl_scan.h> 110#include <sys/zio_checksum.h> 111#include <sys/refcount.h> 112#include <sys/zfeature.h> 113#include <sys/dsl_userhold.h> 114#include <stdio.h> 115#include <stdio_ext.h> 116#include <stdlib.h> 117#include <unistd.h> 118#include <signal.h> 119#include <umem.h> 120#include <dlfcn.h> 121#include <ctype.h> 122#include <math.h> 123#include <errno.h> 124#include <sys/fs/zfs.h> 125#include <libnvpair.h> 126 127static int ztest_fd_data = -1; 128static int ztest_fd_rand = -1; 129 130typedef struct ztest_shared_hdr { 131 uint64_t zh_hdr_size; 132 uint64_t zh_opts_size; 133 uint64_t zh_size; 134 uint64_t zh_stats_size; 135 uint64_t zh_stats_count; 136 uint64_t zh_ds_size; 137 uint64_t zh_ds_count; 138} ztest_shared_hdr_t; 139 140static ztest_shared_hdr_t *ztest_shared_hdr; 141 142typedef struct ztest_shared_opts { 143 char zo_pool[MAXNAMELEN]; 144 char zo_dir[MAXNAMELEN]; 145 char zo_alt_ztest[MAXNAMELEN]; 146 char zo_alt_libpath[MAXNAMELEN]; 147 uint64_t zo_vdevs; 148 uint64_t zo_vdevtime; 149 size_t zo_vdev_size; 150 int zo_ashift; 151 int zo_mirrors; 152 int zo_raidz; 153 int zo_raidz_parity; 154 int zo_datasets; 155 int zo_threads; 156 uint64_t zo_passtime; 157 uint64_t zo_killrate; 158 int zo_verbose; 159 int zo_init; 160 uint64_t zo_time; 161 uint64_t zo_maxloops; 162 uint64_t zo_metaslab_gang_bang; 163} ztest_shared_opts_t; 164 165static const ztest_shared_opts_t ztest_opts_defaults = { 166 .zo_pool = { 'z', 't', 'e', 's', 't', '\0' }, 167 .zo_dir = { '/', 't', 'm', 'p', '\0' }, 168 .zo_alt_ztest = { '\0' }, 169 .zo_alt_libpath = { '\0' }, 170 .zo_vdevs = 5, 171 .zo_ashift = SPA_MINBLOCKSHIFT, 172 .zo_mirrors = 2, 173 .zo_raidz = 4, 174 .zo_raidz_parity = 1, 175 .zo_vdev_size = SPA_MINDEVSIZE, 176 .zo_datasets = 7, 177 .zo_threads = 23, 178 .zo_passtime = 60, /* 60 seconds */ 179 .zo_killrate = 70, /* 70% kill rate */ 180 .zo_verbose = 0, 181 .zo_init = 1, 182 .zo_time = 300, /* 5 minutes */ 183 .zo_maxloops = 50, /* max loops during spa_freeze() */ 184 .zo_metaslab_gang_bang = 32 << 10 185}; 186 187extern uint64_t metaslab_gang_bang; 188extern uint64_t metaslab_df_alloc_threshold; 189extern uint64_t zfs_deadman_synctime_ms; 190extern int metaslab_preload_limit; 191 192static ztest_shared_opts_t *ztest_shared_opts; 193static ztest_shared_opts_t ztest_opts; 194 195typedef struct ztest_shared_ds { 196 uint64_t zd_seq; 197} ztest_shared_ds_t; 198 199static ztest_shared_ds_t *ztest_shared_ds; 200#define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d]) 201 202#define BT_MAGIC 0x123456789abcdefULL 203#define MAXFAULTS() \ 204 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1) 205 206enum ztest_io_type { 207 ZTEST_IO_WRITE_TAG, 208 ZTEST_IO_WRITE_PATTERN, 209 ZTEST_IO_WRITE_ZEROES, 210 ZTEST_IO_TRUNCATE, 211 ZTEST_IO_SETATTR, 212 ZTEST_IO_REWRITE, 213 ZTEST_IO_TYPES 214}; 215 216typedef struct ztest_block_tag { 217 uint64_t bt_magic; 218 uint64_t bt_objset; 219 uint64_t bt_object; 220 uint64_t bt_offset; 221 uint64_t bt_gen; 222 uint64_t bt_txg; 223 uint64_t bt_crtxg; 224} ztest_block_tag_t; 225 226typedef struct bufwad { 227 uint64_t bw_index; 228 uint64_t bw_txg; 229 uint64_t bw_data; 230} bufwad_t; 231 232/* 233 * XXX -- fix zfs range locks to be generic so we can use them here. 234 */ 235typedef enum { 236 RL_READER, 237 RL_WRITER, 238 RL_APPEND 239} rl_type_t; 240 241typedef struct rll { 242 void *rll_writer; 243 int rll_readers; 244 mutex_t rll_lock; 245 cond_t rll_cv; 246} rll_t; 247 248typedef struct rl { 249 uint64_t rl_object; 250 uint64_t rl_offset; 251 uint64_t rl_size; 252 rll_t *rl_lock; 253} rl_t; 254 255#define ZTEST_RANGE_LOCKS 64 256#define ZTEST_OBJECT_LOCKS 64 257 258/* 259 * Object descriptor. Used as a template for object lookup/create/remove. 260 */ 261typedef struct ztest_od { 262 uint64_t od_dir; 263 uint64_t od_object; 264 dmu_object_type_t od_type; 265 dmu_object_type_t od_crtype; 266 uint64_t od_blocksize; 267 uint64_t od_crblocksize; 268 uint64_t od_gen; 269 uint64_t od_crgen; 270 char od_name[MAXNAMELEN]; 271} ztest_od_t; 272 273/* 274 * Per-dataset state. 275 */ 276typedef struct ztest_ds { 277 ztest_shared_ds_t *zd_shared; 278 objset_t *zd_os; 279 rwlock_t zd_zilog_lock; 280 zilog_t *zd_zilog; 281 ztest_od_t *zd_od; /* debugging aid */ 282 char zd_name[MAXNAMELEN]; 283 mutex_t zd_dirobj_lock; 284 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS]; 285 rll_t zd_range_lock[ZTEST_RANGE_LOCKS]; 286} ztest_ds_t; 287 288/* 289 * Per-iteration state. 290 */ 291typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id); 292 293typedef struct ztest_info { 294 ztest_func_t *zi_func; /* test function */ 295 uint64_t zi_iters; /* iterations per execution */ 296 uint64_t *zi_interval; /* execute every <interval> seconds */ 297} ztest_info_t; 298 299typedef struct ztest_shared_callstate { 300 uint64_t zc_count; /* per-pass count */ 301 uint64_t zc_time; /* per-pass time */ 302 uint64_t zc_next; /* next time to call this function */ 303} ztest_shared_callstate_t; 304 305static ztest_shared_callstate_t *ztest_shared_callstate; 306#define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c]) 307 308/* 309 * Note: these aren't static because we want dladdr() to work. 310 */ 311ztest_func_t ztest_dmu_read_write; 312ztest_func_t ztest_dmu_write_parallel; 313ztest_func_t ztest_dmu_object_alloc_free; 314ztest_func_t ztest_dmu_commit_callbacks; 315ztest_func_t ztest_zap; 316ztest_func_t ztest_zap_parallel; 317ztest_func_t ztest_zil_commit; 318ztest_func_t ztest_zil_remount; 319ztest_func_t ztest_dmu_read_write_zcopy; 320ztest_func_t ztest_dmu_objset_create_destroy; 321ztest_func_t ztest_dmu_prealloc; 322ztest_func_t ztest_fzap; 323ztest_func_t ztest_dmu_snapshot_create_destroy; 324ztest_func_t ztest_dsl_prop_get_set; 325ztest_func_t ztest_spa_prop_get_set; 326ztest_func_t ztest_spa_create_destroy; 327ztest_func_t ztest_fault_inject; 328ztest_func_t ztest_ddt_repair; 329ztest_func_t ztest_dmu_snapshot_hold; 330ztest_func_t ztest_spa_rename; 331ztest_func_t ztest_scrub; 332ztest_func_t ztest_dsl_dataset_promote_busy; 333ztest_func_t ztest_vdev_attach_detach; 334ztest_func_t ztest_vdev_LUN_growth; 335ztest_func_t ztest_vdev_add_remove; 336ztest_func_t ztest_vdev_aux_add_remove; 337ztest_func_t ztest_split_pool; 338ztest_func_t ztest_reguid; 339ztest_func_t ztest_spa_upgrade; 340 341uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */ 342uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */ 343uint64_t zopt_often = 1ULL * NANOSEC; /* every second */ 344uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */ 345uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */ 346 347ztest_info_t ztest_info[] = { 348 { ztest_dmu_read_write, 1, &zopt_always }, 349 { ztest_dmu_write_parallel, 10, &zopt_always }, 350 { ztest_dmu_object_alloc_free, 1, &zopt_always }, 351 { ztest_dmu_commit_callbacks, 1, &zopt_always }, 352 { ztest_zap, 30, &zopt_always }, 353 { ztest_zap_parallel, 100, &zopt_always }, 354 { ztest_split_pool, 1, &zopt_always }, 355 { ztest_zil_commit, 1, &zopt_incessant }, 356 { ztest_zil_remount, 1, &zopt_sometimes }, 357 { ztest_dmu_read_write_zcopy, 1, &zopt_often }, 358 { ztest_dmu_objset_create_destroy, 1, &zopt_often }, 359 { ztest_dsl_prop_get_set, 1, &zopt_often }, 360 { ztest_spa_prop_get_set, 1, &zopt_sometimes }, 361#if 0 362 { ztest_dmu_prealloc, 1, &zopt_sometimes }, 363#endif 364 { ztest_fzap, 1, &zopt_sometimes }, 365 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes }, 366 { ztest_spa_create_destroy, 1, &zopt_sometimes }, 367 { ztest_fault_inject, 1, &zopt_sometimes }, 368 { ztest_ddt_repair, 1, &zopt_sometimes }, 369 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes }, 370 { ztest_reguid, 1, &zopt_rarely }, 371 { ztest_spa_rename, 1, &zopt_rarely }, 372 { ztest_scrub, 1, &zopt_rarely }, 373 { ztest_spa_upgrade, 1, &zopt_rarely }, 374 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely }, 375 { ztest_vdev_attach_detach, 1, &zopt_sometimes }, 376 { ztest_vdev_LUN_growth, 1, &zopt_rarely }, 377 { ztest_vdev_add_remove, 1, 378 &ztest_opts.zo_vdevtime }, 379 { ztest_vdev_aux_add_remove, 1, 380 &ztest_opts.zo_vdevtime }, 381}; 382 383#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t)) 384 385/* 386 * The following struct is used to hold a list of uncalled commit callbacks. 387 * The callbacks are ordered by txg number. 388 */ 389typedef struct ztest_cb_list { 390 mutex_t zcl_callbacks_lock; 391 list_t zcl_callbacks; 392} ztest_cb_list_t; 393 394/* 395 * Stuff we need to share writably between parent and child. 396 */ 397typedef struct ztest_shared { 398 boolean_t zs_do_init; 399 hrtime_t zs_proc_start; 400 hrtime_t zs_proc_stop; 401 hrtime_t zs_thread_start; 402 hrtime_t zs_thread_stop; 403 hrtime_t zs_thread_kill; 404 uint64_t zs_enospc_count; 405 uint64_t zs_vdev_next_leaf; 406 uint64_t zs_vdev_aux; 407 uint64_t zs_alloc; 408 uint64_t zs_space; 409 uint64_t zs_splits; 410 uint64_t zs_mirrors; 411 uint64_t zs_metaslab_sz; 412 uint64_t zs_metaslab_df_alloc_threshold; 413 uint64_t zs_guid; 414} ztest_shared_t; 415 416#define ID_PARALLEL -1ULL 417 418static char ztest_dev_template[] = "%s/%s.%llua"; 419static char ztest_aux_template[] = "%s/%s.%s.%llu"; 420ztest_shared_t *ztest_shared; 421 422static spa_t *ztest_spa = NULL; 423static ztest_ds_t *ztest_ds; 424 425static mutex_t ztest_vdev_lock; 426 427/* 428 * The ztest_name_lock protects the pool and dataset namespace used by 429 * the individual tests. To modify the namespace, consumers must grab 430 * this lock as writer. Grabbing the lock as reader will ensure that the 431 * namespace does not change while the lock is held. 432 */ 433static rwlock_t ztest_name_lock; 434 435static boolean_t ztest_dump_core = B_TRUE; 436static boolean_t ztest_exiting; 437 438/* Global commit callback list */ 439static ztest_cb_list_t zcl; 440 441enum ztest_object { 442 ZTEST_META_DNODE = 0, 443 ZTEST_DIROBJ, 444 ZTEST_OBJECTS 445}; 446 447static void usage(boolean_t) __NORETURN; 448 449/* 450 * These libumem hooks provide a reasonable set of defaults for the allocator's 451 * debugging facilities. 452 */ 453const char * 454_umem_debug_init() 455{ 456 return ("default,verbose"); /* $UMEM_DEBUG setting */ 457} 458 459const char * 460_umem_logging_init(void) 461{ 462 return ("fail,contents"); /* $UMEM_LOGGING setting */ 463} 464 465#define FATAL_MSG_SZ 1024 466 467char *fatal_msg; 468 469static void 470fatal(int do_perror, char *message, ...) 471{ 472 va_list args; 473 int save_errno = errno; 474 char buf[FATAL_MSG_SZ]; 475 476 (void) fflush(stdout); 477 478 va_start(args, message); 479 (void) sprintf(buf, "ztest: "); 480 /* LINTED */ 481 (void) vsprintf(buf + strlen(buf), message, args); 482 va_end(args); 483 if (do_perror) { 484 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf), 485 ": %s", strerror(save_errno)); 486 } 487 (void) fprintf(stderr, "%s\n", buf); 488 fatal_msg = buf; /* to ease debugging */ 489 if (ztest_dump_core) 490 abort(); 491 exit(3); 492} 493 494static int 495str2shift(const char *buf) 496{ 497 const char *ends = "BKMGTPEZ"; 498 int i; 499 500 if (buf[0] == '\0') 501 return (0); 502 for (i = 0; i < strlen(ends); i++) { 503 if (toupper(buf[0]) == ends[i]) 504 break; 505 } 506 if (i == strlen(ends)) { 507 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", 508 buf); 509 usage(B_FALSE); 510 } 511 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) { 512 return (10*i); 513 } 514 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf); 515 usage(B_FALSE); 516 /* NOTREACHED */ 517} 518 519static uint64_t 520nicenumtoull(const char *buf) 521{ 522 char *end; 523 uint64_t val; 524 525 val = strtoull(buf, &end, 0); 526 if (end == buf) { 527 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf); 528 usage(B_FALSE); 529 } else if (end[0] == '.') { 530 double fval = strtod(buf, &end); 531 fval *= pow(2, str2shift(end)); 532 if (fval > UINT64_MAX) { 533 (void) fprintf(stderr, "ztest: value too large: %s\n", 534 buf); 535 usage(B_FALSE); 536 } 537 val = (uint64_t)fval; 538 } else { 539 int shift = str2shift(end); 540 if (shift >= 64 || (val << shift) >> shift != val) { 541 (void) fprintf(stderr, "ztest: value too large: %s\n", 542 buf); 543 usage(B_FALSE); 544 } 545 val <<= shift; 546 } 547 return (val); 548} 549 550static void 551usage(boolean_t requested) 552{ 553 const ztest_shared_opts_t *zo = &ztest_opts_defaults; 554 555 char nice_vdev_size[10]; 556 char nice_gang_bang[10]; 557 FILE *fp = requested ? stdout : stderr; 558 559 nicenum(zo->zo_vdev_size, nice_vdev_size); 560 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang); 561 562 (void) fprintf(fp, "Usage: %s\n" 563 "\t[-v vdevs (default: %llu)]\n" 564 "\t[-s size_of_each_vdev (default: %s)]\n" 565 "\t[-a alignment_shift (default: %d)] use 0 for random\n" 566 "\t[-m mirror_copies (default: %d)]\n" 567 "\t[-r raidz_disks (default: %d)]\n" 568 "\t[-R raidz_parity (default: %d)]\n" 569 "\t[-d datasets (default: %d)]\n" 570 "\t[-t threads (default: %d)]\n" 571 "\t[-g gang_block_threshold (default: %s)]\n" 572 "\t[-i init_count (default: %d)] initialize pool i times\n" 573 "\t[-k kill_percentage (default: %llu%%)]\n" 574 "\t[-p pool_name (default: %s)]\n" 575 "\t[-f dir (default: %s)] file directory for vdev files\n" 576 "\t[-V] verbose (use multiple times for ever more blather)\n" 577 "\t[-E] use existing pool instead of creating new one\n" 578 "\t[-T time (default: %llu sec)] total run time\n" 579 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n" 580 "\t[-P passtime (default: %llu sec)] time per pass\n" 581 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n" 582 "\t[-h] (print help)\n" 583 "", 584 zo->zo_pool, 585 (u_longlong_t)zo->zo_vdevs, /* -v */ 586 nice_vdev_size, /* -s */ 587 zo->zo_ashift, /* -a */ 588 zo->zo_mirrors, /* -m */ 589 zo->zo_raidz, /* -r */ 590 zo->zo_raidz_parity, /* -R */ 591 zo->zo_datasets, /* -d */ 592 zo->zo_threads, /* -t */ 593 nice_gang_bang, /* -g */ 594 zo->zo_init, /* -i */ 595 (u_longlong_t)zo->zo_killrate, /* -k */ 596 zo->zo_pool, /* -p */ 597 zo->zo_dir, /* -f */ 598 (u_longlong_t)zo->zo_time, /* -T */ 599 (u_longlong_t)zo->zo_maxloops, /* -F */ 600 (u_longlong_t)zo->zo_passtime); 601 exit(requested ? 0 : 1); 602} 603 604static void 605process_options(int argc, char **argv) 606{ 607 char *path; 608 ztest_shared_opts_t *zo = &ztest_opts; 609 610 int opt; 611 uint64_t value; 612 char altdir[MAXNAMELEN] = { 0 }; 613 614 bcopy(&ztest_opts_defaults, zo, sizeof (*zo)); 615 616 while ((opt = getopt(argc, argv, 617 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) { 618 value = 0; 619 switch (opt) { 620 case 'v': 621 case 's': 622 case 'a': 623 case 'm': 624 case 'r': 625 case 'R': 626 case 'd': 627 case 't': 628 case 'g': 629 case 'i': 630 case 'k': 631 case 'T': 632 case 'P': 633 case 'F': 634 value = nicenumtoull(optarg); 635 } 636 switch (opt) { 637 case 'v': 638 zo->zo_vdevs = value; 639 break; 640 case 's': 641 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value); 642 break; 643 case 'a': 644 zo->zo_ashift = value; 645 break; 646 case 'm': 647 zo->zo_mirrors = value; 648 break; 649 case 'r': 650 zo->zo_raidz = MAX(1, value); 651 break; 652 case 'R': 653 zo->zo_raidz_parity = MIN(MAX(value, 1), 3); 654 break; 655 case 'd': 656 zo->zo_datasets = MAX(1, value); 657 break; 658 case 't': 659 zo->zo_threads = MAX(1, value); 660 break; 661 case 'g': 662 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1, 663 value); 664 break; 665 case 'i': 666 zo->zo_init = value; 667 break; 668 case 'k': 669 zo->zo_killrate = value; 670 break; 671 case 'p': 672 (void) strlcpy(zo->zo_pool, optarg, 673 sizeof (zo->zo_pool)); 674 break; 675 case 'f': 676 path = realpath(optarg, NULL); 677 if (path == NULL) { 678 (void) fprintf(stderr, "error: %s: %s\n", 679 optarg, strerror(errno)); 680 usage(B_FALSE); 681 } else { 682 (void) strlcpy(zo->zo_dir, path, 683 sizeof (zo->zo_dir)); 684 } 685 break; 686 case 'V': 687 zo->zo_verbose++; 688 break; 689 case 'E': 690 zo->zo_init = 0; 691 break; 692 case 'T': 693 zo->zo_time = value; 694 break; 695 case 'P': 696 zo->zo_passtime = MAX(1, value); 697 break; 698 case 'F': 699 zo->zo_maxloops = MAX(1, value); 700 break; 701 case 'B': 702 (void) strlcpy(altdir, optarg, sizeof (altdir)); 703 break; 704 case 'h': 705 usage(B_TRUE); 706 break; 707 case '?': 708 default: 709 usage(B_FALSE); 710 break; 711 } 712 } 713 714 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1); 715 716 zo->zo_vdevtime = 717 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs : 718 UINT64_MAX >> 2); 719 720 if (strlen(altdir) > 0) { 721 char *cmd; 722 char *realaltdir; 723 char *bin; 724 char *ztest; 725 char *isa; 726 int isalen; 727 728 cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 729 realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 730 731 VERIFY(NULL != realpath(getexecname(), cmd)); 732 if (0 != access(altdir, F_OK)) { 733 ztest_dump_core = B_FALSE; 734 fatal(B_TRUE, "invalid alternate ztest path: %s", 735 altdir); 736 } 737 VERIFY(NULL != realpath(altdir, realaltdir)); 738 739 /* 740 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest". 741 * We want to extract <isa> to determine if we should use 742 * 32 or 64 bit binaries. 743 */ 744 bin = strstr(cmd, "/usr/bin/"); 745 ztest = strstr(bin, "/ztest"); 746 isa = bin + 9; 747 isalen = ztest - isa; 748 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest), 749 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa); 750 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath), 751 "%s/usr/lib/%.*s", realaltdir, isalen, isa); 752 753 if (0 != access(zo->zo_alt_ztest, X_OK)) { 754 ztest_dump_core = B_FALSE; 755 fatal(B_TRUE, "invalid alternate ztest: %s", 756 zo->zo_alt_ztest); 757 } else if (0 != access(zo->zo_alt_libpath, X_OK)) { 758 ztest_dump_core = B_FALSE; 759 fatal(B_TRUE, "invalid alternate lib directory %s", 760 zo->zo_alt_libpath); 761 } 762 763 umem_free(cmd, MAXPATHLEN); 764 umem_free(realaltdir, MAXPATHLEN); 765 } 766} 767 768static void 769ztest_kill(ztest_shared_t *zs) 770{ 771 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa)); 772 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa)); 773 774 /* 775 * Before we kill off ztest, make sure that the config is updated. 776 * See comment above spa_config_sync(). 777 */ 778 mutex_enter(&spa_namespace_lock); 779 spa_config_sync(ztest_spa, B_FALSE, B_FALSE); 780 mutex_exit(&spa_namespace_lock); 781 782 zfs_dbgmsg_print(FTAG); 783 (void) kill(getpid(), SIGKILL); 784} 785 786static uint64_t 787ztest_random(uint64_t range) 788{ 789 uint64_t r; 790 791 ASSERT3S(ztest_fd_rand, >=, 0); 792 793 if (range == 0) 794 return (0); 795 796 if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r)) 797 fatal(1, "short read from /dev/urandom"); 798 799 return (r % range); 800} 801 802/* ARGSUSED */ 803static void 804ztest_record_enospc(const char *s) 805{ 806 ztest_shared->zs_enospc_count++; 807} 808 809static uint64_t 810ztest_get_ashift(void) 811{ 812 if (ztest_opts.zo_ashift == 0) 813 return (SPA_MINBLOCKSHIFT + ztest_random(3)); 814 return (ztest_opts.zo_ashift); 815} 816 817static nvlist_t * 818make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift) 819{ 820 char pathbuf[MAXPATHLEN]; 821 uint64_t vdev; 822 nvlist_t *file; 823 824 if (ashift == 0) 825 ashift = ztest_get_ashift(); 826 827 if (path == NULL) { 828 path = pathbuf; 829 830 if (aux != NULL) { 831 vdev = ztest_shared->zs_vdev_aux; 832 (void) snprintf(path, sizeof (pathbuf), 833 ztest_aux_template, ztest_opts.zo_dir, 834 pool == NULL ? ztest_opts.zo_pool : pool, 835 aux, vdev); 836 } else { 837 vdev = ztest_shared->zs_vdev_next_leaf++; 838 (void) snprintf(path, sizeof (pathbuf), 839 ztest_dev_template, ztest_opts.zo_dir, 840 pool == NULL ? ztest_opts.zo_pool : pool, vdev); 841 } 842 } 843 844 if (size != 0) { 845 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666); 846 if (fd == -1) 847 fatal(1, "can't open %s", path); 848 if (ftruncate(fd, size) != 0) 849 fatal(1, "can't ftruncate %s", path); 850 (void) close(fd); 851 } 852 853 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0); 854 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0); 855 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0); 856 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0); 857 858 return (file); 859} 860 861static nvlist_t * 862make_vdev_raidz(char *path, char *aux, char *pool, size_t size, 863 uint64_t ashift, int r) 864{ 865 nvlist_t *raidz, **child; 866 int c; 867 868 if (r < 2) 869 return (make_vdev_file(path, aux, pool, size, ashift)); 870 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL); 871 872 for (c = 0; c < r; c++) 873 child[c] = make_vdev_file(path, aux, pool, size, ashift); 874 875 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0); 876 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE, 877 VDEV_TYPE_RAIDZ) == 0); 878 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY, 879 ztest_opts.zo_raidz_parity) == 0); 880 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN, 881 child, r) == 0); 882 883 for (c = 0; c < r; c++) 884 nvlist_free(child[c]); 885 886 umem_free(child, r * sizeof (nvlist_t *)); 887 888 return (raidz); 889} 890 891static nvlist_t * 892make_vdev_mirror(char *path, char *aux, char *pool, size_t size, 893 uint64_t ashift, int r, int m) 894{ 895 nvlist_t *mirror, **child; 896 int c; 897 898 if (m < 1) 899 return (make_vdev_raidz(path, aux, pool, size, ashift, r)); 900 901 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL); 902 903 for (c = 0; c < m; c++) 904 child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r); 905 906 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0); 907 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, 908 VDEV_TYPE_MIRROR) == 0); 909 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN, 910 child, m) == 0); 911 912 for (c = 0; c < m; c++) 913 nvlist_free(child[c]); 914 915 umem_free(child, m * sizeof (nvlist_t *)); 916 917 return (mirror); 918} 919 920static nvlist_t * 921make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift, 922 int log, int r, int m, int t) 923{ 924 nvlist_t *root, **child; 925 int c; 926 927 ASSERT(t > 0); 928 929 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL); 930 931 for (c = 0; c < t; c++) { 932 child[c] = make_vdev_mirror(path, aux, pool, size, ashift, 933 r, m); 934 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 935 log) == 0); 936 } 937 938 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0); 939 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0); 940 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN, 941 child, t) == 0); 942 943 for (c = 0; c < t; c++) 944 nvlist_free(child[c]); 945 946 umem_free(child, t * sizeof (nvlist_t *)); 947 948 return (root); 949} 950 951/* 952 * Find a random spa version. Returns back a random spa version in the 953 * range [initial_version, SPA_VERSION_FEATURES]. 954 */ 955static uint64_t 956ztest_random_spa_version(uint64_t initial_version) 957{ 958 uint64_t version = initial_version; 959 960 if (version <= SPA_VERSION_BEFORE_FEATURES) { 961 version = version + 962 ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1); 963 } 964 965 if (version > SPA_VERSION_BEFORE_FEATURES) 966 version = SPA_VERSION_FEATURES; 967 968 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 969 return (version); 970} 971 972static int 973ztest_random_blocksize(void) 974{ 975 return (1 << (SPA_MINBLOCKSHIFT + 976 ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1))); 977} 978 979static int 980ztest_random_ibshift(void) 981{ 982 return (DN_MIN_INDBLKSHIFT + 983 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1)); 984} 985 986static uint64_t 987ztest_random_vdev_top(spa_t *spa, boolean_t log_ok) 988{ 989 uint64_t top; 990 vdev_t *rvd = spa->spa_root_vdev; 991 vdev_t *tvd; 992 993 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 994 995 do { 996 top = ztest_random(rvd->vdev_children); 997 tvd = rvd->vdev_child[top]; 998 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) || 999 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL); 1000 1001 return (top); 1002} 1003 1004static uint64_t 1005ztest_random_dsl_prop(zfs_prop_t prop) 1006{ 1007 uint64_t value; 1008 1009 do { 1010 value = zfs_prop_random_value(prop, ztest_random(-1ULL)); 1011 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF); 1012 1013 return (value); 1014} 1015 1016static int 1017ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value, 1018 boolean_t inherit) 1019{ 1020 const char *propname = zfs_prop_to_name(prop); 1021 const char *valname; 1022 char setpoint[MAXPATHLEN]; 1023 uint64_t curval; 1024 int error; 1025 1026 error = dsl_prop_set_int(osname, propname, 1027 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value); 1028 1029 if (error == ENOSPC) { 1030 ztest_record_enospc(FTAG); 1031 return (error); 1032 } 1033 ASSERT0(error); 1034 1035 VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint)); 1036 1037 if (ztest_opts.zo_verbose >= 6) { 1038 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0); 1039 (void) printf("%s %s = %s at '%s'\n", 1040 osname, propname, valname, setpoint); 1041 } 1042 1043 return (error); 1044} 1045 1046static int 1047ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value) 1048{ 1049 spa_t *spa = ztest_spa; 1050 nvlist_t *props = NULL; 1051 int error; 1052 1053 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 1054 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0); 1055 1056 error = spa_prop_set(spa, props); 1057 1058 nvlist_free(props); 1059 1060 if (error == ENOSPC) { 1061 ztest_record_enospc(FTAG); 1062 return (error); 1063 } 1064 ASSERT0(error); 1065 1066 return (error); 1067} 1068 1069static void 1070ztest_rll_init(rll_t *rll) 1071{ 1072 rll->rll_writer = NULL; 1073 rll->rll_readers = 0; 1074 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0); 1075 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0); 1076} 1077 1078static void 1079ztest_rll_destroy(rll_t *rll) 1080{ 1081 ASSERT(rll->rll_writer == NULL); 1082 ASSERT(rll->rll_readers == 0); 1083 VERIFY(_mutex_destroy(&rll->rll_lock) == 0); 1084 VERIFY(cond_destroy(&rll->rll_cv) == 0); 1085} 1086 1087static void 1088ztest_rll_lock(rll_t *rll, rl_type_t type) 1089{ 1090 VERIFY(mutex_lock(&rll->rll_lock) == 0); 1091 1092 if (type == RL_READER) { 1093 while (rll->rll_writer != NULL) 1094 (void) cond_wait(&rll->rll_cv, &rll->rll_lock); 1095 rll->rll_readers++; 1096 } else { 1097 while (rll->rll_writer != NULL || rll->rll_readers) 1098 (void) cond_wait(&rll->rll_cv, &rll->rll_lock); 1099 rll->rll_writer = curthread; 1100 } 1101 1102 VERIFY(mutex_unlock(&rll->rll_lock) == 0); 1103} 1104 1105static void 1106ztest_rll_unlock(rll_t *rll) 1107{ 1108 VERIFY(mutex_lock(&rll->rll_lock) == 0); 1109 1110 if (rll->rll_writer) { 1111 ASSERT(rll->rll_readers == 0); 1112 rll->rll_writer = NULL; 1113 } else { 1114 ASSERT(rll->rll_readers != 0); 1115 ASSERT(rll->rll_writer == NULL); 1116 rll->rll_readers--; 1117 } 1118 1119 if (rll->rll_writer == NULL && rll->rll_readers == 0) 1120 VERIFY(cond_broadcast(&rll->rll_cv) == 0); 1121 1122 VERIFY(mutex_unlock(&rll->rll_lock) == 0); 1123} 1124 1125static void 1126ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type) 1127{ 1128 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1129 1130 ztest_rll_lock(rll, type); 1131} 1132 1133static void 1134ztest_object_unlock(ztest_ds_t *zd, uint64_t object) 1135{ 1136 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1137 1138 ztest_rll_unlock(rll); 1139} 1140 1141static rl_t * 1142ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset, 1143 uint64_t size, rl_type_t type) 1144{ 1145 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1)); 1146 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)]; 1147 rl_t *rl; 1148 1149 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL); 1150 rl->rl_object = object; 1151 rl->rl_offset = offset; 1152 rl->rl_size = size; 1153 rl->rl_lock = rll; 1154 1155 ztest_rll_lock(rll, type); 1156 1157 return (rl); 1158} 1159 1160static void 1161ztest_range_unlock(rl_t *rl) 1162{ 1163 rll_t *rll = rl->rl_lock; 1164 1165 ztest_rll_unlock(rll); 1166 1167 umem_free(rl, sizeof (*rl)); 1168} 1169 1170static void 1171ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os) 1172{ 1173 zd->zd_os = os; 1174 zd->zd_zilog = dmu_objset_zil(os); 1175 zd->zd_shared = szd; 1176 dmu_objset_name(os, zd->zd_name); 1177 1178 if (zd->zd_shared != NULL) 1179 zd->zd_shared->zd_seq = 0; 1180 1181 VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0); 1182 VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0); 1183 1184 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1185 ztest_rll_init(&zd->zd_object_lock[l]); 1186 1187 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1188 ztest_rll_init(&zd->zd_range_lock[l]); 1189} 1190 1191static void 1192ztest_zd_fini(ztest_ds_t *zd) 1193{ 1194 VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0); 1195 1196 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1197 ztest_rll_destroy(&zd->zd_object_lock[l]); 1198 1199 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1200 ztest_rll_destroy(&zd->zd_range_lock[l]); 1201} 1202 1203#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT) 1204 1205static uint64_t 1206ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag) 1207{ 1208 uint64_t txg; 1209 int error; 1210 1211 /* 1212 * Attempt to assign tx to some transaction group. 1213 */ 1214 error = dmu_tx_assign(tx, txg_how); 1215 if (error) { 1216 if (error == ERESTART) { 1217 ASSERT(txg_how == TXG_NOWAIT); 1218 dmu_tx_wait(tx); 1219 } else { 1220 ASSERT3U(error, ==, ENOSPC); 1221 ztest_record_enospc(tag); 1222 } 1223 dmu_tx_abort(tx); 1224 return (0); 1225 } 1226 txg = dmu_tx_get_txg(tx); 1227 ASSERT(txg != 0); 1228 return (txg); 1229} 1230 1231static void 1232ztest_pattern_set(void *buf, uint64_t size, uint64_t value) 1233{ 1234 uint64_t *ip = buf; 1235 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1236 1237 while (ip < ip_end) 1238 *ip++ = value; 1239} 1240 1241static boolean_t 1242ztest_pattern_match(void *buf, uint64_t size, uint64_t value) 1243{ 1244 uint64_t *ip = buf; 1245 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1246 uint64_t diff = 0; 1247 1248 while (ip < ip_end) 1249 diff |= (value - *ip++); 1250 1251 return (diff == 0); 1252} 1253 1254static void 1255ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1256 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1257{ 1258 bt->bt_magic = BT_MAGIC; 1259 bt->bt_objset = dmu_objset_id(os); 1260 bt->bt_object = object; 1261 bt->bt_offset = offset; 1262 bt->bt_gen = gen; 1263 bt->bt_txg = txg; 1264 bt->bt_crtxg = crtxg; 1265} 1266 1267static void 1268ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1269 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1270{ 1271 ASSERT3U(bt->bt_magic, ==, BT_MAGIC); 1272 ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os)); 1273 ASSERT3U(bt->bt_object, ==, object); 1274 ASSERT3U(bt->bt_offset, ==, offset); 1275 ASSERT3U(bt->bt_gen, <=, gen); 1276 ASSERT3U(bt->bt_txg, <=, txg); 1277 ASSERT3U(bt->bt_crtxg, ==, crtxg); 1278} 1279 1280static ztest_block_tag_t * 1281ztest_bt_bonus(dmu_buf_t *db) 1282{ 1283 dmu_object_info_t doi; 1284 ztest_block_tag_t *bt; 1285 1286 dmu_object_info_from_db(db, &doi); 1287 ASSERT3U(doi.doi_bonus_size, <=, db->db_size); 1288 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt)); 1289 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt)); 1290 1291 return (bt); 1292} 1293 1294/* 1295 * ZIL logging ops 1296 */ 1297 1298#define lrz_type lr_mode 1299#define lrz_blocksize lr_uid 1300#define lrz_ibshift lr_gid 1301#define lrz_bonustype lr_rdev 1302#define lrz_bonuslen lr_crtime[1] 1303 1304static void 1305ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr) 1306{ 1307 char *name = (void *)(lr + 1); /* name follows lr */ 1308 size_t namesize = strlen(name) + 1; 1309 itx_t *itx; 1310 1311 if (zil_replaying(zd->zd_zilog, tx)) 1312 return; 1313 1314 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize); 1315 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1316 sizeof (*lr) + namesize - sizeof (lr_t)); 1317 1318 zil_itx_assign(zd->zd_zilog, itx, tx); 1319} 1320 1321static void 1322ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object) 1323{ 1324 char *name = (void *)(lr + 1); /* name follows lr */ 1325 size_t namesize = strlen(name) + 1; 1326 itx_t *itx; 1327 1328 if (zil_replaying(zd->zd_zilog, tx)) 1329 return; 1330 1331 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize); 1332 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1333 sizeof (*lr) + namesize - sizeof (lr_t)); 1334 1335 itx->itx_oid = object; 1336 zil_itx_assign(zd->zd_zilog, itx, tx); 1337} 1338 1339static void 1340ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr) 1341{ 1342 itx_t *itx; 1343 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES); 1344 1345 if (zil_replaying(zd->zd_zilog, tx)) 1346 return; 1347 1348 if (lr->lr_length > ZIL_MAX_LOG_DATA) 1349 write_state = WR_INDIRECT; 1350 1351 itx = zil_itx_create(TX_WRITE, 1352 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0)); 1353 1354 if (write_state == WR_COPIED && 1355 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length, 1356 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) { 1357 zil_itx_destroy(itx); 1358 itx = zil_itx_create(TX_WRITE, sizeof (*lr)); 1359 write_state = WR_NEED_COPY; 1360 } 1361 itx->itx_private = zd; 1362 itx->itx_wr_state = write_state; 1363 itx->itx_sync = (ztest_random(8) == 0); 1364 itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0); 1365 1366 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1367 sizeof (*lr) - sizeof (lr_t)); 1368 1369 zil_itx_assign(zd->zd_zilog, itx, tx); 1370} 1371 1372static void 1373ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr) 1374{ 1375 itx_t *itx; 1376 1377 if (zil_replaying(zd->zd_zilog, tx)) 1378 return; 1379 1380 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr)); 1381 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1382 sizeof (*lr) - sizeof (lr_t)); 1383 1384 itx->itx_sync = B_FALSE; 1385 zil_itx_assign(zd->zd_zilog, itx, tx); 1386} 1387 1388static void 1389ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr) 1390{ 1391 itx_t *itx; 1392 1393 if (zil_replaying(zd->zd_zilog, tx)) 1394 return; 1395 1396 itx = zil_itx_create(TX_SETATTR, sizeof (*lr)); 1397 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1398 sizeof (*lr) - sizeof (lr_t)); 1399 1400 itx->itx_sync = B_FALSE; 1401 zil_itx_assign(zd->zd_zilog, itx, tx); 1402} 1403 1404/* 1405 * ZIL replay ops 1406 */ 1407static int 1408ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap) 1409{ 1410 char *name = (void *)(lr + 1); /* name follows lr */ 1411 objset_t *os = zd->zd_os; 1412 ztest_block_tag_t *bbt; 1413 dmu_buf_t *db; 1414 dmu_tx_t *tx; 1415 uint64_t txg; 1416 int error = 0; 1417 1418 if (byteswap) 1419 byteswap_uint64_array(lr, sizeof (*lr)); 1420 1421 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1422 ASSERT(name[0] != '\0'); 1423 1424 tx = dmu_tx_create(os); 1425 1426 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name); 1427 1428 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1429 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 1430 } else { 1431 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1432 } 1433 1434 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1435 if (txg == 0) 1436 return (ENOSPC); 1437 1438 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid); 1439 1440 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1441 if (lr->lr_foid == 0) { 1442 lr->lr_foid = zap_create(os, 1443 lr->lrz_type, lr->lrz_bonustype, 1444 lr->lrz_bonuslen, tx); 1445 } else { 1446 error = zap_create_claim(os, lr->lr_foid, 1447 lr->lrz_type, lr->lrz_bonustype, 1448 lr->lrz_bonuslen, tx); 1449 } 1450 } else { 1451 if (lr->lr_foid == 0) { 1452 lr->lr_foid = dmu_object_alloc(os, 1453 lr->lrz_type, 0, lr->lrz_bonustype, 1454 lr->lrz_bonuslen, tx); 1455 } else { 1456 error = dmu_object_claim(os, lr->lr_foid, 1457 lr->lrz_type, 0, lr->lrz_bonustype, 1458 lr->lrz_bonuslen, tx); 1459 } 1460 } 1461 1462 if (error) { 1463 ASSERT3U(error, ==, EEXIST); 1464 ASSERT(zd->zd_zilog->zl_replay); 1465 dmu_tx_commit(tx); 1466 return (error); 1467 } 1468 1469 ASSERT(lr->lr_foid != 0); 1470 1471 if (lr->lrz_type != DMU_OT_ZAP_OTHER) 1472 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid, 1473 lr->lrz_blocksize, lr->lrz_ibshift, tx)); 1474 1475 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1476 bbt = ztest_bt_bonus(db); 1477 dmu_buf_will_dirty(db, tx); 1478 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg); 1479 dmu_buf_rele(db, FTAG); 1480 1481 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1, 1482 &lr->lr_foid, tx)); 1483 1484 (void) ztest_log_create(zd, tx, lr); 1485 1486 dmu_tx_commit(tx); 1487 1488 return (0); 1489} 1490 1491static int 1492ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap) 1493{ 1494 char *name = (void *)(lr + 1); /* name follows lr */ 1495 objset_t *os = zd->zd_os; 1496 dmu_object_info_t doi; 1497 dmu_tx_t *tx; 1498 uint64_t object, txg; 1499 1500 if (byteswap) 1501 byteswap_uint64_array(lr, sizeof (*lr)); 1502 1503 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1504 ASSERT(name[0] != '\0'); 1505 1506 VERIFY3U(0, ==, 1507 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object)); 1508 ASSERT(object != 0); 1509 1510 ztest_object_lock(zd, object, RL_WRITER); 1511 1512 VERIFY3U(0, ==, dmu_object_info(os, object, &doi)); 1513 1514 tx = dmu_tx_create(os); 1515 1516 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name); 1517 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); 1518 1519 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1520 if (txg == 0) { 1521 ztest_object_unlock(zd, object); 1522 return (ENOSPC); 1523 } 1524 1525 if (doi.doi_type == DMU_OT_ZAP_OTHER) { 1526 VERIFY3U(0, ==, zap_destroy(os, object, tx)); 1527 } else { 1528 VERIFY3U(0, ==, dmu_object_free(os, object, tx)); 1529 } 1530 1531 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx)); 1532 1533 (void) ztest_log_remove(zd, tx, lr, object); 1534 1535 dmu_tx_commit(tx); 1536 1537 ztest_object_unlock(zd, object); 1538 1539 return (0); 1540} 1541 1542static int 1543ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap) 1544{ 1545 objset_t *os = zd->zd_os; 1546 void *data = lr + 1; /* data follows lr */ 1547 uint64_t offset, length; 1548 ztest_block_tag_t *bt = data; 1549 ztest_block_tag_t *bbt; 1550 uint64_t gen, txg, lrtxg, crtxg; 1551 dmu_object_info_t doi; 1552 dmu_tx_t *tx; 1553 dmu_buf_t *db; 1554 arc_buf_t *abuf = NULL; 1555 rl_t *rl; 1556 1557 if (byteswap) 1558 byteswap_uint64_array(lr, sizeof (*lr)); 1559 1560 offset = lr->lr_offset; 1561 length = lr->lr_length; 1562 1563 /* If it's a dmu_sync() block, write the whole block */ 1564 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { 1565 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr); 1566 if (length < blocksize) { 1567 offset -= offset % blocksize; 1568 length = blocksize; 1569 } 1570 } 1571 1572 if (bt->bt_magic == BSWAP_64(BT_MAGIC)) 1573 byteswap_uint64_array(bt, sizeof (*bt)); 1574 1575 if (bt->bt_magic != BT_MAGIC) 1576 bt = NULL; 1577 1578 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1579 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER); 1580 1581 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1582 1583 dmu_object_info_from_db(db, &doi); 1584 1585 bbt = ztest_bt_bonus(db); 1586 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1587 gen = bbt->bt_gen; 1588 crtxg = bbt->bt_crtxg; 1589 lrtxg = lr->lr_common.lrc_txg; 1590 1591 tx = dmu_tx_create(os); 1592 1593 dmu_tx_hold_write(tx, lr->lr_foid, offset, length); 1594 1595 if (ztest_random(8) == 0 && length == doi.doi_data_block_size && 1596 P2PHASE(offset, length) == 0) 1597 abuf = dmu_request_arcbuf(db, length); 1598 1599 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1600 if (txg == 0) { 1601 if (abuf != NULL) 1602 dmu_return_arcbuf(abuf); 1603 dmu_buf_rele(db, FTAG); 1604 ztest_range_unlock(rl); 1605 ztest_object_unlock(zd, lr->lr_foid); 1606 return (ENOSPC); 1607 } 1608 1609 if (bt != NULL) { 1610 /* 1611 * Usually, verify the old data before writing new data -- 1612 * but not always, because we also want to verify correct 1613 * behavior when the data was not recently read into cache. 1614 */ 1615 ASSERT(offset % doi.doi_data_block_size == 0); 1616 if (ztest_random(4) != 0) { 1617 int prefetch = ztest_random(2) ? 1618 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH; 1619 ztest_block_tag_t rbt; 1620 1621 VERIFY(dmu_read(os, lr->lr_foid, offset, 1622 sizeof (rbt), &rbt, prefetch) == 0); 1623 if (rbt.bt_magic == BT_MAGIC) { 1624 ztest_bt_verify(&rbt, os, lr->lr_foid, 1625 offset, gen, txg, crtxg); 1626 } 1627 } 1628 1629 /* 1630 * Writes can appear to be newer than the bonus buffer because 1631 * the ztest_get_data() callback does a dmu_read() of the 1632 * open-context data, which may be different than the data 1633 * as it was when the write was generated. 1634 */ 1635 if (zd->zd_zilog->zl_replay) { 1636 ztest_bt_verify(bt, os, lr->lr_foid, offset, 1637 MAX(gen, bt->bt_gen), MAX(txg, lrtxg), 1638 bt->bt_crtxg); 1639 } 1640 1641 /* 1642 * Set the bt's gen/txg to the bonus buffer's gen/txg 1643 * so that all of the usual ASSERTs will work. 1644 */ 1645 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg); 1646 } 1647 1648 if (abuf == NULL) { 1649 dmu_write(os, lr->lr_foid, offset, length, data, tx); 1650 } else { 1651 bcopy(data, abuf->b_data, length); 1652 dmu_assign_arcbuf(db, offset, abuf, tx); 1653 } 1654 1655 (void) ztest_log_write(zd, tx, lr); 1656 1657 dmu_buf_rele(db, FTAG); 1658 1659 dmu_tx_commit(tx); 1660 1661 ztest_range_unlock(rl); 1662 ztest_object_unlock(zd, lr->lr_foid); 1663 1664 return (0); 1665} 1666 1667static int 1668ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap) 1669{ 1670 objset_t *os = zd->zd_os; 1671 dmu_tx_t *tx; 1672 uint64_t txg; 1673 rl_t *rl; 1674 1675 if (byteswap) 1676 byteswap_uint64_array(lr, sizeof (*lr)); 1677 1678 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1679 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length, 1680 RL_WRITER); 1681 1682 tx = dmu_tx_create(os); 1683 1684 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length); 1685 1686 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1687 if (txg == 0) { 1688 ztest_range_unlock(rl); 1689 ztest_object_unlock(zd, lr->lr_foid); 1690 return (ENOSPC); 1691 } 1692 1693 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset, 1694 lr->lr_length, tx) == 0); 1695 1696 (void) ztest_log_truncate(zd, tx, lr); 1697 1698 dmu_tx_commit(tx); 1699 1700 ztest_range_unlock(rl); 1701 ztest_object_unlock(zd, lr->lr_foid); 1702 1703 return (0); 1704} 1705 1706static int 1707ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap) 1708{ 1709 objset_t *os = zd->zd_os; 1710 dmu_tx_t *tx; 1711 dmu_buf_t *db; 1712 ztest_block_tag_t *bbt; 1713 uint64_t txg, lrtxg, crtxg; 1714 1715 if (byteswap) 1716 byteswap_uint64_array(lr, sizeof (*lr)); 1717 1718 ztest_object_lock(zd, lr->lr_foid, RL_WRITER); 1719 1720 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1721 1722 tx = dmu_tx_create(os); 1723 dmu_tx_hold_bonus(tx, lr->lr_foid); 1724 1725 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1726 if (txg == 0) { 1727 dmu_buf_rele(db, FTAG); 1728 ztest_object_unlock(zd, lr->lr_foid); 1729 return (ENOSPC); 1730 } 1731 1732 bbt = ztest_bt_bonus(db); 1733 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1734 crtxg = bbt->bt_crtxg; 1735 lrtxg = lr->lr_common.lrc_txg; 1736 1737 if (zd->zd_zilog->zl_replay) { 1738 ASSERT(lr->lr_size != 0); 1739 ASSERT(lr->lr_mode != 0); 1740 ASSERT(lrtxg != 0); 1741 } else { 1742 /* 1743 * Randomly change the size and increment the generation. 1744 */ 1745 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) * 1746 sizeof (*bbt); 1747 lr->lr_mode = bbt->bt_gen + 1; 1748 ASSERT(lrtxg == 0); 1749 } 1750 1751 /* 1752 * Verify that the current bonus buffer is not newer than our txg. 1753 */ 1754 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, 1755 MAX(txg, lrtxg), crtxg); 1756 1757 dmu_buf_will_dirty(db, tx); 1758 1759 ASSERT3U(lr->lr_size, >=, sizeof (*bbt)); 1760 ASSERT3U(lr->lr_size, <=, db->db_size); 1761 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx)); 1762 bbt = ztest_bt_bonus(db); 1763 1764 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg); 1765 1766 dmu_buf_rele(db, FTAG); 1767 1768 (void) ztest_log_setattr(zd, tx, lr); 1769 1770 dmu_tx_commit(tx); 1771 1772 ztest_object_unlock(zd, lr->lr_foid); 1773 1774 return (0); 1775} 1776 1777zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { 1778 NULL, /* 0 no such transaction type */ 1779 ztest_replay_create, /* TX_CREATE */ 1780 NULL, /* TX_MKDIR */ 1781 NULL, /* TX_MKXATTR */ 1782 NULL, /* TX_SYMLINK */ 1783 ztest_replay_remove, /* TX_REMOVE */ 1784 NULL, /* TX_RMDIR */ 1785 NULL, /* TX_LINK */ 1786 NULL, /* TX_RENAME */ 1787 ztest_replay_write, /* TX_WRITE */ 1788 ztest_replay_truncate, /* TX_TRUNCATE */ 1789 ztest_replay_setattr, /* TX_SETATTR */ 1790 NULL, /* TX_ACL */ 1791 NULL, /* TX_CREATE_ACL */ 1792 NULL, /* TX_CREATE_ATTR */ 1793 NULL, /* TX_CREATE_ACL_ATTR */ 1794 NULL, /* TX_MKDIR_ACL */ 1795 NULL, /* TX_MKDIR_ATTR */ 1796 NULL, /* TX_MKDIR_ACL_ATTR */ 1797 NULL, /* TX_WRITE2 */ 1798}; 1799 1800/* 1801 * ZIL get_data callbacks 1802 */ 1803 1804static void 1805ztest_get_done(zgd_t *zgd, int error) 1806{ 1807 ztest_ds_t *zd = zgd->zgd_private; 1808 uint64_t object = zgd->zgd_rl->rl_object; 1809 1810 if (zgd->zgd_db) 1811 dmu_buf_rele(zgd->zgd_db, zgd); 1812 1813 ztest_range_unlock(zgd->zgd_rl); 1814 ztest_object_unlock(zd, object); 1815 1816 if (error == 0 && zgd->zgd_bp) 1817 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); 1818 1819 umem_free(zgd, sizeof (*zgd)); 1820} 1821 1822static int 1823ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) 1824{ 1825 ztest_ds_t *zd = arg; 1826 objset_t *os = zd->zd_os; 1827 uint64_t object = lr->lr_foid; 1828 uint64_t offset = lr->lr_offset; 1829 uint64_t size = lr->lr_length; 1830 blkptr_t *bp = &lr->lr_blkptr; 1831 uint64_t txg = lr->lr_common.lrc_txg; 1832 uint64_t crtxg; 1833 dmu_object_info_t doi; 1834 dmu_buf_t *db; 1835 zgd_t *zgd; 1836 int error; 1837 1838 ztest_object_lock(zd, object, RL_READER); 1839 error = dmu_bonus_hold(os, object, FTAG, &db); 1840 if (error) { 1841 ztest_object_unlock(zd, object); 1842 return (error); 1843 } 1844 1845 crtxg = ztest_bt_bonus(db)->bt_crtxg; 1846 1847 if (crtxg == 0 || crtxg > txg) { 1848 dmu_buf_rele(db, FTAG); 1849 ztest_object_unlock(zd, object); 1850 return (ENOENT); 1851 } 1852 1853 dmu_object_info_from_db(db, &doi); 1854 dmu_buf_rele(db, FTAG); 1855 db = NULL; 1856 1857 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL); 1858 zgd->zgd_zilog = zd->zd_zilog; 1859 zgd->zgd_private = zd; 1860 1861 if (buf != NULL) { /* immediate write */ 1862 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, 1863 RL_READER); 1864 1865 error = dmu_read(os, object, offset, size, buf, 1866 DMU_READ_NO_PREFETCH); 1867 ASSERT(error == 0); 1868 } else { 1869 size = doi.doi_data_block_size; 1870 if (ISP2(size)) { 1871 offset = P2ALIGN(offset, size); 1872 } else { 1873 ASSERT(offset < size); 1874 offset = 0; 1875 } 1876 1877 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, 1878 RL_READER); 1879 1880 error = dmu_buf_hold(os, object, offset, zgd, &db, 1881 DMU_READ_NO_PREFETCH); 1882 1883 if (error == 0) { 1884 blkptr_t *obp = dmu_buf_get_blkptr(db); 1885 if (obp) { 1886 ASSERT(BP_IS_HOLE(bp)); 1887 *bp = *obp; 1888 } 1889 1890 zgd->zgd_db = db; 1891 zgd->zgd_bp = bp; 1892 1893 ASSERT(db->db_offset == offset); 1894 ASSERT(db->db_size == size); 1895 1896 error = dmu_sync(zio, lr->lr_common.lrc_txg, 1897 ztest_get_done, zgd); 1898 1899 if (error == 0) 1900 return (0); 1901 } 1902 } 1903 1904 ztest_get_done(zgd, error); 1905 1906 return (error); 1907} 1908 1909static void * 1910ztest_lr_alloc(size_t lrsize, char *name) 1911{ 1912 char *lr; 1913 size_t namesize = name ? strlen(name) + 1 : 0; 1914 1915 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL); 1916 1917 if (name) 1918 bcopy(name, lr + lrsize, namesize); 1919 1920 return (lr); 1921} 1922 1923void 1924ztest_lr_free(void *lr, size_t lrsize, char *name) 1925{ 1926 size_t namesize = name ? strlen(name) + 1 : 0; 1927 1928 umem_free(lr, lrsize + namesize); 1929} 1930 1931/* 1932 * Lookup a bunch of objects. Returns the number of objects not found. 1933 */ 1934static int 1935ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count) 1936{ 1937 int missing = 0; 1938 int error; 1939 1940 ASSERT(_mutex_held(&zd->zd_dirobj_lock)); 1941 1942 for (int i = 0; i < count; i++, od++) { 1943 od->od_object = 0; 1944 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name, 1945 sizeof (uint64_t), 1, &od->od_object); 1946 if (error) { 1947 ASSERT(error == ENOENT); 1948 ASSERT(od->od_object == 0); 1949 missing++; 1950 } else { 1951 dmu_buf_t *db; 1952 ztest_block_tag_t *bbt; 1953 dmu_object_info_t doi; 1954 1955 ASSERT(od->od_object != 0); 1956 ASSERT(missing == 0); /* there should be no gaps */ 1957 1958 ztest_object_lock(zd, od->od_object, RL_READER); 1959 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os, 1960 od->od_object, FTAG, &db)); 1961 dmu_object_info_from_db(db, &doi); 1962 bbt = ztest_bt_bonus(db); 1963 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1964 od->od_type = doi.doi_type; 1965 od->od_blocksize = doi.doi_data_block_size; 1966 od->od_gen = bbt->bt_gen; 1967 dmu_buf_rele(db, FTAG); 1968 ztest_object_unlock(zd, od->od_object); 1969 } 1970 } 1971 1972 return (missing); 1973} 1974 1975static int 1976ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count) 1977{ 1978 int missing = 0; 1979 1980 ASSERT(_mutex_held(&zd->zd_dirobj_lock)); 1981 1982 for (int i = 0; i < count; i++, od++) { 1983 if (missing) { 1984 od->od_object = 0; 1985 missing++; 1986 continue; 1987 } 1988 1989 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 1990 1991 lr->lr_doid = od->od_dir; 1992 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */ 1993 lr->lrz_type = od->od_crtype; 1994 lr->lrz_blocksize = od->od_crblocksize; 1995 lr->lrz_ibshift = ztest_random_ibshift(); 1996 lr->lrz_bonustype = DMU_OT_UINT64_OTHER; 1997 lr->lrz_bonuslen = dmu_bonus_max(); 1998 lr->lr_gen = od->od_crgen; 1999 lr->lr_crtime[0] = time(NULL); 2000 2001 if (ztest_replay_create(zd, lr, B_FALSE) != 0) { 2002 ASSERT(missing == 0); 2003 od->od_object = 0; 2004 missing++; 2005 } else { 2006 od->od_object = lr->lr_foid; 2007 od->od_type = od->od_crtype; 2008 od->od_blocksize = od->od_crblocksize; 2009 od->od_gen = od->od_crgen; 2010 ASSERT(od->od_object != 0); 2011 } 2012 2013 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2014 } 2015 2016 return (missing); 2017} 2018 2019static int 2020ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count) 2021{ 2022 int missing = 0; 2023 int error; 2024 2025 ASSERT(_mutex_held(&zd->zd_dirobj_lock)); 2026 2027 od += count - 1; 2028 2029 for (int i = count - 1; i >= 0; i--, od--) { 2030 if (missing) { 2031 missing++; 2032 continue; 2033 } 2034 2035 /* 2036 * No object was found. 2037 */ 2038 if (od->od_object == 0) 2039 continue; 2040 2041 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 2042 2043 lr->lr_doid = od->od_dir; 2044 2045 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) { 2046 ASSERT3U(error, ==, ENOSPC); 2047 missing++; 2048 } else { 2049 od->od_object = 0; 2050 } 2051 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2052 } 2053 2054 return (missing); 2055} 2056 2057static int 2058ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size, 2059 void *data) 2060{ 2061 lr_write_t *lr; 2062 int error; 2063 2064 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL); 2065 2066 lr->lr_foid = object; 2067 lr->lr_offset = offset; 2068 lr->lr_length = size; 2069 lr->lr_blkoff = 0; 2070 BP_ZERO(&lr->lr_blkptr); 2071 2072 bcopy(data, lr + 1, size); 2073 2074 error = ztest_replay_write(zd, lr, B_FALSE); 2075 2076 ztest_lr_free(lr, sizeof (*lr) + size, NULL); 2077 2078 return (error); 2079} 2080 2081static int 2082ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2083{ 2084 lr_truncate_t *lr; 2085 int error; 2086 2087 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2088 2089 lr->lr_foid = object; 2090 lr->lr_offset = offset; 2091 lr->lr_length = size; 2092 2093 error = ztest_replay_truncate(zd, lr, B_FALSE); 2094 2095 ztest_lr_free(lr, sizeof (*lr), NULL); 2096 2097 return (error); 2098} 2099 2100static int 2101ztest_setattr(ztest_ds_t *zd, uint64_t object) 2102{ 2103 lr_setattr_t *lr; 2104 int error; 2105 2106 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2107 2108 lr->lr_foid = object; 2109 lr->lr_size = 0; 2110 lr->lr_mode = 0; 2111 2112 error = ztest_replay_setattr(zd, lr, B_FALSE); 2113 2114 ztest_lr_free(lr, sizeof (*lr), NULL); 2115 2116 return (error); 2117} 2118 2119static void 2120ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2121{ 2122 objset_t *os = zd->zd_os; 2123 dmu_tx_t *tx; 2124 uint64_t txg; 2125 rl_t *rl; 2126 2127 txg_wait_synced(dmu_objset_pool(os), 0); 2128 2129 ztest_object_lock(zd, object, RL_READER); 2130 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER); 2131 2132 tx = dmu_tx_create(os); 2133 2134 dmu_tx_hold_write(tx, object, offset, size); 2135 2136 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 2137 2138 if (txg != 0) { 2139 dmu_prealloc(os, object, offset, size, tx); 2140 dmu_tx_commit(tx); 2141 txg_wait_synced(dmu_objset_pool(os), txg); 2142 } else { 2143 (void) dmu_free_long_range(os, object, offset, size); 2144 } 2145 2146 ztest_range_unlock(rl); 2147 ztest_object_unlock(zd, object); 2148} 2149 2150static void 2151ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) 2152{ 2153 int err; 2154 ztest_block_tag_t wbt; 2155 dmu_object_info_t doi; 2156 enum ztest_io_type io_type; 2157 uint64_t blocksize; 2158 void *data; 2159 2160 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0); 2161 blocksize = doi.doi_data_block_size; 2162 data = umem_alloc(blocksize, UMEM_NOFAIL); 2163 2164 /* 2165 * Pick an i/o type at random, biased toward writing block tags. 2166 */ 2167 io_type = ztest_random(ZTEST_IO_TYPES); 2168 if (ztest_random(2) == 0) 2169 io_type = ZTEST_IO_WRITE_TAG; 2170 2171 (void) rw_rdlock(&zd->zd_zilog_lock); 2172 2173 switch (io_type) { 2174 2175 case ZTEST_IO_WRITE_TAG: 2176 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); 2177 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); 2178 break; 2179 2180 case ZTEST_IO_WRITE_PATTERN: 2181 (void) memset(data, 'a' + (object + offset) % 5, blocksize); 2182 if (ztest_random(2) == 0) { 2183 /* 2184 * Induce fletcher2 collisions to ensure that 2185 * zio_ddt_collision() detects and resolves them 2186 * when using fletcher2-verify for deduplication. 2187 */ 2188 ((uint64_t *)data)[0] ^= 1ULL << 63; 2189 ((uint64_t *)data)[4] ^= 1ULL << 63; 2190 } 2191 (void) ztest_write(zd, object, offset, blocksize, data); 2192 break; 2193 2194 case ZTEST_IO_WRITE_ZEROES: 2195 bzero(data, blocksize); 2196 (void) ztest_write(zd, object, offset, blocksize, data); 2197 break; 2198 2199 case ZTEST_IO_TRUNCATE: 2200 (void) ztest_truncate(zd, object, offset, blocksize); 2201 break; 2202 2203 case ZTEST_IO_SETATTR: 2204 (void) ztest_setattr(zd, object); 2205 break; 2206 2207 case ZTEST_IO_REWRITE: 2208 (void) rw_rdlock(&ztest_name_lock); 2209 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2210 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa), 2211 B_FALSE); 2212 VERIFY(err == 0 || err == ENOSPC); 2213 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2214 ZFS_PROP_COMPRESSION, 2215 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), 2216 B_FALSE); 2217 VERIFY(err == 0 || err == ENOSPC); 2218 (void) rw_unlock(&ztest_name_lock); 2219 2220 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data, 2221 DMU_READ_NO_PREFETCH)); 2222 2223 (void) ztest_write(zd, object, offset, blocksize, data); 2224 break; 2225 } 2226 2227 (void) rw_unlock(&zd->zd_zilog_lock); 2228 2229 umem_free(data, blocksize); 2230} 2231 2232/* 2233 * Initialize an object description template. 2234 */ 2235static void 2236ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, 2237 dmu_object_type_t type, uint64_t blocksize, uint64_t gen) 2238{ 2239 od->od_dir = ZTEST_DIROBJ; 2240 od->od_object = 0; 2241 2242 od->od_crtype = type; 2243 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); 2244 od->od_crgen = gen; 2245 2246 od->od_type = DMU_OT_NONE; 2247 od->od_blocksize = 0; 2248 od->od_gen = 0; 2249 2250 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", 2251 tag, (int64_t)id, index); 2252} 2253 2254/* 2255 * Lookup or create the objects for a test using the od template. 2256 * If the objects do not all exist, or if 'remove' is specified, 2257 * remove any existing objects and create new ones. Otherwise, 2258 * use the existing objects. 2259 */ 2260static int 2261ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) 2262{ 2263 int count = size / sizeof (*od); 2264 int rv = 0; 2265 2266 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); 2267 if ((ztest_lookup(zd, od, count) != 0 || remove) && 2268 (ztest_remove(zd, od, count) != 0 || 2269 ztest_create(zd, od, count) != 0)) 2270 rv = -1; 2271 zd->zd_od = od; 2272 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2273 2274 return (rv); 2275} 2276 2277/* ARGSUSED */ 2278void 2279ztest_zil_commit(ztest_ds_t *zd, uint64_t id) 2280{ 2281 zilog_t *zilog = zd->zd_zilog; 2282 2283 (void) rw_rdlock(&zd->zd_zilog_lock); 2284 2285 zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); 2286 2287 /* 2288 * Remember the committed values in zd, which is in parent/child 2289 * shared memory. If we die, the next iteration of ztest_run() 2290 * will verify that the log really does contain this record. 2291 */ 2292 mutex_enter(&zilog->zl_lock); 2293 ASSERT(zd->zd_shared != NULL); 2294 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq); 2295 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq; 2296 mutex_exit(&zilog->zl_lock); 2297 2298 (void) rw_unlock(&zd->zd_zilog_lock); 2299} 2300 2301/* 2302 * This function is designed to simulate the operations that occur during a 2303 * mount/unmount operation. We hold the dataset across these operations in an 2304 * attempt to expose any implicit assumptions about ZIL management. 2305 */ 2306/* ARGSUSED */ 2307void 2308ztest_zil_remount(ztest_ds_t *zd, uint64_t id) 2309{ 2310 objset_t *os = zd->zd_os; 2311 2312 /* 2313 * We grab the zd_dirobj_lock to ensure that no other thread is 2314 * updating the zil (i.e. adding in-memory log records) and the 2315 * zd_zilog_lock to block any I/O. 2316 */ 2317 VERIFY0(mutex_lock(&zd->zd_dirobj_lock)); 2318 (void) rw_wrlock(&zd->zd_zilog_lock); 2319 2320 /* zfsvfs_teardown() */ 2321 zil_close(zd->zd_zilog); 2322 2323 /* zfsvfs_setup() */ 2324 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); 2325 zil_replay(os, zd, ztest_replay_vector); 2326 2327 (void) rw_unlock(&zd->zd_zilog_lock); 2328 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2329} 2330 2331/* 2332 * Verify that we can't destroy an active pool, create an existing pool, 2333 * or create a pool with a bad vdev spec. 2334 */ 2335/* ARGSUSED */ 2336void 2337ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) 2338{ 2339 ztest_shared_opts_t *zo = &ztest_opts; 2340 spa_t *spa; 2341 nvlist_t *nvroot; 2342 2343 /* 2344 * Attempt to create using a bad file. 2345 */ 2346 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2347 VERIFY3U(ENOENT, ==, 2348 spa_create("ztest_bad_file", nvroot, NULL, NULL)); 2349 nvlist_free(nvroot); 2350 2351 /* 2352 * Attempt to create using a bad mirror. 2353 */ 2354 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1); 2355 VERIFY3U(ENOENT, ==, 2356 spa_create("ztest_bad_mirror", nvroot, NULL, NULL)); 2357 nvlist_free(nvroot); 2358 2359 /* 2360 * Attempt to create an existing pool. It shouldn't matter 2361 * what's in the nvroot; we should fail with EEXIST. 2362 */ 2363 (void) rw_rdlock(&ztest_name_lock); 2364 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2365 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL)); 2366 nvlist_free(nvroot); 2367 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG)); 2368 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool)); 2369 spa_close(spa, FTAG); 2370 2371 (void) rw_unlock(&ztest_name_lock); 2372} 2373 2374/* ARGSUSED */ 2375void 2376ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id) 2377{ 2378 spa_t *spa; 2379 uint64_t initial_version = SPA_VERSION_INITIAL; 2380 uint64_t version, newversion; 2381 nvlist_t *nvroot, *props; 2382 char *name; 2383 2384 VERIFY0(mutex_lock(&ztest_vdev_lock)); 2385 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool); 2386 2387 /* 2388 * Clean up from previous runs. 2389 */ 2390 (void) spa_destroy(name); 2391 2392 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0, 2393 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1); 2394 2395 /* 2396 * If we're configuring a RAIDZ device then make sure that the 2397 * the initial version is capable of supporting that feature. 2398 */ 2399 switch (ztest_opts.zo_raidz_parity) { 2400 case 0: 2401 case 1: 2402 initial_version = SPA_VERSION_INITIAL; 2403 break; 2404 case 2: 2405 initial_version = SPA_VERSION_RAIDZ2; 2406 break; 2407 case 3: 2408 initial_version = SPA_VERSION_RAIDZ3; 2409 break; 2410 } 2411 2412 /* 2413 * Create a pool with a spa version that can be upgraded. Pick 2414 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES. 2415 */ 2416 do { 2417 version = ztest_random_spa_version(initial_version); 2418 } while (version > SPA_VERSION_BEFORE_FEATURES); 2419 2420 props = fnvlist_alloc(); 2421 fnvlist_add_uint64(props, 2422 zpool_prop_to_name(ZPOOL_PROP_VERSION), version); 2423 VERIFY0(spa_create(name, nvroot, props, NULL)); 2424 fnvlist_free(nvroot); 2425 fnvlist_free(props); 2426 2427 VERIFY0(spa_open(name, &spa, FTAG)); 2428 VERIFY3U(spa_version(spa), ==, version); 2429 newversion = ztest_random_spa_version(version + 1); 2430 2431 if (ztest_opts.zo_verbose >= 4) { 2432 (void) printf("upgrading spa version from %llu to %llu\n", 2433 (u_longlong_t)version, (u_longlong_t)newversion); 2434 } 2435 2436 spa_upgrade(spa, newversion); 2437 VERIFY3U(spa_version(spa), >, version); 2438 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config, 2439 zpool_prop_to_name(ZPOOL_PROP_VERSION))); 2440 spa_close(spa, FTAG); 2441 2442 strfree(name); 2443 VERIFY0(mutex_unlock(&ztest_vdev_lock)); 2444} 2445 2446static vdev_t * 2447vdev_lookup_by_path(vdev_t *vd, const char *path) 2448{ 2449 vdev_t *mvd; 2450 2451 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) 2452 return (vd); 2453 2454 for (int c = 0; c < vd->vdev_children; c++) 2455 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != 2456 NULL) 2457 return (mvd); 2458 2459 return (NULL); 2460} 2461 2462/* 2463 * Find the first available hole which can be used as a top-level. 2464 */ 2465int 2466find_vdev_hole(spa_t *spa) 2467{ 2468 vdev_t *rvd = spa->spa_root_vdev; 2469 int c; 2470 2471 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); 2472 2473 for (c = 0; c < rvd->vdev_children; c++) { 2474 vdev_t *cvd = rvd->vdev_child[c]; 2475 2476 if (cvd->vdev_ishole) 2477 break; 2478 } 2479 return (c); 2480} 2481 2482/* 2483 * Verify that vdev_add() works as expected. 2484 */ 2485/* ARGSUSED */ 2486void 2487ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) 2488{ 2489 ztest_shared_t *zs = ztest_shared; 2490 spa_t *spa = ztest_spa; 2491 uint64_t leaves; 2492 uint64_t guid; 2493 nvlist_t *nvroot; 2494 int error; 2495 2496 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2497 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz; 2498 2499 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2500 2501 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; 2502 2503 /* 2504 * If we have slogs then remove them 1/4 of the time. 2505 */ 2506 if (spa_has_slogs(spa) && ztest_random(4) == 0) { 2507 /* 2508 * Grab the guid from the head of the log class rotor. 2509 */ 2510 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; 2511 2512 spa_config_exit(spa, SCL_VDEV, FTAG); 2513 2514 /* 2515 * We have to grab the zs_name_lock as writer to 2516 * prevent a race between removing a slog (dmu_objset_find) 2517 * and destroying a dataset. Removing the slog will 2518 * grab a reference on the dataset which may cause 2519 * dmu_objset_destroy() to fail with EBUSY thus 2520 * leaving the dataset in an inconsistent state. 2521 */ 2522 VERIFY(rw_wrlock(&ztest_name_lock) == 0); 2523 error = spa_vdev_remove(spa, guid, B_FALSE); 2524 VERIFY(rw_unlock(&ztest_name_lock) == 0); 2525 2526 if (error && error != EEXIST) 2527 fatal(0, "spa_vdev_remove() = %d", error); 2528 } else { 2529 spa_config_exit(spa, SCL_VDEV, FTAG); 2530 2531 /* 2532 * Make 1/4 of the devices be log devices. 2533 */ 2534 nvroot = make_vdev_root(NULL, NULL, NULL, 2535 ztest_opts.zo_vdev_size, 0, 2536 ztest_random(4) == 0, ztest_opts.zo_raidz, 2537 zs->zs_mirrors, 1); 2538 2539 error = spa_vdev_add(spa, nvroot); 2540 nvlist_free(nvroot); 2541 2542 if (error == ENOSPC) 2543 ztest_record_enospc("spa_vdev_add"); 2544 else if (error != 0) 2545 fatal(0, "spa_vdev_add() = %d", error); 2546 } 2547 2548 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2549} 2550 2551/* 2552 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 2553 */ 2554/* ARGSUSED */ 2555void 2556ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2557{ 2558 ztest_shared_t *zs = ztest_shared; 2559 spa_t *spa = ztest_spa; 2560 vdev_t *rvd = spa->spa_root_vdev; 2561 spa_aux_vdev_t *sav; 2562 char *aux; 2563 uint64_t guid = 0; 2564 int error; 2565 2566 if (ztest_random(2) == 0) { 2567 sav = &spa->spa_spares; 2568 aux = ZPOOL_CONFIG_SPARES; 2569 } else { 2570 sav = &spa->spa_l2cache; 2571 aux = ZPOOL_CONFIG_L2CACHE; 2572 } 2573 2574 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2575 2576 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2577 2578 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2579 /* 2580 * Pick a random device to remove. 2581 */ 2582 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2583 } else { 2584 /* 2585 * Find an unused device we can add. 2586 */ 2587 zs->zs_vdev_aux = 0; 2588 for (;;) { 2589 char path[MAXPATHLEN]; 2590 int c; 2591 (void) snprintf(path, sizeof (path), ztest_aux_template, 2592 ztest_opts.zo_dir, ztest_opts.zo_pool, aux, 2593 zs->zs_vdev_aux); 2594 for (c = 0; c < sav->sav_count; c++) 2595 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2596 path) == 0) 2597 break; 2598 if (c == sav->sav_count && 2599 vdev_lookup_by_path(rvd, path) == NULL) 2600 break; 2601 zs->zs_vdev_aux++; 2602 } 2603 } 2604 2605 spa_config_exit(spa, SCL_VDEV, FTAG); 2606 2607 if (guid == 0) { 2608 /* 2609 * Add a new device. 2610 */ 2611 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL, 2612 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2613 error = spa_vdev_add(spa, nvroot); 2614 if (error != 0) 2615 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2616 nvlist_free(nvroot); 2617 } else { 2618 /* 2619 * Remove an existing device. Sometimes, dirty its 2620 * vdev state first to make sure we handle removal 2621 * of devices that have pending state changes. 2622 */ 2623 if (ztest_random(2) == 0) 2624 (void) vdev_online(spa, guid, 0, NULL); 2625 2626 error = spa_vdev_remove(spa, guid, B_FALSE); 2627 if (error != 0 && error != EBUSY) 2628 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2629 } 2630 2631 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2632} 2633 2634/* 2635 * split a pool if it has mirror tlvdevs 2636 */ 2637/* ARGSUSED */ 2638void 2639ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2640{ 2641 ztest_shared_t *zs = ztest_shared; 2642 spa_t *spa = ztest_spa; 2643 vdev_t *rvd = spa->spa_root_vdev; 2644 nvlist_t *tree, **child, *config, *split, **schild; 2645 uint_t c, children, schildren = 0, lastlogid = 0; 2646 int error = 0; 2647 2648 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2649 2650 /* ensure we have a useable config; mirrors of raidz aren't supported */ 2651 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) { 2652 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2653 return; 2654 } 2655 2656 /* clean up the old pool, if any */ 2657 (void) spa_destroy("splitp"); 2658 2659 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2660 2661 /* generate a config from the existing config */ 2662 mutex_enter(&spa->spa_props_lock); 2663 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2664 &tree) == 0); 2665 mutex_exit(&spa->spa_props_lock); 2666 2667 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2668 &children) == 0); 2669 2670 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2671 for (c = 0; c < children; c++) { 2672 vdev_t *tvd = rvd->vdev_child[c]; 2673 nvlist_t **mchild; 2674 uint_t mchildren; 2675 2676 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2677 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2678 0) == 0); 2679 VERIFY(nvlist_add_string(schild[schildren], 2680 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2681 VERIFY(nvlist_add_uint64(schild[schildren], 2682 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2683 if (lastlogid == 0) 2684 lastlogid = schildren; 2685 ++schildren; 2686 continue; 2687 } 2688 lastlogid = 0; 2689 VERIFY(nvlist_lookup_nvlist_array(child[c], 2690 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2691 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2692 } 2693 2694 /* OK, create a config that can be used to split */ 2695 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2696 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2697 VDEV_TYPE_ROOT) == 0); 2698 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2699 lastlogid != 0 ? lastlogid : schildren) == 0); 2700 2701 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2702 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2703 2704 for (c = 0; c < schildren; c++) 2705 nvlist_free(schild[c]); 2706 free(schild); 2707 nvlist_free(split); 2708 2709 spa_config_exit(spa, SCL_VDEV, FTAG); 2710 2711 (void) rw_wrlock(&ztest_name_lock); 2712 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2713 (void) rw_unlock(&ztest_name_lock); 2714 2715 nvlist_free(config); 2716 2717 if (error == 0) { 2718 (void) printf("successful split - results:\n"); 2719 mutex_enter(&spa_namespace_lock); 2720 show_pool_stats(spa); 2721 show_pool_stats(spa_lookup("splitp")); 2722 mutex_exit(&spa_namespace_lock); 2723 ++zs->zs_splits; 2724 --zs->zs_mirrors; 2725 } 2726 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2727 2728} 2729 2730/* 2731 * Verify that we can attach and detach devices. 2732 */ 2733/* ARGSUSED */ 2734void 2735ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2736{ 2737 ztest_shared_t *zs = ztest_shared; 2738 spa_t *spa = ztest_spa; 2739 spa_aux_vdev_t *sav = &spa->spa_spares; 2740 vdev_t *rvd = spa->spa_root_vdev; 2741 vdev_t *oldvd, *newvd, *pvd; 2742 nvlist_t *root; 2743 uint64_t leaves; 2744 uint64_t leaf, top; 2745 uint64_t ashift = ztest_get_ashift(); 2746 uint64_t oldguid, pguid; 2747 uint64_t oldsize, newsize; 2748 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2749 int replacing; 2750 int oldvd_has_siblings = B_FALSE; 2751 int newvd_is_spare = B_FALSE; 2752 int oldvd_is_log; 2753 int error, expected_error; 2754 2755 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2756 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 2757 2758 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2759 2760 /* 2761 * Decide whether to do an attach or a replace. 2762 */ 2763 replacing = ztest_random(2); 2764 2765 /* 2766 * Pick a random top-level vdev. 2767 */ 2768 top = ztest_random_vdev_top(spa, B_TRUE); 2769 2770 /* 2771 * Pick a random leaf within it. 2772 */ 2773 leaf = ztest_random(leaves); 2774 2775 /* 2776 * Locate this vdev. 2777 */ 2778 oldvd = rvd->vdev_child[top]; 2779 if (zs->zs_mirrors >= 1) { 2780 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2781 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2782 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz]; 2783 } 2784 if (ztest_opts.zo_raidz > 1) { 2785 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2786 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz); 2787 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz]; 2788 } 2789 2790 /* 2791 * If we're already doing an attach or replace, oldvd may be a 2792 * mirror vdev -- in which case, pick a random child. 2793 */ 2794 while (oldvd->vdev_children != 0) { 2795 oldvd_has_siblings = B_TRUE; 2796 ASSERT(oldvd->vdev_children >= 2); 2797 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2798 } 2799 2800 oldguid = oldvd->vdev_guid; 2801 oldsize = vdev_get_min_asize(oldvd); 2802 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2803 (void) strcpy(oldpath, oldvd->vdev_path); 2804 pvd = oldvd->vdev_parent; 2805 pguid = pvd->vdev_guid; 2806 2807 /* 2808 * If oldvd has siblings, then half of the time, detach it. 2809 */ 2810 if (oldvd_has_siblings && ztest_random(2) == 0) { 2811 spa_config_exit(spa, SCL_VDEV, FTAG); 2812 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2813 if (error != 0 && error != ENODEV && error != EBUSY && 2814 error != ENOTSUP) 2815 fatal(0, "detach (%s) returned %d", oldpath, error); 2816 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2817 return; 2818 } 2819 2820 /* 2821 * For the new vdev, choose with equal probability between the two 2822 * standard paths (ending in either 'a' or 'b') or a random hot spare. 2823 */ 2824 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2825 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2826 newvd_is_spare = B_TRUE; 2827 (void) strcpy(newpath, newvd->vdev_path); 2828 } else { 2829 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2830 ztest_opts.zo_dir, ztest_opts.zo_pool, 2831 top * leaves + leaf); 2832 if (ztest_random(2) == 0) 2833 newpath[strlen(newpath) - 1] = 'b'; 2834 newvd = vdev_lookup_by_path(rvd, newpath); 2835 } 2836 2837 if (newvd) { 2838 newsize = vdev_get_min_asize(newvd); 2839 } else { 2840 /* 2841 * Make newsize a little bigger or smaller than oldsize. 2842 * If it's smaller, the attach should fail. 2843 * If it's larger, and we're doing a replace, 2844 * we should get dynamic LUN growth when we're done. 2845 */ 2846 newsize = 10 * oldsize / (9 + ztest_random(3)); 2847 } 2848 2849 /* 2850 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2851 * unless it's a replace; in that case any non-replacing parent is OK. 2852 * 2853 * If newvd is already part of the pool, it should fail with EBUSY. 2854 * 2855 * If newvd is too small, it should fail with EOVERFLOW. 2856 */ 2857 if (pvd->vdev_ops != &vdev_mirror_ops && 2858 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2859 pvd->vdev_ops == &vdev_replacing_ops || 2860 pvd->vdev_ops == &vdev_spare_ops)) 2861 expected_error = ENOTSUP; 2862 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2863 expected_error = ENOTSUP; 2864 else if (newvd == oldvd) 2865 expected_error = replacing ? 0 : EBUSY; 2866 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 2867 expected_error = EBUSY; 2868 else if (newsize < oldsize) 2869 expected_error = EOVERFLOW; 2870 else if (ashift > oldvd->vdev_top->vdev_ashift) 2871 expected_error = EDOM; 2872 else 2873 expected_error = 0; 2874 2875 spa_config_exit(spa, SCL_VDEV, FTAG); 2876 2877 /* 2878 * Build the nvlist describing newpath. 2879 */ 2880 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0, 2881 ashift, 0, 0, 0, 1); 2882 2883 error = spa_vdev_attach(spa, oldguid, root, replacing); 2884 2885 nvlist_free(root); 2886 2887 /* 2888 * If our parent was the replacing vdev, but the replace completed, 2889 * then instead of failing with ENOTSUP we may either succeed, 2890 * fail with ENODEV, or fail with EOVERFLOW. 2891 */ 2892 if (expected_error == ENOTSUP && 2893 (error == 0 || error == ENODEV || error == EOVERFLOW)) 2894 expected_error = error; 2895 2896 /* 2897 * If someone grew the LUN, the replacement may be too small. 2898 */ 2899 if (error == EOVERFLOW || error == EBUSY) 2900 expected_error = error; 2901 2902 /* XXX workaround 6690467 */ 2903 if (error != expected_error && expected_error != EBUSY) { 2904 fatal(0, "attach (%s %llu, %s %llu, %d) " 2905 "returned %d, expected %d", 2906 oldpath, oldsize, newpath, 2907 newsize, replacing, error, expected_error); 2908 } 2909 2910 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2911} 2912 2913/* 2914 * Callback function which expands the physical size of the vdev. 2915 */ 2916vdev_t * 2917grow_vdev(vdev_t *vd, void *arg) 2918{ 2919 spa_t *spa = vd->vdev_spa; 2920 size_t *newsize = arg; 2921 size_t fsize; 2922 int fd; 2923 2924 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2925 ASSERT(vd->vdev_ops->vdev_op_leaf); 2926 2927 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 2928 return (vd); 2929 2930 fsize = lseek(fd, 0, SEEK_END); 2931 (void) ftruncate(fd, *newsize); 2932 2933 if (ztest_opts.zo_verbose >= 6) { 2934 (void) printf("%s grew from %lu to %lu bytes\n", 2935 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 2936 } 2937 (void) close(fd); 2938 return (NULL); 2939} 2940 2941/* 2942 * Callback function which expands a given vdev by calling vdev_online(). 2943 */ 2944/* ARGSUSED */ 2945vdev_t * 2946online_vdev(vdev_t *vd, void *arg) 2947{ 2948 spa_t *spa = vd->vdev_spa; 2949 vdev_t *tvd = vd->vdev_top; 2950 uint64_t guid = vd->vdev_guid; 2951 uint64_t generation = spa->spa_config_generation + 1; 2952 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 2953 int error; 2954 2955 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2956 ASSERT(vd->vdev_ops->vdev_op_leaf); 2957 2958 /* Calling vdev_online will initialize the new metaslabs */ 2959 spa_config_exit(spa, SCL_STATE, spa); 2960 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 2961 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2962 2963 /* 2964 * If vdev_online returned an error or the underlying vdev_open 2965 * failed then we abort the expand. The only way to know that 2966 * vdev_open fails is by checking the returned newstate. 2967 */ 2968 if (error || newstate != VDEV_STATE_HEALTHY) { 2969 if (ztest_opts.zo_verbose >= 5) { 2970 (void) printf("Unable to expand vdev, state %llu, " 2971 "error %d\n", (u_longlong_t)newstate, error); 2972 } 2973 return (vd); 2974 } 2975 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 2976 2977 /* 2978 * Since we dropped the lock we need to ensure that we're 2979 * still talking to the original vdev. It's possible this 2980 * vdev may have been detached/replaced while we were 2981 * trying to online it. 2982 */ 2983 if (generation != spa->spa_config_generation) { 2984 if (ztest_opts.zo_verbose >= 5) { 2985 (void) printf("vdev configuration has changed, " 2986 "guid %llu, state %llu, expected gen %llu, " 2987 "got gen %llu\n", 2988 (u_longlong_t)guid, 2989 (u_longlong_t)tvd->vdev_state, 2990 (u_longlong_t)generation, 2991 (u_longlong_t)spa->spa_config_generation); 2992 } 2993 return (vd); 2994 } 2995 return (NULL); 2996} 2997 2998/* 2999 * Traverse the vdev tree calling the supplied function. 3000 * We continue to walk the tree until we either have walked all 3001 * children or we receive a non-NULL return from the callback. 3002 * If a NULL callback is passed, then we just return back the first 3003 * leaf vdev we encounter. 3004 */ 3005vdev_t * 3006vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 3007{ 3008 if (vd->vdev_ops->vdev_op_leaf) { 3009 if (func == NULL) 3010 return (vd); 3011 else 3012 return (func(vd, arg)); 3013 } 3014 3015 for (uint_t c = 0; c < vd->vdev_children; c++) { 3016 vdev_t *cvd = vd->vdev_child[c]; 3017 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 3018 return (cvd); 3019 } 3020 return (NULL); 3021} 3022 3023/* 3024 * Verify that dynamic LUN growth works as expected. 3025 */ 3026/* ARGSUSED */ 3027void 3028ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 3029{ 3030 spa_t *spa = ztest_spa; 3031 vdev_t *vd, *tvd; 3032 metaslab_class_t *mc; 3033 metaslab_group_t *mg; 3034 size_t psize, newsize; 3035 uint64_t top; 3036 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 3037 3038 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 3039 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3040 3041 top = ztest_random_vdev_top(spa, B_TRUE); 3042 3043 tvd = spa->spa_root_vdev->vdev_child[top]; 3044 mg = tvd->vdev_mg; 3045 mc = mg->mg_class; 3046 old_ms_count = tvd->vdev_ms_count; 3047 old_class_space = metaslab_class_get_space(mc); 3048 3049 /* 3050 * Determine the size of the first leaf vdev associated with 3051 * our top-level device. 3052 */ 3053 vd = vdev_walk_tree(tvd, NULL, NULL); 3054 ASSERT3P(vd, !=, NULL); 3055 ASSERT(vd->vdev_ops->vdev_op_leaf); 3056 3057 psize = vd->vdev_psize; 3058 3059 /* 3060 * We only try to expand the vdev if it's healthy, less than 4x its 3061 * original size, and it has a valid psize. 3062 */ 3063 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 3064 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 3065 spa_config_exit(spa, SCL_STATE, spa); 3066 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3067 return; 3068 } 3069 ASSERT(psize > 0); 3070 newsize = psize + psize / 8; 3071 ASSERT3U(newsize, >, psize); 3072 3073 if (ztest_opts.zo_verbose >= 6) { 3074 (void) printf("Expanding LUN %s from %lu to %lu\n", 3075 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 3076 } 3077 3078 /* 3079 * Growing the vdev is a two step process: 3080 * 1). expand the physical size (i.e. relabel) 3081 * 2). online the vdev to create the new metaslabs 3082 */ 3083 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 3084 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 3085 tvd->vdev_state != VDEV_STATE_HEALTHY) { 3086 if (ztest_opts.zo_verbose >= 5) { 3087 (void) printf("Could not expand LUN because " 3088 "the vdev configuration changed.\n"); 3089 } 3090 spa_config_exit(spa, SCL_STATE, spa); 3091 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3092 return; 3093 } 3094 3095 spa_config_exit(spa, SCL_STATE, spa); 3096 3097 /* 3098 * Expanding the LUN will update the config asynchronously, 3099 * thus we must wait for the async thread to complete any 3100 * pending tasks before proceeding. 3101 */ 3102 for (;;) { 3103 boolean_t done; 3104 mutex_enter(&spa->spa_async_lock); 3105 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 3106 mutex_exit(&spa->spa_async_lock); 3107 if (done) 3108 break; 3109 txg_wait_synced(spa_get_dsl(spa), 0); 3110 (void) poll(NULL, 0, 100); 3111 } 3112 3113 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3114 3115 tvd = spa->spa_root_vdev->vdev_child[top]; 3116 new_ms_count = tvd->vdev_ms_count; 3117 new_class_space = metaslab_class_get_space(mc); 3118 3119 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 3120 if (ztest_opts.zo_verbose >= 5) { 3121 (void) printf("Could not verify LUN expansion due to " 3122 "intervening vdev offline or remove.\n"); 3123 } 3124 spa_config_exit(spa, SCL_STATE, spa); 3125 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3126 return; 3127 } 3128 3129 /* 3130 * Make sure we were able to grow the vdev. 3131 */ 3132 if (new_ms_count <= old_ms_count) 3133 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n", 3134 old_ms_count, new_ms_count); 3135 3136 /* 3137 * Make sure we were able to grow the pool. 3138 */ 3139 if (new_class_space <= old_class_space) 3140 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n", 3141 old_class_space, new_class_space); 3142 3143 if (ztest_opts.zo_verbose >= 5) { 3144 char oldnumbuf[6], newnumbuf[6]; 3145 3146 nicenum(old_class_space, oldnumbuf); 3147 nicenum(new_class_space, newnumbuf); 3148 (void) printf("%s grew from %s to %s\n", 3149 spa->spa_name, oldnumbuf, newnumbuf); 3150 } 3151 3152 spa_config_exit(spa, SCL_STATE, spa); 3153 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3154} 3155 3156/* 3157 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 3158 */ 3159/* ARGSUSED */ 3160static void 3161ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 3162{ 3163 /* 3164 * Create the objects common to all ztest datasets. 3165 */ 3166 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 3167 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 3168} 3169 3170static int 3171ztest_dataset_create(char *dsname) 3172{ 3173 uint64_t zilset = ztest_random(100); 3174 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 3175 ztest_objset_create_cb, NULL); 3176 3177 if (err || zilset < 80) 3178 return (err); 3179 3180 if (ztest_opts.zo_verbose >= 6) 3181 (void) printf("Setting dataset %s to sync always\n", dsname); 3182 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 3183 ZFS_SYNC_ALWAYS, B_FALSE)); 3184} 3185 3186/* ARGSUSED */ 3187static int 3188ztest_objset_destroy_cb(const char *name, void *arg) 3189{ 3190 objset_t *os; 3191 dmu_object_info_t doi; 3192 int error; 3193 3194 /* 3195 * Verify that the dataset contains a directory object. 3196 */ 3197 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os)); 3198 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 3199 if (error != ENOENT) { 3200 /* We could have crashed in the middle of destroying it */ 3201 ASSERT0(error); 3202 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 3203 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 3204 } 3205 dmu_objset_disown(os, FTAG); 3206 3207 /* 3208 * Destroy the dataset. 3209 */ 3210 if (strchr(name, '@') != NULL) { 3211 VERIFY0(dsl_destroy_snapshot(name, B_FALSE)); 3212 } else { 3213 VERIFY0(dsl_destroy_head(name)); 3214 } 3215 return (0); 3216} 3217 3218static boolean_t 3219ztest_snapshot_create(char *osname, uint64_t id) 3220{ 3221 char snapname[MAXNAMELEN]; 3222 int error; 3223 3224 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id); 3225 3226 error = dmu_objset_snapshot_one(osname, snapname); 3227 if (error == ENOSPC) { 3228 ztest_record_enospc(FTAG); 3229 return (B_FALSE); 3230 } 3231 if (error != 0 && error != EEXIST) { 3232 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname, 3233 snapname, error); 3234 } 3235 return (B_TRUE); 3236} 3237 3238static boolean_t 3239ztest_snapshot_destroy(char *osname, uint64_t id) 3240{ 3241 char snapname[MAXNAMELEN]; 3242 int error; 3243 3244 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 3245 (u_longlong_t)id); 3246 3247 error = dsl_destroy_snapshot(snapname, B_FALSE); 3248 if (error != 0 && error != ENOENT) 3249 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 3250 return (B_TRUE); 3251} 3252 3253/* ARGSUSED */ 3254void 3255ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 3256{ 3257 ztest_ds_t zdtmp; 3258 int iters; 3259 int error; 3260 objset_t *os, *os2; 3261 char name[MAXNAMELEN]; 3262 zilog_t *zilog; 3263 3264 (void) rw_rdlock(&ztest_name_lock); 3265 3266 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu", 3267 ztest_opts.zo_pool, (u_longlong_t)id); 3268 3269 /* 3270 * If this dataset exists from a previous run, process its replay log 3271 * half of the time. If we don't replay it, then dmu_objset_destroy() 3272 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 3273 */ 3274 if (ztest_random(2) == 0 && 3275 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 3276 ztest_zd_init(&zdtmp, NULL, os); 3277 zil_replay(os, &zdtmp, ztest_replay_vector); 3278 ztest_zd_fini(&zdtmp); 3279 dmu_objset_disown(os, FTAG); 3280 } 3281 3282 /* 3283 * There may be an old instance of the dataset we're about to 3284 * create lying around from a previous run. If so, destroy it 3285 * and all of its snapshots. 3286 */ 3287 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 3288 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 3289 3290 /* 3291 * Verify that the destroyed dataset is no longer in the namespace. 3292 */ 3293 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, 3294 FTAG, &os)); 3295 3296 /* 3297 * Verify that we can create a new dataset. 3298 */ 3299 error = ztest_dataset_create(name); 3300 if (error) { 3301 if (error == ENOSPC) { 3302 ztest_record_enospc(FTAG); 3303 (void) rw_unlock(&ztest_name_lock); 3304 return; 3305 } 3306 fatal(0, "dmu_objset_create(%s) = %d", name, error); 3307 } 3308 3309 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 3310 3311 ztest_zd_init(&zdtmp, NULL, os); 3312 3313 /* 3314 * Open the intent log for it. 3315 */ 3316 zilog = zil_open(os, ztest_get_data); 3317 3318 /* 3319 * Put some objects in there, do a little I/O to them, 3320 * and randomly take a couple of snapshots along the way. 3321 */ 3322 iters = ztest_random(5); 3323 for (int i = 0; i < iters; i++) { 3324 ztest_dmu_object_alloc_free(&zdtmp, id); 3325 if (ztest_random(iters) == 0) 3326 (void) ztest_snapshot_create(name, i); 3327 } 3328 3329 /* 3330 * Verify that we cannot create an existing dataset. 3331 */ 3332 VERIFY3U(EEXIST, ==, 3333 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 3334 3335 /* 3336 * Verify that we can hold an objset that is also owned. 3337 */ 3338 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 3339 dmu_objset_rele(os2, FTAG); 3340 3341 /* 3342 * Verify that we cannot own an objset that is already owned. 3343 */ 3344 VERIFY3U(EBUSY, ==, 3345 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3346 3347 zil_close(zilog); 3348 dmu_objset_disown(os, FTAG); 3349 ztest_zd_fini(&zdtmp); 3350 3351 (void) rw_unlock(&ztest_name_lock); 3352} 3353 3354/* 3355 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3356 */ 3357void 3358ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3359{ 3360 (void) rw_rdlock(&ztest_name_lock); 3361 (void) ztest_snapshot_destroy(zd->zd_name, id); 3362 (void) ztest_snapshot_create(zd->zd_name, id); 3363 (void) rw_unlock(&ztest_name_lock); 3364} 3365 3366/* 3367 * Cleanup non-standard snapshots and clones. 3368 */ 3369void 3370ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3371{ 3372 char snap1name[MAXNAMELEN]; 3373 char clone1name[MAXNAMELEN]; 3374 char snap2name[MAXNAMELEN]; 3375 char clone2name[MAXNAMELEN]; 3376 char snap3name[MAXNAMELEN]; 3377 int error; 3378 3379 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3380 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3381 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3382 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3383 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3384 3385 error = dsl_destroy_head(clone2name); 3386 if (error && error != ENOENT) 3387 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error); 3388 error = dsl_destroy_snapshot(snap3name, B_FALSE); 3389 if (error && error != ENOENT) 3390 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error); 3391 error = dsl_destroy_snapshot(snap2name, B_FALSE); 3392 if (error && error != ENOENT) 3393 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error); 3394 error = dsl_destroy_head(clone1name); 3395 if (error && error != ENOENT) 3396 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error); 3397 error = dsl_destroy_snapshot(snap1name, B_FALSE); 3398 if (error && error != ENOENT) 3399 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error); 3400} 3401 3402/* 3403 * Verify dsl_dataset_promote handles EBUSY 3404 */ 3405void 3406ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3407{ 3408 objset_t *os; 3409 char snap1name[MAXNAMELEN]; 3410 char clone1name[MAXNAMELEN]; 3411 char snap2name[MAXNAMELEN]; 3412 char clone2name[MAXNAMELEN]; 3413 char snap3name[MAXNAMELEN]; 3414 char *osname = zd->zd_name; 3415 int error; 3416 3417 (void) rw_rdlock(&ztest_name_lock); 3418 3419 ztest_dsl_dataset_cleanup(osname, id); 3420 3421 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3422 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3423 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3424 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3425 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3426 3427 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1); 3428 if (error && error != EEXIST) { 3429 if (error == ENOSPC) { 3430 ztest_record_enospc(FTAG); 3431 goto out; 3432 } 3433 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); 3434 } 3435 3436 error = dmu_objset_clone(clone1name, snap1name); 3437 if (error) { 3438 if (error == ENOSPC) { 3439 ztest_record_enospc(FTAG); 3440 goto out; 3441 } 3442 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error); 3443 } 3444 3445 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1); 3446 if (error && error != EEXIST) { 3447 if (error == ENOSPC) { 3448 ztest_record_enospc(FTAG); 3449 goto out; 3450 } 3451 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error); 3452 } 3453 3454 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1); 3455 if (error && error != EEXIST) { 3456 if (error == ENOSPC) { 3457 ztest_record_enospc(FTAG); 3458 goto out; 3459 } 3460 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 3461 } 3462 3463 error = dmu_objset_clone(clone2name, snap3name); 3464 if (error) { 3465 if (error == ENOSPC) { 3466 ztest_record_enospc(FTAG); 3467 goto out; 3468 } 3469 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error); 3470 } 3471 3472 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os); 3473 if (error) 3474 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error); 3475 error = dsl_dataset_promote(clone2name, NULL); 3476 if (error == ENOSPC) { 3477 dmu_objset_disown(os, FTAG); 3478 ztest_record_enospc(FTAG); 3479 goto out; 3480 } 3481 if (error != EBUSY) 3482 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3483 error); 3484 dmu_objset_disown(os, FTAG); 3485 3486out: 3487 ztest_dsl_dataset_cleanup(osname, id); 3488 3489 (void) rw_unlock(&ztest_name_lock); 3490} 3491 3492/* 3493 * Verify that dmu_object_{alloc,free} work as expected. 3494 */ 3495void 3496ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3497{ 3498 ztest_od_t od[4]; 3499 int batchsize = sizeof (od) / sizeof (od[0]); 3500 3501 for (int b = 0; b < batchsize; b++) 3502 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3503 3504 /* 3505 * Destroy the previous batch of objects, create a new batch, 3506 * and do some I/O on the new objects. 3507 */ 3508 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3509 return; 3510 3511 while (ztest_random(4 * batchsize) != 0) 3512 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3513 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3514} 3515 3516/* 3517 * Verify that dmu_{read,write} work as expected. 3518 */ 3519void 3520ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3521{ 3522 objset_t *os = zd->zd_os; 3523 ztest_od_t od[2]; 3524 dmu_tx_t *tx; 3525 int i, freeit, error; 3526 uint64_t n, s, txg; 3527 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3528 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3529 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3530 uint64_t regions = 997; 3531 uint64_t stride = 123456789ULL; 3532 uint64_t width = 40; 3533 int free_percent = 5; 3534 3535 /* 3536 * This test uses two objects, packobj and bigobj, that are always 3537 * updated together (i.e. in the same tx) so that their contents are 3538 * in sync and can be compared. Their contents relate to each other 3539 * in a simple way: packobj is a dense array of 'bufwad' structures, 3540 * while bigobj is a sparse array of the same bufwads. Specifically, 3541 * for any index n, there are three bufwads that should be identical: 3542 * 3543 * packobj, at offset n * sizeof (bufwad_t) 3544 * bigobj, at the head of the nth chunk 3545 * bigobj, at the tail of the nth chunk 3546 * 3547 * The chunk size is arbitrary. It doesn't have to be a power of two, 3548 * and it doesn't have any relation to the object blocksize. 3549 * The only requirement is that it can hold at least two bufwads. 3550 * 3551 * Normally, we write the bufwad to each of these locations. 3552 * However, free_percent of the time we instead write zeroes to 3553 * packobj and perform a dmu_free_range() on bigobj. By comparing 3554 * bigobj to packobj, we can verify that the DMU is correctly 3555 * tracking which parts of an object are allocated and free, 3556 * and that the contents of the allocated blocks are correct. 3557 */ 3558 3559 /* 3560 * Read the directory info. If it's the first time, set things up. 3561 */ 3562 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3563 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3564 3565 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3566 return; 3567 3568 bigobj = od[0].od_object; 3569 packobj = od[1].od_object; 3570 chunksize = od[0].od_gen; 3571 ASSERT(chunksize == od[1].od_gen); 3572 3573 /* 3574 * Prefetch a random chunk of the big object. 3575 * Our aim here is to get some async reads in flight 3576 * for blocks that we may free below; the DMU should 3577 * handle this race correctly. 3578 */ 3579 n = ztest_random(regions) * stride + ztest_random(width); 3580 s = 1 + ztest_random(2 * width - 1); 3581 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize); 3582 3583 /* 3584 * Pick a random index and compute the offsets into packobj and bigobj. 3585 */ 3586 n = ztest_random(regions) * stride + ztest_random(width); 3587 s = 1 + ztest_random(width - 1); 3588 3589 packoff = n * sizeof (bufwad_t); 3590 packsize = s * sizeof (bufwad_t); 3591 3592 bigoff = n * chunksize; 3593 bigsize = s * chunksize; 3594 3595 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3596 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3597 3598 /* 3599 * free_percent of the time, free a range of bigobj rather than 3600 * overwriting it. 3601 */ 3602 freeit = (ztest_random(100) < free_percent); 3603 3604 /* 3605 * Read the current contents of our objects. 3606 */ 3607 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3608 DMU_READ_PREFETCH); 3609 ASSERT0(error); 3610 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3611 DMU_READ_PREFETCH); 3612 ASSERT0(error); 3613 3614 /* 3615 * Get a tx for the mods to both packobj and bigobj. 3616 */ 3617 tx = dmu_tx_create(os); 3618 3619 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3620 3621 if (freeit) 3622 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3623 else 3624 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3625 3626 /* This accounts for setting the checksum/compression. */ 3627 dmu_tx_hold_bonus(tx, bigobj); 3628 3629 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3630 if (txg == 0) { 3631 umem_free(packbuf, packsize); 3632 umem_free(bigbuf, bigsize); 3633 return; 3634 } 3635 3636 enum zio_checksum cksum; 3637 do { 3638 cksum = (enum zio_checksum) 3639 ztest_random_dsl_prop(ZFS_PROP_CHECKSUM); 3640 } while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS); 3641 dmu_object_set_checksum(os, bigobj, cksum, tx); 3642 3643 enum zio_compress comp; 3644 do { 3645 comp = (enum zio_compress) 3646 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION); 3647 } while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS); 3648 dmu_object_set_compress(os, bigobj, comp, tx); 3649 3650 /* 3651 * For each index from n to n + s, verify that the existing bufwad 3652 * in packobj matches the bufwads at the head and tail of the 3653 * corresponding chunk in bigobj. Then update all three bufwads 3654 * with the new values we want to write out. 3655 */ 3656 for (i = 0; i < s; i++) { 3657 /* LINTED */ 3658 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3659 /* LINTED */ 3660 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3661 /* LINTED */ 3662 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3663 3664 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3665 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3666 3667 if (pack->bw_txg > txg) 3668 fatal(0, "future leak: got %llx, open txg is %llx", 3669 pack->bw_txg, txg); 3670 3671 if (pack->bw_data != 0 && pack->bw_index != n + i) 3672 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3673 pack->bw_index, n, i); 3674 3675 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3676 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3677 3678 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3679 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3680 3681 if (freeit) { 3682 bzero(pack, sizeof (bufwad_t)); 3683 } else { 3684 pack->bw_index = n + i; 3685 pack->bw_txg = txg; 3686 pack->bw_data = 1 + ztest_random(-2ULL); 3687 } 3688 *bigH = *pack; 3689 *bigT = *pack; 3690 } 3691 3692 /* 3693 * We've verified all the old bufwads, and made new ones. 3694 * Now write them out. 3695 */ 3696 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3697 3698 if (freeit) { 3699 if (ztest_opts.zo_verbose >= 7) { 3700 (void) printf("freeing offset %llx size %llx" 3701 " txg %llx\n", 3702 (u_longlong_t)bigoff, 3703 (u_longlong_t)bigsize, 3704 (u_longlong_t)txg); 3705 } 3706 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3707 } else { 3708 if (ztest_opts.zo_verbose >= 7) { 3709 (void) printf("writing offset %llx size %llx" 3710 " txg %llx\n", 3711 (u_longlong_t)bigoff, 3712 (u_longlong_t)bigsize, 3713 (u_longlong_t)txg); 3714 } 3715 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3716 } 3717 3718 dmu_tx_commit(tx); 3719 3720 /* 3721 * Sanity check the stuff we just wrote. 3722 */ 3723 { 3724 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3725 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3726 3727 VERIFY(0 == dmu_read(os, packobj, packoff, 3728 packsize, packcheck, DMU_READ_PREFETCH)); 3729 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3730 bigsize, bigcheck, DMU_READ_PREFETCH)); 3731 3732 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3733 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3734 3735 umem_free(packcheck, packsize); 3736 umem_free(bigcheck, bigsize); 3737 } 3738 3739 umem_free(packbuf, packsize); 3740 umem_free(bigbuf, bigsize); 3741} 3742 3743void 3744compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3745 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3746{ 3747 uint64_t i; 3748 bufwad_t *pack; 3749 bufwad_t *bigH; 3750 bufwad_t *bigT; 3751 3752 /* 3753 * For each index from n to n + s, verify that the existing bufwad 3754 * in packobj matches the bufwads at the head and tail of the 3755 * corresponding chunk in bigobj. Then update all three bufwads 3756 * with the new values we want to write out. 3757 */ 3758 for (i = 0; i < s; i++) { 3759 /* LINTED */ 3760 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3761 /* LINTED */ 3762 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3763 /* LINTED */ 3764 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3765 3766 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3767 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3768 3769 if (pack->bw_txg > txg) 3770 fatal(0, "future leak: got %llx, open txg is %llx", 3771 pack->bw_txg, txg); 3772 3773 if (pack->bw_data != 0 && pack->bw_index != n + i) 3774 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3775 pack->bw_index, n, i); 3776 3777 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3778 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3779 3780 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3781 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3782 3783 pack->bw_index = n + i; 3784 pack->bw_txg = txg; 3785 pack->bw_data = 1 + ztest_random(-2ULL); 3786 3787 *bigH = *pack; 3788 *bigT = *pack; 3789 } 3790} 3791 3792void 3793ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3794{ 3795 objset_t *os = zd->zd_os; 3796 ztest_od_t od[2]; 3797 dmu_tx_t *tx; 3798 uint64_t i; 3799 int error; 3800 uint64_t n, s, txg; 3801 bufwad_t *packbuf, *bigbuf; 3802 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3803 uint64_t blocksize = ztest_random_blocksize(); 3804 uint64_t chunksize = blocksize; 3805 uint64_t regions = 997; 3806 uint64_t stride = 123456789ULL; 3807 uint64_t width = 9; 3808 dmu_buf_t *bonus_db; 3809 arc_buf_t **bigbuf_arcbufs; 3810 dmu_object_info_t doi; 3811 3812 /* 3813 * This test uses two objects, packobj and bigobj, that are always 3814 * updated together (i.e. in the same tx) so that their contents are 3815 * in sync and can be compared. Their contents relate to each other 3816 * in a simple way: packobj is a dense array of 'bufwad' structures, 3817 * while bigobj is a sparse array of the same bufwads. Specifically, 3818 * for any index n, there are three bufwads that should be identical: 3819 * 3820 * packobj, at offset n * sizeof (bufwad_t) 3821 * bigobj, at the head of the nth chunk 3822 * bigobj, at the tail of the nth chunk 3823 * 3824 * The chunk size is set equal to bigobj block size so that 3825 * dmu_assign_arcbuf() can be tested for object updates. 3826 */ 3827 3828 /* 3829 * Read the directory info. If it's the first time, set things up. 3830 */ 3831 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3832 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3833 3834 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3835 return; 3836 3837 bigobj = od[0].od_object; 3838 packobj = od[1].od_object; 3839 blocksize = od[0].od_blocksize; 3840 chunksize = blocksize; 3841 ASSERT(chunksize == od[1].od_gen); 3842 3843 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 3844 VERIFY(ISP2(doi.doi_data_block_size)); 3845 VERIFY(chunksize == doi.doi_data_block_size); 3846 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 3847 3848 /* 3849 * Pick a random index and compute the offsets into packobj and bigobj. 3850 */ 3851 n = ztest_random(regions) * stride + ztest_random(width); 3852 s = 1 + ztest_random(width - 1); 3853 3854 packoff = n * sizeof (bufwad_t); 3855 packsize = s * sizeof (bufwad_t); 3856 3857 bigoff = n * chunksize; 3858 bigsize = s * chunksize; 3859 3860 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 3861 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 3862 3863 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 3864 3865 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 3866 3867 /* 3868 * Iteration 0 test zcopy for DB_UNCACHED dbufs. 3869 * Iteration 1 test zcopy to already referenced dbufs. 3870 * Iteration 2 test zcopy to dirty dbuf in the same txg. 3871 * Iteration 3 test zcopy to dbuf dirty in previous txg. 3872 * Iteration 4 test zcopy when dbuf is no longer dirty. 3873 * Iteration 5 test zcopy when it can't be done. 3874 * Iteration 6 one more zcopy write. 3875 */ 3876 for (i = 0; i < 7; i++) { 3877 uint64_t j; 3878 uint64_t off; 3879 3880 /* 3881 * In iteration 5 (i == 5) use arcbufs 3882 * that don't match bigobj blksz to test 3883 * dmu_assign_arcbuf() when it can't directly 3884 * assign an arcbuf to a dbuf. 3885 */ 3886 for (j = 0; j < s; j++) { 3887 if (i != 5) { 3888 bigbuf_arcbufs[j] = 3889 dmu_request_arcbuf(bonus_db, chunksize); 3890 } else { 3891 bigbuf_arcbufs[2 * j] = 3892 dmu_request_arcbuf(bonus_db, chunksize / 2); 3893 bigbuf_arcbufs[2 * j + 1] = 3894 dmu_request_arcbuf(bonus_db, chunksize / 2); 3895 } 3896 } 3897 3898 /* 3899 * Get a tx for the mods to both packobj and bigobj. 3900 */ 3901 tx = dmu_tx_create(os); 3902 3903 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3904 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3905 3906 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3907 if (txg == 0) { 3908 umem_free(packbuf, packsize); 3909 umem_free(bigbuf, bigsize); 3910 for (j = 0; j < s; j++) { 3911 if (i != 5) { 3912 dmu_return_arcbuf(bigbuf_arcbufs[j]); 3913 } else { 3914 dmu_return_arcbuf( 3915 bigbuf_arcbufs[2 * j]); 3916 dmu_return_arcbuf( 3917 bigbuf_arcbufs[2 * j + 1]); 3918 } 3919 } 3920 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3921 dmu_buf_rele(bonus_db, FTAG); 3922 return; 3923 } 3924 3925 /* 3926 * 50% of the time don't read objects in the 1st iteration to 3927 * test dmu_assign_arcbuf() for the case when there're no 3928 * existing dbufs for the specified offsets. 3929 */ 3930 if (i != 0 || ztest_random(2) != 0) { 3931 error = dmu_read(os, packobj, packoff, 3932 packsize, packbuf, DMU_READ_PREFETCH); 3933 ASSERT0(error); 3934 error = dmu_read(os, bigobj, bigoff, bigsize, 3935 bigbuf, DMU_READ_PREFETCH); 3936 ASSERT0(error); 3937 } 3938 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 3939 n, chunksize, txg); 3940 3941 /* 3942 * We've verified all the old bufwads, and made new ones. 3943 * Now write them out. 3944 */ 3945 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3946 if (ztest_opts.zo_verbose >= 7) { 3947 (void) printf("writing offset %llx size %llx" 3948 " txg %llx\n", 3949 (u_longlong_t)bigoff, 3950 (u_longlong_t)bigsize, 3951 (u_longlong_t)txg); 3952 } 3953 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 3954 dmu_buf_t *dbt; 3955 if (i != 5) { 3956 bcopy((caddr_t)bigbuf + (off - bigoff), 3957 bigbuf_arcbufs[j]->b_data, chunksize); 3958 } else { 3959 bcopy((caddr_t)bigbuf + (off - bigoff), 3960 bigbuf_arcbufs[2 * j]->b_data, 3961 chunksize / 2); 3962 bcopy((caddr_t)bigbuf + (off - bigoff) + 3963 chunksize / 2, 3964 bigbuf_arcbufs[2 * j + 1]->b_data, 3965 chunksize / 2); 3966 } 3967 3968 if (i == 1) { 3969 VERIFY(dmu_buf_hold(os, bigobj, off, 3970 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 3971 } 3972 if (i != 5) { 3973 dmu_assign_arcbuf(bonus_db, off, 3974 bigbuf_arcbufs[j], tx); 3975 } else { 3976 dmu_assign_arcbuf(bonus_db, off, 3977 bigbuf_arcbufs[2 * j], tx); 3978 dmu_assign_arcbuf(bonus_db, 3979 off + chunksize / 2, 3980 bigbuf_arcbufs[2 * j + 1], tx); 3981 } 3982 if (i == 1) { 3983 dmu_buf_rele(dbt, FTAG); 3984 } 3985 } 3986 dmu_tx_commit(tx); 3987 3988 /* 3989 * Sanity check the stuff we just wrote. 3990 */ 3991 { 3992 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3993 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3994 3995 VERIFY(0 == dmu_read(os, packobj, packoff, 3996 packsize, packcheck, DMU_READ_PREFETCH)); 3997 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3998 bigsize, bigcheck, DMU_READ_PREFETCH)); 3999 4000 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 4001 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 4002 4003 umem_free(packcheck, packsize); 4004 umem_free(bigcheck, bigsize); 4005 } 4006 if (i == 2) { 4007 txg_wait_open(dmu_objset_pool(os), 0); 4008 } else if (i == 3) { 4009 txg_wait_synced(dmu_objset_pool(os), 0); 4010 } 4011 } 4012 4013 dmu_buf_rele(bonus_db, FTAG); 4014 umem_free(packbuf, packsize); 4015 umem_free(bigbuf, bigsize); 4016 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 4017} 4018 4019/* ARGSUSED */ 4020void 4021ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 4022{ 4023 ztest_od_t od[1]; 4024 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 4025 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4026 4027 /* 4028 * Have multiple threads write to large offsets in an object 4029 * to verify that parallel writes to an object -- even to the 4030 * same blocks within the object -- doesn't cause any trouble. 4031 */ 4032 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4033 4034 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4035 return; 4036 4037 while (ztest_random(10) != 0) 4038 ztest_io(zd, od[0].od_object, offset); 4039} 4040 4041void 4042ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 4043{ 4044 ztest_od_t od[1]; 4045 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 4046 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4047 uint64_t count = ztest_random(20) + 1; 4048 uint64_t blocksize = ztest_random_blocksize(); 4049 void *data; 4050 4051 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4052 4053 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4054 return; 4055 4056 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) 4057 return; 4058 4059 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); 4060 4061 data = umem_zalloc(blocksize, UMEM_NOFAIL); 4062 4063 while (ztest_random(count) != 0) { 4064 uint64_t randoff = offset + (ztest_random(count) * blocksize); 4065 if (ztest_write(zd, od[0].od_object, randoff, blocksize, 4066 data) != 0) 4067 break; 4068 while (ztest_random(4) != 0) 4069 ztest_io(zd, od[0].od_object, randoff); 4070 } 4071 4072 umem_free(data, blocksize); 4073} 4074 4075/* 4076 * Verify that zap_{create,destroy,add,remove,update} work as expected. 4077 */ 4078#define ZTEST_ZAP_MIN_INTS 1 4079#define ZTEST_ZAP_MAX_INTS 4 4080#define ZTEST_ZAP_MAX_PROPS 1000 4081 4082void 4083ztest_zap(ztest_ds_t *zd, uint64_t id) 4084{ 4085 objset_t *os = zd->zd_os; 4086 ztest_od_t od[1]; 4087 uint64_t object; 4088 uint64_t txg, last_txg; 4089 uint64_t value[ZTEST_ZAP_MAX_INTS]; 4090 uint64_t zl_ints, zl_intsize, prop; 4091 int i, ints; 4092 dmu_tx_t *tx; 4093 char propname[100], txgname[100]; 4094 int error; 4095 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 4096 4097 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4098 4099 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4100 return; 4101 4102 object = od[0].od_object; 4103 4104 /* 4105 * Generate a known hash collision, and verify that 4106 * we can lookup and remove both entries. 4107 */ 4108 tx = dmu_tx_create(os); 4109 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4110 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4111 if (txg == 0) 4112 return; 4113 for (i = 0; i < 2; i++) { 4114 value[i] = i; 4115 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), 4116 1, &value[i], tx)); 4117 } 4118 for (i = 0; i < 2; i++) { 4119 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 4120 sizeof (uint64_t), 1, &value[i], tx)); 4121 VERIFY3U(0, ==, 4122 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 4123 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4124 ASSERT3U(zl_ints, ==, 1); 4125 } 4126 for (i = 0; i < 2; i++) { 4127 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx)); 4128 } 4129 dmu_tx_commit(tx); 4130 4131 /* 4132 * Generate a buch of random entries. 4133 */ 4134 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 4135 4136 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4137 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4138 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4139 bzero(value, sizeof (value)); 4140 last_txg = 0; 4141 4142 /* 4143 * If these zap entries already exist, validate their contents. 4144 */ 4145 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4146 if (error == 0) { 4147 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4148 ASSERT3U(zl_ints, ==, 1); 4149 4150 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 4151 zl_ints, &last_txg) == 0); 4152 4153 VERIFY(zap_length(os, object, propname, &zl_intsize, 4154 &zl_ints) == 0); 4155 4156 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4157 ASSERT3U(zl_ints, ==, ints); 4158 4159 VERIFY(zap_lookup(os, object, propname, zl_intsize, 4160 zl_ints, value) == 0); 4161 4162 for (i = 0; i < ints; i++) { 4163 ASSERT3U(value[i], ==, last_txg + object + i); 4164 } 4165 } else { 4166 ASSERT3U(error, ==, ENOENT); 4167 } 4168 4169 /* 4170 * Atomically update two entries in our zap object. 4171 * The first is named txg_%llu, and contains the txg 4172 * in which the property was last updated. The second 4173 * is named prop_%llu, and the nth element of its value 4174 * should be txg + object + n. 4175 */ 4176 tx = dmu_tx_create(os); 4177 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4178 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4179 if (txg == 0) 4180 return; 4181 4182 if (last_txg > txg) 4183 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 4184 4185 for (i = 0; i < ints; i++) 4186 value[i] = txg + object + i; 4187 4188 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 4189 1, &txg, tx)); 4190 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 4191 ints, value, tx)); 4192 4193 dmu_tx_commit(tx); 4194 4195 /* 4196 * Remove a random pair of entries. 4197 */ 4198 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4199 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4200 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4201 4202 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4203 4204 if (error == ENOENT) 4205 return; 4206 4207 ASSERT0(error); 4208 4209 tx = dmu_tx_create(os); 4210 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4211 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4212 if (txg == 0) 4213 return; 4214 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 4215 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 4216 dmu_tx_commit(tx); 4217} 4218 4219/* 4220 * Testcase to test the upgrading of a microzap to fatzap. 4221 */ 4222void 4223ztest_fzap(ztest_ds_t *zd, uint64_t id) 4224{ 4225 objset_t *os = zd->zd_os; 4226 ztest_od_t od[1]; 4227 uint64_t object, txg; 4228 4229 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4230 4231 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4232 return; 4233 4234 object = od[0].od_object; 4235 4236 /* 4237 * Add entries to this ZAP and make sure it spills over 4238 * and gets upgraded to a fatzap. Also, since we are adding 4239 * 2050 entries we should see ptrtbl growth and leaf-block split. 4240 */ 4241 for (int i = 0; i < 2050; i++) { 4242 char name[MAXNAMELEN]; 4243 uint64_t value = i; 4244 dmu_tx_t *tx; 4245 int error; 4246 4247 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 4248 id, value); 4249 4250 tx = dmu_tx_create(os); 4251 dmu_tx_hold_zap(tx, object, B_TRUE, name); 4252 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4253 if (txg == 0) 4254 return; 4255 error = zap_add(os, object, name, sizeof (uint64_t), 1, 4256 &value, tx); 4257 ASSERT(error == 0 || error == EEXIST); 4258 dmu_tx_commit(tx); 4259 } 4260} 4261 4262/* ARGSUSED */ 4263void 4264ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 4265{ 4266 objset_t *os = zd->zd_os; 4267 ztest_od_t od[1]; 4268 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 4269 dmu_tx_t *tx; 4270 int i, namelen, error; 4271 int micro = ztest_random(2); 4272 char name[20], string_value[20]; 4273 void *data; 4274 4275 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 4276 4277 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4278 return; 4279 4280 object = od[0].od_object; 4281 4282 /* 4283 * Generate a random name of the form 'xxx.....' where each 4284 * x is a random printable character and the dots are dots. 4285 * There are 94 such characters, and the name length goes from 4286 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 4287 */ 4288 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 4289 4290 for (i = 0; i < 3; i++) 4291 name[i] = '!' + ztest_random('~' - '!' + 1); 4292 for (; i < namelen - 1; i++) 4293 name[i] = '.'; 4294 name[i] = '\0'; 4295 4296 if ((namelen & 1) || micro) { 4297 wsize = sizeof (txg); 4298 wc = 1; 4299 data = &txg; 4300 } else { 4301 wsize = 1; 4302 wc = namelen; 4303 data = string_value; 4304 } 4305 4306 count = -1ULL; 4307 VERIFY0(zap_count(os, object, &count)); 4308 ASSERT(count != -1ULL); 4309 4310 /* 4311 * Select an operation: length, lookup, add, update, remove. 4312 */ 4313 i = ztest_random(5); 4314 4315 if (i >= 2) { 4316 tx = dmu_tx_create(os); 4317 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4318 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4319 if (txg == 0) 4320 return; 4321 bcopy(name, string_value, namelen); 4322 } else { 4323 tx = NULL; 4324 txg = 0; 4325 bzero(string_value, namelen); 4326 } 4327 4328 switch (i) { 4329 4330 case 0: 4331 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 4332 if (error == 0) { 4333 ASSERT3U(wsize, ==, zl_wsize); 4334 ASSERT3U(wc, ==, zl_wc); 4335 } else { 4336 ASSERT3U(error, ==, ENOENT); 4337 } 4338 break; 4339 4340 case 1: 4341 error = zap_lookup(os, object, name, wsize, wc, data); 4342 if (error == 0) { 4343 if (data == string_value && 4344 bcmp(name, data, namelen) != 0) 4345 fatal(0, "name '%s' != val '%s' len %d", 4346 name, data, namelen); 4347 } else { 4348 ASSERT3U(error, ==, ENOENT); 4349 } 4350 break; 4351 4352 case 2: 4353 error = zap_add(os, object, name, wsize, wc, data, tx); 4354 ASSERT(error == 0 || error == EEXIST); 4355 break; 4356 4357 case 3: 4358 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4359 break; 4360 4361 case 4: 4362 error = zap_remove(os, object, name, tx); 4363 ASSERT(error == 0 || error == ENOENT); 4364 break; 4365 } 4366 4367 if (tx != NULL) 4368 dmu_tx_commit(tx); 4369} 4370 4371/* 4372 * Commit callback data. 4373 */ 4374typedef struct ztest_cb_data { 4375 list_node_t zcd_node; 4376 uint64_t zcd_txg; 4377 int zcd_expected_err; 4378 boolean_t zcd_added; 4379 boolean_t zcd_called; 4380 spa_t *zcd_spa; 4381} ztest_cb_data_t; 4382 4383/* This is the actual commit callback function */ 4384static void 4385ztest_commit_callback(void *arg, int error) 4386{ 4387 ztest_cb_data_t *data = arg; 4388 uint64_t synced_txg; 4389 4390 VERIFY(data != NULL); 4391 VERIFY3S(data->zcd_expected_err, ==, error); 4392 VERIFY(!data->zcd_called); 4393 4394 synced_txg = spa_last_synced_txg(data->zcd_spa); 4395 if (data->zcd_txg > synced_txg) 4396 fatal(0, "commit callback of txg %" PRIu64 " called prematurely" 4397 ", last synced txg = %" PRIu64 "\n", data->zcd_txg, 4398 synced_txg); 4399 4400 data->zcd_called = B_TRUE; 4401 4402 if (error == ECANCELED) { 4403 ASSERT0(data->zcd_txg); 4404 ASSERT(!data->zcd_added); 4405 4406 /* 4407 * The private callback data should be destroyed here, but 4408 * since we are going to check the zcd_called field after 4409 * dmu_tx_abort(), we will destroy it there. 4410 */ 4411 return; 4412 } 4413 4414 /* Was this callback added to the global callback list? */ 4415 if (!data->zcd_added) 4416 goto out; 4417 4418 ASSERT3U(data->zcd_txg, !=, 0); 4419 4420 /* Remove our callback from the list */ 4421 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4422 list_remove(&zcl.zcl_callbacks, data); 4423 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4424 4425out: 4426 umem_free(data, sizeof (ztest_cb_data_t)); 4427} 4428 4429/* Allocate and initialize callback data structure */ 4430static ztest_cb_data_t * 4431ztest_create_cb_data(objset_t *os, uint64_t txg) 4432{ 4433 ztest_cb_data_t *cb_data; 4434 4435 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 4436 4437 cb_data->zcd_txg = txg; 4438 cb_data->zcd_spa = dmu_objset_spa(os); 4439 4440 return (cb_data); 4441} 4442 4443/* 4444 * If a number of txgs equal to this threshold have been created after a commit 4445 * callback has been registered but not called, then we assume there is an 4446 * implementation bug. 4447 */ 4448#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) 4449 4450/* 4451 * Commit callback test. 4452 */ 4453void 4454ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 4455{ 4456 objset_t *os = zd->zd_os; 4457 ztest_od_t od[1]; 4458 dmu_tx_t *tx; 4459 ztest_cb_data_t *cb_data[3], *tmp_cb; 4460 uint64_t old_txg, txg; 4461 int i, error; 4462 4463 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4464 4465 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4466 return; 4467 4468 tx = dmu_tx_create(os); 4469 4470 cb_data[0] = ztest_create_cb_data(os, 0); 4471 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 4472 4473 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); 4474 4475 /* Every once in a while, abort the transaction on purpose */ 4476 if (ztest_random(100) == 0) 4477 error = -1; 4478 4479 if (!error) 4480 error = dmu_tx_assign(tx, TXG_NOWAIT); 4481 4482 txg = error ? 0 : dmu_tx_get_txg(tx); 4483 4484 cb_data[0]->zcd_txg = txg; 4485 cb_data[1] = ztest_create_cb_data(os, txg); 4486 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 4487 4488 if (error) { 4489 /* 4490 * It's not a strict requirement to call the registered 4491 * callbacks from inside dmu_tx_abort(), but that's what 4492 * it's supposed to happen in the current implementation 4493 * so we will check for that. 4494 */ 4495 for (i = 0; i < 2; i++) { 4496 cb_data[i]->zcd_expected_err = ECANCELED; 4497 VERIFY(!cb_data[i]->zcd_called); 4498 } 4499 4500 dmu_tx_abort(tx); 4501 4502 for (i = 0; i < 2; i++) { 4503 VERIFY(cb_data[i]->zcd_called); 4504 umem_free(cb_data[i], sizeof (ztest_cb_data_t)); 4505 } 4506 4507 return; 4508 } 4509 4510 cb_data[2] = ztest_create_cb_data(os, txg); 4511 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]); 4512 4513 /* 4514 * Read existing data to make sure there isn't a future leak. 4515 */ 4516 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t), 4517 &old_txg, DMU_READ_PREFETCH)); 4518 4519 if (old_txg > txg) 4520 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64, 4521 old_txg, txg); 4522 4523 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); 4524 4525 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4526 4527 /* 4528 * Since commit callbacks don't have any ordering requirement and since 4529 * it is theoretically possible for a commit callback to be called 4530 * after an arbitrary amount of time has elapsed since its txg has been 4531 * synced, it is difficult to reliably determine whether a commit 4532 * callback hasn't been called due to high load or due to a flawed 4533 * implementation. 4534 * 4535 * In practice, we will assume that if after a certain number of txgs a 4536 * commit callback hasn't been called, then most likely there's an 4537 * implementation bug.. 4538 */ 4539 tmp_cb = list_head(&zcl.zcl_callbacks); 4540 if (tmp_cb != NULL && 4541 (txg - ZTEST_COMMIT_CALLBACK_THRESH) > tmp_cb->zcd_txg) { 4542 fatal(0, "Commit callback threshold exceeded, oldest txg: %" 4543 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); 4544 } 4545 4546 /* 4547 * Let's find the place to insert our callbacks. 4548 * 4549 * Even though the list is ordered by txg, it is possible for the 4550 * insertion point to not be the end because our txg may already be 4551 * quiescing at this point and other callbacks in the open txg 4552 * (from other objsets) may have sneaked in. 4553 */ 4554 tmp_cb = list_tail(&zcl.zcl_callbacks); 4555 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 4556 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 4557 4558 /* Add the 3 callbacks to the list */ 4559 for (i = 0; i < 3; i++) { 4560 if (tmp_cb == NULL) 4561 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 4562 else 4563 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 4564 cb_data[i]); 4565 4566 cb_data[i]->zcd_added = B_TRUE; 4567 VERIFY(!cb_data[i]->zcd_called); 4568 4569 tmp_cb = cb_data[i]; 4570 } 4571 4572 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4573 4574 dmu_tx_commit(tx); 4575} 4576 4577/* ARGSUSED */ 4578void 4579ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 4580{ 4581 zfs_prop_t proplist[] = { 4582 ZFS_PROP_CHECKSUM, 4583 ZFS_PROP_COMPRESSION, 4584 ZFS_PROP_COPIES, 4585 ZFS_PROP_DEDUP 4586 }; 4587 4588 (void) rw_rdlock(&ztest_name_lock); 4589 4590 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) 4591 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 4592 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 4593 4594 (void) rw_unlock(&ztest_name_lock); 4595} 4596 4597/* ARGSUSED */ 4598void 4599ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 4600{ 4601 nvlist_t *props = NULL; 4602 4603 (void) rw_rdlock(&ztest_name_lock); 4604 4605 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO, 4606 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); 4607 4608 VERIFY0(spa_prop_get(ztest_spa, &props)); 4609 4610 if (ztest_opts.zo_verbose >= 6) 4611 dump_nvlist(props, 4); 4612 4613 nvlist_free(props); 4614 4615 (void) rw_unlock(&ztest_name_lock); 4616} 4617 4618static int 4619user_release_one(const char *snapname, const char *holdname) 4620{ 4621 nvlist_t *snaps, *holds; 4622 int error; 4623 4624 snaps = fnvlist_alloc(); 4625 holds = fnvlist_alloc(); 4626 fnvlist_add_boolean(holds, holdname); 4627 fnvlist_add_nvlist(snaps, snapname, holds); 4628 fnvlist_free(holds); 4629 error = dsl_dataset_user_release(snaps, NULL); 4630 fnvlist_free(snaps); 4631 return (error); 4632} 4633 4634/* 4635 * Test snapshot hold/release and deferred destroy. 4636 */ 4637void 4638ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 4639{ 4640 int error; 4641 objset_t *os = zd->zd_os; 4642 objset_t *origin; 4643 char snapname[100]; 4644 char fullname[100]; 4645 char clonename[100]; 4646 char tag[100]; 4647 char osname[MAXNAMELEN]; 4648 nvlist_t *holds; 4649 4650 (void) rw_rdlock(&ztest_name_lock); 4651 4652 dmu_objset_name(os, osname); 4653 4654 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id); 4655 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname); 4656 (void) snprintf(clonename, sizeof (clonename), 4657 "%s/ch1_%llu", osname, id); 4658 (void) snprintf(tag, sizeof (tag), "tag_%llu", id); 4659 4660 /* 4661 * Clean up from any previous run. 4662 */ 4663 error = dsl_destroy_head(clonename); 4664 if (error != ENOENT) 4665 ASSERT0(error); 4666 error = user_release_one(fullname, tag); 4667 if (error != ESRCH && error != ENOENT) 4668 ASSERT0(error); 4669 error = dsl_destroy_snapshot(fullname, B_FALSE); 4670 if (error != ENOENT) 4671 ASSERT0(error); 4672 4673 /* 4674 * Create snapshot, clone it, mark snap for deferred destroy, 4675 * destroy clone, verify snap was also destroyed. 4676 */ 4677 error = dmu_objset_snapshot_one(osname, snapname); 4678 if (error) { 4679 if (error == ENOSPC) { 4680 ztest_record_enospc("dmu_objset_snapshot"); 4681 goto out; 4682 } 4683 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4684 } 4685 4686 error = dmu_objset_clone(clonename, fullname); 4687 if (error) { 4688 if (error == ENOSPC) { 4689 ztest_record_enospc("dmu_objset_clone"); 4690 goto out; 4691 } 4692 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error); 4693 } 4694 4695 error = dsl_destroy_snapshot(fullname, B_TRUE); 4696 if (error) { 4697 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4698 fullname, error); 4699 } 4700 4701 error = dsl_destroy_head(clonename); 4702 if (error) 4703 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error); 4704 4705 error = dmu_objset_hold(fullname, FTAG, &origin); 4706 if (error != ENOENT) 4707 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4708 4709 /* 4710 * Create snapshot, add temporary hold, verify that we can't 4711 * destroy a held snapshot, mark for deferred destroy, 4712 * release hold, verify snapshot was destroyed. 4713 */ 4714 error = dmu_objset_snapshot_one(osname, snapname); 4715 if (error) { 4716 if (error == ENOSPC) { 4717 ztest_record_enospc("dmu_objset_snapshot"); 4718 goto out; 4719 } 4720 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4721 } 4722 4723 holds = fnvlist_alloc(); 4724 fnvlist_add_string(holds, fullname, tag); 4725 error = dsl_dataset_user_hold(holds, 0, NULL); 4726 fnvlist_free(holds); 4727 4728 if (error == ENOSPC) { 4729 ztest_record_enospc("dsl_dataset_user_hold"); 4730 goto out; 4731 } else if (error) { 4732 fatal(0, "dsl_dataset_user_hold(%s, %s) = %u", 4733 fullname, tag, error); 4734 } 4735 4736 error = dsl_destroy_snapshot(fullname, B_FALSE); 4737 if (error != EBUSY) { 4738 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d", 4739 fullname, error); 4740 } 4741 4742 error = dsl_destroy_snapshot(fullname, B_TRUE); 4743 if (error) { 4744 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4745 fullname, error); 4746 } 4747 4748 error = user_release_one(fullname, tag); 4749 if (error) 4750 fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error); 4751 4752 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT); 4753 4754out: 4755 (void) rw_unlock(&ztest_name_lock); 4756} 4757 4758/* 4759 * Inject random faults into the on-disk data. 4760 */ 4761/* ARGSUSED */ 4762void 4763ztest_fault_inject(ztest_ds_t *zd, uint64_t id) 4764{ 4765 ztest_shared_t *zs = ztest_shared; 4766 spa_t *spa = ztest_spa; 4767 int fd; 4768 uint64_t offset; 4769 uint64_t leaves; 4770 uint64_t bad = 0x1990c0ffeedecadeULL; 4771 uint64_t top, leaf; 4772 char path0[MAXPATHLEN]; 4773 char pathrand[MAXPATHLEN]; 4774 size_t fsize; 4775 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */ 4776 int iters = 1000; 4777 int maxfaults; 4778 int mirror_save; 4779 vdev_t *vd0 = NULL; 4780 uint64_t guid0 = 0; 4781 boolean_t islog = B_FALSE; 4782 4783 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4784 maxfaults = MAXFAULTS(); 4785 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 4786 mirror_save = zs->zs_mirrors; 4787 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4788 4789 ASSERT(leaves >= 1); 4790 4791 /* 4792 * Grab the name lock as reader. There are some operations 4793 * which don't like to have their vdevs changed while 4794 * they are in progress (i.e. spa_change_guid). Those 4795 * operations will have grabbed the name lock as writer. 4796 */ 4797 (void) rw_rdlock(&ztest_name_lock); 4798 4799 /* 4800 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd. 4801 */ 4802 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4803 4804 if (ztest_random(2) == 0) { 4805 /* 4806 * Inject errors on a normal data device or slog device. 4807 */ 4808 top = ztest_random_vdev_top(spa, B_TRUE); 4809 leaf = ztest_random(leaves) + zs->zs_splits; 4810 4811 /* 4812 * Generate paths to the first leaf in this top-level vdev, 4813 * and to the random leaf we selected. We'll induce transient 4814 * write failures and random online/offline activity on leaf 0, 4815 * and we'll write random garbage to the randomly chosen leaf. 4816 */ 4817 (void) snprintf(path0, sizeof (path0), ztest_dev_template, 4818 ztest_opts.zo_dir, ztest_opts.zo_pool, 4819 top * leaves + zs->zs_splits); 4820 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, 4821 ztest_opts.zo_dir, ztest_opts.zo_pool, 4822 top * leaves + leaf); 4823 4824 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); 4825 if (vd0 != NULL && vd0->vdev_top->vdev_islog) 4826 islog = B_TRUE; 4827 4828 /* 4829 * If the top-level vdev needs to be resilvered 4830 * then we only allow faults on the device that is 4831 * resilvering. 4832 */ 4833 if (vd0 != NULL && maxfaults != 1 && 4834 (!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) || 4835 vd0->vdev_resilver_txg != 0)) { 4836 /* 4837 * Make vd0 explicitly claim to be unreadable, 4838 * or unwriteable, or reach behind its back 4839 * and close the underlying fd. We can do this if 4840 * maxfaults == 0 because we'll fail and reexecute, 4841 * and we can do it if maxfaults >= 2 because we'll 4842 * have enough redundancy. If maxfaults == 1, the 4843 * combination of this with injection of random data 4844 * corruption below exceeds the pool's fault tolerance. 4845 */ 4846 vdev_file_t *vf = vd0->vdev_tsd; 4847 4848 if (vf != NULL && ztest_random(3) == 0) { 4849 (void) close(vf->vf_vnode->v_fd); 4850 vf->vf_vnode->v_fd = -1; 4851 } else if (ztest_random(2) == 0) { 4852 vd0->vdev_cant_read = B_TRUE; 4853 } else { 4854 vd0->vdev_cant_write = B_TRUE; 4855 } 4856 guid0 = vd0->vdev_guid; 4857 } 4858 } else { 4859 /* 4860 * Inject errors on an l2cache device. 4861 */ 4862 spa_aux_vdev_t *sav = &spa->spa_l2cache; 4863 4864 if (sav->sav_count == 0) { 4865 spa_config_exit(spa, SCL_STATE, FTAG); 4866 (void) rw_unlock(&ztest_name_lock); 4867 return; 4868 } 4869 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; 4870 guid0 = vd0->vdev_guid; 4871 (void) strcpy(path0, vd0->vdev_path); 4872 (void) strcpy(pathrand, vd0->vdev_path); 4873 4874 leaf = 0; 4875 leaves = 1; 4876 maxfaults = INT_MAX; /* no limit on cache devices */ 4877 } 4878 4879 spa_config_exit(spa, SCL_STATE, FTAG); 4880 (void) rw_unlock(&ztest_name_lock); 4881 4882 /* 4883 * If we can tolerate two or more faults, or we're dealing 4884 * with a slog, randomly online/offline vd0. 4885 */ 4886 if ((maxfaults >= 2 || islog) && guid0 != 0) { 4887 if (ztest_random(10) < 6) { 4888 int flags = (ztest_random(2) == 0 ? 4889 ZFS_OFFLINE_TEMPORARY : 0); 4890 4891 /* 4892 * We have to grab the zs_name_lock as writer to 4893 * prevent a race between offlining a slog and 4894 * destroying a dataset. Offlining the slog will 4895 * grab a reference on the dataset which may cause 4896 * dmu_objset_destroy() to fail with EBUSY thus 4897 * leaving the dataset in an inconsistent state. 4898 */ 4899 if (islog) 4900 (void) rw_wrlock(&ztest_name_lock); 4901 4902 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); 4903 4904 if (islog) 4905 (void) rw_unlock(&ztest_name_lock); 4906 } else { 4907 /* 4908 * Ideally we would like to be able to randomly 4909 * call vdev_[on|off]line without holding locks 4910 * to force unpredictable failures but the side 4911 * effects of vdev_[on|off]line prevent us from 4912 * doing so. We grab the ztest_vdev_lock here to 4913 * prevent a race between injection testing and 4914 * aux_vdev removal. 4915 */ 4916 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4917 (void) vdev_online(spa, guid0, 0, NULL); 4918 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4919 } 4920 } 4921 4922 if (maxfaults == 0) 4923 return; 4924 4925 /* 4926 * We have at least single-fault tolerance, so inject data corruption. 4927 */ 4928 fd = open(pathrand, O_RDWR); 4929 4930 if (fd == -1) /* we hit a gap in the device namespace */ 4931 return; 4932 4933 fsize = lseek(fd, 0, SEEK_END); 4934 4935 while (--iters != 0) { 4936 offset = ztest_random(fsize / (leaves << bshift)) * 4937 (leaves << bshift) + (leaf << bshift) + 4938 (ztest_random(1ULL << (bshift - 1)) & -8ULL); 4939 4940 if (offset >= fsize) 4941 continue; 4942 4943 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4944 if (mirror_save != zs->zs_mirrors) { 4945 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4946 (void) close(fd); 4947 return; 4948 } 4949 4950 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) 4951 fatal(1, "can't inject bad word at 0x%llx in %s", 4952 offset, pathrand); 4953 4954 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4955 4956 if (ztest_opts.zo_verbose >= 7) 4957 (void) printf("injected bad word into %s," 4958 " offset 0x%llx\n", pathrand, (u_longlong_t)offset); 4959 } 4960 4961 (void) close(fd); 4962} 4963 4964/* 4965 * Verify that DDT repair works as expected. 4966 */ 4967void 4968ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) 4969{ 4970 ztest_shared_t *zs = ztest_shared; 4971 spa_t *spa = ztest_spa; 4972 objset_t *os = zd->zd_os; 4973 ztest_od_t od[1]; 4974 uint64_t object, blocksize, txg, pattern, psize; 4975 enum zio_checksum checksum = spa_dedup_checksum(spa); 4976 dmu_buf_t *db; 4977 dmu_tx_t *tx; 4978 void *buf; 4979 blkptr_t blk; 4980 int copies = 2 * ZIO_DEDUPDITTO_MIN; 4981 4982 blocksize = ztest_random_blocksize(); 4983 blocksize = MIN(blocksize, 2048); /* because we write so many */ 4984 4985 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4986 4987 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4988 return; 4989 4990 /* 4991 * Take the name lock as writer to prevent anyone else from changing 4992 * the pool and dataset properies we need to maintain during this test. 4993 */ 4994 (void) rw_wrlock(&ztest_name_lock); 4995 4996 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, 4997 B_FALSE) != 0 || 4998 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, 4999 B_FALSE) != 0) { 5000 (void) rw_unlock(&ztest_name_lock); 5001 return; 5002 } 5003 5004 object = od[0].od_object; 5005 blocksize = od[0].od_blocksize; 5006 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os); 5007 5008 ASSERT(object != 0); 5009 5010 tx = dmu_tx_create(os); 5011 dmu_tx_hold_write(tx, object, 0, copies * blocksize); 5012 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 5013 if (txg == 0) { 5014 (void) rw_unlock(&ztest_name_lock); 5015 return; 5016 } 5017 5018 /* 5019 * Write all the copies of our block. 5020 */ 5021 for (int i = 0; i < copies; i++) { 5022 uint64_t offset = i * blocksize; 5023 int error = dmu_buf_hold(os, object, offset, FTAG, &db, 5024 DMU_READ_NO_PREFETCH); 5025 if (error != 0) { 5026 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u", 5027 os, (long long)object, (long long) offset, error); 5028 } 5029 ASSERT(db->db_offset == offset); 5030 ASSERT(db->db_size == blocksize); 5031 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) || 5032 ztest_pattern_match(db->db_data, db->db_size, 0ULL)); 5033 dmu_buf_will_fill(db, tx); 5034 ztest_pattern_set(db->db_data, db->db_size, pattern); 5035 dmu_buf_rele(db, FTAG); 5036 } 5037 5038 dmu_tx_commit(tx); 5039 txg_wait_synced(spa_get_dsl(spa), txg); 5040 5041 /* 5042 * Find out what block we got. 5043 */ 5044 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db, 5045 DMU_READ_NO_PREFETCH)); 5046 blk = *((dmu_buf_impl_t *)db)->db_blkptr; 5047 dmu_buf_rele(db, FTAG); 5048 5049 /* 5050 * Damage the block. Dedup-ditto will save us when we read it later. 5051 */ 5052 psize = BP_GET_PSIZE(&blk); 5053 buf = zio_buf_alloc(psize); 5054 ztest_pattern_set(buf, psize, ~pattern); 5055 5056 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk, 5057 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, 5058 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL)); 5059 5060 zio_buf_free(buf, psize); 5061 5062 (void) rw_unlock(&ztest_name_lock); 5063} 5064 5065/* 5066 * Scrub the pool. 5067 */ 5068/* ARGSUSED */ 5069void 5070ztest_scrub(ztest_ds_t *zd, uint64_t id) 5071{ 5072 spa_t *spa = ztest_spa; 5073 5074 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5075 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */ 5076 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5077} 5078 5079/* 5080 * Change the guid for the pool. 5081 */ 5082/* ARGSUSED */ 5083void 5084ztest_reguid(ztest_ds_t *zd, uint64_t id) 5085{ 5086 spa_t *spa = ztest_spa; 5087 uint64_t orig, load; 5088 int error; 5089 5090 orig = spa_guid(spa); 5091 load = spa_load_guid(spa); 5092 5093 (void) rw_wrlock(&ztest_name_lock); 5094 error = spa_change_guid(spa); 5095 (void) rw_unlock(&ztest_name_lock); 5096 5097 if (error != 0) 5098 return; 5099 5100 if (ztest_opts.zo_verbose >= 4) { 5101 (void) printf("Changed guid old %llu -> %llu\n", 5102 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); 5103 } 5104 5105 VERIFY3U(orig, !=, spa_guid(spa)); 5106 VERIFY3U(load, ==, spa_load_guid(spa)); 5107} 5108 5109/* 5110 * Rename the pool to a different name and then rename it back. 5111 */ 5112/* ARGSUSED */ 5113void 5114ztest_spa_rename(ztest_ds_t *zd, uint64_t id) 5115{ 5116 char *oldname, *newname; 5117 spa_t *spa; 5118 5119 (void) rw_wrlock(&ztest_name_lock); 5120 5121 oldname = ztest_opts.zo_pool; 5122 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); 5123 (void) strcpy(newname, oldname); 5124 (void) strcat(newname, "_tmp"); 5125 5126 /* 5127 * Do the rename 5128 */ 5129 VERIFY3U(0, ==, spa_rename(oldname, newname)); 5130 5131 /* 5132 * Try to open it under the old name, which shouldn't exist 5133 */ 5134 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5135 5136 /* 5137 * Open it under the new name and make sure it's still the same spa_t. 5138 */ 5139 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5140 5141 ASSERT(spa == ztest_spa); 5142 spa_close(spa, FTAG); 5143 5144 /* 5145 * Rename it back to the original 5146 */ 5147 VERIFY3U(0, ==, spa_rename(newname, oldname)); 5148 5149 /* 5150 * Make sure it can still be opened 5151 */ 5152 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5153 5154 ASSERT(spa == ztest_spa); 5155 spa_close(spa, FTAG); 5156 5157 umem_free(newname, strlen(newname) + 1); 5158 5159 (void) rw_unlock(&ztest_name_lock); 5160} 5161 5162/* 5163 * Verify pool integrity by running zdb. 5164 */ 5165static void 5166ztest_run_zdb(char *pool) 5167{ 5168 int status; 5169 char zdb[MAXPATHLEN + MAXNAMELEN + 20]; 5170 char zbuf[1024]; 5171 char *bin; 5172 char *ztest; 5173 char *isa; 5174 int isalen; 5175 FILE *fp; 5176 5177 strlcpy(zdb, "/usr/bin/ztest", sizeof(zdb)); 5178 5179 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ 5180 bin = strstr(zdb, "/usr/bin/"); 5181 ztest = strstr(bin, "/ztest"); 5182 isa = bin + 8; 5183 isalen = ztest - isa; 5184 isa = strdup(isa); 5185 /* LINTED */ 5186 (void) sprintf(bin, 5187 "/usr/sbin%.*s/zdb -bcc%s%s -d -U %s %s", 5188 isalen, 5189 isa, 5190 ztest_opts.zo_verbose >= 3 ? "s" : "", 5191 ztest_opts.zo_verbose >= 4 ? "v" : "", 5192 spa_config_path, 5193 pool); 5194 free(isa); 5195 5196 if (ztest_opts.zo_verbose >= 5) 5197 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 5198 5199 fp = popen(zdb, "r"); 5200 assert(fp != NULL); 5201 5202 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 5203 if (ztest_opts.zo_verbose >= 3) 5204 (void) printf("%s", zbuf); 5205 5206 status = pclose(fp); 5207 5208 if (status == 0) 5209 return; 5210 5211 ztest_dump_core = 0; 5212 if (WIFEXITED(status)) 5213 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 5214 else 5215 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 5216} 5217 5218static void 5219ztest_walk_pool_directory(char *header) 5220{ 5221 spa_t *spa = NULL; 5222 5223 if (ztest_opts.zo_verbose >= 6) 5224 (void) printf("%s\n", header); 5225 5226 mutex_enter(&spa_namespace_lock); 5227 while ((spa = spa_next(spa)) != NULL) 5228 if (ztest_opts.zo_verbose >= 6) 5229 (void) printf("\t%s\n", spa_name(spa)); 5230 mutex_exit(&spa_namespace_lock); 5231} 5232 5233static void 5234ztest_spa_import_export(char *oldname, char *newname) 5235{ 5236 nvlist_t *config, *newconfig; 5237 uint64_t pool_guid; 5238 spa_t *spa; 5239 int error; 5240 5241 if (ztest_opts.zo_verbose >= 4) { 5242 (void) printf("import/export: old = %s, new = %s\n", 5243 oldname, newname); 5244 } 5245 5246 /* 5247 * Clean up from previous runs. 5248 */ 5249 (void) spa_destroy(newname); 5250 5251 /* 5252 * Get the pool's configuration and guid. 5253 */ 5254 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5255 5256 /* 5257 * Kick off a scrub to tickle scrub/export races. 5258 */ 5259 if (ztest_random(2) == 0) 5260 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5261 5262 pool_guid = spa_guid(spa); 5263 spa_close(spa, FTAG); 5264 5265 ztest_walk_pool_directory("pools before export"); 5266 5267 /* 5268 * Export it. 5269 */ 5270 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 5271 5272 ztest_walk_pool_directory("pools after export"); 5273 5274 /* 5275 * Try to import it. 5276 */ 5277 newconfig = spa_tryimport(config); 5278 ASSERT(newconfig != NULL); 5279 nvlist_free(newconfig); 5280 5281 /* 5282 * Import it under the new name. 5283 */ 5284 error = spa_import(newname, config, NULL, 0); 5285 if (error != 0) { 5286 dump_nvlist(config, 0); 5287 fatal(B_FALSE, "couldn't import pool %s as %s: error %u", 5288 oldname, newname, error); 5289 } 5290 5291 ztest_walk_pool_directory("pools after import"); 5292 5293 /* 5294 * Try to import it again -- should fail with EEXIST. 5295 */ 5296 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 5297 5298 /* 5299 * Try to import it under a different name -- should fail with EEXIST. 5300 */ 5301 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 5302 5303 /* 5304 * Verify that the pool is no longer visible under the old name. 5305 */ 5306 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5307 5308 /* 5309 * Verify that we can open and close the pool using the new name. 5310 */ 5311 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5312 ASSERT(pool_guid == spa_guid(spa)); 5313 spa_close(spa, FTAG); 5314 5315 nvlist_free(config); 5316} 5317 5318static void 5319ztest_resume(spa_t *spa) 5320{ 5321 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6) 5322 (void) printf("resuming from suspended state\n"); 5323 spa_vdev_state_enter(spa, SCL_NONE); 5324 vdev_clear(spa, NULL); 5325 (void) spa_vdev_state_exit(spa, NULL, 0); 5326 (void) zio_resume(spa); 5327} 5328 5329static void * 5330ztest_resume_thread(void *arg) 5331{ 5332 spa_t *spa = arg; 5333 5334 while (!ztest_exiting) { 5335 if (spa_suspended(spa)) 5336 ztest_resume(spa); 5337 (void) poll(NULL, 0, 100); 5338 } 5339 return (NULL); 5340} 5341 5342static void * 5343ztest_deadman_thread(void *arg) 5344{ 5345 ztest_shared_t *zs = arg; 5346 spa_t *spa = ztest_spa; 5347 hrtime_t delta, total = 0; 5348 5349 for (;;) { 5350 delta = zs->zs_thread_stop - zs->zs_thread_start + 5351 MSEC2NSEC(zfs_deadman_synctime_ms); 5352 5353 (void) poll(NULL, 0, (int)NSEC2MSEC(delta)); 5354 5355 /* 5356 * If the pool is suspended then fail immediately. Otherwise, 5357 * check to see if the pool is making any progress. If 5358 * vdev_deadman() discovers that there hasn't been any recent 5359 * I/Os then it will end up aborting the tests. 5360 */ 5361 if (spa_suspended(spa) || spa->spa_root_vdev == NULL) { 5362 fatal(0, "aborting test after %llu seconds because " 5363 "pool has transitioned to a suspended state.", 5364 zfs_deadman_synctime_ms / 1000); 5365 return (NULL); 5366 } 5367 vdev_deadman(spa->spa_root_vdev); 5368 5369 total += zfs_deadman_synctime_ms/1000; 5370 (void) printf("ztest has been running for %lld seconds\n", 5371 total); 5372 } 5373} 5374 5375static void 5376ztest_execute(int test, ztest_info_t *zi, uint64_t id) 5377{ 5378 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 5379 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 5380 hrtime_t functime = gethrtime(); 5381 5382 for (int i = 0; i < zi->zi_iters; i++) 5383 zi->zi_func(zd, id); 5384 5385 functime = gethrtime() - functime; 5386 5387 atomic_add_64(&zc->zc_count, 1); 5388 atomic_add_64(&zc->zc_time, functime); 5389 5390 if (ztest_opts.zo_verbose >= 4) { 5391 Dl_info dli; 5392 (void) dladdr((void *)zi->zi_func, &dli); 5393 (void) printf("%6.2f sec in %s\n", 5394 (double)functime / NANOSEC, dli.dli_sname); 5395 } 5396} 5397 5398static void * 5399ztest_thread(void *arg) 5400{ 5401 int rand; 5402 uint64_t id = (uintptr_t)arg; 5403 ztest_shared_t *zs = ztest_shared; 5404 uint64_t call_next; 5405 hrtime_t now; 5406 ztest_info_t *zi; 5407 ztest_shared_callstate_t *zc; 5408 5409 while ((now = gethrtime()) < zs->zs_thread_stop) { 5410 /* 5411 * See if it's time to force a crash. 5412 */ 5413 if (now > zs->zs_thread_kill) 5414 ztest_kill(zs); 5415 5416 /* 5417 * If we're getting ENOSPC with some regularity, stop. 5418 */ 5419 if (zs->zs_enospc_count > 10) 5420 break; 5421 5422 /* 5423 * Pick a random function to execute. 5424 */ 5425 rand = ztest_random(ZTEST_FUNCS); 5426 zi = &ztest_info[rand]; 5427 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 5428 call_next = zc->zc_next; 5429 5430 if (now >= call_next && 5431 atomic_cas_64(&zc->zc_next, call_next, call_next + 5432 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 5433 ztest_execute(rand, zi, id); 5434 } 5435 } 5436 5437 return (NULL); 5438} 5439 5440static void 5441ztest_dataset_name(char *dsname, char *pool, int d) 5442{ 5443 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d); 5444} 5445 5446static void 5447ztest_dataset_destroy(int d) 5448{ 5449 char name[MAXNAMELEN]; 5450 5451 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5452 5453 if (ztest_opts.zo_verbose >= 3) 5454 (void) printf("Destroying %s to free up space\n", name); 5455 5456 /* 5457 * Cleanup any non-standard clones and snapshots. In general, 5458 * ztest thread t operates on dataset (t % zopt_datasets), 5459 * so there may be more than one thing to clean up. 5460 */ 5461 for (int t = d; t < ztest_opts.zo_threads; 5462 t += ztest_opts.zo_datasets) { 5463 ztest_dsl_dataset_cleanup(name, t); 5464 } 5465 5466 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 5467 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 5468} 5469 5470static void 5471ztest_dataset_dirobj_verify(ztest_ds_t *zd) 5472{ 5473 uint64_t usedobjs, dirobjs, scratch; 5474 5475 /* 5476 * ZTEST_DIROBJ is the object directory for the entire dataset. 5477 * Therefore, the number of objects in use should equal the 5478 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 5479 * If not, we have an object leak. 5480 * 5481 * Note that we can only check this in ztest_dataset_open(), 5482 * when the open-context and syncing-context values agree. 5483 * That's because zap_count() returns the open-context value, 5484 * while dmu_objset_space() returns the rootbp fill count. 5485 */ 5486 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 5487 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 5488 ASSERT3U(dirobjs + 1, ==, usedobjs); 5489} 5490 5491static int 5492ztest_dataset_open(int d) 5493{ 5494 ztest_ds_t *zd = &ztest_ds[d]; 5495 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 5496 objset_t *os; 5497 zilog_t *zilog; 5498 char name[MAXNAMELEN]; 5499 int error; 5500 5501 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5502 5503 (void) rw_rdlock(&ztest_name_lock); 5504 5505 error = ztest_dataset_create(name); 5506 if (error == ENOSPC) { 5507 (void) rw_unlock(&ztest_name_lock); 5508 ztest_record_enospc(FTAG); 5509 return (error); 5510 } 5511 ASSERT(error == 0 || error == EEXIST); 5512 5513 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os)); 5514 (void) rw_unlock(&ztest_name_lock); 5515 5516 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 5517 5518 zilog = zd->zd_zilog; 5519 5520 if (zilog->zl_header->zh_claim_lr_seq != 0 && 5521 zilog->zl_header->zh_claim_lr_seq < committed_seq) 5522 fatal(0, "missing log records: claimed %llu < committed %llu", 5523 zilog->zl_header->zh_claim_lr_seq, committed_seq); 5524 5525 ztest_dataset_dirobj_verify(zd); 5526 5527 zil_replay(os, zd, ztest_replay_vector); 5528 5529 ztest_dataset_dirobj_verify(zd); 5530 5531 if (ztest_opts.zo_verbose >= 6) 5532 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n", 5533 zd->zd_name, 5534 (u_longlong_t)zilog->zl_parse_blk_count, 5535 (u_longlong_t)zilog->zl_parse_lr_count, 5536 (u_longlong_t)zilog->zl_replaying_seq); 5537 5538 zilog = zil_open(os, ztest_get_data); 5539 5540 if (zilog->zl_replaying_seq != 0 && 5541 zilog->zl_replaying_seq < committed_seq) 5542 fatal(0, "missing log records: replayed %llu < committed %llu", 5543 zilog->zl_replaying_seq, committed_seq); 5544 5545 return (0); 5546} 5547 5548static void 5549ztest_dataset_close(int d) 5550{ 5551 ztest_ds_t *zd = &ztest_ds[d]; 5552 5553 zil_close(zd->zd_zilog); 5554 dmu_objset_disown(zd->zd_os, zd); 5555 5556 ztest_zd_fini(zd); 5557} 5558 5559/* 5560 * Kick off threads to run tests on all datasets in parallel. 5561 */ 5562static void 5563ztest_run(ztest_shared_t *zs) 5564{ 5565 thread_t *tid; 5566 spa_t *spa; 5567 objset_t *os; 5568 thread_t resume_tid; 5569 int error; 5570 5571 ztest_exiting = B_FALSE; 5572 5573 /* 5574 * Initialize parent/child shared state. 5575 */ 5576 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5577 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5578 5579 zs->zs_thread_start = gethrtime(); 5580 zs->zs_thread_stop = 5581 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 5582 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 5583 zs->zs_thread_kill = zs->zs_thread_stop; 5584 if (ztest_random(100) < ztest_opts.zo_killrate) { 5585 zs->zs_thread_kill -= 5586 ztest_random(ztest_opts.zo_passtime * NANOSEC); 5587 } 5588 5589 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL); 5590 5591 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 5592 offsetof(ztest_cb_data_t, zcd_node)); 5593 5594 /* 5595 * Open our pool. 5596 */ 5597 kernel_init(FREAD | FWRITE); 5598 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5599 spa->spa_debug = B_TRUE; 5600 metaslab_preload_limit = ztest_random(20) + 1; 5601 ztest_spa = spa; 5602 5603 VERIFY0(dmu_objset_own(ztest_opts.zo_pool, 5604 DMU_OST_ANY, B_TRUE, FTAG, &os)); 5605 zs->zs_guid = dmu_objset_fsid_guid(os); 5606 dmu_objset_disown(os, FTAG); 5607 5608 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; 5609 5610 /* 5611 * We don't expect the pool to suspend unless maxfaults == 0, 5612 * in which case ztest_fault_inject() temporarily takes away 5613 * the only valid replica. 5614 */ 5615 if (MAXFAULTS() == 0) 5616 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT; 5617 else 5618 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC; 5619 5620 /* 5621 * Create a thread to periodically resume suspended I/O. 5622 */ 5623 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, 5624 &resume_tid) == 0); 5625 5626 /* 5627 * Create a deadman thread to abort() if we hang. 5628 */ 5629 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, 5630 NULL) == 0); 5631 5632 /* 5633 * Verify that we can safely inquire about about any object, 5634 * whether it's allocated or not. To make it interesting, 5635 * we probe a 5-wide window around each power of two. 5636 * This hits all edge cases, including zero and the max. 5637 */ 5638 for (int t = 0; t < 64; t++) { 5639 for (int d = -5; d <= 5; d++) { 5640 error = dmu_object_info(spa->spa_meta_objset, 5641 (1ULL << t) + d, NULL); 5642 ASSERT(error == 0 || error == ENOENT || 5643 error == EINVAL); 5644 } 5645 } 5646 5647 /* 5648 * If we got any ENOSPC errors on the previous run, destroy something. 5649 */ 5650 if (zs->zs_enospc_count != 0) { 5651 int d = ztest_random(ztest_opts.zo_datasets); 5652 ztest_dataset_destroy(d); 5653 } 5654 zs->zs_enospc_count = 0; 5655 5656 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t), 5657 UMEM_NOFAIL); 5658 5659 if (ztest_opts.zo_verbose >= 4) 5660 (void) printf("starting main threads...\n"); 5661 5662 /* 5663 * Kick off all the tests that run in parallel. 5664 */ 5665 for (int t = 0; t < ztest_opts.zo_threads; t++) { 5666 if (t < ztest_opts.zo_datasets && 5667 ztest_dataset_open(t) != 0) 5668 return; 5669 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, 5670 THR_BOUND, &tid[t]) == 0); 5671 } 5672 5673 /* 5674 * Wait for all of the tests to complete. We go in reverse order 5675 * so we don't close datasets while threads are still using them. 5676 */ 5677 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) { 5678 VERIFY(thr_join(tid[t], NULL, NULL) == 0); 5679 if (t < ztest_opts.zo_datasets) 5680 ztest_dataset_close(t); 5681 } 5682 5683 txg_wait_synced(spa_get_dsl(spa), 0); 5684 5685 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 5686 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); 5687 zfs_dbgmsg_print(FTAG); 5688 5689 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t)); 5690 5691 /* Kill the resume thread */ 5692 ztest_exiting = B_TRUE; 5693 VERIFY(thr_join(resume_tid, NULL, NULL) == 0); 5694 ztest_resume(spa); 5695 5696 /* 5697 * Right before closing the pool, kick off a bunch of async I/O; 5698 * spa_close() should wait for it to complete. 5699 */ 5700 for (uint64_t object = 1; object < 50; object++) 5701 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20); 5702 5703 spa_close(spa, FTAG); 5704 5705 /* 5706 * Verify that we can loop over all pools. 5707 */ 5708 mutex_enter(&spa_namespace_lock); 5709 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 5710 if (ztest_opts.zo_verbose > 3) 5711 (void) printf("spa_next: found %s\n", spa_name(spa)); 5712 mutex_exit(&spa_namespace_lock); 5713 5714 /* 5715 * Verify that we can export the pool and reimport it under a 5716 * different name. 5717 */ 5718 if (ztest_random(2) == 0) { 5719 char name[MAXNAMELEN]; 5720 (void) snprintf(name, MAXNAMELEN, "%s_import", 5721 ztest_opts.zo_pool); 5722 ztest_spa_import_export(ztest_opts.zo_pool, name); 5723 ztest_spa_import_export(name, ztest_opts.zo_pool); 5724 } 5725 5726 kernel_fini(); 5727 5728 list_destroy(&zcl.zcl_callbacks); 5729 5730 (void) _mutex_destroy(&zcl.zcl_callbacks_lock); 5731 5732 (void) rwlock_destroy(&ztest_name_lock); 5733 (void) _mutex_destroy(&ztest_vdev_lock); 5734} 5735 5736static void 5737ztest_freeze(void) 5738{ 5739 ztest_ds_t *zd = &ztest_ds[0]; 5740 spa_t *spa; 5741 int numloops = 0; 5742 5743 if (ztest_opts.zo_verbose >= 3) 5744 (void) printf("testing spa_freeze()...\n"); 5745 5746 kernel_init(FREAD | FWRITE); 5747 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5748 VERIFY3U(0, ==, ztest_dataset_open(0)); 5749 spa->spa_debug = B_TRUE; 5750 ztest_spa = spa; 5751 5752 /* 5753 * Force the first log block to be transactionally allocated. 5754 * We have to do this before we freeze the pool -- otherwise 5755 * the log chain won't be anchored. 5756 */ 5757 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 5758 ztest_dmu_object_alloc_free(zd, 0); 5759 zil_commit(zd->zd_zilog, 0); 5760 } 5761 5762 txg_wait_synced(spa_get_dsl(spa), 0); 5763 5764 /* 5765 * Freeze the pool. This stops spa_sync() from doing anything, 5766 * so that the only way to record changes from now on is the ZIL. 5767 */ 5768 spa_freeze(spa); 5769 5770 /* 5771 * Run tests that generate log records but don't alter the pool config 5772 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 5773 * We do a txg_wait_synced() after each iteration to force the txg 5774 * to increase well beyond the last synced value in the uberblock. 5775 * The ZIL should be OK with that. 5776 */ 5777 while (ztest_random(10) != 0 && 5778 numloops++ < ztest_opts.zo_maxloops) { 5779 ztest_dmu_write_parallel(zd, 0); 5780 ztest_dmu_object_alloc_free(zd, 0); 5781 txg_wait_synced(spa_get_dsl(spa), 0); 5782 } 5783 5784 /* 5785 * Commit all of the changes we just generated. 5786 */ 5787 zil_commit(zd->zd_zilog, 0); 5788 txg_wait_synced(spa_get_dsl(spa), 0); 5789 5790 /* 5791 * Close our dataset and close the pool. 5792 */ 5793 ztest_dataset_close(0); 5794 spa_close(spa, FTAG); 5795 kernel_fini(); 5796 5797 /* 5798 * Open and close the pool and dataset to induce log replay. 5799 */ 5800 kernel_init(FREAD | FWRITE); 5801 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5802 ASSERT(spa_freeze_txg(spa) == UINT64_MAX); 5803 VERIFY3U(0, ==, ztest_dataset_open(0)); 5804 ztest_dataset_close(0); 5805 5806 spa->spa_debug = B_TRUE; 5807 ztest_spa = spa; 5808 txg_wait_synced(spa_get_dsl(spa), 0); 5809 ztest_reguid(NULL, 0); 5810 5811 spa_close(spa, FTAG); 5812 kernel_fini(); 5813} 5814 5815void 5816print_time(hrtime_t t, char *timebuf) 5817{ 5818 hrtime_t s = t / NANOSEC; 5819 hrtime_t m = s / 60; 5820 hrtime_t h = m / 60; 5821 hrtime_t d = h / 24; 5822 5823 s -= m * 60; 5824 m -= h * 60; 5825 h -= d * 24; 5826 5827 timebuf[0] = '\0'; 5828 5829 if (d) 5830 (void) sprintf(timebuf, 5831 "%llud%02lluh%02llum%02llus", d, h, m, s); 5832 else if (h) 5833 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s); 5834 else if (m) 5835 (void) sprintf(timebuf, "%llum%02llus", m, s); 5836 else 5837 (void) sprintf(timebuf, "%llus", s); 5838} 5839 5840static nvlist_t * 5841make_random_props() 5842{ 5843 nvlist_t *props; 5844 5845 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 5846 if (ztest_random(2) == 0) 5847 return (props); 5848 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0); 5849 5850 return (props); 5851} 5852 5853/* 5854 * Create a storage pool with the given name and initial vdev size. 5855 * Then test spa_freeze() functionality. 5856 */ 5857static void 5858ztest_init(ztest_shared_t *zs) 5859{ 5860 spa_t *spa; 5861 nvlist_t *nvroot, *props; 5862 5863 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5864 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5865 5866 kernel_init(FREAD | FWRITE); 5867 5868 /* 5869 * Create the storage pool. 5870 */ 5871 (void) spa_destroy(ztest_opts.zo_pool); 5872 ztest_shared->zs_vdev_next_leaf = 0; 5873 zs->zs_splits = 0; 5874 zs->zs_mirrors = ztest_opts.zo_mirrors; 5875 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0, 5876 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1); 5877 props = make_random_props(); 5878 for (int i = 0; i < SPA_FEATURES; i++) { 5879 char buf[1024]; 5880 (void) snprintf(buf, sizeof (buf), "feature@%s", 5881 spa_feature_table[i].fi_uname); 5882 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0)); 5883 } 5884 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL)); 5885 nvlist_free(nvroot); 5886 5887 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5888 zs->zs_metaslab_sz = 5889 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 5890 5891 spa_close(spa, FTAG); 5892 5893 kernel_fini(); 5894 5895 ztest_run_zdb(ztest_opts.zo_pool); 5896 5897 ztest_freeze(); 5898 5899 ztest_run_zdb(ztest_opts.zo_pool); 5900 5901 (void) rwlock_destroy(&ztest_name_lock); 5902 (void) _mutex_destroy(&ztest_vdev_lock); 5903} 5904 5905static void 5906setup_data_fd(void) 5907{ 5908 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX"; 5909 5910 ztest_fd_data = mkstemp(ztest_name_data); 5911 ASSERT3S(ztest_fd_data, >=, 0); 5912 (void) unlink(ztest_name_data); 5913} 5914 5915 5916static int 5917shared_data_size(ztest_shared_hdr_t *hdr) 5918{ 5919 int size; 5920 5921 size = hdr->zh_hdr_size; 5922 size += hdr->zh_opts_size; 5923 size += hdr->zh_size; 5924 size += hdr->zh_stats_size * hdr->zh_stats_count; 5925 size += hdr->zh_ds_size * hdr->zh_ds_count; 5926 5927 return (size); 5928} 5929 5930static void 5931setup_hdr(void) 5932{ 5933 int size; 5934 ztest_shared_hdr_t *hdr; 5935 5936 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5937 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 5938 ASSERT(hdr != MAP_FAILED); 5939 5940 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t))); 5941 5942 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t); 5943 hdr->zh_opts_size = sizeof (ztest_shared_opts_t); 5944 hdr->zh_size = sizeof (ztest_shared_t); 5945 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t); 5946 hdr->zh_stats_count = ZTEST_FUNCS; 5947 hdr->zh_ds_size = sizeof (ztest_shared_ds_t); 5948 hdr->zh_ds_count = ztest_opts.zo_datasets; 5949 5950 size = shared_data_size(hdr); 5951 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size)); 5952 5953 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5954} 5955 5956static void 5957setup_data(void) 5958{ 5959 int size, offset; 5960 ztest_shared_hdr_t *hdr; 5961 uint8_t *buf; 5962 5963 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5964 PROT_READ, MAP_SHARED, ztest_fd_data, 0); 5965 ASSERT(hdr != MAP_FAILED); 5966 5967 size = shared_data_size(hdr); 5968 5969 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5970 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()), 5971 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 5972 ASSERT(hdr != MAP_FAILED); 5973 buf = (uint8_t *)hdr; 5974 5975 offset = hdr->zh_hdr_size; 5976 ztest_shared_opts = (void *)&buf[offset]; 5977 offset += hdr->zh_opts_size; 5978 ztest_shared = (void *)&buf[offset]; 5979 offset += hdr->zh_size; 5980 ztest_shared_callstate = (void *)&buf[offset]; 5981 offset += hdr->zh_stats_size * hdr->zh_stats_count; 5982 ztest_shared_ds = (void *)&buf[offset]; 5983} 5984 5985static boolean_t 5986exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp) 5987{ 5988 pid_t pid; 5989 int status; 5990 char *cmdbuf = NULL; 5991 5992 pid = fork(); 5993 5994 if (cmd == NULL) { 5995 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 5996 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN); 5997 cmd = cmdbuf; 5998 } 5999 6000 if (pid == -1) 6001 fatal(1, "fork failed"); 6002 6003 if (pid == 0) { /* child */ 6004 char *emptyargv[2] = { cmd, NULL }; 6005 char fd_data_str[12]; 6006 6007 struct rlimit rl = { 1024, 1024 }; 6008 (void) setrlimit(RLIMIT_NOFILE, &rl); 6009 6010 (void) close(ztest_fd_rand); 6011 VERIFY3U(11, >=, 6012 snprintf(fd_data_str, 12, "%d", ztest_fd_data)); 6013 VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1)); 6014 6015 (void) enable_extended_FILE_stdio(-1, -1); 6016 if (libpath != NULL) 6017 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1)); 6018#ifdef illumos 6019 (void) execv(cmd, emptyargv); 6020#else 6021 (void) execvp(cmd, emptyargv); 6022#endif 6023 ztest_dump_core = B_FALSE; 6024 fatal(B_TRUE, "exec failed: %s", cmd); 6025 } 6026 6027 if (cmdbuf != NULL) { 6028 umem_free(cmdbuf, MAXPATHLEN); 6029 cmd = NULL; 6030 } 6031 6032 while (waitpid(pid, &status, 0) != pid) 6033 continue; 6034 if (statusp != NULL) 6035 *statusp = status; 6036 6037 if (WIFEXITED(status)) { 6038 if (WEXITSTATUS(status) != 0) { 6039 (void) fprintf(stderr, "child exited with code %d\n", 6040 WEXITSTATUS(status)); 6041 exit(2); 6042 } 6043 return (B_FALSE); 6044 } else if (WIFSIGNALED(status)) { 6045 if (!ignorekill || WTERMSIG(status) != SIGKILL) { 6046 (void) fprintf(stderr, "child died with signal %d\n", 6047 WTERMSIG(status)); 6048 exit(3); 6049 } 6050 return (B_TRUE); 6051 } else { 6052 (void) fprintf(stderr, "something strange happened to child\n"); 6053 exit(4); 6054 /* NOTREACHED */ 6055 } 6056} 6057 6058static void 6059ztest_run_init(void) 6060{ 6061 ztest_shared_t *zs = ztest_shared; 6062 6063 ASSERT(ztest_opts.zo_init != 0); 6064 6065 /* 6066 * Blow away any existing copy of zpool.cache 6067 */ 6068 (void) remove(spa_config_path); 6069 6070 /* 6071 * Create and initialize our storage pool. 6072 */ 6073 for (int i = 1; i <= ztest_opts.zo_init; i++) { 6074 bzero(zs, sizeof (ztest_shared_t)); 6075 if (ztest_opts.zo_verbose >= 3 && 6076 ztest_opts.zo_init != 1) { 6077 (void) printf("ztest_init(), pass %d\n", i); 6078 } 6079 ztest_init(zs); 6080 } 6081} 6082 6083int 6084main(int argc, char **argv) 6085{ 6086 int kills = 0; 6087 int iters = 0; 6088 int older = 0; 6089 int newer = 0; 6090 ztest_shared_t *zs; 6091 ztest_info_t *zi; 6092 ztest_shared_callstate_t *zc; 6093 char timebuf[100]; 6094 char numbuf[6]; 6095 spa_t *spa; 6096 char *cmd; 6097 boolean_t hasalt; 6098 char *fd_data_str = getenv("ZTEST_FD_DATA"); 6099 6100 (void) setvbuf(stdout, NULL, _IOLBF, 0); 6101 6102 dprintf_setup(&argc, argv); 6103 zfs_deadman_synctime_ms = 300000; 6104 6105 ztest_fd_rand = open("/dev/urandom", O_RDONLY); 6106 ASSERT3S(ztest_fd_rand, >=, 0); 6107 6108 if (!fd_data_str) { 6109 process_options(argc, argv); 6110 6111 setup_data_fd(); 6112 setup_hdr(); 6113 setup_data(); 6114 bcopy(&ztest_opts, ztest_shared_opts, 6115 sizeof (*ztest_shared_opts)); 6116 } else { 6117 ztest_fd_data = atoi(fd_data_str); 6118 setup_data(); 6119 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts)); 6120 } 6121 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count); 6122 6123 /* Override location of zpool.cache */ 6124 VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache", 6125 ztest_opts.zo_dir), !=, -1); 6126 6127 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t), 6128 UMEM_NOFAIL); 6129 zs = ztest_shared; 6130 6131 if (fd_data_str) { 6132 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang; 6133 metaslab_df_alloc_threshold = 6134 zs->zs_metaslab_df_alloc_threshold; 6135 6136 if (zs->zs_do_init) 6137 ztest_run_init(); 6138 else 6139 ztest_run(zs); 6140 exit(0); 6141 } 6142 6143 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0); 6144 6145 if (ztest_opts.zo_verbose >= 1) { 6146 (void) printf("%llu vdevs, %d datasets, %d threads," 6147 " %llu seconds...\n", 6148 (u_longlong_t)ztest_opts.zo_vdevs, 6149 ztest_opts.zo_datasets, 6150 ztest_opts.zo_threads, 6151 (u_longlong_t)ztest_opts.zo_time); 6152 } 6153 6154 cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); 6155 (void) strlcpy(cmd, getexecname(), MAXNAMELEN); 6156 6157 zs->zs_do_init = B_TRUE; 6158 if (strlen(ztest_opts.zo_alt_ztest) != 0) { 6159 if (ztest_opts.zo_verbose >= 1) { 6160 (void) printf("Executing older ztest for " 6161 "initialization: %s\n", ztest_opts.zo_alt_ztest); 6162 } 6163 VERIFY(!exec_child(ztest_opts.zo_alt_ztest, 6164 ztest_opts.zo_alt_libpath, B_FALSE, NULL)); 6165 } else { 6166 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL)); 6167 } 6168 zs->zs_do_init = B_FALSE; 6169 6170 zs->zs_proc_start = gethrtime(); 6171 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC; 6172 6173 for (int f = 0; f < ZTEST_FUNCS; f++) { 6174 zi = &ztest_info[f]; 6175 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6176 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop) 6177 zc->zc_next = UINT64_MAX; 6178 else 6179 zc->zc_next = zs->zs_proc_start + 6180 ztest_random(2 * zi->zi_interval[0] + 1); 6181 } 6182 6183 /* 6184 * Run the tests in a loop. These tests include fault injection 6185 * to verify that self-healing data works, and forced crashes 6186 * to verify that we never lose on-disk consistency. 6187 */ 6188 while (gethrtime() < zs->zs_proc_stop) { 6189 int status; 6190 boolean_t killed; 6191 6192 /* 6193 * Initialize the workload counters for each function. 6194 */ 6195 for (int f = 0; f < ZTEST_FUNCS; f++) { 6196 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6197 zc->zc_count = 0; 6198 zc->zc_time = 0; 6199 } 6200 6201 /* Set the allocation switch size */ 6202 zs->zs_metaslab_df_alloc_threshold = 6203 ztest_random(zs->zs_metaslab_sz / 4) + 1; 6204 6205 if (!hasalt || ztest_random(2) == 0) { 6206 if (hasalt && ztest_opts.zo_verbose >= 1) { 6207 (void) printf("Executing newer ztest: %s\n", 6208 cmd); 6209 } 6210 newer++; 6211 killed = exec_child(cmd, NULL, B_TRUE, &status); 6212 } else { 6213 if (hasalt && ztest_opts.zo_verbose >= 1) { 6214 (void) printf("Executing older ztest: %s\n", 6215 ztest_opts.zo_alt_ztest); 6216 } 6217 older++; 6218 killed = exec_child(ztest_opts.zo_alt_ztest, 6219 ztest_opts.zo_alt_libpath, B_TRUE, &status); 6220 } 6221 6222 if (killed) 6223 kills++; 6224 iters++; 6225 6226 if (ztest_opts.zo_verbose >= 1) { 6227 hrtime_t now = gethrtime(); 6228 6229 now = MIN(now, zs->zs_proc_stop); 6230 print_time(zs->zs_proc_stop - now, timebuf); 6231 nicenum(zs->zs_space, numbuf); 6232 6233 (void) printf("Pass %3d, %8s, %3llu ENOSPC, " 6234 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n", 6235 iters, 6236 WIFEXITED(status) ? "Complete" : "SIGKILL", 6237 (u_longlong_t)zs->zs_enospc_count, 6238 100.0 * zs->zs_alloc / zs->zs_space, 6239 numbuf, 6240 100.0 * (now - zs->zs_proc_start) / 6241 (ztest_opts.zo_time * NANOSEC), timebuf); 6242 } 6243 6244 if (ztest_opts.zo_verbose >= 2) { 6245 (void) printf("\nWorkload summary:\n\n"); 6246 (void) printf("%7s %9s %s\n", 6247 "Calls", "Time", "Function"); 6248 (void) printf("%7s %9s %s\n", 6249 "-----", "----", "--------"); 6250 for (int f = 0; f < ZTEST_FUNCS; f++) { 6251 Dl_info dli; 6252 6253 zi = &ztest_info[f]; 6254 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6255 print_time(zc->zc_time, timebuf); 6256 (void) dladdr((void *)zi->zi_func, &dli); 6257 (void) printf("%7llu %9s %s\n", 6258 (u_longlong_t)zc->zc_count, timebuf, 6259 dli.dli_sname); 6260 } 6261 (void) printf("\n"); 6262 } 6263 6264 /* 6265 * It's possible that we killed a child during a rename test, 6266 * in which case we'll have a 'ztest_tmp' pool lying around 6267 * instead of 'ztest'. Do a blind rename in case this happened. 6268 */ 6269 kernel_init(FREAD); 6270 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) { 6271 spa_close(spa, FTAG); 6272 } else { 6273 char tmpname[MAXNAMELEN]; 6274 kernel_fini(); 6275 kernel_init(FREAD | FWRITE); 6276 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp", 6277 ztest_opts.zo_pool); 6278 (void) spa_rename(tmpname, ztest_opts.zo_pool); 6279 } 6280 kernel_fini(); 6281 6282 ztest_run_zdb(ztest_opts.zo_pool); 6283 } 6284 6285 if (ztest_opts.zo_verbose >= 1) { 6286 if (hasalt) { 6287 (void) printf("%d runs of older ztest: %s\n", older, 6288 ztest_opts.zo_alt_ztest); 6289 (void) printf("%d runs of newer ztest: %s\n", newer, 6290 cmd); 6291 } 6292 (void) printf("%d killed, %d completed, %.0f%% kill rate\n", 6293 kills, iters - kills, (100.0 * kills) / MAX(1, iters)); 6294 } 6295 6296 umem_free(cmd, MAXNAMELEN); 6297 6298 return (0); 6299} 6300