/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.  If backwards compatibility
 *     testing is enabled, ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * Use the -k option to set the desired frequency of kills.
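 *
 * For example, a (hypothetical) overnight run with extra verbosity and a
 * lower kill rate might be invoked as:
 *
 *	ztest -VV -T 28800 -k 50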
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process.  This allows shared
 * memory to survive the exec syscall.  The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file.  The information stored in this
 * file must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <dlfcn.h>
#include <ctype.h>
#include <math.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>

#define	ZTEST_FD_DATA	3
#define	ZTEST_FD_RAND	4

typedef struct ztest_shared_hdr {
	uint64_t	zh_hdr_size;
	uint64_t	zh_opts_size;
	uint64_t	zh_size;
	uint64_t	zh_stats_size;
	uint64_t	zh_stats_count;
	uint64_t	zh_ds_size;
	uint64_t	zh_ds_count;
} ztest_shared_hdr_t;

static ztest_shared_hdr_t *ztest_shared_hdr;

typedef struct ztest_shared_opts {
	char zo_pool[MAXNAMELEN];
	char zo_dir[MAXNAMELEN];
	char zo_alt_ztest[MAXNAMELEN];
	char zo_alt_libpath[MAXNAMELEN];
	uint64_t zo_vdevs;
	uint64_t zo_vdevtime;
	size_t zo_vdev_size;
	int zo_ashift;
	int zo_mirrors;
	int zo_raidz;
	int zo_raidz_parity;
	int zo_datasets;
	int zo_threads;
	uint64_t zo_passtime;
	uint64_t zo_killrate;
	int zo_verbose;
	int zo_init;
	uint64_t zo_time;
	uint64_t zo_maxloops;
	uint64_t zo_metaslab_gang_bang;
} ztest_shared_opts_t;

static const ztest_shared_opts_t ztest_opts_defaults = {
	.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
	.zo_dir = { '/', 't', 'm', 'p', '\0' },
	.zo_alt_ztest = { '\0' },
	.zo_alt_libpath = { '\0' },
	.zo_vdevs = 5,
	.zo_ashift = SPA_MINBLOCKSHIFT,
	.zo_mirrors = 2,
	.zo_raidz = 4,
	.zo_raidz_parity = 1,
	.zo_vdev_size = SPA_MINDEVSIZE,
	.zo_datasets = 7,
	.zo_threads = 23,
	.zo_passtime = 60,		/* 60 seconds */
	.zo_killrate = 70,		/* 70% kill rate */
	.zo_verbose = 0,
	.zo_init = 1,
	.zo_time = 300,			/* 5 minutes */
	.zo_maxloops = 50,		/* max loops during spa_freeze() */
	.zo_metaslab_gang_bang = 32 << 10
};

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;

static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;

typedef struct ztest_shared_ds {
	uint64_t	zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define	ZTEST_GET_SHARED_DS(d)	(&ztest_shared_ds[d])
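
/*
 * MAXFAULTS() is the largest number of leaf-vdev faults the configuration
 * can tolerate without losing data: with m-way mirrors of raidz with
 * parity p, up to m * (p + 1) - 1 leaves may fail (e.g. two-way mirrors
 * of raidz1 survive 2 * 2 - 1 = 3 faults).
 */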

#define	BT_MAGIC	0x123456789abcdefULL
#define	MAXFAULTS() \
	(MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)

enum ztest_io_type {
	ZTEST_IO_WRITE_TAG,
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
	ZTEST_IO_TRUNCATE,
	ZTEST_IO_SETATTR,
	ZTEST_IO_TYPES
};

typedef struct ztest_block_tag {
	uint64_t	bt_magic;
	uint64_t	bt_objset;
	uint64_t	bt_object;
	uint64_t	bt_offset;
	uint64_t	bt_gen;
	uint64_t	bt_txg;
	uint64_t	bt_crtxg;
} ztest_block_tag_t;

typedef struct bufwad {
	uint64_t	bw_index;
	uint64_t	bw_txg;
	uint64_t	bw_data;
} bufwad_t;

/*
 * XXX -- fix zfs range locks to be generic so we can use them here.
 */
typedef enum {
	RL_READER,
	RL_WRITER,
	RL_APPEND
} rl_type_t;

typedef struct rll {
	void		*rll_writer;
	int		rll_readers;
	mutex_t		rll_lock;
	cond_t		rll_cv;
} rll_t;

typedef struct rl {
	uint64_t	rl_object;
	uint64_t	rl_offset;
	uint64_t	rl_size;
	rll_t		*rl_lock;
} rl_t;

#define	ZTEST_RANGE_LOCKS	64
#define	ZTEST_OBJECT_LOCKS	64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
	uint64_t	od_dir;
	uint64_t	od_object;
	dmu_object_type_t od_type;
	dmu_object_type_t od_crtype;
	uint64_t	od_blocksize;
	uint64_t	od_crblocksize;
	uint64_t	od_gen;
	uint64_t	od_crgen;
	char		od_name[MAXNAMELEN];
} ztest_od_t;

/*
 * Per-dataset state.
 */
typedef struct ztest_ds {
	ztest_shared_ds_t *zd_shared;
	objset_t	*zd_os;
	rwlock_t	zd_zilog_lock;
	zilog_t		*zd_zilog;
	ztest_od_t	*zd_od;		/* debugging aid */
	char		zd_name[MAXNAMELEN];
	mutex_t		zd_dirobj_lock;
	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;

/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
	ztest_func_t	*zi_func;	/* test function */
	uint64_t	zi_iters;	/* iterations per execution */
	uint64_t	*zi_interval;	/* execute every <interval> seconds */
} ztest_info_t;

typedef struct ztest_shared_callstate {
	uint64_t	zc_count;	/* per-pass count */
	uint64_t	zc_time;	/* per-pass time */
	uint64_t	zc_next;	/* next time to call this function */
} ztest_shared_callstate_t;

static ztest_shared_callstate_t *ztest_shared_callstate;
#define	ZTEST_GET_SHARED_CALLSTATE(c)	(&ztest_shared_callstate[c])

/*
 * Note: these aren't static because we want dladdr() to work.
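 * Verbose runs use dladdr() to resolve each test function's symbol name
 * when reporting per-test call times.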
 */
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;

uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */

ztest_info_t ztest_info[] = {
	{ ztest_dmu_read_write,			1,	&zopt_always	},
	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
	{ ztest_zap,				30,	&zopt_always	},
	{ ztest_zap_parallel,			100,	&zopt_always	},
	{ ztest_split_pool,			1,	&zopt_always	},
	{ ztest_zil_commit,			1,	&zopt_incessant	},
	{ ztest_zil_remount,			1,	&zopt_sometimes	},
	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
#if 0
	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
#endif
	{ ztest_fzap,				1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
	{ ztest_fault_inject,			1,	&zopt_sometimes	},
	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
	{ ztest_reguid,				1,	&zopt_sometimes	},
	{ ztest_spa_rename,			1,	&zopt_rarely	},
	{ ztest_scrub,				1,	&zopt_rarely	},
	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
	{ ztest_vdev_attach_detach,		1,	&zopt_rarely	},
	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
	{ ztest_vdev_add_remove,		1,	&ztest_opts.zo_vdevtime },
	{ ztest_vdev_aux_add_remove,		1,	&ztest_opts.zo_vdevtime },
};

#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))

/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
	mutex_t	zcl_callbacks_lock;
	list_t	zcl_callbacks;
} ztest_cb_list_t;

/*
 * Stuff we need to share writably between parent and child.
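 * (This is the ztest_shared_t region of the mmap-ed temporary file
 * described at the top of this file.)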
 */
typedef struct ztest_shared {
	boolean_t	zs_do_init;
	hrtime_t	zs_proc_start;
	hrtime_t	zs_proc_stop;
	hrtime_t	zs_thread_start;
	hrtime_t	zs_thread_stop;
	hrtime_t	zs_thread_kill;
	uint64_t	zs_enospc_count;
	uint64_t	zs_vdev_next_leaf;
	uint64_t	zs_vdev_aux;
	uint64_t	zs_alloc;
	uint64_t	zs_space;
	uint64_t	zs_splits;
	uint64_t	zs_mirrors;
	uint64_t	zs_metaslab_sz;
	uint64_t	zs_metaslab_df_alloc_threshold;
	uint64_t	zs_guid;
} ztest_shared_t;

#define	ID_PARALLEL	-1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;

static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;

static mutex_t ztest_vdev_lock;
static rwlock_t ztest_name_lock;

static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;

enum ztest_object {
	ZTEST_META_DNODE = 0,
	ZTEST_DIROBJ,
	ZTEST_OBJECTS
};

static void usage(boolean_t) __NORETURN;

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init()
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}

#define	FATAL_MSG_SZ	1024

char *fatal_msg;

static void
fatal(int do_perror, char *message, ...)
{
	va_list args;
	int save_errno = errno;
	char buf[FATAL_MSG_SZ];

	(void) fflush(stdout);

	va_start(args, message);
	(void) sprintf(buf, "ztest: ");
	/* LINTED */
	(void) vsprintf(buf + strlen(buf), message, args);
	va_end(args);
	if (do_perror) {
		(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
		    ": %s", strerror(save_errno));
	}
	(void) fprintf(stderr, "%s\n", buf);
	fatal_msg = buf;			/* to ease debugging */
	if (ztest_dump_core)
		abort();
	exit(3);
}

static int
str2shift(const char *buf)
{
	const char *ends = "BKMGTPEZ";
	int i;

	if (buf[0] == '\0')
		return (0);
	for (i = 0; i < strlen(ends); i++) {
		if (toupper(buf[0]) == ends[i])
			break;
	}
	if (i == strlen(ends)) {
		(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
		    buf);
		usage(B_FALSE);
	}
	if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
		return (10*i);
	}
	(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
	usage(B_FALSE);
	/* NOTREACHED */
}

static uint64_t
nicenumtoull(const char *buf)
{
	char *end;
	uint64_t val;

	val = strtoull(buf, &end, 0);
	if (end == buf) {
		(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
		usage(B_FALSE);
	} else if (end[0] == '.') {
		double fval = strtod(buf, &end);
		fval *= pow(2, str2shift(end));
		if (fval > UINT64_MAX) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val = (uint64_t)fval;
	} else {
		int shift = str2shift(end);
		if (shift >= 64 || (val << shift) >> shift != val) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val <<= shift;
	}
	return (val);
}
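
/*
 * For example, nicenumtoull("512k") yields 512 << 10 = 524288, and
 * nicenumtoull("1.5g") yields (uint64_t)(1.5 * 2^30) = 1610612736;
 * a bare "B" suffix (a shift of zero) leaves the value unchanged.
 */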

static void
usage(boolean_t requested)
{
	const ztest_shared_opts_t *zo = &ztest_opts_defaults;

	char nice_vdev_size[10];
	char nice_gang_bang[10];
	FILE *fp = requested ? stdout : stderr;

	nicenum(zo->zo_vdev_size, nice_vdev_size);
	nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);

	(void) fprintf(fp, "Usage: %s\n"
	    "\t[-v vdevs (default: %llu)]\n"
	    "\t[-s size_of_each_vdev (default: %s)]\n"
	    "\t[-a alignment_shift (default: %d)] use 0 for random\n"
	    "\t[-m mirror_copies (default: %d)]\n"
	    "\t[-r raidz_disks (default: %d)]\n"
	    "\t[-R raidz_parity (default: %d)]\n"
	    "\t[-d datasets (default: %d)]\n"
	    "\t[-t threads (default: %d)]\n"
	    "\t[-g gang_block_threshold (default: %s)]\n"
	    "\t[-i init_count (default: %d)] initialize pool i times\n"
	    "\t[-k kill_percentage (default: %llu%%)]\n"
	    "\t[-p pool_name (default: %s)]\n"
	    "\t[-f dir (default: %s)] file directory for vdev files\n"
	    "\t[-V] verbose (use multiple times for ever more blather)\n"
	    "\t[-E] use existing pool instead of creating new one\n"
	    "\t[-T time (default: %llu sec)] total run time\n"
	    "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
	    "\t[-P passtime (default: %llu sec)] time per pass\n"
	    "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
	    "\t[-h] (print help)\n"
	    "",
	    zo->zo_pool,
	    (u_longlong_t)zo->zo_vdevs,			/* -v */
	    nice_vdev_size,				/* -s */
	    zo->zo_ashift,				/* -a */
	    zo->zo_mirrors,				/* -m */
	    zo->zo_raidz,				/* -r */
	    zo->zo_raidz_parity,			/* -R */
	    zo->zo_datasets,				/* -d */
	    zo->zo_threads,				/* -t */
	    nice_gang_bang,				/* -g */
	    zo->zo_init,				/* -i */
	    (u_longlong_t)zo->zo_killrate,		/* -k */
	    zo->zo_pool,				/* -p */
	    zo->zo_dir,					/* -f */
	    (u_longlong_t)zo->zo_time,			/* -T */
	    (u_longlong_t)zo->zo_maxloops,		/* -F */
	    (u_longlong_t)zo->zo_passtime);
	exit(requested ? 0 : 1);
}

static void
process_options(int argc, char **argv)
{
	char *path;
	ztest_shared_opts_t *zo = &ztest_opts;

	int opt;
	uint64_t value;
	char altdir[MAXNAMELEN] = { 0 };

	bcopy(&ztest_opts_defaults, zo, sizeof (*zo));

	while ((opt = getopt(argc, argv,
	    "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
		value = 0;
		switch (opt) {
		case 'v':
		case 's':
		case 'a':
		case 'm':
		case 'r':
		case 'R':
		case 'd':
		case 't':
		case 'g':
		case 'i':
		case 'k':
		case 'T':
		case 'P':
		case 'F':
			value = nicenumtoull(optarg);
		}
		switch (opt) {
		case 'v':
			zo->zo_vdevs = value;
			break;
		case 's':
			zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
			break;
		case 'a':
			zo->zo_ashift = value;
			break;
		case 'm':
			zo->zo_mirrors = value;
			break;
		case 'r':
			zo->zo_raidz = MAX(1, value);
			break;
		case 'R':
			zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
			break;
		case 'd':
			zo->zo_datasets = MAX(1, value);
			break;
		case 't':
			zo->zo_threads = MAX(1, value);
			break;
		case 'g':
			zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
			    value);
			break;
		case 'i':
			zo->zo_init = value;
			break;
		case 'k':
			zo->zo_killrate = value;
			break;
		case 'p':
			(void) strlcpy(zo->zo_pool, optarg,
			    sizeof (zo->zo_pool));
			break;
		case 'f':
			path = realpath(optarg, NULL);
			if (path == NULL) {
				(void) fprintf(stderr, "error: %s: %s\n",
				    optarg, strerror(errno));
				usage(B_FALSE);
			} else {
				(void) strlcpy(zo->zo_dir, path,
				    sizeof (zo->zo_dir));
			}
			break;
		case 'V':
			zo->zo_verbose++;
			break;
		case 'E':
			zo->zo_init = 0;
			break;
		case 'T':
			zo->zo_time = value;
			break;
		case 'P':
			zo->zo_passtime = MAX(1, value);
			break;
		case 'F':
			zo->zo_maxloops = MAX(1, value);
			break;
		case 'B':
			(void) strlcpy(altdir, optarg, sizeof (altdir));
			break;
		case 'h':
			usage(B_TRUE);
			break;
		case '?':
		default:
			usage(B_FALSE);
			break;
		}
	}

	zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);

	zo->zo_vdevtime =
	    (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
	    UINT64_MAX >> 2);

	if (strlen(altdir) > 0) {
		char cmd[MAXNAMELEN];
		char realaltdir[MAXNAMELEN];
		char *bin;
		char *ztest;
		char *isa;
		int isalen;

		(void) realpath(getexecname(), cmd);
		if (0 != access(altdir, F_OK)) {
			ztest_dump_core = B_FALSE;
			fatal(B_TRUE, "invalid alternate ztest path: %s",
			    altdir);
		}
		VERIFY(NULL != realpath(altdir, realaltdir));

		/*
		 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
		 * We want to extract <isa> to determine if we should use
		 * 32 or 64 bit binaries.
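		 * For example, if the running binary is
		 * "/root/usr/bin/amd64/ztest", <isa> is "amd64" and the
		 * alternate binary is "<altdir>/usr/bin/amd64/ztest".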
		 */
		bin = strstr(cmd, "/usr/bin/");
		ztest = strstr(bin, "/ztest");
		isa = bin + 9;
		isalen = ztest - isa;
		(void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
		    "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
		(void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
		    "%s/usr/lib/%.*s", realaltdir, isalen, isa);

		if (0 != access(zo->zo_alt_ztest, X_OK)) {
			ztest_dump_core = B_FALSE;
			fatal(B_TRUE, "invalid alternate ztest: %s",
			    zo->zo_alt_ztest);
		} else if (0 != access(zo->zo_alt_libpath, X_OK)) {
			ztest_dump_core = B_FALSE;
			fatal(B_TRUE, "invalid alternate lib directory %s",
			    zo->zo_alt_libpath);
		}
	}
}

static void
ztest_kill(ztest_shared_t *zs)
{
	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
	zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
	(void) kill(getpid(), SIGKILL);
}

static uint64_t
ztest_random(uint64_t range)
{
	uint64_t r;

	if (range == 0)
		return (0);

	if (read(ZTEST_FD_RAND, &r, sizeof (r)) != sizeof (r))
		fatal(1, "short read from /dev/urandom");

	return (r % range);
}

/* ARGSUSED */
static void
ztest_record_enospc(const char *s)
{
	ztest_shared->zs_enospc_count++;
}

static uint64_t
ztest_get_ashift(void)
{
	if (ztest_opts.zo_ashift == 0)
		return (SPA_MINBLOCKSHIFT + ztest_random(3));
	return (ztest_opts.zo_ashift);
}

static nvlist_t *
make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift)
{
	char pathbuf[MAXPATHLEN];
	uint64_t vdev;
	nvlist_t *file;

	if (ashift == 0)
		ashift = ztest_get_ashift();

	if (path == NULL) {
		path = pathbuf;

		if (aux != NULL) {
			vdev = ztest_shared->zs_vdev_aux;
			(void) snprintf(path, sizeof (pathbuf),
			    ztest_aux_template, ztest_opts.zo_dir,
			    ztest_opts.zo_pool, aux, vdev);
		} else {
			vdev = ztest_shared->zs_vdev_next_leaf++;
			(void) snprintf(path, sizeof (pathbuf),
			    ztest_dev_template, ztest_opts.zo_dir,
			    ztest_opts.zo_pool, vdev);
		}
	}

	if (size != 0) {
		int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
		if (fd == -1)
			fatal(1, "can't open %s", path);
		if (ftruncate(fd, size) != 0)
			fatal(1, "can't ftruncate %s", path);
		(void) close(fd);
	}

	VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
	VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);

	return (file);
}

static nvlist_t *
make_vdev_raidz(char *path, char *aux, size_t size, uint64_t ashift, int r)
{
	nvlist_t *raidz, **child;
	int c;

	if (r < 2)
		return (make_vdev_file(path, aux, size, ashift));
	child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < r; c++)
		child[c] = make_vdev_file(path, aux, size, ashift);

	VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_RAIDZ) == 0);
	VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
	    ztest_opts.zo_raidz_parity) == 0);
	VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
	    child, r) == 0);

	for (c = 0; c < r; c++)
		nvlist_free(child[c]);

	umem_free(child, r * sizeof (nvlist_t *));

	return (raidz);
}
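
/*
 * make_vdev_mirror() and make_vdev_root() continue the bottom-up
 * composition started by make_vdev_file() and make_vdev_raidz():
 * root -> t top-level vdevs -> m-way mirrors -> r-disk raidz -> files.
 * A degenerate level (m < 1, r < 2) collapses into its child.
 */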

static nvlist_t *
make_vdev_mirror(char *path, char *aux, size_t size, uint64_t ashift,
    int r, int m)
{
	nvlist_t *mirror, **child;
	int c;

	if (m < 1)
		return (make_vdev_raidz(path, aux, size, ashift, r));

	child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < m; c++)
		child[c] = make_vdev_raidz(path, aux, size, ashift, r);

	VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_MIRROR) == 0);
	VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
	    child, m) == 0);

	for (c = 0; c < m; c++)
		nvlist_free(child[c]);

	umem_free(child, m * sizeof (nvlist_t *));

	return (mirror);
}

static nvlist_t *
make_vdev_root(char *path, char *aux, size_t size, uint64_t ashift,
    int log, int r, int m, int t)
{
	nvlist_t *root, **child;
	int c;

	ASSERT(t > 0);

	child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < t; c++) {
		child[c] = make_vdev_mirror(path, aux, size, ashift, r, m);
		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    log) == 0);
	}

	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
	    child, t) == 0);

	for (c = 0; c < t; c++)
		nvlist_free(child[c]);

	umem_free(child, t * sizeof (nvlist_t *));

	return (root);
}

static int
ztest_random_blocksize(void)
{
	return (1 << (SPA_MINBLOCKSHIFT +
	    ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
}

static int
ztest_random_ibshift(void)
{
	return (DN_MIN_INDBLKSHIFT +
	    ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}

static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
	uint64_t top;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	do {
		top = ztest_random(rvd->vdev_children);
		tvd = rvd->vdev_child[top];
	} while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
	    tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);

	return (top);
}

static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
	uint64_t value;

	do {
		value = zfs_prop_random_value(prop, ztest_random(-1ULL));
	} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);

	return (value);
}

static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
    boolean_t inherit)
{
	const char *propname = zfs_prop_to_name(prop);
	const char *valname;
	char setpoint[MAXPATHLEN];
	uint64_t curval;
	int error;

	error = dsl_prop_set(osname, propname,
	    (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
	    sizeof (value), 1, &value);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT3U(error, ==, 0);

	VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
	    1, &curval, setpoint), ==, 0);

	if (ztest_opts.zo_verbose >= 6) {
		VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
		(void) printf("%s %s = %s at '%s'\n",
		    osname, propname, valname, setpoint);
	}

	return (error);
}

static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
	spa_t *spa = ztest_spa;
	nvlist_t *props = NULL;
	int error;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);

	error = spa_prop_set(spa, props);

	nvlist_free(props);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT3U(error, ==, 0);

	return (error);
}

static void
ztest_rll_init(rll_t *rll)
{
	rll->rll_writer = NULL;
	rll->rll_readers = 0;
	VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
}

static void
ztest_rll_destroy(rll_t *rll)
{
	ASSERT(rll->rll_writer == NULL);
	ASSERT(rll->rll_readers == 0);
	VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
	VERIFY(cond_destroy(&rll->rll_cv) == 0);
}

static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
	VERIFY(mutex_lock(&rll->rll_lock) == 0);

	if (type == RL_READER) {
		while (rll->rll_writer != NULL)
			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_readers++;
	} else {
		while (rll->rll_writer != NULL || rll->rll_readers)
			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_writer = curthread;
	}

	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

static void
ztest_rll_unlock(rll_t *rll)
{
	VERIFY(mutex_lock(&rll->rll_lock) == 0);

	if (rll->rll_writer) {
		ASSERT(rll->rll_readers == 0);
		rll->rll_writer = NULL;
	} else {
		ASSERT(rll->rll_readers != 0);
		ASSERT(rll->rll_writer == NULL);
		rll->rll_readers--;
	}

	if (rll->rll_writer == NULL && rll->rll_readers == 0)
		VERIFY(cond_broadcast(&rll->rll_cv) == 0);

	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_lock(rll, type);
}

static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_unlock(rll);
}

static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
    uint64_t size, rl_type_t type)
{
	uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
	rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
	rl_t *rl;

	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
	rl->rl_object = object;
	rl->rl_offset = offset;
	rl->rl_size = size;
	rl->rl_lock = rll;

	ztest_rll_lock(rll, type);

	return (rl);
}
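
/*
 * These "range" locks are coarser than real range locks: each
 * (object, offset) pair hashes onto one of ZTEST_RANGE_LOCKS
 * reader/writer locks, so unrelated ranges may contend with each other,
 * but a genuine conflict is never missed.
 */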

static void
ztest_range_unlock(rl_t *rl)
{
	rll_t *rll = rl->rl_lock;

	ztest_rll_unlock(rll);

	umem_free(rl, sizeof (*rl));
}

static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
	zd->zd_os = os;
	zd->zd_zilog = dmu_objset_zil(os);
	zd->zd_shared = szd;
	dmu_objset_name(os, zd->zd_name);

	if (zd->zd_shared != NULL)
		zd->zd_shared->zd_seq = 0;

	VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);

	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_init(&zd->zd_object_lock[l]);

	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_init(&zd->zd_range_lock[l]);
}

static void
ztest_zd_fini(ztest_ds_t *zd)
{
	VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);

	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_object_lock[l]);

	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_range_lock[l]);
}

#define	TXG_MIGHTWAIT	(ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)

static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
	uint64_t txg;
	int error;

	/*
	 * Attempt to assign tx to some transaction group.
	 */
	error = dmu_tx_assign(tx, txg_how);
	if (error) {
		if (error == ERESTART) {
			ASSERT(txg_how == TXG_NOWAIT);
			dmu_tx_wait(tx);
		} else {
			ASSERT3U(error, ==, ENOSPC);
			ztest_record_enospc(tag);
		}
		dmu_tx_abort(tx);
		return (0);
	}
	txg = dmu_tx_get_txg(tx);
	ASSERT(txg != 0);
	return (txg);
}

static void
ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
{
	uint64_t *ip = buf;
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);

	while (ip < ip_end)
		*ip++ = value;
}

static boolean_t
ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
{
	uint64_t *ip = buf;
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
	uint64_t diff = 0;

	while (ip < ip_end)
		diff |= (value - *ip++);

	return (diff == 0);
}

static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
	bt->bt_magic = BT_MAGIC;
	bt->bt_objset = dmu_objset_id(os);
	bt->bt_object = object;
	bt->bt_offset = offset;
	bt->bt_gen = gen;
	bt->bt_txg = txg;
	bt->bt_crtxg = crtxg;
}

static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
	ASSERT(bt->bt_magic == BT_MAGIC);
	ASSERT(bt->bt_objset == dmu_objset_id(os));
	ASSERT(bt->bt_object == object);
	ASSERT(bt->bt_offset == offset);
	ASSERT(bt->bt_gen <= gen);
	ASSERT(bt->bt_txg <= txg);
	ASSERT(bt->bt_crtxg == crtxg);
}

static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
	dmu_object_info_t doi;
	ztest_block_tag_t *bt;

	dmu_object_info_from_db(db, &doi);
	ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
	ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
	bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));

	return (bt);
}

/*
 * ZIL logging ops
 */
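
/*
 * ztest has no on-disk znodes, so it overloads a few lr_create_t fields
 * to carry the DMU object parameters it needs for create and replay.
 */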

#define	lrz_type	lr_mode
#define	lrz_blocksize	lr_uid
#define	lrz_ibshift	lr_gid
#define	lrz_bonustype	lr_rdev
#define	lrz_bonuslen	lr_crtime[1]

static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	itx->itx_oid = object;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}
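
/*
 * Log a write, choosing at random among the three ZIL write states:
 * WR_INDIRECT (forced for records larger than ZIL_MAX_LOG_DATA),
 * WR_COPIED (data embedded in the itx, falling back to WR_NEED_COPY if
 * the open-context dmu_read() fails), and WR_NEED_COPY.
 */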

static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
	itx_t *itx;
	itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	if (lr->lr_length > ZIL_MAX_LOG_DATA)
		write_state = WR_INDIRECT;

	itx = zil_itx_create(TX_WRITE,
	    sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));

	if (write_state == WR_COPIED &&
	    dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
		zil_itx_destroy(itx);
		itx = zil_itx_create(TX_WRITE, sizeof (*lr));
		write_state = WR_NEED_COPY;
	}
	itx->itx_private = zd;
	itx->itx_wr_state = write_state;
	itx->itx_sync = (ztest_random(8) == 0);
	itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);

	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

/*
 * ZIL replay ops
 */
static int
ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	ztest_block_tag_t *bbt;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	uint64_t txg;
	int error = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
	}

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0)
		return (ENOSPC);

	ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		if (lr->lr_foid == 0) {
			lr->lr_foid = zap_create(os,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		} else {
			error = zap_create_claim(os, lr->lr_foid,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		}
	} else {
		if (lr->lr_foid == 0) {
			lr->lr_foid = dmu_object_alloc(os,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		} else {
			error = dmu_object_claim(os, lr->lr_foid,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		}
	}

	if (error) {
		ASSERT3U(error, ==, EEXIST);
		ASSERT(zd->zd_zilog->zl_replay);
		dmu_tx_commit(tx);
		return (error);
	}

	ASSERT(lr->lr_foid != 0);

	if (lr->lrz_type != DMU_OT_ZAP_OTHER)
		VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
		    lr->lrz_blocksize, lr->lrz_ibshift, tx));

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
	bbt = ztest_bt_bonus(db);
	dmu_buf_will_dirty(db, tx);
	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
	dmu_buf_rele(db, FTAG);

	VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
	    &lr->lr_foid, tx));

	(void) ztest_log_create(zd, tx, lr);

	dmu_tx_commit(tx);

	return (0);
}

static int
ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object, txg;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	VERIFY3U(0, ==,
	    zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
	ASSERT(object != 0);

	ztest_object_lock(zd, object, RL_WRITER);

	VERIFY3U(0, ==, dmu_object_info(os, object, &doi));

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_object_unlock(zd, object);
		return (ENOSPC);
	}

	if (doi.doi_type == DMU_OT_ZAP_OTHER) {
		VERIFY3U(0, ==, zap_destroy(os, object, tx));
	} else {
		VERIFY3U(0, ==, dmu_object_free(os, object, tx));
	}

	VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));

	(void) ztest_log_remove(zd, tx, lr, object);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, object);

	return (0);
}

static int
ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	void *data = lr + 1;			/* data follows lr */
	uint64_t offset, length;
	ztest_block_tag_t *bt = data;
	ztest_block_tag_t *bbt;
	uint64_t gen, txg, lrtxg, crtxg;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	arc_buf_t *abuf = NULL;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	if (bt->bt_magic == BSWAP_64(BT_MAGIC))
		byteswap_uint64_array(bt, sizeof (*bt));

	if (bt->bt_magic != BT_MAGIC)
		bt = NULL;

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	dmu_object_info_from_db(db, &doi);

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	gen = bbt->bt_gen;
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, lr->lr_foid, offset, length);

	if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
	    P2PHASE(offset, length) == 0)
		abuf = dmu_request_arcbuf(db, length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		if (abuf != NULL)
			dmu_return_arcbuf(abuf);
		dmu_buf_rele(db, FTAG);
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	if (bt != NULL) {
		/*
		 * Usually, verify the old data before writing new data --
		 * but not always, because we also want to verify correct
		 * behavior when the data was not recently read into cache.
		 */
		ASSERT(offset % doi.doi_data_block_size == 0);
		if (ztest_random(4) != 0) {
			int prefetch = ztest_random(2) ?
			    DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
			ztest_block_tag_t rbt;

			VERIFY(dmu_read(os, lr->lr_foid, offset,
			    sizeof (rbt), &rbt, prefetch) == 0);
			if (rbt.bt_magic == BT_MAGIC) {
				ztest_bt_verify(&rbt, os, lr->lr_foid,
				    offset, gen, txg, crtxg);
			}
		}

		/*
		 * Writes can appear to be newer than the bonus buffer because
		 * the ztest_get_data() callback does a dmu_read() of the
		 * open-context data, which may be different than the data
		 * as it was when the write was generated.
		 */
		if (zd->zd_zilog->zl_replay) {
			ztest_bt_verify(bt, os, lr->lr_foid, offset,
			    MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
			    bt->bt_crtxg);
		}

		/*
		 * Set the bt's gen/txg to the bonus buffer's gen/txg
		 * so that all of the usual ASSERTs will work.
		 */
		ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
	}

	if (abuf == NULL) {
		dmu_write(os, lr->lr_foid, offset, length, data, tx);
	} else {
		bcopy(data, abuf->b_data, length);
		dmu_assign_arcbuf(db, offset, abuf, tx);
	}

	(void) ztest_log_write(zd, tx, lr);

	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static int
ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
	    lr->lr_length, tx) == 0);

	(void) ztest_log_truncate(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static int
ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	ztest_block_tag_t *bbt;
	uint64_t txg, lrtxg, crtxg;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, lr->lr_foid);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	if (zd->zd_zilog->zl_replay) {
		ASSERT(lr->lr_size != 0);
		ASSERT(lr->lr_mode != 0);
		ASSERT(lrtxg != 0);
	} else {
		/*
		 * Randomly change the size and increment the generation.
		 */
		lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
		    sizeof (*bbt);
		lr->lr_mode = bbt->bt_gen + 1;
		ASSERT(lrtxg == 0);
	}

	/*
	 * Verify that the current bonus buffer is not newer than our txg.
	 */
	ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
	    MAX(txg, lrtxg), crtxg);

	dmu_buf_will_dirty(db, tx);

	ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
	ASSERT3U(lr->lr_size, <=, db->db_size);
	VERIFY3U(dmu_set_bonus(db, lr->lr_size, tx), ==, 0);
	bbt = ztest_bt_bonus(db);

	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);

	dmu_buf_rele(db, FTAG);

	(void) ztest_log_setattr(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
	NULL,			/* 0 no such transaction type */
	ztest_replay_create,	/* TX_CREATE */
	NULL,			/* TX_MKDIR */
	NULL,			/* TX_MKXATTR */
	NULL,			/* TX_SYMLINK */
	ztest_replay_remove,	/* TX_REMOVE */
	NULL,			/* TX_RMDIR */
	NULL,			/* TX_LINK */
	NULL,			/* TX_RENAME */
	ztest_replay_write,	/* TX_WRITE */
	ztest_replay_truncate,	/* TX_TRUNCATE */
	ztest_replay_setattr,	/* TX_SETATTR */
	NULL,			/* TX_ACL */
	NULL,			/* TX_CREATE_ACL */
	NULL,			/* TX_CREATE_ATTR */
	NULL,			/* TX_CREATE_ACL_ATTR */
	NULL,			/* TX_MKDIR_ACL */
	NULL,			/* TX_MKDIR_ATTR */
	NULL,			/* TX_MKDIR_ACL_ATTR */
	NULL,			/* TX_WRITE2 */
};

/*
 * ZIL get_data callbacks
 */

static void
ztest_get_done(zgd_t *zgd, int error)
{
	ztest_ds_t *zd = zgd->zgd_private;
	uint64_t object = zgd->zgd_rl->rl_object;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	ztest_range_unlock(zgd->zgd_rl);
	ztest_object_unlock(zd, object);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	umem_free(zgd, sizeof (*zgd));
}

static int
ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	ztest_ds_t *zd = arg;
	objset_t *os = zd->zd_os;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	uint64_t txg = lr->lr_common.lrc_txg;
	uint64_t crtxg;
	dmu_object_info_t doi;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ztest_object_lock(zd, object, RL_READER);
	error = dmu_bonus_hold(os, object, FTAG, &db);
	if (error) {
		ztest_object_unlock(zd, object);
		return (error);
	}

	crtxg = ztest_bt_bonus(db)->bt_crtxg;

	if (crtxg == 0 || crtxg > txg) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, object);
		return (ENOENT);
	}

	dmu_object_info_from_db(db, &doi);
	dmu_buf_rele(db, FTAG);
	db = NULL;

	zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
	zgd->zgd_zilog = zd->zd_zilog;
	zgd->zgd_private = zd;

	if (buf != NULL) {	/* immediate write */
		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
		    RL_READER);

		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
		ASSERT(error == 0);
	} else {
		size = doi.doi_data_block_size;
		if (ISP2(size)) {
			offset = P2ALIGN(offset, size);
		} else {
			ASSERT(offset < size);
			offset = 0;
		}

		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
		    RL_READER);

		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    ztest_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	ztest_get_done(zgd, error);

	return (error);
}

static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
	char *lr;
	size_t namesize = name ? strlen(name) + 1 : 0;

	lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);

	if (name)
		bcopy(name, lr + lrsize, namesize);

	return (lr);
}

void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
	size_t namesize = name ? strlen(name) + 1 : 0;

	umem_free(lr, lrsize + namesize);
}

/*
 * Lookup a bunch of objects.  Returns the number of objects not found.
 */
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	for (int i = 0; i < count; i++, od++) {
		od->od_object = 0;
		error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
		    sizeof (uint64_t), 1, &od->od_object);
		if (error) {
			ASSERT(error == ENOENT);
			ASSERT(od->od_object == 0);
			missing++;
		} else {
			dmu_buf_t *db;
			ztest_block_tag_t *bbt;
			dmu_object_info_t doi;

			ASSERT(od->od_object != 0);
			ASSERT(missing == 0);	/* there should be no gaps */

			ztest_object_lock(zd, od->od_object, RL_READER);
			VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
			    od->od_object, FTAG, &db));
			dmu_object_info_from_db(db, &doi);
			bbt = ztest_bt_bonus(db);
			ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
			od->od_type = doi.doi_type;
			od->od_blocksize = doi.doi_data_block_size;
			od->od_gen = bbt->bt_gen;
			dmu_buf_rele(db, FTAG);
			ztest_object_unlock(zd, od->od_object);
		}
	}

	return (missing);
}

static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	for (int i = 0; i < count; i++, od++) {
		if (missing) {
			od->od_object = 0;
			missing++;
			continue;
		}

		lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;
		lr->lr_foid = 0;	/* 0 to allocate, > 0 to claim */
		lr->lrz_type = od->od_crtype;
		lr->lrz_blocksize = od->od_crblocksize;
		lr->lrz_ibshift = ztest_random_ibshift();
		lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
		lr->lrz_bonuslen = dmu_bonus_max();
		lr->lr_gen = od->od_crgen;
		lr->lr_crtime[0] = time(NULL);

		if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
			ASSERT(missing == 0);
			od->od_object = 0;
			missing++;
		} else {
			od->od_object = lr->lr_foid;
			od->od_type = od->od_crtype;
			od->od_blocksize = od->od_crblocksize;
			od->od_gen = od->od_crgen;
			ASSERT(od->od_object != 0);
		}

		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}

static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));
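
	/*
	 * Walk the descriptors in reverse of creation order, so that a
	 * failed removal leaves a gap-free prefix of surviving objects
	 * (ztest_lookup() asserts that there are no gaps).
	 */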

	od += count - 1;

	for (int i = count - 1; i >= 0; i--, od--) {
		if (missing) {
			missing++;
			continue;
		}

		if (od->od_object == 0)
			continue;

		lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;

		if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
			ASSERT3U(error, ==, ENOSPC);
			missing++;
		} else {
			od->od_object = 0;
		}
		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}

static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
    void *data)
{
	lr_write_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;
	lr->lr_blkoff = 0;
	BP_ZERO(&lr->lr_blkptr);

	bcopy(data, lr + 1, size);

	error = ztest_replay_write(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr) + size, NULL);

	return (error);
}

static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	lr_truncate_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;

	error = ztest_replay_truncate(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
	lr_setattr_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_size = 0;
	lr->lr_mode = 0;

	error = ztest_replay_setattr(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	txg_wait_synced(dmu_objset_pool(os), 0);

	ztest_object_lock(zd, object, RL_READER);
	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, object, offset, size);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);

	if (txg != 0) {
		dmu_prealloc(os, object, offset, size, tx);
		dmu_tx_commit(tx);
		txg_wait_synced(dmu_objset_pool(os), txg);
	} else {
		(void) dmu_free_long_range(os, object, offset, size);
	}

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, object);
}

static void
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
{
	ztest_block_tag_t wbt;
	dmu_object_info_t doi;
	enum ztest_io_type io_type;
	uint64_t blocksize;
	void *data;

	VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
	blocksize = doi.doi_data_block_size;
	data = umem_alloc(blocksize, UMEM_NOFAIL);

	/*
	 * Pick an i/o type at random, biased toward writing block tags.
2099 */ 2100 io_type = ztest_random(ZTEST_IO_TYPES); 2101 if (ztest_random(2) == 0) 2102 io_type = ZTEST_IO_WRITE_TAG; 2103 2104 (void) rw_rdlock(&zd->zd_zilog_lock); 2105 2106 switch (io_type) { 2107 2108 case ZTEST_IO_WRITE_TAG: 2109 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); 2110 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); 2111 break; 2112 2113 case ZTEST_IO_WRITE_PATTERN: 2114 (void) memset(data, 'a' + (object + offset) % 5, blocksize); 2115 if (ztest_random(2) == 0) { 2116 /* 2117 * Induce fletcher2 collisions to ensure that 2118 * zio_ddt_collision() detects and resolves them 2119 * when using fletcher2-verify for deduplication. 2120 */ 2121 ((uint64_t *)data)[0] ^= 1ULL << 63; 2122 ((uint64_t *)data)[4] ^= 1ULL << 63; 2123 } 2124 (void) ztest_write(zd, object, offset, blocksize, data); 2125 break; 2126 2127 case ZTEST_IO_WRITE_ZEROES: 2128 bzero(data, blocksize); 2129 (void) ztest_write(zd, object, offset, blocksize, data); 2130 break; 2131 2132 case ZTEST_IO_TRUNCATE: 2133 (void) ztest_truncate(zd, object, offset, blocksize); 2134 break; 2135 2136 case ZTEST_IO_SETATTR: 2137 (void) ztest_setattr(zd, object); 2138 break; 2139 } 2140 2141 (void) rw_unlock(&zd->zd_zilog_lock); 2142 2143 umem_free(data, blocksize); 2144 } 2145 2146 /* 2147 * Initialize an object description template. 2148 */ 2149 static void 2150 ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, 2151 dmu_object_type_t type, uint64_t blocksize, uint64_t gen) 2152 { 2153 od->od_dir = ZTEST_DIROBJ; 2154 od->od_object = 0; 2155 2156 od->od_crtype = type; 2157 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); 2158 od->od_crgen = gen; 2159 2160 od->od_type = DMU_OT_NONE; 2161 od->od_blocksize = 0; 2162 od->od_gen = 0; 2163 2164 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", 2165 tag, (int64_t)id, index); 2166 } 2167 2168 /* 2169 * Lookup or create the objects for a test using the od template. 2170 * If the objects do not all exist, or if 'remove' is specified, 2171 * remove any existing objects and create new ones. Otherwise, 2172 * use the existing objects. 2173 */ 2174 static int 2175 ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) 2176 { 2177 int count = size / sizeof (*od); 2178 int rv = 0; 2179 2180 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); 2181 if ((ztest_lookup(zd, od, count) != 0 || remove) && 2182 (ztest_remove(zd, od, count) != 0 || 2183 ztest_create(zd, od, count) != 0)) 2184 rv = -1; 2185 zd->zd_od = od; 2186 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2187 2188 return (rv); 2189 } 2190 2191 /* ARGSUSED */ 2192 void 2193 ztest_zil_commit(ztest_ds_t *zd, uint64_t id) 2194 { 2195 zilog_t *zilog = zd->zd_zilog; 2196 2197 (void) rw_rdlock(&zd->zd_zilog_lock); 2198 2199 zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); 2200 2201 /* 2202 * Remember the committed values in zd, which is in parent/child 2203 * shared memory. If we die, the next iteration of ztest_run() 2204 * will verify that the log really does contain this record. 2205 */ 2206 mutex_enter(&zilog->zl_lock); 2207 ASSERT(zd->zd_shared != NULL); 2208 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq); 2209 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq; 2210 mutex_exit(&zilog->zl_lock); 2211 2212 (void) rw_unlock(&zd->zd_zilog_lock); 2213 } 2214 2215 /* 2216 * This function is designed to simulate the operations that occur during a 2217 * mount/unmount operation. 
We hold the dataset across these operations in an 2218 * attempt to expose any implicit assumptions about ZIL management. 2219 */ 2220 /* ARGSUSED */ 2221 void 2222 ztest_zil_remount(ztest_ds_t *zd, uint64_t id) 2223 { 2224 objset_t *os = zd->zd_os; 2225 2226 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); 2227 (void) rw_wrlock(&zd->zd_zilog_lock); 2228 2229 /* zfsvfs_teardown() */ 2230 zil_close(zd->zd_zilog); 2231 2232 /* zfsvfs_setup() */ 2233 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); 2234 zil_replay(os, zd, ztest_replay_vector); 2235 2236 (void) rw_unlock(&zd->zd_zilog_lock); 2237 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2238 } 2239 2240 /* 2241 * Verify that we can't destroy an active pool, create an existing pool, 2242 * or create a pool with a bad vdev spec. 2243 */ 2244 /* ARGSUSED */ 2245 void 2246 ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) 2247 { 2248 ztest_shared_opts_t *zo = &ztest_opts; 2249 spa_t *spa; 2250 nvlist_t *nvroot; 2251 2252 /* 2253 * Attempt to create using a bad file. 2254 */ 2255 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); 2256 VERIFY3U(ENOENT, ==, 2257 spa_create("ztest_bad_file", nvroot, NULL, NULL)); 2258 nvlist_free(nvroot); 2259 2260 /* 2261 * Attempt to create using a bad mirror. 2262 */ 2263 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1); 2264 VERIFY3U(ENOENT, ==, 2265 spa_create("ztest_bad_mirror", nvroot, NULL, NULL)); 2266 nvlist_free(nvroot); 2267 2268 /* 2269 * Attempt to create an existing pool. It shouldn't matter 2270 * what's in the nvroot; we should fail with EEXIST. 2271 */ 2272 (void) rw_rdlock(&ztest_name_lock); 2273 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); 2274 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL)); 2275 nvlist_free(nvroot); 2276 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG)); 2277 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool)); 2278 spa_close(spa, FTAG); 2279 2280 (void) rw_unlock(&ztest_name_lock); 2281 } 2282 2283 static vdev_t * 2284 vdev_lookup_by_path(vdev_t *vd, const char *path) 2285 { 2286 vdev_t *mvd; 2287 2288 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) 2289 return (vd); 2290 2291 for (int c = 0; c < vd->vdev_children; c++) 2292 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != 2293 NULL) 2294 return (mvd); 2295 2296 return (NULL); 2297 } 2298 2299 /* 2300 * Find the first available hole which can be used as a top-level. 2301 */ 2302 int 2303 find_vdev_hole(spa_t *spa) 2304 { 2305 vdev_t *rvd = spa->spa_root_vdev; 2306 int c; 2307 2308 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); 2309 2310 for (c = 0; c < rvd->vdev_children; c++) { 2311 vdev_t *cvd = rvd->vdev_child[c]; 2312 2313 if (cvd->vdev_ishole) 2314 break; 2315 } 2316 return (c); 2317 } 2318 2319 /* 2320 * Verify that vdev_add() works as expected. 2321 */ 2322 /* ARGSUSED */ 2323 void 2324 ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) 2325 { 2326 ztest_shared_t *zs = ztest_shared; 2327 spa_t *spa = ztest_spa; 2328 uint64_t leaves; 2329 uint64_t guid; 2330 nvlist_t *nvroot; 2331 int error; 2332 2333 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2334 leaves = 2335 MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz; 2336 2337 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2338 2339 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; 2340 2341 /* 2342 * If we have slogs then remove them 1/4 of the time. 
*/ 2344 if (spa_has_slogs(spa) && ztest_random(4) == 0) { 2345 /* 2346 * Grab the guid from the head of the log class rotor. 2347 */ 2348 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; 2349 2350 spa_config_exit(spa, SCL_VDEV, FTAG); 2351 2352 /* 2353 * We have to grab the ztest_name_lock as writer to 2354 * prevent a race between removing a slog (dmu_objset_find) 2355 * and destroying a dataset. Removing the slog will 2356 * grab a reference on the dataset, which may cause 2357 * dmu_objset_destroy() to fail with EBUSY, thus 2358 * leaving the dataset in an inconsistent state. 2359 */ 2360 VERIFY(rw_wrlock(&ztest_name_lock) == 0); 2361 error = spa_vdev_remove(spa, guid, B_FALSE); 2362 VERIFY(rw_unlock(&ztest_name_lock) == 0); 2363 2364 if (error && error != EEXIST) 2365 fatal(0, "spa_vdev_remove() = %d", error); 2366 } else { 2367 spa_config_exit(spa, SCL_VDEV, FTAG); 2368 2369 /* 2370 * Make 1/4 of the devices be log devices. 2371 */ 2372 nvroot = make_vdev_root(NULL, NULL, 2373 ztest_opts.zo_vdev_size, 0, 2374 ztest_random(4) == 0, ztest_opts.zo_raidz, 2375 zs->zs_mirrors, 1); 2376 2377 error = spa_vdev_add(spa, nvroot); 2378 nvlist_free(nvroot); 2379 2380 if (error == ENOSPC) 2381 ztest_record_enospc("spa_vdev_add"); 2382 else if (error != 0) 2383 fatal(0, "spa_vdev_add() = %d", error); 2384 } 2385 2386 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2387 } 2388 2389 /* 2390 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 2391 */ 2392 /* ARGSUSED */ 2393 void 2394 ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2395 { 2396 ztest_shared_t *zs = ztest_shared; 2397 spa_t *spa = ztest_spa; 2398 vdev_t *rvd = spa->spa_root_vdev; 2399 spa_aux_vdev_t *sav; 2400 char *aux; 2401 uint64_t guid = 0; 2402 int error; 2403 2404 if (ztest_random(2) == 0) { 2405 sav = &spa->spa_spares; 2406 aux = ZPOOL_CONFIG_SPARES; 2407 } else { 2408 sav = &spa->spa_l2cache; 2409 aux = ZPOOL_CONFIG_L2CACHE; 2410 } 2411 2412 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2413 2414 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2415 2416 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2417 /* 2418 * Pick a random device to remove. 2419 */ 2420 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2421 } else { 2422 /* 2423 * Find an unused device we can add. 2424 */ 2425 zs->zs_vdev_aux = 0; 2426 for (;;) { 2427 char path[MAXPATHLEN]; 2428 int c; 2429 (void) snprintf(path, sizeof (path), ztest_aux_template, 2430 ztest_opts.zo_dir, ztest_opts.zo_pool, aux, 2431 zs->zs_vdev_aux); 2432 for (c = 0; c < sav->sav_count; c++) 2433 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2434 path) == 0) 2435 break; 2436 if (c == sav->sav_count && 2437 vdev_lookup_by_path(rvd, path) == NULL) 2438 break; 2439 zs->zs_vdev_aux++; 2440 } 2441 } 2442 2443 spa_config_exit(spa, SCL_VDEV, FTAG); 2444 2445 if (guid == 0) { 2446 /* 2447 * Add a new device. 2448 */ 2449 nvlist_t *nvroot = make_vdev_root(NULL, aux, 2450 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2451 error = spa_vdev_add(spa, nvroot); 2452 if (error != 0) 2453 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2454 nvlist_free(nvroot); 2455 } else { 2456 /* 2457 * Remove an existing device. Sometimes, dirty its 2458 * vdev state first to make sure we handle removal 2459 * of devices that have pending state changes.
*/ 2461 if (ztest_random(2) == 0) 2462 (void) vdev_online(spa, guid, 0, NULL); 2463 2464 error = spa_vdev_remove(spa, guid, B_FALSE); 2465 if (error != 0 && error != EBUSY) 2466 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2467 } 2468 2469 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2470 } 2471 2472 /* 2473 * Split a pool if it has mirror top-level vdevs (tlvdevs). 2474 */ 2475 /* ARGSUSED */ 2476 void 2477 ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2478 { 2479 ztest_shared_t *zs = ztest_shared; 2480 spa_t *spa = ztest_spa; 2481 vdev_t *rvd = spa->spa_root_vdev; 2482 nvlist_t *tree, **child, *config, *split, **schild; 2483 uint_t c, children, schildren = 0, lastlogid = 0; 2484 int error = 0; 2485 2486 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2487 2488 /* ensure we have a usable config; mirrors of raidz aren't supported */ 2489 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) { 2490 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2491 return; 2492 } 2493 2494 /* clean up the old pool, if any */ 2495 (void) spa_destroy("splitp"); 2496 2497 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2498 2499 /* generate a config from the existing config */ 2500 mutex_enter(&spa->spa_props_lock); 2501 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2502 &tree) == 0); 2503 mutex_exit(&spa->spa_props_lock); 2504 2505 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2506 &children) == 0); 2507 2508 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2509 for (c = 0; c < children; c++) { 2510 vdev_t *tvd = rvd->vdev_child[c]; 2511 nvlist_t **mchild; 2512 uint_t mchildren; 2513 2514 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2515 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2516 0) == 0); 2517 VERIFY(nvlist_add_string(schild[schildren], 2518 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2519 VERIFY(nvlist_add_uint64(schild[schildren], 2520 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2521 if (lastlogid == 0) 2522 lastlogid = schildren; 2523 ++schildren; 2524 continue; 2525 } 2526 lastlogid = 0; 2527 VERIFY(nvlist_lookup_nvlist_array(child[c], 2528 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2529 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2530 } 2531 2532 /* OK, create a config that can be used to split */ 2533 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2534 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2535 VDEV_TYPE_ROOT) == 0); 2536 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2537 lastlogid != 0 ? lastlogid : schildren) == 0); 2538 2539 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2540 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2541 2542 for (c = 0; c < schildren; c++) 2543 nvlist_free(schild[c]); 2544 free(schild); 2545 nvlist_free(split); 2546 2547 spa_config_exit(spa, SCL_VDEV, FTAG); 2548 2549 (void) rw_wrlock(&ztest_name_lock); 2550 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2551 (void) rw_unlock(&ztest_name_lock); 2552 2553 nvlist_free(config); 2554 2555 if (error == 0) { 2556 (void) printf("successful split - results:\n"); 2557 mutex_enter(&spa_namespace_lock); 2558 show_pool_stats(spa); 2559 show_pool_stats(spa_lookup("splitp")); 2560 mutex_exit(&spa_namespace_lock); 2561 ++zs->zs_splits; 2562 --zs->zs_mirrors; 2563 } 2564 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2565 2566 } 2567 2568 /* 2569 * Verify that we can attach and detach devices.
2570 */ 2571 /* ARGSUSED */ 2572 void 2573 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2574 { 2575 ztest_shared_t *zs = ztest_shared; 2576 spa_t *spa = ztest_spa; 2577 spa_aux_vdev_t *sav = &spa->spa_spares; 2578 vdev_t *rvd = spa->spa_root_vdev; 2579 vdev_t *oldvd, *newvd, *pvd; 2580 nvlist_t *root; 2581 uint64_t leaves; 2582 uint64_t leaf, top; 2583 uint64_t ashift = ztest_get_ashift(); 2584 uint64_t oldguid, pguid; 2585 size_t oldsize, newsize; 2586 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2587 int replacing; 2588 int oldvd_has_siblings = B_FALSE; 2589 int newvd_is_spare = B_FALSE; 2590 int oldvd_is_log; 2591 int error, expected_error; 2592 2593 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2594 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 2595 2596 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2597 2598 /* 2599 * Decide whether to do an attach or a replace. 2600 */ 2601 replacing = ztest_random(2); 2602 2603 /* 2604 * Pick a random top-level vdev. 2605 */ 2606 top = ztest_random_vdev_top(spa, B_TRUE); 2607 2608 /* 2609 * Pick a random leaf within it. 2610 */ 2611 leaf = ztest_random(leaves); 2612 2613 /* 2614 * Locate this vdev. 2615 */ 2616 oldvd = rvd->vdev_child[top]; 2617 if (zs->zs_mirrors >= 1) { 2618 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2619 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2620 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz]; 2621 } 2622 if (ztest_opts.zo_raidz > 1) { 2623 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2624 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz); 2625 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz]; 2626 } 2627 2628 /* 2629 * If we're already doing an attach or replace, oldvd may be a 2630 * mirror vdev -- in which case, pick a random child. 2631 */ 2632 while (oldvd->vdev_children != 0) { 2633 oldvd_has_siblings = B_TRUE; 2634 ASSERT(oldvd->vdev_children >= 2); 2635 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2636 } 2637 2638 oldguid = oldvd->vdev_guid; 2639 oldsize = vdev_get_min_asize(oldvd); 2640 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2641 (void) strcpy(oldpath, oldvd->vdev_path); 2642 pvd = oldvd->vdev_parent; 2643 pguid = pvd->vdev_guid; 2644 2645 /* 2646 * If oldvd has siblings, then half of the time, detach it. 2647 */ 2648 if (oldvd_has_siblings && ztest_random(2) == 0) { 2649 spa_config_exit(spa, SCL_VDEV, FTAG); 2650 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2651 if (error != 0 && error != ENODEV && error != EBUSY && 2652 error != ENOTSUP) 2653 fatal(0, "detach (%s) returned %d", oldpath, error); 2654 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2655 return; 2656 } 2657 2658 /* 2659 * For the new vdev, choose with equal probability between the two 2660 * standard paths (ending in either 'a' or 'b') or a random hot spare. 2661 */ 2662 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2663 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2664 newvd_is_spare = B_TRUE; 2665 (void) strcpy(newpath, newvd->vdev_path); 2666 } else { 2667 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2668 ztest_opts.zo_dir, ztest_opts.zo_pool, 2669 top * leaves + leaf); 2670 if (ztest_random(2) == 0) 2671 newpath[strlen(newpath) - 1] = 'b'; 2672 newvd = vdev_lookup_by_path(rvd, newpath); 2673 } 2674 2675 if (newvd) { 2676 newsize = vdev_get_min_asize(newvd); 2677 } else { 2678 /* 2679 * Make newsize a little bigger or smaller than oldsize. 2680 * If it's smaller, the attach should fail. 
2681 * If it's larger, and we're doing a replace, 2682 * we should get dynamic LUN growth when we're done. 2683 */ 2684 newsize = 10 * oldsize / (9 + ztest_random(3)); 2685 } 2686 2687 /* 2688 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2689 * unless it's a replace; in that case any non-replacing parent is OK. 2690 * 2691 * If newvd is already part of the pool, it should fail with EBUSY. 2692 * 2693 * If newvd is too small, it should fail with EOVERFLOW. 2694 */ 2695 if (pvd->vdev_ops != &vdev_mirror_ops && 2696 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2697 pvd->vdev_ops == &vdev_replacing_ops || 2698 pvd->vdev_ops == &vdev_spare_ops)) 2699 expected_error = ENOTSUP; 2700 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2701 expected_error = ENOTSUP; 2702 else if (newvd == oldvd) 2703 expected_error = replacing ? 0 : EBUSY; 2704 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 2705 expected_error = EBUSY; 2706 else if (newsize < oldsize) 2707 expected_error = EOVERFLOW; 2708 else if (ashift > oldvd->vdev_top->vdev_ashift) 2709 expected_error = EDOM; 2710 else 2711 expected_error = 0; 2712 2713 spa_config_exit(spa, SCL_VDEV, FTAG); 2714 2715 /* 2716 * Build the nvlist describing newpath. 2717 */ 2718 root = make_vdev_root(newpath, NULL, newvd == NULL ? newsize : 0, 2719 ashift, 0, 0, 0, 1); 2720 2721 error = spa_vdev_attach(spa, oldguid, root, replacing); 2722 2723 nvlist_free(root); 2724 2725 /* 2726 * If our parent was the replacing vdev, but the replace completed, 2727 * then instead of failing with ENOTSUP we may either succeed, 2728 * fail with ENODEV, or fail with EOVERFLOW. 2729 */ 2730 if (expected_error == ENOTSUP && 2731 (error == 0 || error == ENODEV || error == EOVERFLOW)) 2732 expected_error = error; 2733 2734 /* 2735 * If someone grew the LUN, the replacement may be too small. 2736 */ 2737 if (error == EOVERFLOW || error == EBUSY) 2738 expected_error = error; 2739 2740 /* XXX workaround 6690467 */ 2741 if (error != expected_error && expected_error != EBUSY) { 2742 fatal(0, "attach (%s %llu, %s %llu, %d) " 2743 "returned %d, expected %d", 2744 oldpath, (longlong_t)oldsize, newpath, 2745 (longlong_t)newsize, replacing, error, expected_error); 2746 } 2747 2748 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2749 } 2750 2751 /* 2752 * Callback function which expands the physical size of the vdev. 2753 */ 2754 vdev_t * 2755 grow_vdev(vdev_t *vd, void *arg) 2756 { 2757 spa_t *spa = vd->vdev_spa; 2758 size_t *newsize = arg; 2759 size_t fsize; 2760 int fd; 2761 2762 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2763 ASSERT(vd->vdev_ops->vdev_op_leaf); 2764 2765 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 2766 return (vd); 2767 2768 fsize = lseek(fd, 0, SEEK_END); 2769 (void) ftruncate(fd, *newsize); 2770 2771 if (ztest_opts.zo_verbose >= 6) { 2772 (void) printf("%s grew from %lu to %lu bytes\n", 2773 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 2774 } 2775 (void) close(fd); 2776 return (NULL); 2777 } 2778 2779 /* 2780 * Callback function which expands a given vdev by calling vdev_online(). 
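 * Like grow_vdev() above, this follows the vdev_walk_tree() callback
 * contract: return NULL on success so the walk proceeds to the next
 * leaf, or return the vdev itself to abort the walk and report the
 * failure.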
2781 */ 2782 /* ARGSUSED */ 2783 vdev_t * 2784 online_vdev(vdev_t *vd, void *arg) 2785 { 2786 spa_t *spa = vd->vdev_spa; 2787 vdev_t *tvd = vd->vdev_top; 2788 uint64_t guid = vd->vdev_guid; 2789 uint64_t generation = spa->spa_config_generation + 1; 2790 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 2791 int error; 2792 2793 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2794 ASSERT(vd->vdev_ops->vdev_op_leaf); 2795 2796 /* Calling vdev_online will initialize the new metaslabs */ 2797 spa_config_exit(spa, SCL_STATE, spa); 2798 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 2799 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2800 2801 /* 2802 * If vdev_online returned an error or the underlying vdev_open 2803 * failed then we abort the expand. The only way to know that 2804 * vdev_open fails is by checking the returned newstate. 2805 */ 2806 if (error || newstate != VDEV_STATE_HEALTHY) { 2807 if (ztest_opts.zo_verbose >= 5) { 2808 (void) printf("Unable to expand vdev, state %llu, " 2809 "error %d\n", (u_longlong_t)newstate, error); 2810 } 2811 return (vd); 2812 } 2813 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 2814 2815 /* 2816 * Since we dropped the lock we need to ensure that we're 2817 * still talking to the original vdev. It's possible this 2818 * vdev may have been detached/replaced while we were 2819 * trying to online it. 2820 */ 2821 if (generation != spa->spa_config_generation) { 2822 if (ztest_opts.zo_verbose >= 5) { 2823 (void) printf("vdev configuration has changed, " 2824 "guid %llu, state %llu, expected gen %llu, " 2825 "got gen %llu\n", 2826 (u_longlong_t)guid, 2827 (u_longlong_t)tvd->vdev_state, 2828 (u_longlong_t)generation, 2829 (u_longlong_t)spa->spa_config_generation); 2830 } 2831 return (vd); 2832 } 2833 return (NULL); 2834 } 2835 2836 /* 2837 * Traverse the vdev tree calling the supplied function. 2838 * We continue to walk the tree until we either have walked all 2839 * children or we receive a non-NULL return from the callback. 2840 * If a NULL callback is passed, then we just return back the first 2841 * leaf vdev we encounter. 2842 */ 2843 vdev_t * 2844 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 2845 { 2846 if (vd->vdev_ops->vdev_op_leaf) { 2847 if (func == NULL) 2848 return (vd); 2849 else 2850 return (func(vd, arg)); 2851 } 2852 2853 for (uint_t c = 0; c < vd->vdev_children; c++) { 2854 vdev_t *cvd = vd->vdev_child[c]; 2855 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 2856 return (cvd); 2857 } 2858 return (NULL); 2859 } 2860 2861 /* 2862 * Verify that dynamic LUN growth works as expected. 2863 */ 2864 /* ARGSUSED */ 2865 void 2866 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 2867 { 2868 spa_t *spa = ztest_spa; 2869 vdev_t *vd, *tvd; 2870 metaslab_class_t *mc; 2871 metaslab_group_t *mg; 2872 size_t psize, newsize; 2873 uint64_t top; 2874 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 2875 2876 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2877 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2878 2879 top = ztest_random_vdev_top(spa, B_TRUE); 2880 2881 tvd = spa->spa_root_vdev->vdev_child[top]; 2882 mg = tvd->vdev_mg; 2883 mc = mg->mg_class; 2884 old_ms_count = tvd->vdev_ms_count; 2885 old_class_space = metaslab_class_get_space(mc); 2886 2887 /* 2888 * Determine the size of the first leaf vdev associated with 2889 * our top-level device. 
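 * (Passing a NULL callback makes vdev_walk_tree() return the first
 * leaf it encounters.)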
2890 */ 2891 vd = vdev_walk_tree(tvd, NULL, NULL); 2892 ASSERT3P(vd, !=, NULL); 2893 ASSERT(vd->vdev_ops->vdev_op_leaf); 2894 2895 psize = vd->vdev_psize; 2896 2897 /* 2898 * We only try to expand the vdev if it's healthy, less than 4x its 2899 * original size, and it has a valid psize. 2900 */ 2901 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 2902 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 2903 spa_config_exit(spa, SCL_STATE, spa); 2904 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2905 return; 2906 } 2907 ASSERT(psize > 0); 2908 newsize = psize + psize / 8; 2909 ASSERT3U(newsize, >, psize); 2910 2911 if (ztest_opts.zo_verbose >= 6) { 2912 (void) printf("Expanding LUN %s from %lu to %lu\n", 2913 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 2914 } 2915 2916 /* 2917 * Growing the vdev is a two step process: 2918 * 1). expand the physical size (i.e. relabel) 2919 * 2). online the vdev to create the new metaslabs 2920 */ 2921 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 2922 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 2923 tvd->vdev_state != VDEV_STATE_HEALTHY) { 2924 if (ztest_opts.zo_verbose >= 5) { 2925 (void) printf("Could not expand LUN because " 2926 "the vdev configuration changed.\n"); 2927 } 2928 spa_config_exit(spa, SCL_STATE, spa); 2929 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2930 return; 2931 } 2932 2933 spa_config_exit(spa, SCL_STATE, spa); 2934 2935 /* 2936 * Expanding the LUN will update the config asynchronously, 2937 * thus we must wait for the async thread to complete any 2938 * pending tasks before proceeding. 2939 */ 2940 for (;;) { 2941 boolean_t done; 2942 mutex_enter(&spa->spa_async_lock); 2943 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 2944 mutex_exit(&spa->spa_async_lock); 2945 if (done) 2946 break; 2947 txg_wait_synced(spa_get_dsl(spa), 0); 2948 (void) poll(NULL, 0, 100); 2949 } 2950 2951 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2952 2953 tvd = spa->spa_root_vdev->vdev_child[top]; 2954 new_ms_count = tvd->vdev_ms_count; 2955 new_class_space = metaslab_class_get_space(mc); 2956 2957 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 2958 if (ztest_opts.zo_verbose >= 5) { 2959 (void) printf("Could not verify LUN expansion due to " 2960 "intervening vdev offline or remove.\n"); 2961 } 2962 spa_config_exit(spa, SCL_STATE, spa); 2963 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2964 return; 2965 } 2966 2967 /* 2968 * Make sure we were able to grow the vdev. 2969 */ 2970 if (new_ms_count <= old_ms_count) 2971 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n", 2972 old_ms_count, new_ms_count); 2973 2974 /* 2975 * Make sure we were able to grow the pool. 2976 */ 2977 if (new_class_space <= old_class_space) 2978 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n", 2979 old_class_space, new_class_space); 2980 2981 if (ztest_opts.zo_verbose >= 5) { 2982 char oldnumbuf[6], newnumbuf[6]; 2983 2984 nicenum(old_class_space, oldnumbuf); 2985 nicenum(new_class_space, newnumbuf); 2986 (void) printf("%s grew from %s to %s\n", 2987 spa->spa_name, oldnumbuf, newnumbuf); 2988 } 2989 2990 spa_config_exit(spa, SCL_STATE, spa); 2991 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2992 } 2993 2994 /* 2995 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 2996 */ 2997 /* ARGSUSED */ 2998 static void 2999 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 3000 { 3001 /* 3002 * Create the objects common to all ztest datasets. 
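 * Claiming ZTEST_DIROBJ inside the creation tx guarantees that every
 * ztest dataset has its directory ZAP at the same well-known object
 * number -- the one ztest_od_init() records in od_dir.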
3003 */ 3004 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 3005 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 3006 } 3007 3008 static int 3009 ztest_dataset_create(char *dsname) 3010 { 3011 uint64_t zilset = ztest_random(100); 3012 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 3013 ztest_objset_create_cb, NULL); 3014 3015 if (err || zilset < 80) 3016 return (err); 3017 3018 if (ztest_opts.zo_verbose >= 6) 3019 (void) printf("Setting dataset %s to sync always\n", dsname); 3020 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 3021 ZFS_SYNC_ALWAYS, B_FALSE)); 3022 } 3023 3024 /* ARGSUSED */ 3025 static int 3026 ztest_objset_destroy_cb(const char *name, void *arg) 3027 { 3028 objset_t *os; 3029 dmu_object_info_t doi; 3030 int error; 3031 3032 /* 3033 * Verify that the dataset contains a directory object. 3034 */ 3035 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os)); 3036 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 3037 if (error != ENOENT) { 3038 /* We could have crashed in the middle of destroying it */ 3039 ASSERT3U(error, ==, 0); 3040 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 3041 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 3042 } 3043 dmu_objset_rele(os, FTAG); 3044 3045 /* 3046 * Destroy the dataset. 3047 */ 3048 VERIFY3U(0, ==, dmu_objset_destroy(name, B_FALSE)); 3049 return (0); 3050 } 3051 3052 static boolean_t 3053 ztest_snapshot_create(char *osname, uint64_t id) 3054 { 3055 char snapname[MAXNAMELEN]; 3056 int error; 3057 3058 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 3059 (u_longlong_t)id); 3060 3061 error = dmu_objset_snapshot_one(osname, strchr(snapname, '@') + 1); 3062 if (error == ENOSPC) { 3063 ztest_record_enospc(FTAG); 3064 return (B_FALSE); 3065 } 3066 if (error != 0 && error != EEXIST) 3067 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error); 3068 return (B_TRUE); 3069 } 3070 3071 static boolean_t 3072 ztest_snapshot_destroy(char *osname, uint64_t id) 3073 { 3074 char snapname[MAXNAMELEN]; 3075 int error; 3076 3077 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 3078 (u_longlong_t)id); 3079 3080 error = dmu_objset_destroy(snapname, B_FALSE); 3081 if (error != 0 && error != ENOENT) 3082 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 3083 return (B_TRUE); 3084 } 3085 3086 /* ARGSUSED */ 3087 void 3088 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 3089 { 3090 ztest_ds_t zdtmp; 3091 int iters; 3092 int error; 3093 objset_t *os, *os2; 3094 char name[MAXNAMELEN]; 3095 zilog_t *zilog; 3096 3097 (void) rw_rdlock(&ztest_name_lock); 3098 3099 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu", 3100 ztest_opts.zo_pool, (u_longlong_t)id); 3101 3102 /* 3103 * If this dataset exists from a previous run, process its replay log 3104 * half of the time. If we don't replay it, then dmu_objset_destroy() 3105 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 3106 */ 3107 if (ztest_random(2) == 0 && 3108 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 3109 ztest_zd_init(&zdtmp, NULL, os); 3110 zil_replay(os, &zdtmp, ztest_replay_vector); 3111 ztest_zd_fini(&zdtmp); 3112 dmu_objset_disown(os, FTAG); 3113 } 3114 3115 /* 3116 * There may be an old instance of the dataset we're about to 3117 * create lying around from a previous run. If so, destroy it 3118 * and all of its snapshots. 3119 */ 3120 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 3121 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 3122 3123 /* 3124 * Verify that the destroyed dataset is no longer in the namespace. 
3125 */ 3126 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os)); 3127 3128 /* 3129 * Verify that we can create a new dataset. 3130 */ 3131 error = ztest_dataset_create(name); 3132 if (error) { 3133 if (error == ENOSPC) { 3134 ztest_record_enospc(FTAG); 3135 (void) rw_unlock(&ztest_name_lock); 3136 return; 3137 } 3138 fatal(0, "dmu_objset_create(%s) = %d", name, error); 3139 } 3140 3141 VERIFY3U(0, ==, 3142 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 3143 3144 ztest_zd_init(&zdtmp, NULL, os); 3145 3146 /* 3147 * Open the intent log for it. 3148 */ 3149 zilog = zil_open(os, ztest_get_data); 3150 3151 /* 3152 * Put some objects in there, do a little I/O to them, 3153 * and randomly take a couple of snapshots along the way. 3154 */ 3155 iters = ztest_random(5); 3156 for (int i = 0; i < iters; i++) { 3157 ztest_dmu_object_alloc_free(&zdtmp, id); 3158 if (ztest_random(iters) == 0) 3159 (void) ztest_snapshot_create(name, i); 3160 } 3161 3162 /* 3163 * Verify that we cannot create an existing dataset. 3164 */ 3165 VERIFY3U(EEXIST, ==, 3166 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 3167 3168 /* 3169 * Verify that we can hold an objset that is also owned. 3170 */ 3171 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 3172 dmu_objset_rele(os2, FTAG); 3173 3174 /* 3175 * Verify that we cannot own an objset that is already owned. 3176 */ 3177 VERIFY3U(EBUSY, ==, 3178 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3179 3180 zil_close(zilog); 3181 dmu_objset_disown(os, FTAG); 3182 ztest_zd_fini(&zdtmp); 3183 3184 (void) rw_unlock(&ztest_name_lock); 3185 } 3186 3187 /* 3188 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3189 */ 3190 void 3191 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3192 { 3193 (void) rw_rdlock(&ztest_name_lock); 3194 (void) ztest_snapshot_destroy(zd->zd_name, id); 3195 (void) ztest_snapshot_create(zd->zd_name, id); 3196 (void) rw_unlock(&ztest_name_lock); 3197 } 3198 3199 /* 3200 * Cleanup non-standard snapshots and clones. 
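 * The promote test below builds the following topology (all names
 * carry a per-test id suffix):
 *
 *	<osname>@s1 ----------> clone <osname>/c1
 *	<osname>/c1@s2
 *	<osname>/c1@s3 -------> clone <osname>/c2
 *
 * so the teardown destroys each clone before the snapshot it was
 * cloned from.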
3201 */ 3202 void 3203 ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3204 { 3205 char snap1name[MAXNAMELEN]; 3206 char clone1name[MAXNAMELEN]; 3207 char snap2name[MAXNAMELEN]; 3208 char clone2name[MAXNAMELEN]; 3209 char snap3name[MAXNAMELEN]; 3210 int error; 3211 3212 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3213 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3214 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3215 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3216 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3217 3218 error = dmu_objset_destroy(clone2name, B_FALSE); 3219 if (error && error != ENOENT) 3220 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name, error); 3221 error = dmu_objset_destroy(snap3name, B_FALSE); 3222 if (error && error != ENOENT) 3223 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name, error); 3224 error = dmu_objset_destroy(snap2name, B_FALSE); 3225 if (error && error != ENOENT) 3226 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name, error); 3227 error = dmu_objset_destroy(clone1name, B_FALSE); 3228 if (error && error != ENOENT) 3229 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name, error); 3230 error = dmu_objset_destroy(snap1name, B_FALSE); 3231 if (error && error != ENOENT) 3232 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error); 3233 } 3234 3235 /* 3236 * Verify dsl_dataset_promote handles EBUSY 3237 */ 3238 void 3239 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3240 { 3241 objset_t *clone; 3242 dsl_dataset_t *ds; 3243 char snap1name[MAXNAMELEN]; 3244 char clone1name[MAXNAMELEN]; 3245 char snap2name[MAXNAMELEN]; 3246 char clone2name[MAXNAMELEN]; 3247 char snap3name[MAXNAMELEN]; 3248 char *osname = zd->zd_name; 3249 int error; 3250 3251 (void) rw_rdlock(&ztest_name_lock); 3252 3253 ztest_dsl_dataset_cleanup(osname, id); 3254 3255 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3256 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3257 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3258 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3259 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3260 3261 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1); 3262 if (error && error != EEXIST) { 3263 if (error == ENOSPC) { 3264 ztest_record_enospc(FTAG); 3265 goto out; 3266 } 3267 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); 3268 } 3269 3270 error = dmu_objset_hold(snap1name, FTAG, &clone); 3271 if (error) 3272 fatal(0, "dmu_open_snapshot(%s) = %d", snap1name, error); 3273 3274 error = dmu_objset_clone(clone1name, dmu_objset_ds(clone), 0); 3275 dmu_objset_rele(clone, FTAG); 3276 if (error) { 3277 if (error == ENOSPC) { 3278 ztest_record_enospc(FTAG); 3279 goto out; 3280 } 3281 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error); 3282 } 3283 3284 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1); 3285 if (error && error != EEXIST) { 3286 if (error == ENOSPC) { 3287 ztest_record_enospc(FTAG); 3288 goto out; 3289 } 3290 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error); 3291 } 3292 3293 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1); 3294 if (error && error != EEXIST) { 3295 if (error == ENOSPC) { 3296 ztest_record_enospc(FTAG); 3297 goto out; 3298 } 3299 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 3300 } 3301 3302 error = 
dmu_objset_hold(snap3name, FTAG, &clone); 3303 if (error) 3304 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 3305 3306 error = dmu_objset_clone(clone2name, dmu_objset_ds(clone), 0); 3307 dmu_objset_rele(clone, FTAG); 3308 if (error) { 3309 if (error == ENOSPC) { 3310 ztest_record_enospc(FTAG); 3311 goto out; 3312 } 3313 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error); 3314 } 3315 3316 error = dsl_dataset_own(snap2name, B_FALSE, FTAG, &ds); 3317 if (error) 3318 fatal(0, "dsl_dataset_own(%s) = %d", snap2name, error); 3319 error = dsl_dataset_promote(clone2name, NULL); 3320 if (error != EBUSY) 3321 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3322 error); 3323 dsl_dataset_disown(ds, FTAG); 3324 3325 out: 3326 ztest_dsl_dataset_cleanup(osname, id); 3327 3328 (void) rw_unlock(&ztest_name_lock); 3329 } 3330 3331 /* 3332 * Verify that dmu_object_{alloc,free} work as expected. 3333 */ 3334 void 3335 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3336 { 3337 ztest_od_t od[4]; 3338 int batchsize = sizeof (od) / sizeof (od[0]); 3339 3340 for (int b = 0; b < batchsize; b++) 3341 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3342 3343 /* 3344 * Destroy the previous batch of objects, create a new batch, 3345 * and do some I/O on the new objects. 3346 */ 3347 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3348 return; 3349 3350 while (ztest_random(4 * batchsize) != 0) 3351 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3352 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3353 } 3354 3355 /* 3356 * Verify that dmu_{read,write} work as expected. 3357 */ 3358 void 3359 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3360 { 3361 objset_t *os = zd->zd_os; 3362 ztest_od_t od[2]; 3363 dmu_tx_t *tx; 3364 int i, freeit, error; 3365 uint64_t n, s, txg; 3366 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3367 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3368 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3369 uint64_t regions = 997; 3370 uint64_t stride = 123456789ULL; 3371 uint64_t width = 40; 3372 int free_percent = 5; 3373 3374 /* 3375 * This test uses two objects, packobj and bigobj, that are always 3376 * updated together (i.e. in the same tx) so that their contents are 3377 * in sync and can be compared. Their contents relate to each other 3378 * in a simple way: packobj is a dense array of 'bufwad' structures, 3379 * while bigobj is a sparse array of the same bufwads. Specifically, 3380 * for any index n, there are three bufwads that should be identical: 3381 * 3382 * packobj, at offset n * sizeof (bufwad_t) 3383 * bigobj, at the head of the nth chunk 3384 * bigobj, at the tail of the nth chunk 3385 * 3386 * The chunk size is arbitrary. It doesn't have to be a power of two, 3387 * and it doesn't have any relation to the object blocksize. 3388 * The only requirement is that it can hold at least two bufwads. 3389 * 3390 * Normally, we write the bufwad to each of these locations. 3391 * However, free_percent of the time we instead write zeroes to 3392 * packobj and perform a dmu_free_range() on bigobj. By comparing 3393 * bigobj to packobj, we can verify that the DMU is correctly 3394 * tracking which parts of an object are allocated and free, 3395 * and that the contents of the allocated blocks are correct. 3396 */ 3397 3398 /* 3399 * Read the directory info. If it's the first time, set things up. 
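 * Note that the template carries the chunk size in its 'gen' slot:
 * ztest_od_init() stores it in od_crgen, and for a pre-existing
 * object ztest_lookup() hands back the value preserved in od_gen,
 * which is why chunksize is re-read from od[0].od_gen just below.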
3400 */ 3401 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3402 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3403 3404 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3405 return; 3406 3407 bigobj = od[0].od_object; 3408 packobj = od[1].od_object; 3409 chunksize = od[0].od_gen; 3410 ASSERT(chunksize == od[1].od_gen); 3411 3412 /* 3413 * Prefetch a random chunk of the big object. 3414 * Our aim here is to get some async reads in flight 3415 * for blocks that we may free below; the DMU should 3416 * handle this race correctly. 3417 */ 3418 n = ztest_random(regions) * stride + ztest_random(width); 3419 s = 1 + ztest_random(2 * width - 1); 3420 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize); 3421 3422 /* 3423 * Pick a random index and compute the offsets into packobj and bigobj. 3424 */ 3425 n = ztest_random(regions) * stride + ztest_random(width); 3426 s = 1 + ztest_random(width - 1); 3427 3428 packoff = n * sizeof (bufwad_t); 3429 packsize = s * sizeof (bufwad_t); 3430 3431 bigoff = n * chunksize; 3432 bigsize = s * chunksize; 3433 3434 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3435 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3436 3437 /* 3438 * free_percent of the time, free a range of bigobj rather than 3439 * overwriting it. 3440 */ 3441 freeit = (ztest_random(100) < free_percent); 3442 3443 /* 3444 * Read the current contents of our objects. 3445 */ 3446 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3447 DMU_READ_PREFETCH); 3448 ASSERT3U(error, ==, 0); 3449 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3450 DMU_READ_PREFETCH); 3451 ASSERT3U(error, ==, 0); 3452 3453 /* 3454 * Get a tx for the mods to both packobj and bigobj. 3455 */ 3456 tx = dmu_tx_create(os); 3457 3458 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3459 3460 if (freeit) 3461 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3462 else 3463 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3464 3465 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3466 if (txg == 0) { 3467 umem_free(packbuf, packsize); 3468 umem_free(bigbuf, bigsize); 3469 return; 3470 } 3471 3472 dmu_object_set_checksum(os, bigobj, 3473 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx); 3474 3475 dmu_object_set_compress(os, bigobj, 3476 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx); 3477 3478 /* 3479 * For each index from n to n + s, verify that the existing bufwad 3480 * in packobj matches the bufwads at the head and tail of the 3481 * corresponding chunk in bigobj. Then update all three bufwads 3482 * with the new values we want to write out. 
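 * The checks are: a written bufwad's bw_index must equal its logical
 * index n + i, bw_txg must never exceed the currently open txg (no
 * future leaks), and the head and tail copies in bigobj must match
 * the packobj copy byte for byte.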
3483 */ 3484 for (i = 0; i < s; i++) { 3485 /* LINTED */ 3486 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3487 /* LINTED */ 3488 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3489 /* LINTED */ 3490 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3491 3492 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3493 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3494 3495 if (pack->bw_txg > txg) 3496 fatal(0, "future leak: got %llx, open txg is %llx", 3497 pack->bw_txg, txg); 3498 3499 if (pack->bw_data != 0 && pack->bw_index != n + i) 3500 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3501 pack->bw_index, n, i); 3502 3503 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3504 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3505 3506 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3507 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3508 3509 if (freeit) { 3510 bzero(pack, sizeof (bufwad_t)); 3511 } else { 3512 pack->bw_index = n + i; 3513 pack->bw_txg = txg; 3514 pack->bw_data = 1 + ztest_random(-2ULL); 3515 } 3516 *bigH = *pack; 3517 *bigT = *pack; 3518 } 3519 3520 /* 3521 * We've verified all the old bufwads, and made new ones. 3522 * Now write them out. 3523 */ 3524 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3525 3526 if (freeit) { 3527 if (ztest_opts.zo_verbose >= 7) { 3528 (void) printf("freeing offset %llx size %llx" 3529 " txg %llx\n", 3530 (u_longlong_t)bigoff, 3531 (u_longlong_t)bigsize, 3532 (u_longlong_t)txg); 3533 } 3534 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3535 } else { 3536 if (ztest_opts.zo_verbose >= 7) { 3537 (void) printf("writing offset %llx size %llx" 3538 " txg %llx\n", 3539 (u_longlong_t)bigoff, 3540 (u_longlong_t)bigsize, 3541 (u_longlong_t)txg); 3542 } 3543 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3544 } 3545 3546 dmu_tx_commit(tx); 3547 3548 /* 3549 * Sanity check the stuff we just wrote. 3550 */ 3551 { 3552 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3553 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3554 3555 VERIFY(0 == dmu_read(os, packobj, packoff, 3556 packsize, packcheck, DMU_READ_PREFETCH)); 3557 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3558 bigsize, bigcheck, DMU_READ_PREFETCH)); 3559 3560 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3561 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3562 3563 umem_free(packcheck, packsize); 3564 umem_free(bigcheck, bigsize); 3565 } 3566 3567 umem_free(packbuf, packsize); 3568 umem_free(bigbuf, bigsize); 3569 } 3570 3571 void 3572 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3573 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3574 { 3575 uint64_t i; 3576 bufwad_t *pack; 3577 bufwad_t *bigH; 3578 bufwad_t *bigT; 3579 3580 /* 3581 * For each index from n to n + s, verify that the existing bufwad 3582 * in packobj matches the bufwads at the head and tail of the 3583 * corresponding chunk in bigobj. Then update all three bufwads 3584 * with the new values we want to write out. 
3585 */ 3586 for (i = 0; i < s; i++) { 3587 /* LINTED */ 3588 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3589 /* LINTED */ 3590 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3591 /* LINTED */ 3592 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3593 3594 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3595 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3596 3597 if (pack->bw_txg > txg) 3598 fatal(0, "future leak: got %llx, open txg is %llx", 3599 pack->bw_txg, txg); 3600 3601 if (pack->bw_data != 0 && pack->bw_index != n + i) 3602 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3603 pack->bw_index, n, i); 3604 3605 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3606 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3607 3608 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3609 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3610 3611 pack->bw_index = n + i; 3612 pack->bw_txg = txg; 3613 pack->bw_data = 1 + ztest_random(-2ULL); 3614 3615 *bigH = *pack; 3616 *bigT = *pack; 3617 } 3618 } 3619 3620 void 3621 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3622 { 3623 objset_t *os = zd->zd_os; 3624 ztest_od_t od[2]; 3625 dmu_tx_t *tx; 3626 uint64_t i; 3627 int error; 3628 uint64_t n, s, txg; 3629 bufwad_t *packbuf, *bigbuf; 3630 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3631 uint64_t blocksize = ztest_random_blocksize(); 3632 uint64_t chunksize = blocksize; 3633 uint64_t regions = 997; 3634 uint64_t stride = 123456789ULL; 3635 uint64_t width = 9; 3636 dmu_buf_t *bonus_db; 3637 arc_buf_t **bigbuf_arcbufs; 3638 dmu_object_info_t doi; 3639 3640 /* 3641 * This test uses two objects, packobj and bigobj, that are always 3642 * updated together (i.e. in the same tx) so that their contents are 3643 * in sync and can be compared. Their contents relate to each other 3644 * in a simple way: packobj is a dense array of 'bufwad' structures, 3645 * while bigobj is a sparse array of the same bufwads. Specifically, 3646 * for any index n, there are three bufwads that should be identical: 3647 * 3648 * packobj, at offset n * sizeof (bufwad_t) 3649 * bigobj, at the head of the nth chunk 3650 * bigobj, at the tail of the nth chunk 3651 * 3652 * The chunk size is set equal to bigobj block size so that 3653 * dmu_assign_arcbuf() can be tested for object updates. 3654 */ 3655 3656 /* 3657 * Read the directory info. If it's the first time, set things up. 3658 */ 3659 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3660 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3661 3662 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3663 return; 3664 3665 bigobj = od[0].od_object; 3666 packobj = od[1].od_object; 3667 blocksize = od[0].od_blocksize; 3668 chunksize = blocksize; 3669 ASSERT(chunksize == od[1].od_gen); 3670 3671 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 3672 VERIFY(ISP2(doi.doi_data_block_size)); 3673 VERIFY(chunksize == doi.doi_data_block_size); 3674 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 3675 3676 /* 3677 * Pick a random index and compute the offsets into packobj and bigobj. 
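 * For example, a draw of n = 1000 and s = 3 covers bufwads
 * 1000..1002 in both objects: packoff = 1000 * sizeof (bufwad_t) for
 * packsize = 3 * sizeof (bufwad_t) bytes of packobj, and
 * bigoff = 1000 * chunksize for bigsize = 3 * chunksize bytes (three
 * whole chunks) of bigobj.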
*/ 3679 n = ztest_random(regions) * stride + ztest_random(width); 3680 s = 1 + ztest_random(width - 1); 3681 3682 packoff = n * sizeof (bufwad_t); 3683 packsize = s * sizeof (bufwad_t); 3684 3685 bigoff = n * chunksize; 3686 bigsize = s * chunksize; 3687 3688 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 3689 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 3690 3691 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 3692 3693 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 3694 3695 /* 3696 * Iteration 0 tests zcopy for DB_UNCACHED dbufs. 3697 * Iteration 1 tests zcopy to already referenced dbufs. 3698 * Iteration 2 tests zcopy to dirty dbuf in the same txg. 3699 * Iteration 3 tests zcopy to dbuf dirty in previous txg. 3700 * Iteration 4 tests zcopy when dbuf is no longer dirty. 3701 * Iteration 5 tests zcopy when it can't be done. 3702 * Iteration 6 is one more zcopy write. 3703 */ 3704 for (i = 0; i < 7; i++) { 3705 uint64_t j; 3706 uint64_t off; 3707 3708 /* 3709 * In iteration 5 (i == 5) use arcbufs 3710 * that don't match bigobj blksz to test 3711 * dmu_assign_arcbuf() when it can't directly 3712 * assign an arcbuf to a dbuf. 3713 */ 3714 for (j = 0; j < s; j++) { 3715 if (i != 5) { 3716 bigbuf_arcbufs[j] = 3717 dmu_request_arcbuf(bonus_db, chunksize); 3718 } else { 3719 bigbuf_arcbufs[2 * j] = 3720 dmu_request_arcbuf(bonus_db, chunksize / 2); 3721 bigbuf_arcbufs[2 * j + 1] = 3722 dmu_request_arcbuf(bonus_db, chunksize / 2); 3723 } 3724 } 3725 3726 /* 3727 * Get a tx for the mods to both packobj and bigobj. 3728 */ 3729 tx = dmu_tx_create(os); 3730 3731 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3732 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3733 3734 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3735 if (txg == 0) { 3736 umem_free(packbuf, packsize); 3737 umem_free(bigbuf, bigsize); 3738 for (j = 0; j < s; j++) { 3739 if (i != 5) { 3740 dmu_return_arcbuf(bigbuf_arcbufs[j]); 3741 } else { 3742 dmu_return_arcbuf( 3743 bigbuf_arcbufs[2 * j]); 3744 dmu_return_arcbuf( 3745 bigbuf_arcbufs[2 * j + 1]); 3746 } 3747 } 3748 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3749 dmu_buf_rele(bonus_db, FTAG); 3750 return; 3751 } 3752 3753 /* 3754 * 50% of the time don't read objects in the 1st iteration to 3755 * test dmu_assign_arcbuf() for the case when there are no 3756 * existing dbufs for the specified offsets. 3757 */ 3758 if (i != 0 || ztest_random(2) != 0) { 3759 error = dmu_read(os, packobj, packoff, 3760 packsize, packbuf, DMU_READ_PREFETCH); 3761 ASSERT3U(error, ==, 0); 3762 error = dmu_read(os, bigobj, bigoff, bigsize, 3763 bigbuf, DMU_READ_PREFETCH); 3764 ASSERT3U(error, ==, 0); 3765 } 3766 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 3767 n, chunksize, txg); 3768 3769 /* 3770 * We've verified all the old bufwads, and made new ones. 3771 * Now write them out.
*/ 3773 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3774 if (ztest_opts.zo_verbose >= 7) { 3775 (void) printf("writing offset %llx size %llx" 3776 " txg %llx\n", 3777 (u_longlong_t)bigoff, 3778 (u_longlong_t)bigsize, 3779 (u_longlong_t)txg); 3780 } 3781 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 3782 dmu_buf_t *dbt; 3783 if (i != 5) { 3784 bcopy((caddr_t)bigbuf + (off - bigoff), 3785 bigbuf_arcbufs[j]->b_data, chunksize); 3786 } else { 3787 bcopy((caddr_t)bigbuf + (off - bigoff), 3788 bigbuf_arcbufs[2 * j]->b_data, 3789 chunksize / 2); 3790 bcopy((caddr_t)bigbuf + (off - bigoff) + 3791 chunksize / 2, 3792 bigbuf_arcbufs[2 * j + 1]->b_data, 3793 chunksize / 2); 3794 } 3795 3796 if (i == 1) { 3797 VERIFY(dmu_buf_hold(os, bigobj, off, 3798 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 3799 } 3800 if (i != 5) { 3801 dmu_assign_arcbuf(bonus_db, off, 3802 bigbuf_arcbufs[j], tx); 3803 } else { 3804 dmu_assign_arcbuf(bonus_db, off, 3805 bigbuf_arcbufs[2 * j], tx); 3806 dmu_assign_arcbuf(bonus_db, 3807 off + chunksize / 2, 3808 bigbuf_arcbufs[2 * j + 1], tx); 3809 } 3810 if (i == 1) { 3811 dmu_buf_rele(dbt, FTAG); 3812 } 3813 } 3814 dmu_tx_commit(tx); 3815 3816 /* 3817 * Sanity check the stuff we just wrote. 3818 */ 3819 { 3820 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3821 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3822 3823 VERIFY(0 == dmu_read(os, packobj, packoff, 3824 packsize, packcheck, DMU_READ_PREFETCH)); 3825 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3826 bigsize, bigcheck, DMU_READ_PREFETCH)); 3827 3828 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3829 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3830 3831 umem_free(packcheck, packsize); 3832 umem_free(bigcheck, bigsize); 3833 } 3834 if (i == 2) { 3835 txg_wait_open(dmu_objset_pool(os), 0); 3836 } else if (i == 3) { 3837 txg_wait_synced(dmu_objset_pool(os), 0); 3838 } 3839 } 3840 3841 dmu_buf_rele(bonus_db, FTAG); 3842 umem_free(packbuf, packsize); 3843 umem_free(bigbuf, bigsize); 3844 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3845 } 3846 3847 /* ARGSUSED */ 3848 void 3849 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 3850 { 3851 ztest_od_t od[1]; 3852 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 3853 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3854 3855 /* 3856 * Have multiple threads write to large offsets in an object 3857 * to verify that parallel writes to an object -- even to the 3858 * same blocks within the object -- don't cause any trouble.
*/ 3860 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 3861 3862 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3863 return; 3864 3865 while (ztest_random(10) != 0) 3866 ztest_io(zd, od[0].od_object, offset); 3867 } 3868 3869 void 3870 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 3871 { 3872 ztest_od_t od[1]; 3873 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 3874 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3875 uint64_t count = ztest_random(20) + 1; 3876 uint64_t blocksize = ztest_random_blocksize(); 3877 void *data; 3878 3879 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3880 3881 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 3882 return; 3883 3884 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) 3885 return; 3886 3887 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); 3888 3889 data = umem_zalloc(blocksize, UMEM_NOFAIL); 3890 3891 while (ztest_random(count) != 0) { 3892 uint64_t randoff = offset + (ztest_random(count) * blocksize); 3893 if (ztest_write(zd, od[0].od_object, randoff, blocksize, 3894 data) != 0) 3895 break; 3896 while (ztest_random(4) != 0) 3897 ztest_io(zd, od[0].od_object, randoff); 3898 } 3899 3900 umem_free(data, blocksize); 3901 } 3902 3903 /* 3904 * Verify that zap_{create,destroy,add,remove,update} work as expected. 3905 */ 3906 #define ZTEST_ZAP_MIN_INTS 1 3907 #define ZTEST_ZAP_MAX_INTS 4 3908 #define ZTEST_ZAP_MAX_PROPS 1000 3909 3910 void 3911 ztest_zap(ztest_ds_t *zd, uint64_t id) 3912 { 3913 objset_t *os = zd->zd_os; 3914 ztest_od_t od[1]; 3915 uint64_t object; 3916 uint64_t txg, last_txg; 3917 uint64_t value[ZTEST_ZAP_MAX_INTS]; 3918 uint64_t zl_ints, zl_intsize, prop; 3919 int i, ints; 3920 dmu_tx_t *tx; 3921 char propname[100], txgname[100]; 3922 int error; 3923 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 3924 3925 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 3926 3927 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 3928 return; 3929 3930 object = od[0].od_object; 3931 3932 /* 3933 * Generate a known hash collision, and verify that 3934 * we can lookup and remove both entries. 3935 */ 3936 tx = dmu_tx_create(os); 3937 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 3938 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3939 if (txg == 0) 3940 return; 3941 for (i = 0; i < 2; i++) { 3942 value[i] = i; 3943 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), 3944 1, &value[i], tx)); 3945 } 3946 for (i = 0; i < 2; i++) { 3947 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 3948 sizeof (uint64_t), 1, &value[i], tx)); 3949 VERIFY3U(0, ==, 3950 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 3951 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 3952 ASSERT3U(zl_ints, ==, 1); 3953 } 3954 for (i = 0; i < 2; i++) { 3955 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx)); 3956 } 3957 dmu_tx_commit(tx); 3958 3959 /* 3960 * Generate a bunch of random entries. 3961 */ 3962 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 3963 3964 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 3965 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 3966 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 3967 bzero(value, sizeof (value)); 3968 last_txg = 0; 3969 3970 /* 3971 * If these zap entries already exist, validate their contents.
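 * Each successful pass below stores the assigning txg under
 * txg_<prop> and the values txg + object + i under prop_<prop>, so
 * on re-entry every element must equal last_txg + object + i.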
3972 */ 3973 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 3974 if (error == 0) { 3975 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 3976 ASSERT3U(zl_ints, ==, 1); 3977 3978 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 3979 zl_ints, &last_txg) == 0); 3980 3981 VERIFY(zap_length(os, object, propname, &zl_intsize, 3982 &zl_ints) == 0); 3983 3984 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 3985 ASSERT3U(zl_ints, ==, ints); 3986 3987 VERIFY(zap_lookup(os, object, propname, zl_intsize, 3988 zl_ints, value) == 0); 3989 3990 for (i = 0; i < ints; i++) { 3991 ASSERT3U(value[i], ==, last_txg + object + i); 3992 } 3993 } else { 3994 ASSERT3U(error, ==, ENOENT); 3995 } 3996 3997 /* 3998 * Atomically update two entries in our zap object. 3999 * The first is named txg_%llu, and contains the txg 4000 * in which the property was last updated. The second 4001 * is named prop_%llu, and the nth element of its value 4002 * should be txg + object + n. 4003 */ 4004 tx = dmu_tx_create(os); 4005 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4006 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4007 if (txg == 0) 4008 return; 4009 4010 if (last_txg > txg) 4011 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 4012 4013 for (i = 0; i < ints; i++) 4014 value[i] = txg + object + i; 4015 4016 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 4017 1, &txg, tx)); 4018 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 4019 ints, value, tx)); 4020 4021 dmu_tx_commit(tx); 4022 4023 /* 4024 * Remove a random pair of entries. 4025 */ 4026 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4027 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4028 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4029 4030 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4031 4032 if (error == ENOENT) 4033 return; 4034 4035 ASSERT3U(error, ==, 0); 4036 4037 tx = dmu_tx_create(os); 4038 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4039 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4040 if (txg == 0) 4041 return; 4042 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 4043 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 4044 dmu_tx_commit(tx); 4045 } 4046 4047 /* 4048 * Test the upgrade of a microzap to a fatzap. 4049 */ 4050 void 4051 ztest_fzap(ztest_ds_t *zd, uint64_t id) 4052 { 4053 objset_t *os = zd->zd_os; 4054 ztest_od_t od[1]; 4055 uint64_t object, txg; 4056 4057 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4058 4059 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4060 return; 4061 4062 object = od[0].od_object; 4063 4064 /* 4065 * Add entries to this ZAP and make sure it spills over 4066 * and gets upgraded to a fatzap. Also, since we are adding 4067 * 2050 entries we should see ptrtbl growth and a leaf-block split.
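 *
 * (Rough arithmetic, assuming 64-byte microzap entries and a microzap
 * limited to a single block: even a 128K block holds fewer than
 * 128K / 64 = 2048 entries once the header is counted, so 2050 adds
 * must force the upgrade.)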
4068 */ 4069 for (int i = 0; i < 2050; i++) { 4070 char name[MAXNAMELEN]; 4071 uint64_t value = i; 4072 dmu_tx_t *tx; 4073 int error; 4074 4075 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 4076 id, value); 4077 4078 tx = dmu_tx_create(os); 4079 dmu_tx_hold_zap(tx, object, B_TRUE, name); 4080 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4081 if (txg == 0) 4082 return; 4083 error = zap_add(os, object, name, sizeof (uint64_t), 1, 4084 &value, tx); 4085 ASSERT(error == 0 || error == EEXIST); 4086 dmu_tx_commit(tx); 4087 } 4088 } 4089 4090 /* ARGSUSED */ 4091 void 4092 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 4093 { 4094 objset_t *os = zd->zd_os; 4095 ztest_od_t od[1]; 4096 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 4097 dmu_tx_t *tx; 4098 int i, namelen, error; 4099 int micro = ztest_random(2); 4100 char name[20], string_value[20]; 4101 void *data; 4102 4103 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 4104 4105 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4106 return; 4107 4108 object = od[0].od_object; 4109 4110 /* 4111 * Generate a random name of the form 'xxx.....' where each 4112 * x is a random printable character and the dots are dots. 4113 * There are 94 such characters, and the name length goes from 4114 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 4115 */ 4116 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 4117 4118 for (i = 0; i < 3; i++) 4119 name[i] = '!' + ztest_random('~' - '!' + 1); 4120 for (; i < namelen - 1; i++) 4121 name[i] = '.'; 4122 name[i] = '\0'; 4123 4124 if ((namelen & 1) || micro) { 4125 wsize = sizeof (txg); 4126 wc = 1; 4127 data = &txg; 4128 } else { 4129 wsize = 1; 4130 wc = namelen; 4131 data = string_value; 4132 } 4133 4134 count = -1ULL; 4135 VERIFY(zap_count(os, object, &count) == 0); 4136 ASSERT(count != -1ULL); 4137 4138 /* 4139 * Select an operation: length, lookup, add, update, remove. 4140 */ 4141 i = ztest_random(5); 4142 4143 if (i >= 2) { 4144 tx = dmu_tx_create(os); 4145 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4146 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4147 if (txg == 0) 4148 return; 4149 bcopy(name, string_value, namelen); 4150 } else { 4151 tx = NULL; 4152 txg = 0; 4153 bzero(string_value, namelen); 4154 } 4155 4156 switch (i) { 4157 4158 case 0: 4159 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 4160 if (error == 0) { 4161 ASSERT3U(wsize, ==, zl_wsize); 4162 ASSERT3U(wc, ==, zl_wc); 4163 } else { 4164 ASSERT3U(error, ==, ENOENT); 4165 } 4166 break; 4167 4168 case 1: 4169 error = zap_lookup(os, object, name, wsize, wc, data); 4170 if (error == 0) { 4171 if (data == string_value && 4172 bcmp(name, data, namelen) != 0) 4173 fatal(0, "name '%s' != val '%s' len %d", 4174 name, data, namelen); 4175 } else { 4176 ASSERT3U(error, ==, ENOENT); 4177 } 4178 break; 4179 4180 case 2: 4181 error = zap_add(os, object, name, wsize, wc, data, tx); 4182 ASSERT(error == 0 || error == EEXIST); 4183 break; 4184 4185 case 3: 4186 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4187 break; 4188 4189 case 4: 4190 error = zap_remove(os, object, name, tx); 4191 ASSERT(error == 0 || error == ENOENT); 4192 break; 4193 } 4194 4195 if (tx != NULL) 4196 dmu_tx_commit(tx); 4197 } 4198 4199 /* 4200 * Commit callback data. 
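 *
 * Registration sketch (mirrors ztest_dmu_commit_callbacks() below):
 *
 *	tx = dmu_tx_create(os);
 *	cb = ztest_create_cb_data(os, 0);
 *	dmu_tx_callback_register(tx, ztest_commit_callback, cb);
 *	... assign the tx, record cb->zcd_txg, commit; the callback
 *	then fires with error == 0 once that txg syncs.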
4201 */ 4202 typedef struct ztest_cb_data { 4203 list_node_t zcd_node; 4204 uint64_t zcd_txg; 4205 int zcd_expected_err; 4206 boolean_t zcd_added; 4207 boolean_t zcd_called; 4208 spa_t *zcd_spa; 4209 } ztest_cb_data_t; 4210 4211 /* This is the actual commit callback function */ 4212 static void 4213 ztest_commit_callback(void *arg, int error) 4214 { 4215 ztest_cb_data_t *data = arg; 4216 uint64_t synced_txg; 4217 4218 VERIFY(data != NULL); 4219 VERIFY3S(data->zcd_expected_err, ==, error); 4220 VERIFY(!data->zcd_called); 4221 4222 synced_txg = spa_last_synced_txg(data->zcd_spa); 4223 if (data->zcd_txg > synced_txg) 4224 fatal(0, "commit callback of txg %" PRIu64 " called prematurely" 4225 ", last synced txg = %" PRIu64 "\n", data->zcd_txg, 4226 synced_txg); 4227 4228 data->zcd_called = B_TRUE; 4229 4230 if (error == ECANCELED) { 4231 ASSERT3U(data->zcd_txg, ==, 0); 4232 ASSERT(!data->zcd_added); 4233 4234 /* 4235 * The private callback data should be destroyed here, but 4236 * since we are going to check the zcd_called field after 4237 * dmu_tx_abort(), we will destroy it there. 4238 */ 4239 return; 4240 } 4241 4242 /* Was this callback added to the global callback list? */ 4243 if (!data->zcd_added) 4244 goto out; 4245 4246 ASSERT3U(data->zcd_txg, !=, 0); 4247 4248 /* Remove our callback from the list */ 4249 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4250 list_remove(&zcl.zcl_callbacks, data); 4251 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4252 4253 out: 4254 umem_free(data, sizeof (ztest_cb_data_t)); 4255 } 4256 4257 /* Allocate and initialize callback data structure */ 4258 static ztest_cb_data_t * 4259 ztest_create_cb_data(objset_t *os, uint64_t txg) 4260 { 4261 ztest_cb_data_t *cb_data; 4262 4263 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 4264 4265 cb_data->zcd_txg = txg; 4266 cb_data->zcd_spa = dmu_objset_spa(os); 4267 4268 return (cb_data); 4269 } 4270 4271 /* 4272 * If a number of txgs equal to this threshold have been created after a commit 4273 * callback has been registered but not called, then we assume there is an 4274 * implementation bug. 4275 */ 4276 #define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) 4277 4278 /* 4279 * Commit callback test. 4280 */ 4281 void 4282 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 4283 { 4284 objset_t *os = zd->zd_os; 4285 ztest_od_t od[1]; 4286 dmu_tx_t *tx; 4287 ztest_cb_data_t *cb_data[3], *tmp_cb; 4288 uint64_t old_txg, txg; 4289 int i, error = 0; 4290 4291 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4292 4293 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4294 return; 4295 4296 tx = dmu_tx_create(os); 4297 4298 cb_data[0] = ztest_create_cb_data(os, 0); 4299 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 4300 4301 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); 4302 4303 /* Every once in a while, abort the transaction on purpose */ 4304 if (ztest_random(100) == 0) 4305 error = -1; 4306 4307 if (!error) 4308 error = dmu_tx_assign(tx, TXG_NOWAIT); 4309 4310 txg = error ? 0 : dmu_tx_get_txg(tx); 4311 4312 cb_data[0]->zcd_txg = txg; 4313 cb_data[1] = ztest_create_cb_data(os, txg); 4314 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 4315 4316 if (error) { 4317 /* 4318 * It's not a strict requirement to call the registered 4319 * callbacks from inside dmu_tx_abort(), but that's what 4320 * happens in the current implementation, so we will 4321 * check for that.
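 *
 * Sketch of the expectation (matching the code below):
 *
 *	cb_data[i]->zcd_expected_err = ECANCELED;
 *	dmu_tx_abort(tx);		-- must invoke callbacks inline
 *	VERIFY(cb_data[i]->zcd_called);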
4322 */ 4323 for (i = 0; i < 2; i++) { 4324 cb_data[i]->zcd_expected_err = ECANCELED; 4325 VERIFY(!cb_data[i]->zcd_called); 4326 } 4327 4328 dmu_tx_abort(tx); 4329 4330 for (i = 0; i < 2; i++) { 4331 VERIFY(cb_data[i]->zcd_called); 4332 umem_free(cb_data[i], sizeof (ztest_cb_data_t)); 4333 } 4334 4335 return; 4336 } 4337 4338 cb_data[2] = ztest_create_cb_data(os, txg); 4339 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]); 4340 4341 /* 4342 * Read existing data to make sure there isn't a future leak. 4343 */ 4344 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t), 4345 &old_txg, DMU_READ_PREFETCH)); 4346 4347 if (old_txg > txg) 4348 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64, 4349 old_txg, txg); 4350 4351 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); 4352 4353 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4354 4355 /* 4356 * Since commit callbacks don't have any ordering requirement and since 4357 * it is theoretically possible for a commit callback to be called 4358 * after an arbitrary amount of time has elapsed since its txg has been 4359 * synced, it is difficult to reliably determine whether a commit 4360 * callback hasn't been called due to high load or due to a flawed 4361 * implementation. 4362 * 4363 * In practice, we will assume that if after a certain number of txgs a 4364 * commit callback hasn't been called, then most likely there's an 4365 * implementation bug. 4366 */ 4367 tmp_cb = list_head(&zcl.zcl_callbacks); 4368 if (tmp_cb != NULL && 4369 tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) { 4370 fatal(0, "Commit callback threshold exceeded, oldest txg: %" 4371 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); 4372 } 4373 4374 /* 4375 * Let's find the place to insert our callbacks. 4376 * 4377 * Even though the list is ordered by txg, it is possible for the 4378 * insertion point to not be the end because our txg may already be 4379 * quiescing at this point and other callbacks in the open txg 4380 * (from other objsets) may have sneaked in.
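 *
 * Worked example (hypothetical txgs): given the list [98, 99, 101]
 * and our txg == 100, the walk below steps back past 101 and inserts
 * after 99 rather than at the tail.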
4381 */ 4382 tmp_cb = list_tail(&zcl.zcl_callbacks); 4383 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 4384 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 4385 4386 /* Add the 3 callbacks to the list */ 4387 for (i = 0; i < 3; i++) { 4388 if (tmp_cb == NULL) 4389 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 4390 else 4391 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 4392 cb_data[i]); 4393 4394 cb_data[i]->zcd_added = B_TRUE; 4395 VERIFY(!cb_data[i]->zcd_called); 4396 4397 tmp_cb = cb_data[i]; 4398 } 4399 4400 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4401 4402 dmu_tx_commit(tx); 4403 } 4404 4405 /* ARGSUSED */ 4406 void 4407 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 4408 { 4409 zfs_prop_t proplist[] = { 4410 ZFS_PROP_CHECKSUM, 4411 ZFS_PROP_COMPRESSION, 4412 ZFS_PROP_COPIES, 4413 ZFS_PROP_DEDUP 4414 }; 4415 4416 (void) rw_rdlock(&ztest_name_lock); 4417 4418 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) 4419 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 4420 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 4421 4422 (void) rw_unlock(&ztest_name_lock); 4423 } 4424 4425 /* ARGSUSED */ 4426 void 4427 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 4428 { 4429 nvlist_t *props = NULL; 4430 4431 (void) rw_rdlock(&ztest_name_lock); 4432 4433 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO, 4434 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); 4435 4436 VERIFY3U(spa_prop_get(ztest_spa, &props), ==, 0); 4437 4438 if (ztest_opts.zo_verbose >= 6) 4439 dump_nvlist(props, 4); 4440 4441 nvlist_free(props); 4442 4443 (void) rw_unlock(&ztest_name_lock); 4444 } 4445 4446 /* 4447 * Test snapshot hold/release and deferred destroy. 4448 */ 4449 void 4450 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 4451 { 4452 int error; 4453 objset_t *os = zd->zd_os; 4454 objset_t *origin; 4455 char snapname[100]; 4456 char fullname[100]; 4457 char clonename[100]; 4458 char tag[100]; 4459 char osname[MAXNAMELEN]; 4460 4461 (void) rw_rdlock(&ztest_name_lock); 4462 4463 dmu_objset_name(os, osname); 4464 4465 (void) snprintf(snapname, 100, "sh1_%llu", id); 4466 (void) snprintf(fullname, 100, "%s@%s", osname, snapname); 4467 (void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id); 4468 (void) snprintf(tag, 100, "tag_%llu", id); 4469 4470 /* 4471 * Clean up from any previous run. 4472 */ 4473 (void) dmu_objset_destroy(clonename, B_FALSE); 4474 (void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE); 4475 (void) dmu_objset_destroy(fullname, B_FALSE); 4476 4477 /* 4478 * Create snapshot, clone it, mark snap for deferred destroy, 4479 * destroy clone, verify snap was also destroyed.
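 *
 * Roughly the command-line equivalent of the sequence (illustrative):
 *
 *	zfs snapshot <os>@sh1_<id>
 *	zfs clone <os>@sh1_<id> <os>/ch1_<id>
 *	zfs destroy -d <os>@sh1_<id>	-- deferred destroy
 *	zfs destroy <os>/ch1_<id>	-- snapshot goes with it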
4480 */ 4481 error = dmu_objset_snapshot_one(osname, snapname); 4482 if (error) { 4483 if (error == ENOSPC) { 4484 ztest_record_enospc("dmu_objset_snapshot"); 4485 goto out; 4486 } 4487 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4488 } 4489 4490 error = dmu_objset_hold(fullname, FTAG, &origin); 4491 if (error) 4492 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4493 4494 error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0); 4495 dmu_objset_rele(origin, FTAG); 4496 if (error) { 4497 if (error == ENOSPC) { 4498 ztest_record_enospc("dmu_objset_clone"); 4499 goto out; 4500 } 4501 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error); 4502 } 4503 4504 error = dmu_objset_destroy(fullname, B_TRUE); 4505 if (error) { 4506 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d", 4507 fullname, error); 4508 } 4509 4510 error = dmu_objset_destroy(clonename, B_FALSE); 4511 if (error) 4512 fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error); 4513 4514 error = dmu_objset_hold(fullname, FTAG, &origin); 4515 if (error != ENOENT) 4516 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4517 4518 /* 4519 * Create snapshot, add temporary hold, verify that we can't 4520 * destroy a held snapshot, mark for deferred destroy, 4521 * release hold, verify snapshot was destroyed. 4522 */ 4523 error = dmu_objset_snapshot_one(osname, snapname); 4524 if (error) { 4525 if (error == ENOSPC) { 4526 ztest_record_enospc("dmu_objset_snapshot"); 4527 goto out; 4528 } 4529 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4530 } 4531 4532 error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE, 4533 B_TRUE, -1); 4534 if (error) 4535 fatal(0, "dsl_dataset_user_hold(%s, %s) = %d", fullname, tag, error); 4536 4537 error = dmu_objset_destroy(fullname, B_FALSE); 4538 if (error != EBUSY) { 4539 fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d", 4540 fullname, error); 4541 } 4542 4543 error = dmu_objset_destroy(fullname, B_TRUE); 4544 if (error) { 4545 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d", 4546 fullname, error); 4547 } 4548 4549 error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE); 4550 if (error) 4551 fatal(0, "dsl_dataset_user_release(%s, %s) = %d", fullname, tag, error); 4552 4553 VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT); 4554 4555 out: 4556 (void) rw_unlock(&ztest_name_lock); 4557 } 4558 4559 /* 4560 * Inject random faults into the on-disk data. 4561 */ 4562 /* ARGSUSED */ 4563 void 4564 ztest_fault_inject(ztest_ds_t *zd, uint64_t id) 4565 { 4566 ztest_shared_t *zs = ztest_shared; 4567 spa_t *spa = ztest_spa; 4568 int fd; 4569 uint64_t offset; 4570 uint64_t leaves; 4571 uint64_t bad = 0x1990c0ffeedecade; 4572 uint64_t top, leaf; 4573 char path0[MAXPATHLEN]; 4574 char pathrand[MAXPATHLEN]; 4575 size_t fsize; 4576 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */ 4577 int iters = 1000; 4578 int maxfaults; 4579 int mirror_save; 4580 vdev_t *vd0 = NULL; 4581 uint64_t guid0 = 0; 4582 boolean_t islog = B_FALSE; 4583 4584 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4585 maxfaults = MAXFAULTS(); 4586 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 4587 mirror_save = zs->zs_mirrors; 4588 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4589 4590 ASSERT(leaves >= 1); 4591 4592 /* 4593 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd. 4594 */ 4595 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4596 4597 if (ztest_random(2) == 0) { 4598 /* 4599 * Inject errors on a normal data device or slog device.
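 *
 * The transient faults chosen from at random further below:
 *
 *	close(vf->vf_vnode->v_fd)	-- yank the underlying fd
 *	vd0->vdev_cant_read = B_TRUE	-- claim to be unreadable
 *	vd0->vdev_cant_write = B_TRUE	-- claim to be unwriteable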
4600 */ 4601 top = ztest_random_vdev_top(spa, B_TRUE); 4602 leaf = ztest_random(leaves) + zs->zs_splits; 4603 4604 /* 4605 * Generate paths to the first leaf in this top-level vdev, 4606 * and to the random leaf we selected. We'll induce transient 4607 * write failures and random online/offline activity on leaf 0, 4608 * and we'll write random garbage to the randomly chosen leaf. 4609 */ 4610 (void) snprintf(path0, sizeof (path0), ztest_dev_template, 4611 ztest_opts.zo_dir, ztest_opts.zo_pool, 4612 top * leaves + zs->zs_splits); 4613 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, 4614 ztest_opts.zo_dir, ztest_opts.zo_pool, 4615 top * leaves + leaf); 4616 4617 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); 4618 if (vd0 != NULL && vd0->vdev_top->vdev_islog) 4619 islog = B_TRUE; 4620 4621 if (vd0 != NULL && maxfaults != 1) { 4622 /* 4623 * Make vd0 explicitly claim to be unreadable, 4624 * or unwriteable, or reach behind its back 4625 * and close the underlying fd. We can do this if 4626 * maxfaults == 0 because we'll fail and reexecute, 4627 * and we can do it if maxfaults >= 2 because we'll 4628 * have enough redundancy. If maxfaults == 1, the 4629 * combination of this with injection of random data 4630 * corruption below exceeds the pool's fault tolerance. 4631 */ 4632 vdev_file_t *vf = vd0->vdev_tsd; 4633 4634 if (vf != NULL && ztest_random(3) == 0) { 4635 (void) close(vf->vf_vnode->v_fd); 4636 vf->vf_vnode->v_fd = -1; 4637 } else if (ztest_random(2) == 0) { 4638 vd0->vdev_cant_read = B_TRUE; 4639 } else { 4640 vd0->vdev_cant_write = B_TRUE; 4641 } 4642 guid0 = vd0->vdev_guid; 4643 } 4644 } else { 4645 /* 4646 * Inject errors on an l2cache device. 4647 */ 4648 spa_aux_vdev_t *sav = &spa->spa_l2cache; 4649 4650 if (sav->sav_count == 0) { 4651 spa_config_exit(spa, SCL_STATE, FTAG); 4652 return; 4653 } 4654 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; 4655 guid0 = vd0->vdev_guid; 4656 (void) strcpy(path0, vd0->vdev_path); 4657 (void) strcpy(pathrand, vd0->vdev_path); 4658 4659 leaf = 0; 4660 leaves = 1; 4661 maxfaults = INT_MAX; /* no limit on cache devices */ 4662 } 4663 4664 spa_config_exit(spa, SCL_STATE, FTAG); 4665 4666 /* 4667 * If we can tolerate two or more faults, or we're dealing 4668 * with a slog, randomly online/offline vd0. 4669 */ 4670 if ((maxfaults >= 2 || islog) && guid0 != 0) { 4671 if (ztest_random(10) < 6) { 4672 int flags = (ztest_random(2) == 0 ? 4673 ZFS_OFFLINE_TEMPORARY : 0); 4674 4675 /* 4676 * We have to grab the zs_name_lock as writer to 4677 * prevent a race between offlining a slog and 4678 * destroying a dataset. Offlining the slog will 4679 * grab a reference on the dataset which may cause 4680 * dmu_objset_destroy() to fail with EBUSY thus 4681 * leaving the dataset in an inconsistent state. 4682 */ 4683 if (islog) 4684 (void) rw_wrlock(&ztest_name_lock); 4685 4686 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); 4687 4688 if (islog) 4689 (void) rw_unlock(&ztest_name_lock); 4690 } else { 4691 (void) vdev_online(spa, guid0, 0, NULL); 4692 } 4693 } 4694 4695 if (maxfaults == 0) 4696 return; 4697 4698 /* 4699 * We have at least single-fault tolerance, so inject data corruption. 
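 *
 * Offset selection in the loop below, informally:
 *
 *	offset = random_row * (leaves << bshift)	-- pick a row
 *	       + (leaf << bshift)			-- our leaf's column
 *	       + (random(1 << (bshift - 1)) & -8)	-- 8-byte aligned,
 *							   lower half only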
4700 */ 4701 fd = open(pathrand, O_RDWR); 4702 4703 if (fd == -1) /* we hit a gap in the device namespace */ 4704 return; 4705 4706 fsize = lseek(fd, 0, SEEK_END); 4707 4708 while (--iters != 0) { 4709 offset = ztest_random(fsize / (leaves << bshift)) * 4710 (leaves << bshift) + (leaf << bshift) + 4711 (ztest_random(1ULL << (bshift - 1)) & -8ULL); 4712 4713 if (offset >= fsize) 4714 continue; 4715 4716 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4717 if (mirror_save != zs->zs_mirrors) { 4718 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4719 (void) close(fd); 4720 return; 4721 } 4722 4723 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) 4724 fatal(1, "can't inject bad word at 0x%llx in %s", 4725 offset, pathrand); 4726 4727 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4728 4729 if (ztest_opts.zo_verbose >= 7) 4730 (void) printf("injected bad word into %s," 4731 " offset 0x%llx\n", pathrand, (u_longlong_t)offset); 4732 } 4733 4734 (void) close(fd); 4735 } 4736 4737 /* 4738 * Verify that DDT repair works as expected. 4739 */ 4740 void 4741 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) 4742 { 4743 ztest_shared_t *zs = ztest_shared; 4744 spa_t *spa = ztest_spa; 4745 objset_t *os = zd->zd_os; 4746 ztest_od_t od[1]; 4747 uint64_t object, blocksize, txg, pattern, psize; 4748 enum zio_checksum checksum = spa_dedup_checksum(spa); 4749 dmu_buf_t *db; 4750 dmu_tx_t *tx; 4751 void *buf; 4752 blkptr_t blk; 4753 int copies = 2 * ZIO_DEDUPDITTO_MIN; 4754 4755 blocksize = ztest_random_blocksize(); 4756 blocksize = MIN(blocksize, 2048); /* because we write so many */ 4757 4758 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4759 4760 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4761 return; 4762 4763 /* 4764 * Take the name lock as writer to prevent anyone else from changing 4765 * the pool and dataset properties we need to maintain during this test. 4766 */ 4767 (void) rw_wrlock(&ztest_name_lock); 4768 4769 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, 4770 B_FALSE) != 0 || 4771 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, 4772 B_FALSE) != 0) { 4773 (void) rw_unlock(&ztest_name_lock); 4774 return; 4775 } 4776 4777 object = od[0].od_object; 4778 blocksize = od[0].od_blocksize; 4779 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os); 4780 4781 ASSERT(object != 0); 4782 4783 tx = dmu_tx_create(os); 4784 dmu_tx_hold_write(tx, object, 0, copies * blocksize); 4785 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 4786 if (txg == 0) { 4787 (void) rw_unlock(&ztest_name_lock); 4788 return; 4789 } 4790 4791 /* 4792 * Write all the copies of our block. 4793 */ 4794 for (int i = 0; i < copies; i++) { 4795 uint64_t offset = i * blocksize; 4796 VERIFY(dmu_buf_hold(os, object, offset, FTAG, &db, 4797 DMU_READ_NO_PREFETCH) == 0); 4798 ASSERT(db->db_offset == offset); 4799 ASSERT(db->db_size == blocksize); 4800 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) || 4801 ztest_pattern_match(db->db_data, db->db_size, 0ULL)); 4802 dmu_buf_will_fill(db, tx); 4803 ztest_pattern_set(db->db_data, db->db_size, pattern); 4804 dmu_buf_rele(db, FTAG); 4805 } 4806 4807 dmu_tx_commit(tx); 4808 txg_wait_synced(spa_get_dsl(spa), txg); 4809 4810 /* 4811 * Find out what block we got. 4812 */ 4813 VERIFY(dmu_buf_hold(os, object, 0, FTAG, &db, 4814 DMU_READ_NO_PREFETCH) == 0); 4815 blk = *((dmu_buf_impl_t *)db)->db_blkptr; 4816 dmu_buf_rele(db, FTAG); 4817 4818 /* 4819 * Damage the block. Dedup-ditto will save us when we read it later.
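 *
 * Concretely (mirroring the code below):
 *
 *	pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os)
 *	damage  = ~pattern	-- written via ZIO_FLAG_INDUCE_DAMAGE
 *
 * so a later read fails its checksum and the DDT's extra copies
 * supply the repair.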
4820 */ 4821 psize = BP_GET_PSIZE(&blk); 4822 buf = zio_buf_alloc(psize); 4823 ztest_pattern_set(buf, psize, ~pattern); 4824 4825 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk, 4826 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, 4827 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL)); 4828 4829 zio_buf_free(buf, psize); 4830 4831 (void) rw_unlock(&ztest_name_lock); 4832 } 4833 4834 /* 4835 * Scrub the pool. 4836 */ 4837 /* ARGSUSED */ 4838 void 4839 ztest_scrub(ztest_ds_t *zd, uint64_t id) 4840 { 4841 spa_t *spa = ztest_spa; 4842 4843 (void) spa_scan(spa, POOL_SCAN_SCRUB); 4844 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */ 4845 (void) spa_scan(spa, POOL_SCAN_SCRUB); 4846 } 4847 4848 /* 4849 * Change the guid for the pool. 4850 */ 4851 /* ARGSUSED */ 4852 void 4853 ztest_reguid(ztest_ds_t *zd, uint64_t id) 4854 { 4855 spa_t *spa = ztest_spa; 4856 uint64_t orig, load; 4857 4858 orig = spa_guid(spa); 4859 load = spa_load_guid(spa); 4860 if (spa_change_guid(spa) != 0) 4861 return; 4862 4863 if (ztest_opts.zo_verbose >= 3) { 4864 (void) printf("Changed guid old %llu -> %llu\n", 4865 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); 4866 } 4867 4868 VERIFY3U(orig, !=, spa_guid(spa)); 4869 VERIFY3U(load, ==, spa_load_guid(spa)); 4870 } 4871 4872 /* 4873 * Rename the pool to a different name and then rename it back. 4874 */ 4875 /* ARGSUSED */ 4876 void 4877 ztest_spa_rename(ztest_ds_t *zd, uint64_t id) 4878 { 4879 char *oldname, *newname; 4880 spa_t *spa; 4881 4882 (void) rw_wrlock(&ztest_name_lock); 4883 4884 oldname = ztest_opts.zo_pool; 4885 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); 4886 (void) strcpy(newname, oldname); 4887 (void) strcat(newname, "_tmp"); 4888 4889 /* 4890 * Do the rename 4891 */ 4892 VERIFY3U(0, ==, spa_rename(oldname, newname)); 4893 4894 /* 4895 * Try to open it under the old name, which shouldn't exist 4896 */ 4897 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 4898 4899 /* 4900 * Open it under the new name and make sure it's still the same spa_t. 4901 */ 4902 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 4903 4904 ASSERT(spa == ztest_spa); 4905 spa_close(spa, FTAG); 4906 4907 /* 4908 * Rename it back to the original 4909 */ 4910 VERIFY3U(0, ==, spa_rename(newname, oldname)); 4911 4912 /* 4913 * Make sure it can still be opened 4914 */ 4915 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 4916 4917 ASSERT(spa == ztest_spa); 4918 spa_close(spa, FTAG); 4919 4920 umem_free(newname, strlen(newname) + 1); 4921 4922 (void) rw_unlock(&ztest_name_lock); 4923 } 4924 4925 /* 4926 * Verify pool integrity by running zdb. 4927 */ 4928 static void 4929 ztest_run_zdb(char *pool) 4930 { 4931 int status; 4932 char zdb[MAXPATHLEN + MAXNAMELEN + 20]; 4933 char zbuf[1024]; 4934 char *bin; 4935 char *ztest; 4936 char *isa; 4937 int isalen; 4938 FILE *fp; 4939 4940 (void) realpath(getexecname(), zdb); 4941 4942 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ 4943 bin = strstr(zdb, "/usr/bin/"); 4944 ztest = strstr(bin, "/ztest"); 4945 isa = bin + 8; 4946 isalen = ztest - isa; 4947 isa = strdup(isa); 4948 /* LINTED */ 4949 (void) sprintf(bin, 4950 "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s", 4951 isalen, 4952 isa, 4953 ztest_opts.zo_verbose >= 3 ? "s" : "", 4954 ztest_opts.zo_verbose >= 4 ? 
"v" : "", 4955 spa_config_path, 4956 pool); 4957 free(isa); 4958 4959 if (ztest_opts.zo_verbose >= 5) 4960 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 4961 4962 fp = popen(zdb, "r"); 4963 4964 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 4965 if (ztest_opts.zo_verbose >= 3) 4966 (void) printf("%s", zbuf); 4967 4968 status = pclose(fp); 4969 4970 if (status == 0) 4971 return; 4972 4973 ztest_dump_core = 0; 4974 if (WIFEXITED(status)) 4975 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 4976 else 4977 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 4978 } 4979 4980 static void 4981 ztest_walk_pool_directory(char *header) 4982 { 4983 spa_t *spa = NULL; 4984 4985 if (ztest_opts.zo_verbose >= 6) 4986 (void) printf("%s\n", header); 4987 4988 mutex_enter(&spa_namespace_lock); 4989 while ((spa = spa_next(spa)) != NULL) 4990 if (ztest_opts.zo_verbose >= 6) 4991 (void) printf("\t%s\n", spa_name(spa)); 4992 mutex_exit(&spa_namespace_lock); 4993 } 4994 4995 static void 4996 ztest_spa_import_export(char *oldname, char *newname) 4997 { 4998 nvlist_t *config, *newconfig; 4999 uint64_t pool_guid; 5000 spa_t *spa; 5001 5002 if (ztest_opts.zo_verbose >= 4) { 5003 (void) printf("import/export: old = %s, new = %s\n", 5004 oldname, newname); 5005 } 5006 5007 /* 5008 * Clean up from previous runs. 5009 */ 5010 (void) spa_destroy(newname); 5011 5012 /* 5013 * Get the pool's configuration and guid. 5014 */ 5015 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5016 5017 /* 5018 * Kick off a scrub to tickle scrub/export races. 5019 */ 5020 if (ztest_random(2) == 0) 5021 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5022 5023 pool_guid = spa_guid(spa); 5024 spa_close(spa, FTAG); 5025 5026 ztest_walk_pool_directory("pools before export"); 5027 5028 /* 5029 * Export it. 5030 */ 5031 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 5032 5033 ztest_walk_pool_directory("pools after export"); 5034 5035 /* 5036 * Try to import it. 5037 */ 5038 newconfig = spa_tryimport(config); 5039 ASSERT(newconfig != NULL); 5040 nvlist_free(newconfig); 5041 5042 /* 5043 * Import it under the new name. 5044 */ 5045 VERIFY3U(0, ==, spa_import(newname, config, NULL, 0)); 5046 5047 ztest_walk_pool_directory("pools after import"); 5048 5049 /* 5050 * Try to import it again -- should fail with EEXIST. 5051 */ 5052 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 5053 5054 /* 5055 * Try to import it under a different name -- should fail with EEXIST. 5056 */ 5057 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 5058 5059 /* 5060 * Verify that the pool is no longer visible under the old name. 5061 */ 5062 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5063 5064 /* 5065 * Verify that we can open and close the pool using the new name. 
5066 */ 5067 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5068 ASSERT(pool_guid == spa_guid(spa)); 5069 spa_close(spa, FTAG); 5070 5071 nvlist_free(config); 5072 } 5073 5074 static void 5075 ztest_resume(spa_t *spa) 5076 { 5077 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6) 5078 (void) printf("resuming from suspended state\n"); 5079 spa_vdev_state_enter(spa, SCL_NONE); 5080 vdev_clear(spa, NULL); 5081 (void) spa_vdev_state_exit(spa, NULL, 0); 5082 (void) zio_resume(spa); 5083 } 5084 5085 static void * 5086 ztest_resume_thread(void *arg) 5087 { 5088 spa_t *spa = arg; 5089 5090 while (!ztest_exiting) { 5091 if (spa_suspended(spa)) 5092 ztest_resume(spa); 5093 (void) poll(NULL, 0, 100); 5094 } 5095 return (NULL); 5096 } 5097 5098 static void * 5099 ztest_deadman_thread(void *arg) 5100 { 5101 ztest_shared_t *zs = arg; 5102 int grace = 300; 5103 hrtime_t delta; 5104 5105 delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace; 5106 5107 (void) poll(NULL, 0, (int)(1000 * delta)); 5108 5109 fatal(0, "failed to complete within %d seconds of deadline", grace); 5110 5111 return (NULL); 5112 } 5113 5114 static void 5115 ztest_execute(int test, ztest_info_t *zi, uint64_t id) 5116 { 5117 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 5118 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 5119 hrtime_t functime = gethrtime(); 5120 5121 for (int i = 0; i < zi->zi_iters; i++) 5122 zi->zi_func(zd, id); 5123 5124 functime = gethrtime() - functime; 5125 5126 atomic_add_64(&zc->zc_count, 1); 5127 atomic_add_64(&zc->zc_time, functime); 5128 5129 if (ztest_opts.zo_verbose >= 4) { 5130 Dl_info dli; 5131 (void) dladdr((void *)zi->zi_func, &dli); 5132 (void) printf("%6.2f sec in %s\n", 5133 (double)functime / NANOSEC, dli.dli_sname); 5134 } 5135 } 5136 5137 static void * 5138 ztest_thread(void *arg) 5139 { 5140 int rand; 5141 uint64_t id = (uintptr_t)arg; 5142 ztest_shared_t *zs = ztest_shared; 5143 uint64_t call_next; 5144 hrtime_t now; 5145 ztest_info_t *zi; 5146 ztest_shared_callstate_t *zc; 5147 5148 while ((now = gethrtime()) < zs->zs_thread_stop) { 5149 /* 5150 * See if it's time to force a crash. 5151 */ 5152 if (now > zs->zs_thread_kill) 5153 ztest_kill(zs); 5154 5155 /* 5156 * If we're getting ENOSPC with some regularity, stop. 5157 */ 5158 if (zs->zs_enospc_count > 10) 5159 break; 5160 5161 /* 5162 * Pick a random function to execute. 5163 */ 5164 rand = ztest_random(ZTEST_FUNCS); 5165 zi = &ztest_info[rand]; 5166 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 5167 call_next = zc->zc_next; 5168 5169 if (now >= call_next && 5170 atomic_cas_64(&zc->zc_next, call_next, call_next + 5171 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 5172 ztest_execute(rand, zi, id); 5173 } 5174 } 5175 5176 return (NULL); 5177 } 5178 5179 static void 5180 ztest_dataset_name(char *dsname, char *pool, int d) 5181 { 5182 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d); 5183 } 5184 5185 static void 5186 ztest_dataset_destroy(int d) 5187 { 5188 char name[MAXNAMELEN]; 5189 5190 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5191 5192 if (ztest_opts.zo_verbose >= 3) 5193 (void) printf("Destroying %s to free up space\n", name); 5194 5195 /* 5196 * Clean up any non-standard clones and snapshots. In general, 5197 * ztest thread t operates on dataset (t % zo_datasets), 5198 * so there may be more than one thing to clean up.
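 *
 * e.g. with 7 datasets and 23 threads, dataset 3 is shared by
 * threads 3, 10, and 17, so all three are cleaned up here.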
5199 */ 5200 for (int t = d; t < ztest_opts.zo_threads; 5201 t += ztest_opts.zo_datasets) { 5202 ztest_dsl_dataset_cleanup(name, t); 5203 } 5204 5205 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 5206 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 5207 } 5208 5209 static void 5210 ztest_dataset_dirobj_verify(ztest_ds_t *zd) 5211 { 5212 uint64_t usedobjs, dirobjs, scratch; 5213 5214 /* 5215 * ZTEST_DIROBJ is the object directory for the entire dataset. 5216 * Therefore, the number of objects in use should equal the 5217 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 5218 * If not, we have an object leak. 5219 * 5220 * Note that we can only check this in ztest_dataset_open(), 5221 * when the open-context and syncing-context values agree. 5222 * That's because zap_count() returns the open-context value, 5223 * while dmu_objset_space() returns the rootbp fill count. 5224 */ 5225 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 5226 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 5227 ASSERT3U(dirobjs + 1, ==, usedobjs); 5228 } 5229 5230 static int 5231 ztest_dataset_open(int d) 5232 { 5233 ztest_ds_t *zd = &ztest_ds[d]; 5234 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 5235 objset_t *os; 5236 zilog_t *zilog; 5237 char name[MAXNAMELEN]; 5238 int error; 5239 5240 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5241 5242 (void) rw_rdlock(&ztest_name_lock); 5243 5244 error = ztest_dataset_create(name); 5245 if (error == ENOSPC) { 5246 (void) rw_unlock(&ztest_name_lock); 5247 ztest_record_enospc(FTAG); 5248 return (error); 5249 } 5250 ASSERT(error == 0 || error == EEXIST); 5251 5252 VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0); 5253 (void) rw_unlock(&ztest_name_lock); 5254 5255 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 5256 5257 zilog = zd->zd_zilog; 5258 5259 if (zilog->zl_header->zh_claim_lr_seq != 0 && 5260 zilog->zl_header->zh_claim_lr_seq < committed_seq) 5261 fatal(0, "missing log records: claimed %llu < committed %llu", 5262 zilog->zl_header->zh_claim_lr_seq, committed_seq); 5263 5264 ztest_dataset_dirobj_verify(zd); 5265 5266 zil_replay(os, zd, ztest_replay_vector); 5267 5268 ztest_dataset_dirobj_verify(zd); 5269 5270 if (ztest_opts.zo_verbose >= 6) 5271 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n", 5272 zd->zd_name, 5273 (u_longlong_t)zilog->zl_parse_blk_count, 5274 (u_longlong_t)zilog->zl_parse_lr_count, 5275 (u_longlong_t)zilog->zl_replaying_seq); 5276 5277 zilog = zil_open(os, ztest_get_data); 5278 5279 if (zilog->zl_replaying_seq != 0 && 5280 zilog->zl_replaying_seq < committed_seq) 5281 fatal(0, "missing log records: replayed %llu < committed %llu", 5282 zilog->zl_replaying_seq, committed_seq); 5283 5284 return (0); 5285 } 5286 5287 static void 5288 ztest_dataset_close(int d) 5289 { 5290 ztest_ds_t *zd = &ztest_ds[d]; 5291 5292 zil_close(zd->zd_zilog); 5293 dmu_objset_rele(zd->zd_os, zd); 5294 5295 ztest_zd_fini(zd); 5296 } 5297 5298 /* 5299 * Kick off threads to run tests on all datasets in parallel. 5300 */ 5301 static void 5302 ztest_run(ztest_shared_t *zs) 5303 { 5304 thread_t *tid; 5305 spa_t *spa; 5306 objset_t *os; 5307 thread_t resume_tid; 5308 int error; 5309 5310 ztest_exiting = B_FALSE; 5311 5312 /* 5313 * Initialize parent/child shared state. 
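 *
 * Kill-time selection below, informally:
 *
 *	stop = start + zo_passtime * NANOSEC	-- capped at zs_proc_stop
 *	kill = stop - random(zo_passtime * NANOSEC)
 *						-- with probability zo_killrate%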
5314 */ 5315 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5316 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5317 5318 zs->zs_thread_start = gethrtime(); 5319 zs->zs_thread_stop = 5320 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 5321 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 5322 zs->zs_thread_kill = zs->zs_thread_stop; 5323 if (ztest_random(100) < ztest_opts.zo_killrate) { 5324 zs->zs_thread_kill -= 5325 ztest_random(ztest_opts.zo_passtime * NANOSEC); 5326 } 5327 5328 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL); 5329 5330 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 5331 offsetof(ztest_cb_data_t, zcd_node)); 5332 5333 /* 5334 * Open our pool. 5335 */ 5336 kernel_init(FREAD | FWRITE); 5337 VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0); 5338 spa->spa_debug = B_TRUE; 5339 ztest_spa = spa; 5340 5341 VERIFY3U(0, ==, dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os)); 5342 zs->zs_guid = dmu_objset_fsid_guid(os); 5343 dmu_objset_rele(os, FTAG); 5344 5345 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; 5346 5347 /* 5348 * We don't expect the pool to suspend unless maxfaults == 0, 5349 * in which case ztest_fault_inject() temporarily takes away 5350 * the only valid replica. 5351 */ 5352 if (MAXFAULTS() == 0) 5353 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT; 5354 else 5355 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC; 5356 5357 /* 5358 * Create a thread to periodically resume suspended I/O. 5359 */ 5360 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, 5361 &resume_tid) == 0); 5362 5363 /* 5364 * Create a deadman thread to abort() if we hang. 5365 */ 5366 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, 5367 NULL) == 0); 5368 5369 /* 5370 * Verify that we can safely inquire about any object, 5371 * whether it's allocated or not. To make it interesting, 5372 * we probe a window of five on either side of each power of two. 5373 * This hits all edge cases, including zero and the max. 5374 */ 5375 for (int t = 0; t < 64; t++) { 5376 for (int d = -5; d <= 5; d++) { 5377 error = dmu_object_info(spa->spa_meta_objset, 5378 (1ULL << t) + d, NULL); 5379 ASSERT(error == 0 || error == ENOENT || 5380 error == EINVAL); 5381 } 5382 } 5383 5384 /* 5385 * If we got any ENOSPC errors on the previous run, destroy something. 5386 */ 5387 if (zs->zs_enospc_count != 0) { 5388 int d = ztest_random(ztest_opts.zo_datasets); 5389 ztest_dataset_destroy(d); 5390 } 5391 zs->zs_enospc_count = 0; 5392 5393 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t), 5394 UMEM_NOFAIL); 5395 5396 if (ztest_opts.zo_verbose >= 4) 5397 (void) printf("starting main threads...\n"); 5398 5399 /* 5400 * Kick off all the tests that run in parallel. 5401 */ 5402 for (int t = 0; t < ztest_opts.zo_threads; t++) { 5403 if (t < ztest_opts.zo_datasets && 5404 ztest_dataset_open(t) != 0) 5405 return; 5406 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, 5407 THR_BOUND, &tid[t]) == 0); 5408 } 5409 5410 /* 5411 * Wait for all of the tests to complete. We go in reverse order 5412 * so we don't close datasets while threads are still using them.
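 *
 * (Thread t uses dataset t % zo_datasets, so every user of dataset d
 * has t >= d; joining from high t down means the last user of dataset
 * d is joined exactly when t == d, at which point it is safe to close.)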
5413 */ 5414 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) { 5415 VERIFY(thr_join(tid[t], NULL, NULL) == 0); 5416 if (t < ztest_opts.zo_datasets) 5417 ztest_dataset_close(t); 5418 } 5419 5420 txg_wait_synced(spa_get_dsl(spa), 0); 5421 5422 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 5423 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); 5424 5425 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t)); 5426 5427 /* Kill the resume thread */ 5428 ztest_exiting = B_TRUE; 5429 VERIFY(thr_join(resume_tid, NULL, NULL) == 0); 5430 ztest_resume(spa); 5431 5432 /* 5433 * Right before closing the pool, kick off a bunch of async I/O; 5434 * spa_close() should wait for it to complete. 5435 */ 5436 for (uint64_t object = 1; object < 50; object++) 5437 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20); 5438 5439 spa_close(spa, FTAG); 5440 5441 /* 5442 * Verify that we can loop over all pools. 5443 */ 5444 mutex_enter(&spa_namespace_lock); 5445 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 5446 if (ztest_opts.zo_verbose > 3) 5447 (void) printf("spa_next: found %s\n", spa_name(spa)); 5448 mutex_exit(&spa_namespace_lock); 5449 5450 /* 5451 * Verify that we can export the pool and reimport it under a 5452 * different name. 5453 */ 5454 if (ztest_random(2) == 0) { 5455 char name[MAXNAMELEN]; 5456 (void) snprintf(name, MAXNAMELEN, "%s_import", 5457 ztest_opts.zo_pool); 5458 ztest_spa_import_export(ztest_opts.zo_pool, name); 5459 ztest_spa_import_export(name, ztest_opts.zo_pool); 5460 } 5461 5462 kernel_fini(); 5463 5464 list_destroy(&zcl.zcl_callbacks); 5465 5466 (void) _mutex_destroy(&zcl.zcl_callbacks_lock); 5467 5468 (void) rwlock_destroy(&ztest_name_lock); 5469 (void) _mutex_destroy(&ztest_vdev_lock); 5470 } 5471 5472 static void 5473 ztest_freeze(void) 5474 { 5475 ztest_ds_t *zd = &ztest_ds[0]; 5476 spa_t *spa; 5477 int numloops = 0; 5478 5479 if (ztest_opts.zo_verbose >= 3) 5480 (void) printf("testing spa_freeze()...\n"); 5481 5482 kernel_init(FREAD | FWRITE); 5483 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5484 VERIFY3U(0, ==, ztest_dataset_open(0)); 5485 5486 /* 5487 * Force the first log block to be transactionally allocated. 5488 * We have to do this before we freeze the pool -- otherwise 5489 * the log chain won't be anchored. 5490 */ 5491 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 5492 ztest_dmu_object_alloc_free(zd, 0); 5493 zil_commit(zd->zd_zilog, 0); 5494 } 5495 5496 txg_wait_synced(spa_get_dsl(spa), 0); 5497 5498 /* 5499 * Freeze the pool. This stops spa_sync() from doing anything, 5500 * so that the only way to record changes from now on is the ZIL. 5501 */ 5502 spa_freeze(spa); 5503 5504 /* 5505 * Run tests that generate log records but don't alter the pool config 5506 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 5507 * We do a txg_wait_synced() after each iteration to force the txg 5508 * to increase well beyond the last synced value in the uberblock. 5509 * The ZIL should be OK with that. 5510 */ 5511 while (ztest_random(10) != 0 && 5512 numloops++ < ztest_opts.zo_maxloops) { 5513 ztest_dmu_write_parallel(zd, 0); 5514 ztest_dmu_object_alloc_free(zd, 0); 5515 txg_wait_synced(spa_get_dsl(spa), 0); 5516 } 5517 5518 /* 5519 * Commit all of the changes we just generated. 5520 */ 5521 zil_commit(zd->zd_zilog, 0); 5522 txg_wait_synced(spa_get_dsl(spa), 0); 5523 5524 /* 5525 * Close our dataset and close the pool. 
5526 */ 5527 ztest_dataset_close(0); 5528 spa_close(spa, FTAG); 5529 kernel_fini(); 5530 5531 /* 5532 * Open and close the pool and dataset to induce log replay. 5533 */ 5534 kernel_init(FREAD | FWRITE); 5535 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5536 ASSERT(spa_freeze_txg(spa) == UINT64_MAX); 5537 VERIFY3U(0, ==, ztest_dataset_open(0)); 5538 ztest_dataset_close(0); 5539 spa_close(spa, FTAG); 5540 kernel_fini(); 5541 } 5542 5543 void 5544 print_time(hrtime_t t, char *timebuf) 5545 { 5546 hrtime_t s = t / NANOSEC; 5547 hrtime_t m = s / 60; 5548 hrtime_t h = m / 60; 5549 hrtime_t d = h / 24; 5550 5551 s -= m * 60; 5552 m -= h * 60; 5553 h -= d * 24; 5554 5555 timebuf[0] = '\0'; 5556 5557 if (d) 5558 (void) sprintf(timebuf, 5559 "%llud%02lluh%02llum%02llus", d, h, m, s); 5560 else if (h) 5561 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s); 5562 else if (m) 5563 (void) sprintf(timebuf, "%llum%02llus", m, s); 5564 else 5565 (void) sprintf(timebuf, "%llus", s); 5566 } 5567 5568 static nvlist_t * 5569 make_random_props() 5570 { 5571 nvlist_t *props; 5572 5573 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 5574 if (ztest_random(2) == 0) 5575 return (props); 5576 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0); 5577 5578 return (props); 5579 } 5580 5581 /* 5582 * Create a storage pool with the given name and initial vdev size. 5583 * Then test spa_freeze() functionality. 5584 */ 5585 static void 5586 ztest_init(ztest_shared_t *zs) 5587 { 5588 spa_t *spa; 5589 nvlist_t *nvroot, *props; 5590 5591 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5592 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5593 5594 kernel_init(FREAD | FWRITE); 5595 5596 /* 5597 * Create the storage pool. 5598 */ 5599 (void) spa_destroy(ztest_opts.zo_pool); 5600 ztest_shared->zs_vdev_next_leaf = 0; 5601 zs->zs_splits = 0; 5602 zs->zs_mirrors = ztest_opts.zo_mirrors; 5603 nvroot = make_vdev_root(NULL, NULL, ztest_opts.zo_vdev_size, 0, 5604 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1); 5605 props = make_random_props(); 5606 for (int i = 0; i < SPA_FEATURES; i++) { 5607 char buf[1024]; 5608 (void) snprintf(buf, sizeof (buf), "feature@%s", 5609 spa_feature_table[i].fi_uname); 5610 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0)); 5611 } 5612 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL)); 5613 nvlist_free(nvroot); 5614 5615 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5616 zs->zs_metaslab_sz = 5617 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 5618 5619 spa_close(spa, FTAG); 5620 5621 kernel_fini(); 5622 5623 ztest_run_zdb(ztest_opts.zo_pool); 5624 5625 ztest_freeze(); 5626 5627 ztest_run_zdb(ztest_opts.zo_pool); 5628 5629 (void) rwlock_destroy(&ztest_name_lock); 5630 (void) _mutex_destroy(&ztest_vdev_lock); 5631 } 5632 5633 static void 5634 setup_fds(void) 5635 { 5636 int fd; 5637 5638 char *tmp = tempnam(NULL, NULL); 5639 fd = open(tmp, O_RDWR | O_CREAT, 0700); 5640 ASSERT3U(fd, ==, ZTEST_FD_DATA); 5641 (void) unlink(tmp); 5642 free(tmp); 5643 5644 fd = open("/dev/urandom", O_RDONLY); 5645 ASSERT3U(fd, ==, ZTEST_FD_RAND); 5646 } 5647 5648 static int 5649 shared_data_size(ztest_shared_hdr_t *hdr) 5650 { 5651 int size; 5652 5653 size = hdr->zh_hdr_size; 5654 size += hdr->zh_opts_size; 5655 size += hdr->zh_size; 5656 size += hdr->zh_stats_size * hdr->zh_stats_count; 5657 size += hdr->zh_ds_size * hdr->zh_ds_count; 5658 5659 return (size); 5660 } 5661 5662 static void 5663 setup_hdr(void) 5664 { 5665 int size; 
5666 ztest_shared_hdr_t *hdr; 5667 5668 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5669 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0); 5670 ASSERT(hdr != MAP_FAILED); 5671 5672 VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, sizeof (ztest_shared_hdr_t))); 5673 5674 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t); 5675 hdr->zh_opts_size = sizeof (ztest_shared_opts_t); 5676 hdr->zh_size = sizeof (ztest_shared_t); 5677 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t); 5678 hdr->zh_stats_count = ZTEST_FUNCS; 5679 hdr->zh_ds_size = sizeof (ztest_shared_ds_t); 5680 hdr->zh_ds_count = ztest_opts.zo_datasets; 5681 5682 size = shared_data_size(hdr); 5683 VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, size)); 5684 5685 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5686 } 5687 5688 static void 5689 setup_data(void) 5690 { 5691 int size, offset; 5692 ztest_shared_hdr_t *hdr; 5693 uint8_t *buf; 5694 5695 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5696 PROT_READ, MAP_SHARED, ZTEST_FD_DATA, 0); 5697 ASSERT(hdr != MAP_FAILED); 5698 5699 size = shared_data_size(hdr); 5700 5701 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5702 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()), 5703 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0); 5704 ASSERT(hdr != MAP_FAILED); 5705 buf = (uint8_t *)hdr; 5706 5707 offset = hdr->zh_hdr_size; 5708 ztest_shared_opts = (void *)&buf[offset]; 5709 offset += hdr->zh_opts_size; 5710 ztest_shared = (void *)&buf[offset]; 5711 offset += hdr->zh_size; 5712 ztest_shared_callstate = (void *)&buf[offset]; 5713 offset += hdr->zh_stats_size * hdr->zh_stats_count; 5714 ztest_shared_ds = (void *)&buf[offset]; 5715 } 5716 5717 static boolean_t 5718 exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp) 5719 { 5720 pid_t pid; 5721 int status; 5722 char cmdbuf[MAXPATHLEN]; 5723 5724 pid = fork(); 5725 5726 if (cmd == NULL) { 5727 (void) strlcpy(cmdbuf, getexecname(), sizeof (cmdbuf)); 5728 cmd = cmdbuf; 5729 } 5730 5731 if (pid == -1) 5732 fatal(1, "fork failed"); 5733 5734 if (pid == 0) { /* child */ 5735 char *emptyargv[2] = { cmd, NULL }; 5736 5737 struct rlimit rl = { 1024, 1024 }; 5738 (void) setrlimit(RLIMIT_NOFILE, &rl); 5739 (void) enable_extended_FILE_stdio(-1, -1); 5740 if (libpath != NULL) 5741 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1)); 5742 (void) execv(cmd, emptyargv); 5743 ztest_dump_core = B_FALSE; 5744 fatal(B_TRUE, "exec failed: %s", cmd); 5745 } 5746 5747 while (waitpid(pid, &status, 0) != pid) 5748 continue; 5749 if (statusp != NULL) 5750 *statusp = status; 5751 5752 if (WIFEXITED(status)) { 5753 if (WEXITSTATUS(status) != 0) { 5754 (void) fprintf(stderr, "child exited with code %d\n", 5755 WEXITSTATUS(status)); 5756 exit(2); 5757 } 5758 return (B_FALSE); 5759 } else if (WIFSIGNALED(status)) { 5760 if (!ignorekill || WTERMSIG(status) != SIGKILL) { 5761 (void) fprintf(stderr, "child died with signal %d\n", 5762 WTERMSIG(status)); 5763 exit(3); 5764 } 5765 return (B_TRUE); 5766 } else { 5767 (void) fprintf(stderr, "something strange happened to child\n"); 5768 exit(4); 5769 /* NOTREACHED */ 5770 } 5771 } 5772 5773 static void 5774 ztest_run_init(void) 5775 { 5776 ztest_shared_t *zs = ztest_shared; 5777 5778 ASSERT(ztest_opts.zo_init != 0); 5779 5780 /* 5781 * Blow away any existing copy of zpool.cache 5782 */ 5783 (void) remove(spa_config_path); 5784 5785 /* 5786 * Create and initialize our storage pool. 
5787 */ 5788 for (int i = 1; i <= ztest_opts.zo_init; i++) { 5789 bzero(zs, sizeof (ztest_shared_t)); 5790 if (ztest_opts.zo_verbose >= 3 && 5791 ztest_opts.zo_init != 1) { 5792 (void) printf("ztest_init(), pass %d\n", i); 5793 } 5794 ztest_init(zs); 5795 } 5796 } 5797 5798 int 5799 main(int argc, char **argv) 5800 { 5801 int kills = 0; 5802 int iters = 0; 5803 int older = 0; 5804 int newer = 0; 5805 ztest_shared_t *zs; 5806 ztest_info_t *zi; 5807 ztest_shared_callstate_t *zc; 5808 char timebuf[100]; 5809 char numbuf[6]; 5810 spa_t *spa; 5811 char cmd[MAXNAMELEN]; 5812 boolean_t hasalt; 5813 5814 boolean_t ischild = (0 == lseek(ZTEST_FD_DATA, 0, SEEK_CUR)); 5815 ASSERT(ischild || errno == EBADF); 5816 5817 (void) setvbuf(stdout, NULL, _IOLBF, 0); 5818 5819 if (!ischild) { 5820 process_options(argc, argv); 5821 5822 setup_fds(); 5823 setup_hdr(); 5824 setup_data(); 5825 bcopy(&ztest_opts, ztest_shared_opts, 5826 sizeof (*ztest_shared_opts)); 5827 } else { 5828 setup_data(); 5829 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts)); 5830 } 5831 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count); 5832 5833 /* Override location of zpool.cache */ 5834 (void) asprintf((char **)&spa_config_path, "%s/zpool.cache", 5835 ztest_opts.zo_dir); 5836 5837 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t), 5838 UMEM_NOFAIL); 5839 zs = ztest_shared; 5840 5841 if (ischild) { 5842 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang; 5843 metaslab_df_alloc_threshold = 5844 zs->zs_metaslab_df_alloc_threshold; 5845 5846 if (zs->zs_do_init) 5847 ztest_run_init(); 5848 else 5849 ztest_run(zs); 5850 exit(0); 5851 } 5852 5853 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0); 5854 5855 if (ztest_opts.zo_verbose >= 1) { 5856 (void) printf("%llu vdevs, %d datasets, %d threads," 5857 " %llu seconds...\n", 5858 (u_longlong_t)ztest_opts.zo_vdevs, 5859 ztest_opts.zo_datasets, 5860 ztest_opts.zo_threads, 5861 (u_longlong_t)ztest_opts.zo_time); 5862 } 5863 5864 (void) strlcpy(cmd, getexecname(), sizeof (cmd)); 5865 5866 zs->zs_do_init = B_TRUE; 5867 if (strlen(ztest_opts.zo_alt_ztest) != 0) { 5868 if (ztest_opts.zo_verbose >= 1) { 5869 (void) printf("Executing older ztest for " 5870 "initialization: %s\n", ztest_opts.zo_alt_ztest); 5871 } 5872 VERIFY(!exec_child(ztest_opts.zo_alt_ztest, 5873 ztest_opts.zo_alt_libpath, B_FALSE, NULL)); 5874 } else { 5875 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL)); 5876 } 5877 zs->zs_do_init = B_FALSE; 5878 5879 zs->zs_proc_start = gethrtime(); 5880 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC; 5881 5882 for (int f = 0; f < ZTEST_FUNCS; f++) { 5883 zi = &ztest_info[f]; 5884 zc = ZTEST_GET_SHARED_CALLSTATE(f); 5885 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop) 5886 zc->zc_next = UINT64_MAX; 5887 else 5888 zc->zc_next = zs->zs_proc_start + 5889 ztest_random(2 * zi->zi_interval[0] + 1); 5890 } 5891 5892 /* 5893 * Run the tests in a loop. These tests include fault injection 5894 * to verify that self-healing data works, and forced crashes 5895 * to verify that we never lose on-disk consistency. 5896 */ 5897 while (gethrtime() < zs->zs_proc_stop) { 5898 int status; 5899 boolean_t killed; 5900 5901 /* 5902 * Initialize the workload counters for each function. 
5903 */ 5904 for (int f = 0; f < ZTEST_FUNCS; f++) { 5905 zc = ZTEST_GET_SHARED_CALLSTATE(f); 5906 zc->zc_count = 0; 5907 zc->zc_time = 0; 5908 } 5909 5910 /* Set the allocation switch size */ 5911 zs->zs_metaslab_df_alloc_threshold = 5912 ztest_random(zs->zs_metaslab_sz / 4) + 1; 5913 5914 if (!hasalt || ztest_random(2) == 0) { 5915 if (hasalt && ztest_opts.zo_verbose >= 1) { 5916 (void) printf("Executing newer ztest: %s\n", 5917 cmd); 5918 } 5919 newer++; 5920 killed = exec_child(cmd, NULL, B_TRUE, &status); 5921 } else { 5922 if (hasalt && ztest_opts.zo_verbose >= 1) { 5923 (void) printf("Executing older ztest: %s\n", 5924 ztest_opts.zo_alt_ztest); 5925 } 5926 older++; 5927 killed = exec_child(ztest_opts.zo_alt_ztest, 5928 ztest_opts.zo_alt_libpath, B_TRUE, &status); 5929 } 5930 5931 if (killed) 5932 kills++; 5933 iters++; 5934 5935 if (ztest_opts.zo_verbose >= 1) { 5936 hrtime_t now = gethrtime(); 5937 5938 now = MIN(now, zs->zs_proc_stop); 5939 print_time(zs->zs_proc_stop - now, timebuf); 5940 nicenum(zs->zs_space, numbuf); 5941 5942 (void) printf("Pass %3d, %8s, %3llu ENOSPC, " 5943 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n", 5944 iters, 5945 WIFEXITED(status) ? "Complete" : "SIGKILL", 5946 (u_longlong_t)zs->zs_enospc_count, 5947 100.0 * zs->zs_alloc / zs->zs_space, 5948 numbuf, 5949 100.0 * (now - zs->zs_proc_start) / 5950 (ztest_opts.zo_time * NANOSEC), timebuf); 5951 } 5952 5953 if (ztest_opts.zo_verbose >= 2) { 5954 (void) printf("\nWorkload summary:\n\n"); 5955 (void) printf("%7s %9s %s\n", 5956 "Calls", "Time", "Function"); 5957 (void) printf("%7s %9s %s\n", 5958 "-----", "----", "--------"); 5959 for (int f = 0; f < ZTEST_FUNCS; f++) { 5960 Dl_info dli; 5961 5962 zi = &ztest_info[f]; 5963 zc = ZTEST_GET_SHARED_CALLSTATE(f); 5964 print_time(zc->zc_time, timebuf); 5965 (void) dladdr((void *)zi->zi_func, &dli); 5966 (void) printf("%7llu %9s %s\n", 5967 (u_longlong_t)zc->zc_count, timebuf, 5968 dli.dli_sname); 5969 } 5970 (void) printf("\n"); 5971 } 5972 5973 /* 5974 * It's possible that we killed a child during a rename test, 5975 * in which case we'll have a 'ztest_tmp' pool lying around 5976 * instead of 'ztest'. Do a blind rename in case this happened. 5977 */ 5978 kernel_init(FREAD); 5979 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) { 5980 spa_close(spa, FTAG); 5981 } else { 5982 char tmpname[MAXNAMELEN]; 5983 kernel_fini(); 5984 kernel_init(FREAD | FWRITE); 5985 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp", 5986 ztest_opts.zo_pool); 5987 (void) spa_rename(tmpname, ztest_opts.zo_pool); 5988 } 5989 kernel_fini(); 5990 5991 ztest_run_zdb(ztest_opts.zo_pool); 5992 } 5993 5994 if (ztest_opts.zo_verbose >= 1) { 5995 if (hasalt) { 5996 (void) printf("%d runs of older ztest: %s\n", older, 5997 ztest_opts.zo_alt_ztest); 5998 (void) printf("%d runs of newer ztest: %s\n", newer, 5999 cmd); 6000 } 6001 (void) printf("%d killed, %d completed, %.0f%% kill rate\n", 6002 kills, iters - kills, (100.0 * kills) / MAX(1, iters)); 6003 } 6004 6005 return (0); 6006 } 6007