/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Joyent, Inc.
 * Copyright 2017 RackTop Systems.
 */

/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.  If backwards compatibility
 *     testing is enabled, ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * Use the -k option to set the desired frequency of kills.
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process.  This allows shared
 * memory to survive the exec syscall.  The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file.  The information stored in this
 * file must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 */
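/*
 * A minimal sketch of that parent/child handoff (illustrative only -- the
 * names tmpfile_name, total_size and ztest_path are placeholders, not the
 * actual setup code that appears later in this file):
 *
 *	ztest_fd_data = mkstemp(tmpfile_name);		parent creates file
 *	(void) ftruncate(ztest_fd_data, total_size);
 *	ztest_shared_hdr = mmap(NULL, total_size, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, ztest_fd_data, 0);
 *	...
 *	(void) execv(ztest_path, argv);			child re-mmaps the
 *							same inherited fd
 *
 * The mapping itself does not survive exec; the descriptor and the backing
 * file do, which is why the child can re-map the file and see every update
 * the parent made.
 */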
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <sys/abd.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <dlfcn.h>
#include <ctype.h>
#include <math.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>
#include <libcmdutils.h>

static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;

typedef struct ztest_shared_hdr {
	uint64_t	zh_hdr_size;
	uint64_t	zh_opts_size;
	uint64_t	zh_size;
	uint64_t	zh_stats_size;
	uint64_t	zh_stats_count;
	uint64_t	zh_ds_size;
	uint64_t	zh_ds_count;
} ztest_shared_hdr_t;

static ztest_shared_hdr_t *ztest_shared_hdr;

typedef struct ztest_shared_opts {
	char zo_pool[ZFS_MAX_DATASET_NAME_LEN];
	char zo_dir[ZFS_MAX_DATASET_NAME_LEN];
	char zo_alt_ztest[MAXNAMELEN];
	char zo_alt_libpath[MAXNAMELEN];
	uint64_t zo_vdevs;
	uint64_t zo_vdevtime;
	size_t zo_vdev_size;
	int zo_ashift;
	int zo_mirrors;
	int zo_raidz;
	int zo_raidz_parity;
	int zo_datasets;
	int zo_threads;
	uint64_t zo_passtime;
	uint64_t zo_killrate;
	int zo_verbose;
	int zo_init;
	uint64_t zo_time;
	uint64_t zo_maxloops;
	uint64_t zo_metaslab_force_ganging;
} ztest_shared_opts_t;

static const ztest_shared_opts_t ztest_opts_defaults = {
	.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
	.zo_dir = { '/', 't', 'm', 'p', '\0' },
	.zo_alt_ztest = { '\0' },
	.zo_alt_libpath = { '\0' },
	.zo_vdevs = 5,
	.zo_ashift = SPA_MINBLOCKSHIFT,
	.zo_mirrors = 2,
	.zo_raidz = 4,
	.zo_raidz_parity = 1,
	.zo_vdev_size = SPA_MINDEVSIZE * 4,	/* 256m default size */
	.zo_datasets = 7,
	.zo_threads = 23,
	.zo_passtime = 60,		/* 60 seconds */
	.zo_killrate = 70,		/* 70% kill rate */
	.zo_verbose = 0,
	.zo_init = 1,
	.zo_time = 300,			/* 5 minutes */
	.zo_maxloops = 50,		/* max loops during spa_freeze() */
	.zo_metaslab_force_ganging = 32 << 10
};
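/*
 * Back-of-the-envelope reading of the defaults above (each value can be
 * overridden on the command line): 5 top-level vdevs, each a 2-way mirror
 * of 4-wide raidz, gives 5 * 2 * 4 = 40 leaf vdev files of 256MB apiece;
 * 23 threads then bang on 7 datasets for roughly 300 seconds, with about a
 * 70% chance that any given pass is cut short by a SIGKILL.
 */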
extern uint64_t metaslab_force_ganging;
extern uint64_t metaslab_df_alloc_threshold;
extern uint64_t zfs_deadman_synctime_ms;
extern int metaslab_preload_limit;
extern boolean_t zfs_compressed_arc_enabled;
extern boolean_t zfs_abd_scatter_enabled;
extern boolean_t zfs_force_some_double_word_sm_entries;

static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;

typedef struct ztest_shared_ds {
	uint64_t	zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define	ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])

#define	BT_MAGIC	0x123456789abcdefULL
#define	MAXFAULTS() \
	(MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)

enum ztest_io_type {
	ZTEST_IO_WRITE_TAG,
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
	ZTEST_IO_TRUNCATE,
	ZTEST_IO_SETATTR,
	ZTEST_IO_REWRITE,
	ZTEST_IO_TYPES
};

typedef struct ztest_block_tag {
	uint64_t	bt_magic;
	uint64_t	bt_objset;
	uint64_t	bt_object;
	uint64_t	bt_offset;
	uint64_t	bt_gen;
	uint64_t	bt_txg;
	uint64_t	bt_crtxg;
} ztest_block_tag_t;

typedef struct bufwad {
	uint64_t	bw_index;
	uint64_t	bw_txg;
	uint64_t	bw_data;
} bufwad_t;

/*
 * XXX -- fix zfs range locks to be generic so we can use them here.
 */
typedef enum {
	RL_READER,
	RL_WRITER,
	RL_APPEND
} rl_type_t;

typedef struct rll {
	void		*rll_writer;
	int		rll_readers;
	kmutex_t	rll_lock;
	kcondvar_t	rll_cv;
} rll_t;

typedef struct rl {
	uint64_t	rl_object;
	uint64_t	rl_offset;
	uint64_t	rl_size;
	rll_t		*rl_lock;
} rl_t;

#define	ZTEST_RANGE_LOCKS	64
#define	ZTEST_OBJECT_LOCKS	64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
	uint64_t	od_dir;
	uint64_t	od_object;
	dmu_object_type_t od_type;
	dmu_object_type_t od_crtype;
	uint64_t	od_blocksize;
	uint64_t	od_crblocksize;
	uint64_t	od_gen;
	uint64_t	od_crgen;
	char		od_name[ZFS_MAX_DATASET_NAME_LEN];
} ztest_od_t;

/*
 * Per-dataset state.
 */
typedef struct ztest_ds {
	ztest_shared_ds_t *zd_shared;
	objset_t	*zd_os;
	krwlock_t	zd_zilog_lock;
	zilog_t		*zd_zilog;
	ztest_od_t	*zd_od;		/* debugging aid */
	char		zd_name[ZFS_MAX_DATASET_NAME_LEN];
	kmutex_t	zd_dirobj_lock;
	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;

/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
	ztest_func_t	*zi_func;	/* test function */
	uint64_t	zi_iters;	/* iterations per execution */
	uint64_t	*zi_interval;	/* execute every <interval> seconds */
} ztest_info_t;

typedef struct ztest_shared_callstate {
	uint64_t	zc_count;	/* per-pass count */
	uint64_t	zc_time;	/* per-pass time */
	uint64_t	zc_next;	/* next time to call this function */
} ztest_shared_callstate_t;

static ztest_shared_callstate_t *ztest_shared_callstate;
#define	ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
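/*
 * All of the shared arrays (the options, the per-dataset state, and the
 * per-function call state above) live inside the mmap-ed file described by
 * ztest_shared_hdr_t.  The layout the header fields suggest -- offsets here
 * are a sketch; the authoritative walk is the setup code later in this file:
 *
 *	offset 0		ztest_shared_hdr_t   (zh_hdr_size bytes)
 *	+ zh_hdr_size		ztest_shared_opts_t  (zh_opts_size bytes)
 *	+ zh_opts_size		ztest_shared_t       (zh_size bytes)
 *	+ zh_size		zh_stats_count x ztest_shared_callstate_t
 *	+ zh_stats_count * zh_stats_size
 *				zh_ds_count x ztest_shared_ds_t
 *
 * Recording per-element sizes rather than hard-coding them is what keeps the
 * file readable by the older binaries exercised with -B.
 */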
/*
 * Note: these aren't static because we want dladdr() to work.
 */
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;
ztest_func_t ztest_device_removal;
ztest_func_t ztest_remap_blocks;
ztest_func_t ztest_spa_checkpoint_create_discard;

uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */

ztest_info_t ztest_info[] = {
	{ ztest_dmu_read_write,			1,	&zopt_always	},
	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
	{ ztest_zap,				30,	&zopt_always	},
	{ ztest_zap_parallel,			100,	&zopt_always	},
	{ ztest_split_pool,			1,	&zopt_always	},
	{ ztest_zil_commit,			1,	&zopt_incessant	},
	{ ztest_zil_remount,			1,	&zopt_sometimes	},
	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
#if 0
	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
#endif
	{ ztest_fzap,				1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
	{ ztest_fault_inject,			1,	&zopt_sometimes	},
	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
	{ ztest_reguid,				1,	&zopt_rarely	},
	{ ztest_spa_rename,			1,	&zopt_rarely	},
	{ ztest_scrub,				1,	&zopt_rarely	},
	{ ztest_spa_upgrade,			1,	&zopt_rarely	},
	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
	{ ztest_vdev_attach_detach,		1,	&zopt_sometimes	},
	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
	{ ztest_vdev_add_remove,		1,	&ztest_opts.zo_vdevtime	},
	{ ztest_vdev_aux_add_remove,		1,	&ztest_opts.zo_vdevtime	},
	{ ztest_device_removal,			1,	&zopt_sometimes	},
	{ ztest_remap_blocks,			1,	&zopt_sometimes	},
	{ ztest_spa_checkpoint_create_discard,	1,	&zopt_rarely	}
};

#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))

/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
400 */ 401 typedef struct ztest_cb_list { 402 kmutex_t zcl_callbacks_lock; 403 list_t zcl_callbacks; 404 } ztest_cb_list_t; 405 406 /* 407 * Stuff we need to share writably between parent and child. 408 */ 409 typedef struct ztest_shared { 410 boolean_t zs_do_init; 411 hrtime_t zs_proc_start; 412 hrtime_t zs_proc_stop; 413 hrtime_t zs_thread_start; 414 hrtime_t zs_thread_stop; 415 hrtime_t zs_thread_kill; 416 uint64_t zs_enospc_count; 417 uint64_t zs_vdev_next_leaf; 418 uint64_t zs_vdev_aux; 419 uint64_t zs_alloc; 420 uint64_t zs_space; 421 uint64_t zs_splits; 422 uint64_t zs_mirrors; 423 uint64_t zs_metaslab_sz; 424 uint64_t zs_metaslab_df_alloc_threshold; 425 uint64_t zs_guid; 426 } ztest_shared_t; 427 428 #define ID_PARALLEL -1ULL 429 430 static char ztest_dev_template[] = "%s/%s.%llua"; 431 static char ztest_aux_template[] = "%s/%s.%s.%llu"; 432 ztest_shared_t *ztest_shared; 433 434 static spa_t *ztest_spa = NULL; 435 static ztest_ds_t *ztest_ds; 436 437 static kmutex_t ztest_vdev_lock; 438 static kmutex_t ztest_checkpoint_lock; 439 440 /* 441 * The ztest_name_lock protects the pool and dataset namespace used by 442 * the individual tests. To modify the namespace, consumers must grab 443 * this lock as writer. Grabbing the lock as reader will ensure that the 444 * namespace does not change while the lock is held. 445 */ 446 static krwlock_t ztest_name_lock; 447 448 static boolean_t ztest_dump_core = B_TRUE; 449 static boolean_t ztest_exiting; 450 451 /* Global commit callback list */ 452 static ztest_cb_list_t zcl; 453 454 enum ztest_object { 455 ZTEST_META_DNODE = 0, 456 ZTEST_DIROBJ, 457 ZTEST_OBJECTS 458 }; 459 460 static void usage(boolean_t) __NORETURN; 461 462 /* 463 * These libumem hooks provide a reasonable set of defaults for the allocator's 464 * debugging facilities. 465 */ 466 const char * 467 _umem_debug_init() 468 { 469 return ("default,verbose"); /* $UMEM_DEBUG setting */ 470 } 471 472 const char * 473 _umem_logging_init(void) 474 { 475 return ("fail,contents"); /* $UMEM_LOGGING setting */ 476 } 477 478 #define FATAL_MSG_SZ 1024 479 480 char *fatal_msg; 481 482 static void 483 fatal(int do_perror, char *message, ...) 
484 { 485 va_list args; 486 int save_errno = errno; 487 char buf[FATAL_MSG_SZ]; 488 489 (void) fflush(stdout); 490 491 va_start(args, message); 492 (void) sprintf(buf, "ztest: "); 493 /* LINTED */ 494 (void) vsprintf(buf + strlen(buf), message, args); 495 va_end(args); 496 if (do_perror) { 497 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf), 498 ": %s", strerror(save_errno)); 499 } 500 (void) fprintf(stderr, "%s\n", buf); 501 fatal_msg = buf; /* to ease debugging */ 502 if (ztest_dump_core) 503 abort(); 504 exit(3); 505 } 506 507 static int 508 str2shift(const char *buf) 509 { 510 const char *ends = "BKMGTPEZ"; 511 int i; 512 513 if (buf[0] == '\0') 514 return (0); 515 for (i = 0; i < strlen(ends); i++) { 516 if (toupper(buf[0]) == ends[i]) 517 break; 518 } 519 if (i == strlen(ends)) { 520 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", 521 buf); 522 usage(B_FALSE); 523 } 524 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) { 525 return (10*i); 526 } 527 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf); 528 usage(B_FALSE); 529 /* NOTREACHED */ 530 } 531 532 static uint64_t 533 nicenumtoull(const char *buf) 534 { 535 char *end; 536 uint64_t val; 537 538 val = strtoull(buf, &end, 0); 539 if (end == buf) { 540 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf); 541 usage(B_FALSE); 542 } else if (end[0] == '.') { 543 double fval = strtod(buf, &end); 544 fval *= pow(2, str2shift(end)); 545 if (fval > UINT64_MAX) { 546 (void) fprintf(stderr, "ztest: value too large: %s\n", 547 buf); 548 usage(B_FALSE); 549 } 550 val = (uint64_t)fval; 551 } else { 552 int shift = str2shift(end); 553 if (shift >= 64 || (val << shift) >> shift != val) { 554 (void) fprintf(stderr, "ztest: value too large: %s\n", 555 buf); 556 usage(B_FALSE); 557 } 558 val <<= shift; 559 } 560 return (val); 561 } 562 563 static void 564 usage(boolean_t requested) 565 { 566 const ztest_shared_opts_t *zo = &ztest_opts_defaults; 567 568 char nice_vdev_size[NN_NUMBUF_SZ]; 569 char nice_force_ganging[NN_NUMBUF_SZ]; 570 FILE *fp = requested ? stdout : stderr; 571 572 nicenum(zo->zo_vdev_size, nice_vdev_size, sizeof (nice_vdev_size)); 573 nicenum(zo->zo_metaslab_force_ganging, nice_force_ganging, 574 sizeof (nice_force_ganging)); 575 576 (void) fprintf(fp, "Usage: %s\n" 577 "\t[-v vdevs (default: %llu)]\n" 578 "\t[-s size_of_each_vdev (default: %s)]\n" 579 "\t[-a alignment_shift (default: %d)] use 0 for random\n" 580 "\t[-m mirror_copies (default: %d)]\n" 581 "\t[-r raidz_disks (default: %d)]\n" 582 "\t[-R raidz_parity (default: %d)]\n" 583 "\t[-d datasets (default: %d)]\n" 584 "\t[-t threads (default: %d)]\n" 585 "\t[-g gang_block_threshold (default: %s)]\n" 586 "\t[-i init_count (default: %d)] initialize pool i times\n" 587 "\t[-k kill_percentage (default: %llu%%)]\n" 588 "\t[-p pool_name (default: %s)]\n" 589 "\t[-f dir (default: %s)] file directory for vdev files\n" 590 "\t[-V] verbose (use multiple times for ever more blather)\n" 591 "\t[-E] use existing pool instead of creating new one\n" 592 "\t[-T time (default: %llu sec)] total run time\n" 593 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n" 594 "\t[-P passtime (default: %llu sec)] time per pass\n" 595 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n" 596 "\t[-o variable=value] ... 
set global variable to an unsigned\n" 597 "\t 32-bit integer value\n" 598 "\t[-h] (print help)\n" 599 "", 600 zo->zo_pool, 601 (u_longlong_t)zo->zo_vdevs, /* -v */ 602 nice_vdev_size, /* -s */ 603 zo->zo_ashift, /* -a */ 604 zo->zo_mirrors, /* -m */ 605 zo->zo_raidz, /* -r */ 606 zo->zo_raidz_parity, /* -R */ 607 zo->zo_datasets, /* -d */ 608 zo->zo_threads, /* -t */ 609 nice_force_ganging, /* -g */ 610 zo->zo_init, /* -i */ 611 (u_longlong_t)zo->zo_killrate, /* -k */ 612 zo->zo_pool, /* -p */ 613 zo->zo_dir, /* -f */ 614 (u_longlong_t)zo->zo_time, /* -T */ 615 (u_longlong_t)zo->zo_maxloops, /* -F */ 616 (u_longlong_t)zo->zo_passtime); 617 exit(requested ? 0 : 1); 618 } 619 620 static void 621 process_options(int argc, char **argv) 622 { 623 char *path; 624 ztest_shared_opts_t *zo = &ztest_opts; 625 626 int opt; 627 uint64_t value; 628 char altdir[MAXNAMELEN] = { 0 }; 629 630 bcopy(&ztest_opts_defaults, zo, sizeof (*zo)); 631 632 while ((opt = getopt(argc, argv, 633 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:o:")) != EOF) { 634 value = 0; 635 switch (opt) { 636 case 'v': 637 case 's': 638 case 'a': 639 case 'm': 640 case 'r': 641 case 'R': 642 case 'd': 643 case 't': 644 case 'g': 645 case 'i': 646 case 'k': 647 case 'T': 648 case 'P': 649 case 'F': 650 value = nicenumtoull(optarg); 651 } 652 switch (opt) { 653 case 'v': 654 zo->zo_vdevs = value; 655 break; 656 case 's': 657 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value); 658 break; 659 case 'a': 660 zo->zo_ashift = value; 661 break; 662 case 'm': 663 zo->zo_mirrors = value; 664 break; 665 case 'r': 666 zo->zo_raidz = MAX(1, value); 667 break; 668 case 'R': 669 zo->zo_raidz_parity = MIN(MAX(value, 1), 3); 670 break; 671 case 'd': 672 zo->zo_datasets = MAX(1, value); 673 break; 674 case 't': 675 zo->zo_threads = MAX(1, value); 676 break; 677 case 'g': 678 zo->zo_metaslab_force_ganging = 679 MAX(SPA_MINBLOCKSIZE << 1, value); 680 break; 681 case 'i': 682 zo->zo_init = value; 683 break; 684 case 'k': 685 zo->zo_killrate = value; 686 break; 687 case 'p': 688 (void) strlcpy(zo->zo_pool, optarg, 689 sizeof (zo->zo_pool)); 690 break; 691 case 'f': 692 path = realpath(optarg, NULL); 693 if (path == NULL) { 694 (void) fprintf(stderr, "error: %s: %s\n", 695 optarg, strerror(errno)); 696 usage(B_FALSE); 697 } else { 698 (void) strlcpy(zo->zo_dir, path, 699 sizeof (zo->zo_dir)); 700 } 701 break; 702 case 'V': 703 zo->zo_verbose++; 704 break; 705 case 'E': 706 zo->zo_init = 0; 707 break; 708 case 'T': 709 zo->zo_time = value; 710 break; 711 case 'P': 712 zo->zo_passtime = MAX(1, value); 713 break; 714 case 'F': 715 zo->zo_maxloops = MAX(1, value); 716 break; 717 case 'B': 718 (void) strlcpy(altdir, optarg, sizeof (altdir)); 719 break; 720 case 'o': 721 if (set_global_var(optarg) != 0) 722 usage(B_FALSE); 723 break; 724 case 'h': 725 usage(B_TRUE); 726 break; 727 case '?': 728 default: 729 usage(B_FALSE); 730 break; 731 } 732 } 733 734 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1); 735 736 zo->zo_vdevtime = 737 (zo->zo_vdevs > 0 ? 
zo->zo_time * NANOSEC / zo->zo_vdevs : 738 UINT64_MAX >> 2); 739 740 if (strlen(altdir) > 0) { 741 char *cmd; 742 char *realaltdir; 743 char *bin; 744 char *ztest; 745 char *isa; 746 int isalen; 747 748 cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 749 realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 750 751 VERIFY(NULL != realpath(getexecname(), cmd)); 752 if (0 != access(altdir, F_OK)) { 753 ztest_dump_core = B_FALSE; 754 fatal(B_TRUE, "invalid alternate ztest path: %s", 755 altdir); 756 } 757 VERIFY(NULL != realpath(altdir, realaltdir)); 758 759 /* 760 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest". 761 * We want to extract <isa> to determine if we should use 762 * 32 or 64 bit binaries. 763 */ 764 bin = strstr(cmd, "/usr/bin/"); 765 ztest = strstr(bin, "/ztest"); 766 isa = bin + 9; 767 isalen = ztest - isa; 768 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest), 769 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa); 770 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath), 771 "%s/usr/lib/%.*s", realaltdir, isalen, isa); 772 773 if (0 != access(zo->zo_alt_ztest, X_OK)) { 774 ztest_dump_core = B_FALSE; 775 fatal(B_TRUE, "invalid alternate ztest: %s", 776 zo->zo_alt_ztest); 777 } else if (0 != access(zo->zo_alt_libpath, X_OK)) { 778 ztest_dump_core = B_FALSE; 779 fatal(B_TRUE, "invalid alternate lib directory %s", 780 zo->zo_alt_libpath); 781 } 782 783 umem_free(cmd, MAXPATHLEN); 784 umem_free(realaltdir, MAXPATHLEN); 785 } 786 } 787 788 static void 789 ztest_kill(ztest_shared_t *zs) 790 { 791 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa)); 792 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa)); 793 794 /* 795 * Before we kill off ztest, make sure that the config is updated. 796 * See comment above spa_write_cachefile(). 797 */ 798 mutex_enter(&spa_namespace_lock); 799 spa_write_cachefile(ztest_spa, B_FALSE, B_FALSE); 800 mutex_exit(&spa_namespace_lock); 801 802 zfs_dbgmsg_print(FTAG); 803 (void) kill(getpid(), SIGKILL); 804 } 805 806 static uint64_t 807 ztest_random(uint64_t range) 808 { 809 uint64_t r; 810 811 ASSERT3S(ztest_fd_rand, >=, 0); 812 813 if (range == 0) 814 return (0); 815 816 if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r)) 817 fatal(1, "short read from /dev/urandom"); 818 819 return (r % range); 820 } 821 822 /* ARGSUSED */ 823 static void 824 ztest_record_enospc(const char *s) 825 { 826 ztest_shared->zs_enospc_count++; 827 } 828 829 static uint64_t 830 ztest_get_ashift(void) 831 { 832 if (ztest_opts.zo_ashift == 0) 833 return (SPA_MINBLOCKSHIFT + ztest_random(5)); 834 return (ztest_opts.zo_ashift); 835 } 836 837 static nvlist_t * 838 make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift) 839 { 840 char pathbuf[MAXPATHLEN]; 841 uint64_t vdev; 842 nvlist_t *file; 843 844 if (ashift == 0) 845 ashift = ztest_get_ashift(); 846 847 if (path == NULL) { 848 path = pathbuf; 849 850 if (aux != NULL) { 851 vdev = ztest_shared->zs_vdev_aux; 852 (void) snprintf(path, sizeof (pathbuf), 853 ztest_aux_template, ztest_opts.zo_dir, 854 pool == NULL ? ztest_opts.zo_pool : pool, 855 aux, vdev); 856 } else { 857 vdev = ztest_shared->zs_vdev_next_leaf++; 858 (void) snprintf(path, sizeof (pathbuf), 859 ztest_dev_template, ztest_opts.zo_dir, 860 pool == NULL ? 
ztest_opts.zo_pool : pool, vdev); 861 } 862 } 863 864 if (size != 0) { 865 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666); 866 if (fd == -1) 867 fatal(1, "can't open %s", path); 868 if (ftruncate(fd, size) != 0) 869 fatal(1, "can't ftruncate %s", path); 870 (void) close(fd); 871 } 872 873 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0); 874 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0); 875 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0); 876 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0); 877 878 return (file); 879 } 880 881 static nvlist_t * 882 make_vdev_raidz(char *path, char *aux, char *pool, size_t size, 883 uint64_t ashift, int r) 884 { 885 nvlist_t *raidz, **child; 886 int c; 887 888 if (r < 2) 889 return (make_vdev_file(path, aux, pool, size, ashift)); 890 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL); 891 892 for (c = 0; c < r; c++) 893 child[c] = make_vdev_file(path, aux, pool, size, ashift); 894 895 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0); 896 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE, 897 VDEV_TYPE_RAIDZ) == 0); 898 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY, 899 ztest_opts.zo_raidz_parity) == 0); 900 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN, 901 child, r) == 0); 902 903 for (c = 0; c < r; c++) 904 nvlist_free(child[c]); 905 906 umem_free(child, r * sizeof (nvlist_t *)); 907 908 return (raidz); 909 } 910 911 static nvlist_t * 912 make_vdev_mirror(char *path, char *aux, char *pool, size_t size, 913 uint64_t ashift, int r, int m) 914 { 915 nvlist_t *mirror, **child; 916 int c; 917 918 if (m < 1) 919 return (make_vdev_raidz(path, aux, pool, size, ashift, r)); 920 921 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL); 922 923 for (c = 0; c < m; c++) 924 child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r); 925 926 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0); 927 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, 928 VDEV_TYPE_MIRROR) == 0); 929 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN, 930 child, m) == 0); 931 932 for (c = 0; c < m; c++) 933 nvlist_free(child[c]); 934 935 umem_free(child, m * sizeof (nvlist_t *)); 936 937 return (mirror); 938 } 939 940 static nvlist_t * 941 make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift, 942 int log, int r, int m, int t) 943 { 944 nvlist_t *root, **child; 945 int c; 946 947 ASSERT(t > 0); 948 949 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL); 950 951 for (c = 0; c < t; c++) { 952 child[c] = make_vdev_mirror(path, aux, pool, size, ashift, 953 r, m); 954 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 955 log) == 0); 956 } 957 958 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0); 959 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0); 960 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN, 961 child, t) == 0); 962 963 for (c = 0; c < t; c++) 964 nvlist_free(child[c]); 965 966 umem_free(child, t * sizeof (nvlist_t *)); 967 968 return (root); 969 } 970 971 /* 972 * Find a random spa version. Returns back a random spa version in the 973 * range [initial_version, SPA_VERSION_FEATURES]. 
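 * Versions at or below SPA_VERSION_BEFORE_FEATURES are bumped by a random
 * amount, and a result that lands past SPA_VERSION_BEFORE_FEATURES is
 * collapsed to SPA_VERSION_FEATURES, so a legacy starting version can still
 * occasionally come back as a feature-flags version.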
 */
static uint64_t
ztest_random_spa_version(uint64_t initial_version)
{
	uint64_t version = initial_version;

	if (version <= SPA_VERSION_BEFORE_FEATURES) {
		/*
		 * +2 (not +1) so the result can land one version past
		 * SPA_VERSION_BEFORE_FEATURES and be promoted to
		 * SPA_VERSION_FEATURES below; otherwise the documented
		 * upper bound would be unreachable.
		 */
		version = version +
		    ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 2);
	}

	if (version > SPA_VERSION_BEFORE_FEATURES)
		version = SPA_VERSION_FEATURES;

	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
	return (version);
}

static int
ztest_random_blocksize(void)
{
	uint64_t block_shift;
	/*
	 * Choose a block size >= the ashift.
	 * If the SPA supports new MAXBLOCKSIZE, test up to 1MB blocks.
	 */
	int maxbs = SPA_OLD_MAXBLOCKSHIFT;
	if (spa_maxblocksize(ztest_spa) == SPA_MAXBLOCKSIZE)
		maxbs = 20;
	block_shift = ztest_random(maxbs - ztest_spa->spa_max_ashift + 1);
	return (1 << (SPA_MINBLOCKSHIFT + block_shift));
}

static int
ztest_random_ibshift(void)
{
	return (DN_MIN_INDBLKSHIFT +
	    ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}

static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
	uint64_t top;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	do {
		top = ztest_random(rvd->vdev_children);
		tvd = rvd->vdev_child[top];
	} while (!vdev_is_concrete(tvd) || (tvd->vdev_islog && !log_ok) ||
	    tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);

	return (top);
}

static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
	uint64_t value;

	do {
		value = zfs_prop_random_value(prop, ztest_random(-1ULL));
	} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);

	return (value);
}

static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
    boolean_t inherit)
{
	const char *propname = zfs_prop_to_name(prop);
	const char *valname;
	char setpoint[MAXPATHLEN];
	uint64_t curval;
	int error;

	error = dsl_prop_set_int(osname, propname,
	    (inherit ?
ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value); 1056 1057 if (error == ENOSPC) { 1058 ztest_record_enospc(FTAG); 1059 return (error); 1060 } 1061 ASSERT0(error); 1062 1063 VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint)); 1064 1065 if (ztest_opts.zo_verbose >= 6) { 1066 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0); 1067 (void) printf("%s %s = %s at '%s'\n", 1068 osname, propname, valname, setpoint); 1069 } 1070 1071 return (error); 1072 } 1073 1074 static int 1075 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value) 1076 { 1077 spa_t *spa = ztest_spa; 1078 nvlist_t *props = NULL; 1079 int error; 1080 1081 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 1082 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0); 1083 1084 error = spa_prop_set(spa, props); 1085 1086 nvlist_free(props); 1087 1088 if (error == ENOSPC) { 1089 ztest_record_enospc(FTAG); 1090 return (error); 1091 } 1092 ASSERT0(error); 1093 1094 return (error); 1095 } 1096 1097 static void 1098 ztest_rll_init(rll_t *rll) 1099 { 1100 rll->rll_writer = NULL; 1101 rll->rll_readers = 0; 1102 mutex_init(&rll->rll_lock, NULL, USYNC_THREAD, NULL); 1103 cv_init(&rll->rll_cv, NULL, USYNC_THREAD, NULL); 1104 } 1105 1106 static void 1107 ztest_rll_destroy(rll_t *rll) 1108 { 1109 ASSERT(rll->rll_writer == NULL); 1110 ASSERT(rll->rll_readers == 0); 1111 mutex_destroy(&rll->rll_lock); 1112 cv_destroy(&rll->rll_cv); 1113 } 1114 1115 static void 1116 ztest_rll_lock(rll_t *rll, rl_type_t type) 1117 { 1118 mutex_enter(&rll->rll_lock); 1119 1120 if (type == RL_READER) { 1121 while (rll->rll_writer != NULL) 1122 cv_wait(&rll->rll_cv, &rll->rll_lock); 1123 rll->rll_readers++; 1124 } else { 1125 while (rll->rll_writer != NULL || rll->rll_readers) 1126 cv_wait(&rll->rll_cv, &rll->rll_lock); 1127 rll->rll_writer = curthread; 1128 } 1129 1130 mutex_exit(&rll->rll_lock); 1131 } 1132 1133 static void 1134 ztest_rll_unlock(rll_t *rll) 1135 { 1136 mutex_enter(&rll->rll_lock); 1137 1138 if (rll->rll_writer) { 1139 ASSERT(rll->rll_readers == 0); 1140 rll->rll_writer = NULL; 1141 } else { 1142 ASSERT(rll->rll_readers != 0); 1143 ASSERT(rll->rll_writer == NULL); 1144 rll->rll_readers--; 1145 } 1146 1147 if (rll->rll_writer == NULL && rll->rll_readers == 0) 1148 cv_broadcast(&rll->rll_cv); 1149 1150 mutex_exit(&rll->rll_lock); 1151 } 1152 1153 static void 1154 ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type) 1155 { 1156 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1157 1158 ztest_rll_lock(rll, type); 1159 } 1160 1161 static void 1162 ztest_object_unlock(ztest_ds_t *zd, uint64_t object) 1163 { 1164 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1165 1166 ztest_rll_unlock(rll); 1167 } 1168 1169 static rl_t * 1170 ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset, 1171 uint64_t size, rl_type_t type) 1172 { 1173 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1)); 1174 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)]; 1175 rl_t *rl; 1176 1177 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL); 1178 rl->rl_object = object; 1179 rl->rl_offset = offset; 1180 rl->rl_size = size; 1181 rl->rl_lock = rll; 1182 1183 ztest_rll_lock(rll, type); 1184 1185 return (rl); 1186 } 1187 1188 static void 1189 ztest_range_unlock(rl_t *rl) 1190 { 1191 rll_t *rll = rl->rl_lock; 1192 1193 ztest_rll_unlock(rll); 1194 1195 umem_free(rl, sizeof (*rl)); 1196 } 1197 1198 static void 1199 ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, 
objset_t *os) 1200 { 1201 zd->zd_os = os; 1202 zd->zd_zilog = dmu_objset_zil(os); 1203 zd->zd_shared = szd; 1204 dmu_objset_name(os, zd->zd_name); 1205 1206 if (zd->zd_shared != NULL) 1207 zd->zd_shared->zd_seq = 0; 1208 1209 rw_init(&zd->zd_zilog_lock, NULL, USYNC_THREAD, NULL); 1210 mutex_init(&zd->zd_dirobj_lock, NULL, USYNC_THREAD, NULL); 1211 1212 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1213 ztest_rll_init(&zd->zd_object_lock[l]); 1214 1215 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1216 ztest_rll_init(&zd->zd_range_lock[l]); 1217 } 1218 1219 static void 1220 ztest_zd_fini(ztest_ds_t *zd) 1221 { 1222 mutex_destroy(&zd->zd_dirobj_lock); 1223 1224 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1225 ztest_rll_destroy(&zd->zd_object_lock[l]); 1226 1227 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1228 ztest_rll_destroy(&zd->zd_range_lock[l]); 1229 } 1230 1231 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT) 1232 1233 static uint64_t 1234 ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag) 1235 { 1236 uint64_t txg; 1237 int error; 1238 1239 /* 1240 * Attempt to assign tx to some transaction group. 1241 */ 1242 error = dmu_tx_assign(tx, txg_how); 1243 if (error) { 1244 if (error == ERESTART) { 1245 ASSERT(txg_how == TXG_NOWAIT); 1246 dmu_tx_wait(tx); 1247 } else { 1248 ASSERT3U(error, ==, ENOSPC); 1249 ztest_record_enospc(tag); 1250 } 1251 dmu_tx_abort(tx); 1252 return (0); 1253 } 1254 txg = dmu_tx_get_txg(tx); 1255 ASSERT(txg != 0); 1256 return (txg); 1257 } 1258 1259 static void 1260 ztest_pattern_set(void *buf, uint64_t size, uint64_t value) 1261 { 1262 uint64_t *ip = buf; 1263 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1264 1265 while (ip < ip_end) 1266 *ip++ = value; 1267 } 1268 1269 static boolean_t 1270 ztest_pattern_match(void *buf, uint64_t size, uint64_t value) 1271 { 1272 uint64_t *ip = buf; 1273 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1274 uint64_t diff = 0; 1275 1276 while (ip < ip_end) 1277 diff |= (value - *ip++); 1278 1279 return (diff == 0); 1280 } 1281 1282 static void 1283 ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1284 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1285 { 1286 bt->bt_magic = BT_MAGIC; 1287 bt->bt_objset = dmu_objset_id(os); 1288 bt->bt_object = object; 1289 bt->bt_offset = offset; 1290 bt->bt_gen = gen; 1291 bt->bt_txg = txg; 1292 bt->bt_crtxg = crtxg; 1293 } 1294 1295 static void 1296 ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1297 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1298 { 1299 ASSERT3U(bt->bt_magic, ==, BT_MAGIC); 1300 ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os)); 1301 ASSERT3U(bt->bt_object, ==, object); 1302 ASSERT3U(bt->bt_offset, ==, offset); 1303 ASSERT3U(bt->bt_gen, <=, gen); 1304 ASSERT3U(bt->bt_txg, <=, txg); 1305 ASSERT3U(bt->bt_crtxg, ==, crtxg); 1306 } 1307 1308 static ztest_block_tag_t * 1309 ztest_bt_bonus(dmu_buf_t *db) 1310 { 1311 dmu_object_info_t doi; 1312 ztest_block_tag_t *bt; 1313 1314 dmu_object_info_from_db(db, &doi); 1315 ASSERT3U(doi.doi_bonus_size, <=, db->db_size); 1316 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt)); 1317 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt)); 1318 1319 return (bt); 1320 } 1321 1322 /* 1323 * ZIL logging ops 1324 */ 1325 1326 #define lrz_type lr_mode 1327 #define lrz_blocksize lr_uid 1328 #define lrz_ibshift lr_gid 1329 #define lrz_bonustype lr_rdev 1330 #define lrz_bonuslen lr_crtime[1] 1331 
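/*
 * The lrz_* aliases above overload fields of lr_create_t that the ZPL uses
 * for POSIX attributes (mode, uid, gid, rdev, crtime) so that ztest can
 * carry object-creation parameters through its ZIL records.  A rough sketch
 * of how ztest_create() fills one in (the values shown are illustrative):
 *
 *	lr->lr_doid = ZTEST_DIROBJ;
 *	lr->lr_foid = 0;			0 = allocate, > 0 = claim
 *	lr->lrz_type = DMU_OT_UINT64_OTHER;	stored in lr_mode
 *	lr->lrz_blocksize = 1 << 12;		stored in lr_uid
 *	lr->lrz_bonuslen = dmu_bonus_max();	stored in lr_crtime[1]
 */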
1332 static void 1333 ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr) 1334 { 1335 char *name = (void *)(lr + 1); /* name follows lr */ 1336 size_t namesize = strlen(name) + 1; 1337 itx_t *itx; 1338 1339 if (zil_replaying(zd->zd_zilog, tx)) 1340 return; 1341 1342 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize); 1343 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1344 sizeof (*lr) + namesize - sizeof (lr_t)); 1345 1346 zil_itx_assign(zd->zd_zilog, itx, tx); 1347 } 1348 1349 static void 1350 ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object) 1351 { 1352 char *name = (void *)(lr + 1); /* name follows lr */ 1353 size_t namesize = strlen(name) + 1; 1354 itx_t *itx; 1355 1356 if (zil_replaying(zd->zd_zilog, tx)) 1357 return; 1358 1359 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize); 1360 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1361 sizeof (*lr) + namesize - sizeof (lr_t)); 1362 1363 itx->itx_oid = object; 1364 zil_itx_assign(zd->zd_zilog, itx, tx); 1365 } 1366 1367 static void 1368 ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr) 1369 { 1370 itx_t *itx; 1371 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES); 1372 1373 if (zil_replaying(zd->zd_zilog, tx)) 1374 return; 1375 1376 if (lr->lr_length > ZIL_MAX_LOG_DATA) 1377 write_state = WR_INDIRECT; 1378 1379 itx = zil_itx_create(TX_WRITE, 1380 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0)); 1381 1382 if (write_state == WR_COPIED && 1383 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length, 1384 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) { 1385 zil_itx_destroy(itx); 1386 itx = zil_itx_create(TX_WRITE, sizeof (*lr)); 1387 write_state = WR_NEED_COPY; 1388 } 1389 itx->itx_private = zd; 1390 itx->itx_wr_state = write_state; 1391 itx->itx_sync = (ztest_random(8) == 0); 1392 1393 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1394 sizeof (*lr) - sizeof (lr_t)); 1395 1396 zil_itx_assign(zd->zd_zilog, itx, tx); 1397 } 1398 1399 static void 1400 ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr) 1401 { 1402 itx_t *itx; 1403 1404 if (zil_replaying(zd->zd_zilog, tx)) 1405 return; 1406 1407 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr)); 1408 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1409 sizeof (*lr) - sizeof (lr_t)); 1410 1411 itx->itx_sync = B_FALSE; 1412 zil_itx_assign(zd->zd_zilog, itx, tx); 1413 } 1414 1415 static void 1416 ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr) 1417 { 1418 itx_t *itx; 1419 1420 if (zil_replaying(zd->zd_zilog, tx)) 1421 return; 1422 1423 itx = zil_itx_create(TX_SETATTR, sizeof (*lr)); 1424 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1425 sizeof (*lr) - sizeof (lr_t)); 1426 1427 itx->itx_sync = B_FALSE; 1428 zil_itx_assign(zd->zd_zilog, itx, tx); 1429 } 1430 1431 /* 1432 * ZIL replay ops 1433 */ 1434 static int 1435 ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap) 1436 { 1437 ztest_ds_t *zd = arg1; 1438 lr_create_t *lr = arg2; 1439 char *name = (void *)(lr + 1); /* name follows lr */ 1440 objset_t *os = zd->zd_os; 1441 ztest_block_tag_t *bbt; 1442 dmu_buf_t *db; 1443 dmu_tx_t *tx; 1444 uint64_t txg; 1445 int error = 0; 1446 1447 if (byteswap) 1448 byteswap_uint64_array(lr, sizeof (*lr)); 1449 1450 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1451 ASSERT(name[0] != '\0'); 1452 1453 tx = dmu_tx_create(os); 1454 1455 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name); 1456 1457 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1458 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, 
NULL); 1459 } else { 1460 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1461 } 1462 1463 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1464 if (txg == 0) 1465 return (ENOSPC); 1466 1467 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid); 1468 1469 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1470 if (lr->lr_foid == 0) { 1471 lr->lr_foid = zap_create(os, 1472 lr->lrz_type, lr->lrz_bonustype, 1473 lr->lrz_bonuslen, tx); 1474 } else { 1475 error = zap_create_claim(os, lr->lr_foid, 1476 lr->lrz_type, lr->lrz_bonustype, 1477 lr->lrz_bonuslen, tx); 1478 } 1479 } else { 1480 if (lr->lr_foid == 0) { 1481 lr->lr_foid = dmu_object_alloc(os, 1482 lr->lrz_type, 0, lr->lrz_bonustype, 1483 lr->lrz_bonuslen, tx); 1484 } else { 1485 error = dmu_object_claim(os, lr->lr_foid, 1486 lr->lrz_type, 0, lr->lrz_bonustype, 1487 lr->lrz_bonuslen, tx); 1488 } 1489 } 1490 1491 if (error) { 1492 ASSERT3U(error, ==, EEXIST); 1493 ASSERT(zd->zd_zilog->zl_replay); 1494 dmu_tx_commit(tx); 1495 return (error); 1496 } 1497 1498 ASSERT(lr->lr_foid != 0); 1499 1500 if (lr->lrz_type != DMU_OT_ZAP_OTHER) 1501 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid, 1502 lr->lrz_blocksize, lr->lrz_ibshift, tx)); 1503 1504 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1505 bbt = ztest_bt_bonus(db); 1506 dmu_buf_will_dirty(db, tx); 1507 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg); 1508 dmu_buf_rele(db, FTAG); 1509 1510 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1, 1511 &lr->lr_foid, tx)); 1512 1513 (void) ztest_log_create(zd, tx, lr); 1514 1515 dmu_tx_commit(tx); 1516 1517 return (0); 1518 } 1519 1520 static int 1521 ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap) 1522 { 1523 ztest_ds_t *zd = arg1; 1524 lr_remove_t *lr = arg2; 1525 char *name = (void *)(lr + 1); /* name follows lr */ 1526 objset_t *os = zd->zd_os; 1527 dmu_object_info_t doi; 1528 dmu_tx_t *tx; 1529 uint64_t object, txg; 1530 1531 if (byteswap) 1532 byteswap_uint64_array(lr, sizeof (*lr)); 1533 1534 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1535 ASSERT(name[0] != '\0'); 1536 1537 VERIFY3U(0, ==, 1538 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object)); 1539 ASSERT(object != 0); 1540 1541 ztest_object_lock(zd, object, RL_WRITER); 1542 1543 VERIFY3U(0, ==, dmu_object_info(os, object, &doi)); 1544 1545 tx = dmu_tx_create(os); 1546 1547 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name); 1548 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); 1549 1550 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1551 if (txg == 0) { 1552 ztest_object_unlock(zd, object); 1553 return (ENOSPC); 1554 } 1555 1556 if (doi.doi_type == DMU_OT_ZAP_OTHER) { 1557 VERIFY3U(0, ==, zap_destroy(os, object, tx)); 1558 } else { 1559 VERIFY3U(0, ==, dmu_object_free(os, object, tx)); 1560 } 1561 1562 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx)); 1563 1564 (void) ztest_log_remove(zd, tx, lr, object); 1565 1566 dmu_tx_commit(tx); 1567 1568 ztest_object_unlock(zd, object); 1569 1570 return (0); 1571 } 1572 1573 static int 1574 ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap) 1575 { 1576 ztest_ds_t *zd = arg1; 1577 lr_write_t *lr = arg2; 1578 objset_t *os = zd->zd_os; 1579 void *data = lr + 1; /* data follows lr */ 1580 uint64_t offset, length; 1581 ztest_block_tag_t *bt = data; 1582 ztest_block_tag_t *bbt; 1583 uint64_t gen, txg, lrtxg, crtxg; 1584 dmu_object_info_t doi; 1585 dmu_tx_t *tx; 1586 dmu_buf_t *db; 1587 arc_buf_t *abuf = NULL; 1588 rl_t *rl; 1589 1590 if (byteswap) 1591 byteswap_uint64_array(lr, sizeof 
(*lr)); 1592 1593 offset = lr->lr_offset; 1594 length = lr->lr_length; 1595 1596 /* If it's a dmu_sync() block, write the whole block */ 1597 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { 1598 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr); 1599 if (length < blocksize) { 1600 offset -= offset % blocksize; 1601 length = blocksize; 1602 } 1603 } 1604 1605 if (bt->bt_magic == BSWAP_64(BT_MAGIC)) 1606 byteswap_uint64_array(bt, sizeof (*bt)); 1607 1608 if (bt->bt_magic != BT_MAGIC) 1609 bt = NULL; 1610 1611 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1612 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER); 1613 1614 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1615 1616 dmu_object_info_from_db(db, &doi); 1617 1618 bbt = ztest_bt_bonus(db); 1619 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1620 gen = bbt->bt_gen; 1621 crtxg = bbt->bt_crtxg; 1622 lrtxg = lr->lr_common.lrc_txg; 1623 1624 tx = dmu_tx_create(os); 1625 1626 dmu_tx_hold_write(tx, lr->lr_foid, offset, length); 1627 1628 if (ztest_random(8) == 0 && length == doi.doi_data_block_size && 1629 P2PHASE(offset, length) == 0) 1630 abuf = dmu_request_arcbuf(db, length); 1631 1632 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1633 if (txg == 0) { 1634 if (abuf != NULL) 1635 dmu_return_arcbuf(abuf); 1636 dmu_buf_rele(db, FTAG); 1637 ztest_range_unlock(rl); 1638 ztest_object_unlock(zd, lr->lr_foid); 1639 return (ENOSPC); 1640 } 1641 1642 if (bt != NULL) { 1643 /* 1644 * Usually, verify the old data before writing new data -- 1645 * but not always, because we also want to verify correct 1646 * behavior when the data was not recently read into cache. 1647 */ 1648 ASSERT(offset % doi.doi_data_block_size == 0); 1649 if (ztest_random(4) != 0) { 1650 int prefetch = ztest_random(2) ? 1651 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH; 1652 ztest_block_tag_t rbt; 1653 1654 VERIFY(dmu_read(os, lr->lr_foid, offset, 1655 sizeof (rbt), &rbt, prefetch) == 0); 1656 if (rbt.bt_magic == BT_MAGIC) { 1657 ztest_bt_verify(&rbt, os, lr->lr_foid, 1658 offset, gen, txg, crtxg); 1659 } 1660 } 1661 1662 /* 1663 * Writes can appear to be newer than the bonus buffer because 1664 * the ztest_get_data() callback does a dmu_read() of the 1665 * open-context data, which may be different than the data 1666 * as it was when the write was generated. 1667 */ 1668 if (zd->zd_zilog->zl_replay) { 1669 ztest_bt_verify(bt, os, lr->lr_foid, offset, 1670 MAX(gen, bt->bt_gen), MAX(txg, lrtxg), 1671 bt->bt_crtxg); 1672 } 1673 1674 /* 1675 * Set the bt's gen/txg to the bonus buffer's gen/txg 1676 * so that all of the usual ASSERTs will work. 
1677 */ 1678 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg); 1679 } 1680 1681 if (abuf == NULL) { 1682 dmu_write(os, lr->lr_foid, offset, length, data, tx); 1683 } else { 1684 bcopy(data, abuf->b_data, length); 1685 dmu_assign_arcbuf(db, offset, abuf, tx); 1686 } 1687 1688 (void) ztest_log_write(zd, tx, lr); 1689 1690 dmu_buf_rele(db, FTAG); 1691 1692 dmu_tx_commit(tx); 1693 1694 ztest_range_unlock(rl); 1695 ztest_object_unlock(zd, lr->lr_foid); 1696 1697 return (0); 1698 } 1699 1700 static int 1701 ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap) 1702 { 1703 ztest_ds_t *zd = arg1; 1704 lr_truncate_t *lr = arg2; 1705 objset_t *os = zd->zd_os; 1706 dmu_tx_t *tx; 1707 uint64_t txg; 1708 rl_t *rl; 1709 1710 if (byteswap) 1711 byteswap_uint64_array(lr, sizeof (*lr)); 1712 1713 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1714 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length, 1715 RL_WRITER); 1716 1717 tx = dmu_tx_create(os); 1718 1719 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length); 1720 1721 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1722 if (txg == 0) { 1723 ztest_range_unlock(rl); 1724 ztest_object_unlock(zd, lr->lr_foid); 1725 return (ENOSPC); 1726 } 1727 1728 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset, 1729 lr->lr_length, tx) == 0); 1730 1731 (void) ztest_log_truncate(zd, tx, lr); 1732 1733 dmu_tx_commit(tx); 1734 1735 ztest_range_unlock(rl); 1736 ztest_object_unlock(zd, lr->lr_foid); 1737 1738 return (0); 1739 } 1740 1741 static int 1742 ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap) 1743 { 1744 ztest_ds_t *zd = arg1; 1745 lr_setattr_t *lr = arg2; 1746 objset_t *os = zd->zd_os; 1747 dmu_tx_t *tx; 1748 dmu_buf_t *db; 1749 ztest_block_tag_t *bbt; 1750 uint64_t txg, lrtxg, crtxg; 1751 1752 if (byteswap) 1753 byteswap_uint64_array(lr, sizeof (*lr)); 1754 1755 ztest_object_lock(zd, lr->lr_foid, RL_WRITER); 1756 1757 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1758 1759 tx = dmu_tx_create(os); 1760 dmu_tx_hold_bonus(tx, lr->lr_foid); 1761 1762 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1763 if (txg == 0) { 1764 dmu_buf_rele(db, FTAG); 1765 ztest_object_unlock(zd, lr->lr_foid); 1766 return (ENOSPC); 1767 } 1768 1769 bbt = ztest_bt_bonus(db); 1770 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1771 crtxg = bbt->bt_crtxg; 1772 lrtxg = lr->lr_common.lrc_txg; 1773 1774 if (zd->zd_zilog->zl_replay) { 1775 ASSERT(lr->lr_size != 0); 1776 ASSERT(lr->lr_mode != 0); 1777 ASSERT(lrtxg != 0); 1778 } else { 1779 /* 1780 * Randomly change the size and increment the generation. 1781 */ 1782 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) * 1783 sizeof (*bbt); 1784 lr->lr_mode = bbt->bt_gen + 1; 1785 ASSERT(lrtxg == 0); 1786 } 1787 1788 /* 1789 * Verify that the current bonus buffer is not newer than our txg. 
1790 */ 1791 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, 1792 MAX(txg, lrtxg), crtxg); 1793 1794 dmu_buf_will_dirty(db, tx); 1795 1796 ASSERT3U(lr->lr_size, >=, sizeof (*bbt)); 1797 ASSERT3U(lr->lr_size, <=, db->db_size); 1798 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx)); 1799 bbt = ztest_bt_bonus(db); 1800 1801 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg); 1802 1803 dmu_buf_rele(db, FTAG); 1804 1805 (void) ztest_log_setattr(zd, tx, lr); 1806 1807 dmu_tx_commit(tx); 1808 1809 ztest_object_unlock(zd, lr->lr_foid); 1810 1811 return (0); 1812 } 1813 1814 zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { 1815 NULL, /* 0 no such transaction type */ 1816 ztest_replay_create, /* TX_CREATE */ 1817 NULL, /* TX_MKDIR */ 1818 NULL, /* TX_MKXATTR */ 1819 NULL, /* TX_SYMLINK */ 1820 ztest_replay_remove, /* TX_REMOVE */ 1821 NULL, /* TX_RMDIR */ 1822 NULL, /* TX_LINK */ 1823 NULL, /* TX_RENAME */ 1824 ztest_replay_write, /* TX_WRITE */ 1825 ztest_replay_truncate, /* TX_TRUNCATE */ 1826 ztest_replay_setattr, /* TX_SETATTR */ 1827 NULL, /* TX_ACL */ 1828 NULL, /* TX_CREATE_ACL */ 1829 NULL, /* TX_CREATE_ATTR */ 1830 NULL, /* TX_CREATE_ACL_ATTR */ 1831 NULL, /* TX_MKDIR_ACL */ 1832 NULL, /* TX_MKDIR_ATTR */ 1833 NULL, /* TX_MKDIR_ACL_ATTR */ 1834 NULL, /* TX_WRITE2 */ 1835 }; 1836 1837 /* 1838 * ZIL get_data callbacks 1839 */ 1840 1841 static void 1842 ztest_get_done(zgd_t *zgd, int error) 1843 { 1844 ztest_ds_t *zd = zgd->zgd_private; 1845 uint64_t object = zgd->zgd_rl->rl_object; 1846 1847 if (zgd->zgd_db) 1848 dmu_buf_rele(zgd->zgd_db, zgd); 1849 1850 ztest_range_unlock(zgd->zgd_rl); 1851 ztest_object_unlock(zd, object); 1852 1853 if (error == 0 && zgd->zgd_bp) 1854 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); 1855 1856 umem_free(zgd, sizeof (*zgd)); 1857 } 1858 1859 static int 1860 ztest_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, 1861 zio_t *zio) 1862 { 1863 ztest_ds_t *zd = arg; 1864 objset_t *os = zd->zd_os; 1865 uint64_t object = lr->lr_foid; 1866 uint64_t offset = lr->lr_offset; 1867 uint64_t size = lr->lr_length; 1868 uint64_t txg = lr->lr_common.lrc_txg; 1869 uint64_t crtxg; 1870 dmu_object_info_t doi; 1871 dmu_buf_t *db; 1872 zgd_t *zgd; 1873 int error; 1874 1875 ASSERT3P(lwb, !=, NULL); 1876 ASSERT3P(zio, !=, NULL); 1877 ASSERT3U(size, !=, 0); 1878 1879 ztest_object_lock(zd, object, RL_READER); 1880 error = dmu_bonus_hold(os, object, FTAG, &db); 1881 if (error) { 1882 ztest_object_unlock(zd, object); 1883 return (error); 1884 } 1885 1886 crtxg = ztest_bt_bonus(db)->bt_crtxg; 1887 1888 if (crtxg == 0 || crtxg > txg) { 1889 dmu_buf_rele(db, FTAG); 1890 ztest_object_unlock(zd, object); 1891 return (ENOENT); 1892 } 1893 1894 dmu_object_info_from_db(db, &doi); 1895 dmu_buf_rele(db, FTAG); 1896 db = NULL; 1897 1898 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL); 1899 zgd->zgd_lwb = lwb; 1900 zgd->zgd_private = zd; 1901 1902 if (buf != NULL) { /* immediate write */ 1903 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, 1904 RL_READER); 1905 1906 error = dmu_read(os, object, offset, size, buf, 1907 DMU_READ_NO_PREFETCH); 1908 ASSERT(error == 0); 1909 } else { 1910 size = doi.doi_data_block_size; 1911 if (ISP2(size)) { 1912 offset = P2ALIGN(offset, size); 1913 } else { 1914 ASSERT(offset < size); 1915 offset = 0; 1916 } 1917 1918 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, 1919 RL_READER); 1920 1921 error = dmu_buf_hold(os, object, offset, zgd, &db, 1922 DMU_READ_NO_PREFETCH); 1923 1924 if (error == 0) { 1925 blkptr_t 
*bp = &lr->lr_blkptr; 1926 1927 zgd->zgd_db = db; 1928 zgd->zgd_bp = bp; 1929 1930 ASSERT(db->db_offset == offset); 1931 ASSERT(db->db_size == size); 1932 1933 error = dmu_sync(zio, lr->lr_common.lrc_txg, 1934 ztest_get_done, zgd); 1935 1936 if (error == 0) 1937 return (0); 1938 } 1939 } 1940 1941 ztest_get_done(zgd, error); 1942 1943 return (error); 1944 } 1945 1946 static void * 1947 ztest_lr_alloc(size_t lrsize, char *name) 1948 { 1949 char *lr; 1950 size_t namesize = name ? strlen(name) + 1 : 0; 1951 1952 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL); 1953 1954 if (name) 1955 bcopy(name, lr + lrsize, namesize); 1956 1957 return (lr); 1958 } 1959 1960 void 1961 ztest_lr_free(void *lr, size_t lrsize, char *name) 1962 { 1963 size_t namesize = name ? strlen(name) + 1 : 0; 1964 1965 umem_free(lr, lrsize + namesize); 1966 } 1967 1968 /* 1969 * Lookup a bunch of objects. Returns the number of objects not found. 1970 */ 1971 static int 1972 ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count) 1973 { 1974 int missing = 0; 1975 int error; 1976 1977 ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock)); 1978 1979 for (int i = 0; i < count; i++, od++) { 1980 od->od_object = 0; 1981 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name, 1982 sizeof (uint64_t), 1, &od->od_object); 1983 if (error) { 1984 ASSERT(error == ENOENT); 1985 ASSERT(od->od_object == 0); 1986 missing++; 1987 } else { 1988 dmu_buf_t *db; 1989 ztest_block_tag_t *bbt; 1990 dmu_object_info_t doi; 1991 1992 ASSERT(od->od_object != 0); 1993 ASSERT(missing == 0); /* there should be no gaps */ 1994 1995 ztest_object_lock(zd, od->od_object, RL_READER); 1996 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os, 1997 od->od_object, FTAG, &db)); 1998 dmu_object_info_from_db(db, &doi); 1999 bbt = ztest_bt_bonus(db); 2000 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 2001 od->od_type = doi.doi_type; 2002 od->od_blocksize = doi.doi_data_block_size; 2003 od->od_gen = bbt->bt_gen; 2004 dmu_buf_rele(db, FTAG); 2005 ztest_object_unlock(zd, od->od_object); 2006 } 2007 } 2008 2009 return (missing); 2010 } 2011 2012 static int 2013 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count) 2014 { 2015 int missing = 0; 2016 2017 ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock)); 2018 2019 for (int i = 0; i < count; i++, od++) { 2020 if (missing) { 2021 od->od_object = 0; 2022 missing++; 2023 continue; 2024 } 2025 2026 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 2027 2028 lr->lr_doid = od->od_dir; 2029 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */ 2030 lr->lrz_type = od->od_crtype; 2031 lr->lrz_blocksize = od->od_crblocksize; 2032 lr->lrz_ibshift = ztest_random_ibshift(); 2033 lr->lrz_bonustype = DMU_OT_UINT64_OTHER; 2034 lr->lrz_bonuslen = dmu_bonus_max(); 2035 lr->lr_gen = od->od_crgen; 2036 lr->lr_crtime[0] = time(NULL); 2037 2038 if (ztest_replay_create(zd, lr, B_FALSE) != 0) { 2039 ASSERT(missing == 0); 2040 od->od_object = 0; 2041 missing++; 2042 } else { 2043 od->od_object = lr->lr_foid; 2044 od->od_type = od->od_crtype; 2045 od->od_blocksize = od->od_crblocksize; 2046 od->od_gen = od->od_crgen; 2047 ASSERT(od->od_object != 0); 2048 } 2049 2050 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2051 } 2052 2053 return (missing); 2054 } 2055 2056 static int 2057 ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count) 2058 { 2059 int missing = 0; 2060 int error; 2061 2062 ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock)); 2063 2064 od += count - 1; 2065 2066 for (int i = count - 1; i >= 0; i--, od--) { 2067 if (missing) { 2068 missing++; 2069 continue; 2070 } 2071 2072 /* 2073 
* No object was found. 2074 */ 2075 if (od->od_object == 0) 2076 continue; 2077 2078 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 2079 2080 lr->lr_doid = od->od_dir; 2081 2082 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) { 2083 ASSERT3U(error, ==, ENOSPC); 2084 missing++; 2085 } else { 2086 od->od_object = 0; 2087 } 2088 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2089 } 2090 2091 return (missing); 2092 } 2093 2094 static int 2095 ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size, 2096 void *data) 2097 { 2098 lr_write_t *lr; 2099 int error; 2100 2101 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL); 2102 2103 lr->lr_foid = object; 2104 lr->lr_offset = offset; 2105 lr->lr_length = size; 2106 lr->lr_blkoff = 0; 2107 BP_ZERO(&lr->lr_blkptr); 2108 2109 bcopy(data, lr + 1, size); 2110 2111 error = ztest_replay_write(zd, lr, B_FALSE); 2112 2113 ztest_lr_free(lr, sizeof (*lr) + size, NULL); 2114 2115 return (error); 2116 } 2117 2118 static int 2119 ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2120 { 2121 lr_truncate_t *lr; 2122 int error; 2123 2124 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2125 2126 lr->lr_foid = object; 2127 lr->lr_offset = offset; 2128 lr->lr_length = size; 2129 2130 error = ztest_replay_truncate(zd, lr, B_FALSE); 2131 2132 ztest_lr_free(lr, sizeof (*lr), NULL); 2133 2134 return (error); 2135 } 2136 2137 static int 2138 ztest_setattr(ztest_ds_t *zd, uint64_t object) 2139 { 2140 lr_setattr_t *lr; 2141 int error; 2142 2143 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2144 2145 lr->lr_foid = object; 2146 lr->lr_size = 0; 2147 lr->lr_mode = 0; 2148 2149 error = ztest_replay_setattr(zd, lr, B_FALSE); 2150 2151 ztest_lr_free(lr, sizeof (*lr), NULL); 2152 2153 return (error); 2154 } 2155 2156 static void 2157 ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2158 { 2159 objset_t *os = zd->zd_os; 2160 dmu_tx_t *tx; 2161 uint64_t txg; 2162 rl_t *rl; 2163 2164 txg_wait_synced(dmu_objset_pool(os), 0); 2165 2166 ztest_object_lock(zd, object, RL_READER); 2167 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER); 2168 2169 tx = dmu_tx_create(os); 2170 2171 dmu_tx_hold_write(tx, object, offset, size); 2172 2173 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 2174 2175 if (txg != 0) { 2176 dmu_prealloc(os, object, offset, size, tx); 2177 dmu_tx_commit(tx); 2178 txg_wait_synced(dmu_objset_pool(os), txg); 2179 } else { 2180 (void) dmu_free_long_range(os, object, offset, size); 2181 } 2182 2183 ztest_range_unlock(rl); 2184 ztest_object_unlock(zd, object); 2185 } 2186 2187 static void 2188 ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) 2189 { 2190 int err; 2191 ztest_block_tag_t wbt; 2192 dmu_object_info_t doi; 2193 enum ztest_io_type io_type; 2194 uint64_t blocksize; 2195 void *data; 2196 2197 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0); 2198 blocksize = doi.doi_data_block_size; 2199 data = umem_alloc(blocksize, UMEM_NOFAIL); 2200 2201 /* 2202 * Pick an i/o type at random, biased toward writing block tags. 
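	 * Half the time we force ZTEST_IO_WRITE_TAG; otherwise we pick
	 * uniformly among all ZTEST_IO_TYPES, so tag writes end up being
	 * by far the most common operation.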
2203 */ 2204 io_type = ztest_random(ZTEST_IO_TYPES); 2205 if (ztest_random(2) == 0) 2206 io_type = ZTEST_IO_WRITE_TAG; 2207 2208 rw_enter(&zd->zd_zilog_lock, RW_READER); 2209 2210 switch (io_type) { 2211 2212 case ZTEST_IO_WRITE_TAG: 2213 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); 2214 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); 2215 break; 2216 2217 case ZTEST_IO_WRITE_PATTERN: 2218 (void) memset(data, 'a' + (object + offset) % 5, blocksize); 2219 if (ztest_random(2) == 0) { 2220 /* 2221 * Induce fletcher2 collisions to ensure that 2222 * zio_ddt_collision() detects and resolves them 2223 * when using fletcher2-verify for deduplication. 2224 */ 2225 ((uint64_t *)data)[0] ^= 1ULL << 63; 2226 ((uint64_t *)data)[4] ^= 1ULL << 63; 2227 } 2228 (void) ztest_write(zd, object, offset, blocksize, data); 2229 break; 2230 2231 case ZTEST_IO_WRITE_ZEROES: 2232 bzero(data, blocksize); 2233 (void) ztest_write(zd, object, offset, blocksize, data); 2234 break; 2235 2236 case ZTEST_IO_TRUNCATE: 2237 (void) ztest_truncate(zd, object, offset, blocksize); 2238 break; 2239 2240 case ZTEST_IO_SETATTR: 2241 (void) ztest_setattr(zd, object); 2242 break; 2243 2244 case ZTEST_IO_REWRITE: 2245 rw_enter(&ztest_name_lock, RW_READER); 2246 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2247 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa), 2248 B_FALSE); 2249 VERIFY(err == 0 || err == ENOSPC); 2250 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2251 ZFS_PROP_COMPRESSION, 2252 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), 2253 B_FALSE); 2254 VERIFY(err == 0 || err == ENOSPC); 2255 rw_exit(&ztest_name_lock); 2256 2257 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data, 2258 DMU_READ_NO_PREFETCH)); 2259 2260 (void) ztest_write(zd, object, offset, blocksize, data); 2261 break; 2262 } 2263 2264 rw_exit(&zd->zd_zilog_lock); 2265 2266 umem_free(data, blocksize); 2267 } 2268 2269 /* 2270 * Initialize an object description template. 2271 */ 2272 static void 2273 ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, 2274 dmu_object_type_t type, uint64_t blocksize, uint64_t gen) 2275 { 2276 od->od_dir = ZTEST_DIROBJ; 2277 od->od_object = 0; 2278 2279 od->od_crtype = type; 2280 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); 2281 od->od_crgen = gen; 2282 2283 od->od_type = DMU_OT_NONE; 2284 od->od_blocksize = 0; 2285 od->od_gen = 0; 2286 2287 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", 2288 tag, (int64_t)id, index); 2289 } 2290 2291 /* 2292 * Lookup or create the objects for a test using the od template. 2293 * If the objects do not all exist, or if 'remove' is specified, 2294 * remove any existing objects and create new ones. Otherwise, 2295 * use the existing objects. 
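 * The whole lookup/remove/create sequence runs under zd_dirobj_lock,
 * so it is atomic with respect to other users of the od template.
 *
 * A typical caller looks roughly like:
 *
 *	ztest_od_t od[1];
 *	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
 *	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
 *		return;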
2296 */ 2297 static int 2298 ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) 2299 { 2300 int count = size / sizeof (*od); 2301 int rv = 0; 2302 2303 mutex_enter(&zd->zd_dirobj_lock); 2304 if ((ztest_lookup(zd, od, count) != 0 || remove) && 2305 (ztest_remove(zd, od, count) != 0 || 2306 ztest_create(zd, od, count) != 0)) 2307 rv = -1; 2308 zd->zd_od = od; 2309 mutex_exit(&zd->zd_dirobj_lock); 2310 2311 return (rv); 2312 } 2313 2314 /* ARGSUSED */ 2315 void 2316 ztest_zil_commit(ztest_ds_t *zd, uint64_t id) 2317 { 2318 zilog_t *zilog = zd->zd_zilog; 2319 2320 rw_enter(&zd->zd_zilog_lock, RW_READER); 2321 2322 zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); 2323 2324 /* 2325 * Remember the committed values in zd, which is in parent/child 2326 * shared memory. If we die, the next iteration of ztest_run() 2327 * will verify that the log really does contain this record. 2328 */ 2329 mutex_enter(&zilog->zl_lock); 2330 ASSERT(zd->zd_shared != NULL); 2331 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq); 2332 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq; 2333 mutex_exit(&zilog->zl_lock); 2334 2335 rw_exit(&zd->zd_zilog_lock); 2336 } 2337 2338 /* 2339 * This function is designed to simulate the operations that occur during a 2340 * mount/unmount operation. We hold the dataset across these operations in an 2341 * attempt to expose any implicit assumptions about ZIL management. 2342 */ 2343 /* ARGSUSED */ 2344 void 2345 ztest_zil_remount(ztest_ds_t *zd, uint64_t id) 2346 { 2347 objset_t *os = zd->zd_os; 2348 2349 /* 2350 * We grab the zd_dirobj_lock to ensure that no other thread is 2351 * updating the zil (i.e. adding in-memory log records) and the 2352 * zd_zilog_lock to block any I/O. 2353 */ 2354 mutex_enter(&zd->zd_dirobj_lock); 2355 rw_enter(&zd->zd_zilog_lock, RW_WRITER); 2356 2357 /* zfsvfs_teardown() */ 2358 zil_close(zd->zd_zilog); 2359 2360 /* zfsvfs_setup() */ 2361 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); 2362 zil_replay(os, zd, ztest_replay_vector); 2363 2364 rw_exit(&zd->zd_zilog_lock); 2365 mutex_exit(&zd->zd_dirobj_lock); 2366 } 2367 2368 /* 2369 * Verify that we can't destroy an active pool, create an existing pool, 2370 * or create a pool with a bad vdev spec. 2371 */ 2372 /* ARGSUSED */ 2373 void 2374 ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) 2375 { 2376 ztest_shared_opts_t *zo = &ztest_opts; 2377 spa_t *spa; 2378 nvlist_t *nvroot; 2379 2380 /* 2381 * Attempt to create using a bad file. 2382 */ 2383 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2384 VERIFY3U(ENOENT, ==, 2385 spa_create("ztest_bad_file", nvroot, NULL, NULL)); 2386 nvlist_free(nvroot); 2387 2388 /* 2389 * Attempt to create using a bad mirror. 2390 */ 2391 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1); 2392 VERIFY3U(ENOENT, ==, 2393 spa_create("ztest_bad_mirror", nvroot, NULL, NULL)); 2394 nvlist_free(nvroot); 2395 2396 /* 2397 * Attempt to create an existing pool. It shouldn't matter 2398 * what's in the nvroot; we should fail with EEXIST. 
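	 * We take ztest_name_lock as reader so namespace operations
	 * elsewhere in ztest can't race with the EEXIST and EBUSY checks.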
2399 */ 2400 rw_enter(&ztest_name_lock, RW_READER); 2401 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2402 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL)); 2403 nvlist_free(nvroot); 2404 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG)); 2405 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool)); 2406 spa_close(spa, FTAG); 2407 2408 rw_exit(&ztest_name_lock); 2409 } 2410 2411 /* ARGSUSED */ 2412 void 2413 ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id) 2414 { 2415 spa_t *spa; 2416 uint64_t initial_version = SPA_VERSION_INITIAL; 2417 uint64_t version, newversion; 2418 nvlist_t *nvroot, *props; 2419 char *name; 2420 2421 mutex_enter(&ztest_vdev_lock); 2422 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool); 2423 2424 /* 2425 * Clean up from previous runs. 2426 */ 2427 (void) spa_destroy(name); 2428 2429 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0, 2430 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1); 2431 2432 /* 2433 * If we're configuring a RAIDZ device then make sure that the 2434 * the initial version is capable of supporting that feature. 2435 */ 2436 switch (ztest_opts.zo_raidz_parity) { 2437 case 0: 2438 case 1: 2439 initial_version = SPA_VERSION_INITIAL; 2440 break; 2441 case 2: 2442 initial_version = SPA_VERSION_RAIDZ2; 2443 break; 2444 case 3: 2445 initial_version = SPA_VERSION_RAIDZ3; 2446 break; 2447 } 2448 2449 /* 2450 * Create a pool with a spa version that can be upgraded. Pick 2451 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES. 2452 */ 2453 do { 2454 version = ztest_random_spa_version(initial_version); 2455 } while (version > SPA_VERSION_BEFORE_FEATURES); 2456 2457 props = fnvlist_alloc(); 2458 fnvlist_add_uint64(props, 2459 zpool_prop_to_name(ZPOOL_PROP_VERSION), version); 2460 VERIFY0(spa_create(name, nvroot, props, NULL)); 2461 fnvlist_free(nvroot); 2462 fnvlist_free(props); 2463 2464 VERIFY0(spa_open(name, &spa, FTAG)); 2465 VERIFY3U(spa_version(spa), ==, version); 2466 newversion = ztest_random_spa_version(version + 1); 2467 2468 if (ztest_opts.zo_verbose >= 4) { 2469 (void) printf("upgrading spa version from %llu to %llu\n", 2470 (u_longlong_t)version, (u_longlong_t)newversion); 2471 } 2472 2473 spa_upgrade(spa, newversion); 2474 VERIFY3U(spa_version(spa), >, version); 2475 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config, 2476 zpool_prop_to_name(ZPOOL_PROP_VERSION))); 2477 spa_close(spa, FTAG); 2478 2479 strfree(name); 2480 mutex_exit(&ztest_vdev_lock); 2481 } 2482 2483 static void 2484 ztest_spa_checkpoint(spa_t *spa) 2485 { 2486 ASSERT(MUTEX_HELD(&ztest_checkpoint_lock)); 2487 2488 int error = spa_checkpoint(spa->spa_name); 2489 2490 switch (error) { 2491 case 0: 2492 case ZFS_ERR_DEVRM_IN_PROGRESS: 2493 case ZFS_ERR_DISCARDING_CHECKPOINT: 2494 case ZFS_ERR_CHECKPOINT_EXISTS: 2495 break; 2496 case ENOSPC: 2497 ztest_record_enospc(FTAG); 2498 break; 2499 default: 2500 fatal(0, "spa_checkpoint(%s) = %d", spa->spa_name, error); 2501 } 2502 } 2503 2504 static void 2505 ztest_spa_discard_checkpoint(spa_t *spa) 2506 { 2507 ASSERT(MUTEX_HELD(&ztest_checkpoint_lock)); 2508 2509 int error = spa_checkpoint_discard(spa->spa_name); 2510 2511 switch (error) { 2512 case 0: 2513 case ZFS_ERR_DISCARDING_CHECKPOINT: 2514 case ZFS_ERR_NO_CHECKPOINT: 2515 break; 2516 default: 2517 fatal(0, "spa_discard_checkpoint(%s) = %d", 2518 spa->spa_name, error); 2519 } 2520 2521 } 2522 2523 /* ARGSUSED */ 2524 void 2525 ztest_spa_checkpoint_create_discard(ztest_ds_t *zd, uint64_t id) 2526 { 
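	/*
	 * Flip a coin: either create a pool checkpoint or discard whatever
	 * checkpoint may exist. Both helpers above tolerate the
	 * checkpoint-already-exists and no-checkpoint cases, so repeated
	 * iterations can interleave freely; ztest_checkpoint_lock keeps
	 * the two operations from racing with each other.
	 */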
2527 spa_t *spa = ztest_spa; 2528 2529 mutex_enter(&ztest_checkpoint_lock); 2530 if (ztest_random(2) == 0) { 2531 ztest_spa_checkpoint(spa); 2532 } else { 2533 ztest_spa_discard_checkpoint(spa); 2534 } 2535 mutex_exit(&ztest_checkpoint_lock); 2536 } 2537 2538 2539 static vdev_t * 2540 vdev_lookup_by_path(vdev_t *vd, const char *path) 2541 { 2542 vdev_t *mvd; 2543 2544 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) 2545 return (vd); 2546 2547 for (int c = 0; c < vd->vdev_children; c++) 2548 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != 2549 NULL) 2550 return (mvd); 2551 2552 return (NULL); 2553 } 2554 2555 /* 2556 * Find the first available hole which can be used as a top-level. 2557 */ 2558 int 2559 find_vdev_hole(spa_t *spa) 2560 { 2561 vdev_t *rvd = spa->spa_root_vdev; 2562 int c; 2563 2564 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); 2565 2566 for (c = 0; c < rvd->vdev_children; c++) { 2567 vdev_t *cvd = rvd->vdev_child[c]; 2568 2569 if (cvd->vdev_ishole) 2570 break; 2571 } 2572 return (c); 2573 } 2574 2575 /* 2576 * Verify that vdev_add() works as expected. 2577 */ 2578 /* ARGSUSED */ 2579 void 2580 ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) 2581 { 2582 ztest_shared_t *zs = ztest_shared; 2583 spa_t *spa = ztest_spa; 2584 uint64_t leaves; 2585 uint64_t guid; 2586 nvlist_t *nvroot; 2587 int error; 2588 2589 mutex_enter(&ztest_vdev_lock); 2590 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz; 2591 2592 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2593 2594 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; 2595 2596 /* 2597 * If we have slogs then remove them 1/4 of the time. 2598 */ 2599 if (spa_has_slogs(spa) && ztest_random(4) == 0) { 2600 /* 2601 * Grab the guid from the head of the log class rotor. 2602 */ 2603 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; 2604 2605 spa_config_exit(spa, SCL_VDEV, FTAG); 2606 2607 /* 2608 * We have to grab the zs_name_lock as writer to 2609 * prevent a race between removing a slog (dmu_objset_find) 2610 * and destroying a dataset. Removing the slog will 2611 * grab a reference on the dataset which may cause 2612 * dmu_objset_destroy() to fail with EBUSY thus 2613 * leaving the dataset in an inconsistent state. 2614 */ 2615 rw_enter(&ztest_name_lock, RW_WRITER); 2616 error = spa_vdev_remove(spa, guid, B_FALSE); 2617 rw_exit(&ztest_name_lock); 2618 2619 switch (error) { 2620 case 0: 2621 case EEXIST: 2622 case ZFS_ERR_CHECKPOINT_EXISTS: 2623 case ZFS_ERR_DISCARDING_CHECKPOINT: 2624 break; 2625 default: 2626 fatal(0, "spa_vdev_remove() = %d", error); 2627 } 2628 } else { 2629 spa_config_exit(spa, SCL_VDEV, FTAG); 2630 2631 /* 2632 * Make 1/4 of the devices be log devices. 2633 */ 2634 nvroot = make_vdev_root(NULL, NULL, NULL, 2635 ztest_opts.zo_vdev_size, 0, 2636 ztest_random(4) == 0, ztest_opts.zo_raidz, 2637 zs->zs_mirrors, 1); 2638 2639 error = spa_vdev_add(spa, nvroot); 2640 nvlist_free(nvroot); 2641 2642 switch (error) { 2643 case 0: 2644 break; 2645 case ENOSPC: 2646 ztest_record_enospc("spa_vdev_add"); 2647 break; 2648 default: 2649 fatal(0, "spa_vdev_add() = %d", error); 2650 } 2651 } 2652 2653 mutex_exit(&ztest_vdev_lock); 2654 } 2655 2656 /* 2657 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 
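 * Each pass picks the spare list or the l2cache list at random, then
 * either removes a randomly chosen aux device or adds a new file-backed
 * one at the first unused ztest_aux_template path.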
2658 */ 2659 /* ARGSUSED */ 2660 void 2661 ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2662 { 2663 ztest_shared_t *zs = ztest_shared; 2664 spa_t *spa = ztest_spa; 2665 vdev_t *rvd = spa->spa_root_vdev; 2666 spa_aux_vdev_t *sav; 2667 char *aux; 2668 uint64_t guid = 0; 2669 int error; 2670 2671 if (ztest_random(2) == 0) { 2672 sav = &spa->spa_spares; 2673 aux = ZPOOL_CONFIG_SPARES; 2674 } else { 2675 sav = &spa->spa_l2cache; 2676 aux = ZPOOL_CONFIG_L2CACHE; 2677 } 2678 2679 mutex_enter(&ztest_vdev_lock); 2680 2681 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2682 2683 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2684 /* 2685 * Pick a random device to remove. 2686 */ 2687 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2688 } else { 2689 /* 2690 * Find an unused device we can add. 2691 */ 2692 zs->zs_vdev_aux = 0; 2693 for (;;) { 2694 char path[MAXPATHLEN]; 2695 int c; 2696 (void) snprintf(path, sizeof (path), ztest_aux_template, 2697 ztest_opts.zo_dir, ztest_opts.zo_pool, aux, 2698 zs->zs_vdev_aux); 2699 for (c = 0; c < sav->sav_count; c++) 2700 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2701 path) == 0) 2702 break; 2703 if (c == sav->sav_count && 2704 vdev_lookup_by_path(rvd, path) == NULL) 2705 break; 2706 zs->zs_vdev_aux++; 2707 } 2708 } 2709 2710 spa_config_exit(spa, SCL_VDEV, FTAG); 2711 2712 if (guid == 0) { 2713 /* 2714 * Add a new device. 2715 */ 2716 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL, 2717 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2718 error = spa_vdev_add(spa, nvroot); 2719 2720 switch (error) { 2721 case 0: 2722 break; 2723 default: 2724 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2725 } 2726 nvlist_free(nvroot); 2727 } else { 2728 /* 2729 * Remove an existing device. Sometimes, dirty its 2730 * vdev state first to make sure we handle removal 2731 * of devices that have pending state changes. 
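		 * "Dirtying" here just means onlining the vdev immediately
		 * before issuing the remove.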
2732 */ 2733 if (ztest_random(2) == 0) 2734 (void) vdev_online(spa, guid, 0, NULL); 2735 2736 error = spa_vdev_remove(spa, guid, B_FALSE); 2737 2738 switch (error) { 2739 case 0: 2740 case EBUSY: 2741 case ZFS_ERR_CHECKPOINT_EXISTS: 2742 case ZFS_ERR_DISCARDING_CHECKPOINT: 2743 break; 2744 default: 2745 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2746 } 2747 } 2748 2749 mutex_exit(&ztest_vdev_lock); 2750 } 2751 2752 /* 2753 * split a pool if it has mirror tlvdevs 2754 */ 2755 /* ARGSUSED */ 2756 void 2757 ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2758 { 2759 ztest_shared_t *zs = ztest_shared; 2760 spa_t *spa = ztest_spa; 2761 vdev_t *rvd = spa->spa_root_vdev; 2762 nvlist_t *tree, **child, *config, *split, **schild; 2763 uint_t c, children, schildren = 0, lastlogid = 0; 2764 int error = 0; 2765 2766 mutex_enter(&ztest_vdev_lock); 2767 2768 /* ensure we have a useable config; mirrors of raidz aren't supported */ 2769 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) { 2770 mutex_exit(&ztest_vdev_lock); 2771 return; 2772 } 2773 2774 /* clean up the old pool, if any */ 2775 (void) spa_destroy("splitp"); 2776 2777 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2778 2779 /* generate a config from the existing config */ 2780 mutex_enter(&spa->spa_props_lock); 2781 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2782 &tree) == 0); 2783 mutex_exit(&spa->spa_props_lock); 2784 2785 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2786 &children) == 0); 2787 2788 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2789 for (c = 0; c < children; c++) { 2790 vdev_t *tvd = rvd->vdev_child[c]; 2791 nvlist_t **mchild; 2792 uint_t mchildren; 2793 2794 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2795 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2796 0) == 0); 2797 VERIFY(nvlist_add_string(schild[schildren], 2798 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2799 VERIFY(nvlist_add_uint64(schild[schildren], 2800 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2801 if (lastlogid == 0) 2802 lastlogid = schildren; 2803 ++schildren; 2804 continue; 2805 } 2806 lastlogid = 0; 2807 VERIFY(nvlist_lookup_nvlist_array(child[c], 2808 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2809 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2810 } 2811 2812 /* OK, create a config that can be used to split */ 2813 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2814 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2815 VDEV_TYPE_ROOT) == 0); 2816 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2817 lastlogid != 0 ? lastlogid : schildren) == 0); 2818 2819 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2820 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2821 2822 for (c = 0; c < schildren; c++) 2823 nvlist_free(schild[c]); 2824 free(schild); 2825 nvlist_free(split); 2826 2827 spa_config_exit(spa, SCL_VDEV, FTAG); 2828 2829 rw_enter(&ztest_name_lock, RW_WRITER); 2830 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2831 rw_exit(&ztest_name_lock); 2832 2833 nvlist_free(config); 2834 2835 if (error == 0) { 2836 (void) printf("successful split - results:\n"); 2837 mutex_enter(&spa_namespace_lock); 2838 show_pool_stats(spa); 2839 show_pool_stats(spa_lookup("splitp")); 2840 mutex_exit(&spa_namespace_lock); 2841 ++zs->zs_splits; 2842 --zs->zs_mirrors; 2843 } 2844 mutex_exit(&ztest_vdev_lock); 2845 } 2846 2847 /* 2848 * Verify that we can attach and detach devices. 
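 * Each pass picks a random leaf vdev. If the leaf has siblings it is
 * sometimes simply detached; otherwise we attach or replace it with a
 * new path or a hot spare, computing the expected errno (ENOTSUP,
 * EBUSY, EOVERFLOW, EDOM or success) up front and comparing it against
 * what spa_vdev_attach() actually returns.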
2849 */ 2850 /* ARGSUSED */ 2851 void 2852 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2853 { 2854 ztest_shared_t *zs = ztest_shared; 2855 spa_t *spa = ztest_spa; 2856 spa_aux_vdev_t *sav = &spa->spa_spares; 2857 vdev_t *rvd = spa->spa_root_vdev; 2858 vdev_t *oldvd, *newvd, *pvd; 2859 nvlist_t *root; 2860 uint64_t leaves; 2861 uint64_t leaf, top; 2862 uint64_t ashift = ztest_get_ashift(); 2863 uint64_t oldguid, pguid; 2864 uint64_t oldsize, newsize; 2865 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2866 int replacing; 2867 int oldvd_has_siblings = B_FALSE; 2868 int newvd_is_spare = B_FALSE; 2869 int oldvd_is_log; 2870 int error, expected_error; 2871 2872 mutex_enter(&ztest_vdev_lock); 2873 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 2874 2875 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2876 2877 /* 2878 * If a vdev is in the process of being removed, its removal may 2879 * finish while we are in progress, leading to an unexpected error 2880 * value. Don't bother trying to attach while we are in the middle 2881 * of removal. 2882 */ 2883 if (spa->spa_vdev_removal != NULL) { 2884 spa_config_exit(spa, SCL_ALL, FTAG); 2885 mutex_exit(&ztest_vdev_lock); 2886 return; 2887 } 2888 2889 /* 2890 * Decide whether to do an attach or a replace. 2891 */ 2892 replacing = ztest_random(2); 2893 2894 /* 2895 * Pick a random top-level vdev. 2896 */ 2897 top = ztest_random_vdev_top(spa, B_TRUE); 2898 2899 /* 2900 * Pick a random leaf within it. 2901 */ 2902 leaf = ztest_random(leaves); 2903 2904 /* 2905 * Locate this vdev. 2906 */ 2907 oldvd = rvd->vdev_child[top]; 2908 if (zs->zs_mirrors >= 1) { 2909 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2910 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2911 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz]; 2912 } 2913 if (ztest_opts.zo_raidz > 1) { 2914 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2915 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz); 2916 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz]; 2917 } 2918 2919 /* 2920 * If we're already doing an attach or replace, oldvd may be a 2921 * mirror vdev -- in which case, pick a random child. 2922 */ 2923 while (oldvd->vdev_children != 0) { 2924 oldvd_has_siblings = B_TRUE; 2925 ASSERT(oldvd->vdev_children >= 2); 2926 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2927 } 2928 2929 oldguid = oldvd->vdev_guid; 2930 oldsize = vdev_get_min_asize(oldvd); 2931 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2932 (void) strcpy(oldpath, oldvd->vdev_path); 2933 pvd = oldvd->vdev_parent; 2934 pguid = pvd->vdev_guid; 2935 2936 /* 2937 * If oldvd has siblings, then half of the time, detach it. 2938 */ 2939 if (oldvd_has_siblings && ztest_random(2) == 0) { 2940 spa_config_exit(spa, SCL_ALL, FTAG); 2941 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2942 if (error != 0 && error != ENODEV && error != EBUSY && 2943 error != ENOTSUP && error != ZFS_ERR_CHECKPOINT_EXISTS && 2944 error != ZFS_ERR_DISCARDING_CHECKPOINT) 2945 fatal(0, "detach (%s) returned %d", oldpath, error); 2946 mutex_exit(&ztest_vdev_lock); 2947 return; 2948 } 2949 2950 /* 2951 * For the new vdev, choose with equal probability between the two 2952 * standard paths (ending in either 'a' or 'b') or a random hot spare. 
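	 * (More precisely: a spare is used one time in three when any
	 * spares exist; otherwise the template path is used, flipped to
	 * end in 'b' half the time.)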
2953 */ 2954 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2955 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2956 newvd_is_spare = B_TRUE; 2957 (void) strcpy(newpath, newvd->vdev_path); 2958 } else { 2959 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2960 ztest_opts.zo_dir, ztest_opts.zo_pool, 2961 top * leaves + leaf); 2962 if (ztest_random(2) == 0) 2963 newpath[strlen(newpath) - 1] = 'b'; 2964 newvd = vdev_lookup_by_path(rvd, newpath); 2965 } 2966 2967 if (newvd) { 2968 /* 2969 * Reopen to ensure the vdev's asize field isn't stale. 2970 */ 2971 vdev_reopen(newvd); 2972 newsize = vdev_get_min_asize(newvd); 2973 } else { 2974 /* 2975 * Make newsize a little bigger or smaller than oldsize. 2976 * If it's smaller, the attach should fail. 2977 * If it's larger, and we're doing a replace, 2978 * we should get dynamic LUN growth when we're done. 2979 */ 2980 newsize = 10 * oldsize / (9 + ztest_random(3)); 2981 } 2982 2983 /* 2984 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2985 * unless it's a replace; in that case any non-replacing parent is OK. 2986 * 2987 * If newvd is already part of the pool, it should fail with EBUSY. 2988 * 2989 * If newvd is too small, it should fail with EOVERFLOW. 2990 */ 2991 if (pvd->vdev_ops != &vdev_mirror_ops && 2992 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2993 pvd->vdev_ops == &vdev_replacing_ops || 2994 pvd->vdev_ops == &vdev_spare_ops)) 2995 expected_error = ENOTSUP; 2996 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2997 expected_error = ENOTSUP; 2998 else if (newvd == oldvd) 2999 expected_error = replacing ? 0 : EBUSY; 3000 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 3001 expected_error = EBUSY; 3002 else if (newsize < oldsize) 3003 expected_error = EOVERFLOW; 3004 else if (ashift > oldvd->vdev_top->vdev_ashift) 3005 expected_error = EDOM; 3006 else 3007 expected_error = 0; 3008 3009 spa_config_exit(spa, SCL_ALL, FTAG); 3010 3011 /* 3012 * Build the nvlist describing newpath. 3013 */ 3014 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0, 3015 ashift, 0, 0, 0, 1); 3016 3017 error = spa_vdev_attach(spa, oldguid, root, replacing); 3018 3019 nvlist_free(root); 3020 3021 /* 3022 * If our parent was the replacing vdev, but the replace completed, 3023 * then instead of failing with ENOTSUP we may either succeed, 3024 * fail with ENODEV, or fail with EOVERFLOW. 3025 */ 3026 if (expected_error == ENOTSUP && 3027 (error == 0 || error == ENODEV || error == EOVERFLOW)) 3028 expected_error = error; 3029 3030 /* 3031 * If someone grew the LUN, the replacement may be too small. 
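	 * In that case accept EOVERFLOW (or EBUSY) as the expected error
	 * rather than failing the test.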
3032 */ 3033 if (error == EOVERFLOW || error == EBUSY) 3034 expected_error = error; 3035 3036 if (error == ZFS_ERR_CHECKPOINT_EXISTS || 3037 error == ZFS_ERR_DISCARDING_CHECKPOINT) 3038 expected_error = error; 3039 3040 /* XXX workaround 6690467 */ 3041 if (error != expected_error && expected_error != EBUSY) { 3042 fatal(0, "attach (%s %llu, %s %llu, %d) " 3043 "returned %d, expected %d", 3044 oldpath, oldsize, newpath, 3045 newsize, replacing, error, expected_error); 3046 } 3047 3048 mutex_exit(&ztest_vdev_lock); 3049 } 3050 3051 /* ARGSUSED */ 3052 void 3053 ztest_device_removal(ztest_ds_t *zd, uint64_t id) 3054 { 3055 spa_t *spa = ztest_spa; 3056 vdev_t *vd; 3057 uint64_t guid; 3058 3059 mutex_enter(&ztest_vdev_lock); 3060 3061 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3062 vd = vdev_lookup_top(spa, ztest_random_vdev_top(spa, B_FALSE)); 3063 guid = vd->vdev_guid; 3064 spa_config_exit(spa, SCL_VDEV, FTAG); 3065 3066 (void) spa_vdev_remove(spa, guid, B_FALSE); 3067 3068 mutex_exit(&ztest_vdev_lock); 3069 } 3070 3071 /* 3072 * Callback function which expands the physical size of the vdev. 3073 */ 3074 vdev_t * 3075 grow_vdev(vdev_t *vd, void *arg) 3076 { 3077 spa_t *spa = vd->vdev_spa; 3078 size_t *newsize = arg; 3079 size_t fsize; 3080 int fd; 3081 3082 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 3083 ASSERT(vd->vdev_ops->vdev_op_leaf); 3084 3085 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 3086 return (vd); 3087 3088 fsize = lseek(fd, 0, SEEK_END); 3089 (void) ftruncate(fd, *newsize); 3090 3091 if (ztest_opts.zo_verbose >= 6) { 3092 (void) printf("%s grew from %lu to %lu bytes\n", 3093 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 3094 } 3095 (void) close(fd); 3096 return (NULL); 3097 } 3098 3099 /* 3100 * Callback function which expands a given vdev by calling vdev_online(). 3101 */ 3102 /* ARGSUSED */ 3103 vdev_t * 3104 online_vdev(vdev_t *vd, void *arg) 3105 { 3106 spa_t *spa = vd->vdev_spa; 3107 vdev_t *tvd = vd->vdev_top; 3108 uint64_t guid = vd->vdev_guid; 3109 uint64_t generation = spa->spa_config_generation + 1; 3110 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 3111 int error; 3112 3113 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 3114 ASSERT(vd->vdev_ops->vdev_op_leaf); 3115 3116 /* Calling vdev_online will initialize the new metaslabs */ 3117 spa_config_exit(spa, SCL_STATE, spa); 3118 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 3119 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3120 3121 /* 3122 * If vdev_online returned an error or the underlying vdev_open 3123 * failed then we abort the expand. The only way to know that 3124 * vdev_open fails is by checking the returned newstate. 3125 */ 3126 if (error || newstate != VDEV_STATE_HEALTHY) { 3127 if (ztest_opts.zo_verbose >= 5) { 3128 (void) printf("Unable to expand vdev, state %llu, " 3129 "error %d\n", (u_longlong_t)newstate, error); 3130 } 3131 return (vd); 3132 } 3133 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 3134 3135 /* 3136 * Since we dropped the lock we need to ensure that we're 3137 * still talking to the original vdev. It's possible this 3138 * vdev may have been detached/replaced while we were 3139 * trying to online it. 
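	 * We detect this by comparing the current spa_config_generation
	 * against the single bump we expected from the online; any other
	 * value means the config changed underneath us, so we give up on
	 * this expansion attempt.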
3140 */ 3141 if (generation != spa->spa_config_generation) { 3142 if (ztest_opts.zo_verbose >= 5) { 3143 (void) printf("vdev configuration has changed, " 3144 "guid %llu, state %llu, expected gen %llu, " 3145 "got gen %llu\n", 3146 (u_longlong_t)guid, 3147 (u_longlong_t)tvd->vdev_state, 3148 (u_longlong_t)generation, 3149 (u_longlong_t)spa->spa_config_generation); 3150 } 3151 return (vd); 3152 } 3153 return (NULL); 3154 } 3155 3156 /* 3157 * Traverse the vdev tree calling the supplied function. 3158 * We continue to walk the tree until we either have walked all 3159 * children or we receive a non-NULL return from the callback. 3160 * If a NULL callback is passed, then we just return back the first 3161 * leaf vdev we encounter. 3162 */ 3163 vdev_t * 3164 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 3165 { 3166 if (vd->vdev_ops->vdev_op_leaf) { 3167 if (func == NULL) 3168 return (vd); 3169 else 3170 return (func(vd, arg)); 3171 } 3172 3173 for (uint_t c = 0; c < vd->vdev_children; c++) { 3174 vdev_t *cvd = vd->vdev_child[c]; 3175 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 3176 return (cvd); 3177 } 3178 return (NULL); 3179 } 3180 3181 /* 3182 * Verify that dynamic LUN growth works as expected. 3183 */ 3184 /* ARGSUSED */ 3185 void 3186 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 3187 { 3188 spa_t *spa = ztest_spa; 3189 vdev_t *vd, *tvd; 3190 metaslab_class_t *mc; 3191 metaslab_group_t *mg; 3192 size_t psize, newsize; 3193 uint64_t top; 3194 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 3195 3196 mutex_enter(&ztest_checkpoint_lock); 3197 mutex_enter(&ztest_vdev_lock); 3198 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3199 3200 /* 3201 * If there is a vdev removal in progress, it could complete while 3202 * we are running, in which case we would not be able to verify 3203 * that the metaslab_class space increased (because it decreases 3204 * when the device removal completes). 3205 */ 3206 if (spa->spa_vdev_removal != NULL) { 3207 spa_config_exit(spa, SCL_STATE, spa); 3208 mutex_exit(&ztest_vdev_lock); 3209 mutex_exit(&ztest_checkpoint_lock); 3210 return; 3211 } 3212 3213 top = ztest_random_vdev_top(spa, B_TRUE); 3214 3215 tvd = spa->spa_root_vdev->vdev_child[top]; 3216 mg = tvd->vdev_mg; 3217 mc = mg->mg_class; 3218 old_ms_count = tvd->vdev_ms_count; 3219 old_class_space = metaslab_class_get_space(mc); 3220 3221 /* 3222 * Determine the size of the first leaf vdev associated with 3223 * our top-level device. 3224 */ 3225 vd = vdev_walk_tree(tvd, NULL, NULL); 3226 ASSERT3P(vd, !=, NULL); 3227 ASSERT(vd->vdev_ops->vdev_op_leaf); 3228 3229 psize = vd->vdev_psize; 3230 3231 /* 3232 * We only try to expand the vdev if it's healthy, less than 4x its 3233 * original size, and it has a valid psize. 3234 */ 3235 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 3236 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 3237 spa_config_exit(spa, SCL_STATE, spa); 3238 mutex_exit(&ztest_vdev_lock); 3239 mutex_exit(&ztest_checkpoint_lock); 3240 return; 3241 } 3242 ASSERT(psize > 0); 3243 newsize = psize + psize / 8; 3244 ASSERT3U(newsize, >, psize); 3245 3246 if (ztest_opts.zo_verbose >= 6) { 3247 (void) printf("Expanding LUN %s from %lu to %lu\n", 3248 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 3249 } 3250 3251 /* 3252 * Growing the vdev is a two step process: 3253 * 1). expand the physical size (i.e. relabel) 3254 * 2). 
online the vdev to create the new metaslabs 3255 */ 3256 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 3257 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 3258 tvd->vdev_state != VDEV_STATE_HEALTHY) { 3259 if (ztest_opts.zo_verbose >= 5) { 3260 (void) printf("Could not expand LUN because " 3261 "the vdev configuration changed.\n"); 3262 } 3263 spa_config_exit(spa, SCL_STATE, spa); 3264 mutex_exit(&ztest_vdev_lock); 3265 mutex_exit(&ztest_checkpoint_lock); 3266 return; 3267 } 3268 3269 spa_config_exit(spa, SCL_STATE, spa); 3270 3271 /* 3272 * Expanding the LUN will update the config asynchronously, 3273 * thus we must wait for the async thread to complete any 3274 * pending tasks before proceeding. 3275 */ 3276 for (;;) { 3277 boolean_t done; 3278 mutex_enter(&spa->spa_async_lock); 3279 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 3280 mutex_exit(&spa->spa_async_lock); 3281 if (done) 3282 break; 3283 txg_wait_synced(spa_get_dsl(spa), 0); 3284 (void) poll(NULL, 0, 100); 3285 } 3286 3287 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3288 3289 tvd = spa->spa_root_vdev->vdev_child[top]; 3290 new_ms_count = tvd->vdev_ms_count; 3291 new_class_space = metaslab_class_get_space(mc); 3292 3293 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 3294 if (ztest_opts.zo_verbose >= 5) { 3295 (void) printf("Could not verify LUN expansion due to " 3296 "intervening vdev offline or remove.\n"); 3297 } 3298 spa_config_exit(spa, SCL_STATE, spa); 3299 mutex_exit(&ztest_vdev_lock); 3300 mutex_exit(&ztest_checkpoint_lock); 3301 return; 3302 } 3303 3304 /* 3305 * Make sure we were able to grow the vdev. 3306 */ 3307 if (new_ms_count <= old_ms_count) { 3308 fatal(0, "LUN expansion failed: ms_count %llu < %llu\n", 3309 old_ms_count, new_ms_count); 3310 } 3311 3312 /* 3313 * Make sure we were able to grow the pool. 3314 */ 3315 if (new_class_space <= old_class_space) { 3316 fatal(0, "LUN expansion failed: class_space %llu < %llu\n", 3317 old_class_space, new_class_space); 3318 } 3319 3320 if (ztest_opts.zo_verbose >= 5) { 3321 char oldnumbuf[NN_NUMBUF_SZ], newnumbuf[NN_NUMBUF_SZ]; 3322 3323 nicenum(old_class_space, oldnumbuf, sizeof (oldnumbuf)); 3324 nicenum(new_class_space, newnumbuf, sizeof (newnumbuf)); 3325 (void) printf("%s grew from %s to %s\n", 3326 spa->spa_name, oldnumbuf, newnumbuf); 3327 } 3328 3329 spa_config_exit(spa, SCL_STATE, spa); 3330 mutex_exit(&ztest_vdev_lock); 3331 mutex_exit(&ztest_checkpoint_lock); 3332 } 3333 3334 /* 3335 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 3336 */ 3337 /* ARGSUSED */ 3338 static void 3339 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 3340 { 3341 /* 3342 * Create the objects common to all ztest datasets. 
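 * Currently that is just ZTEST_DIROBJ, the ZAP directory every test
 * uses to map object names to object numbers.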
3343 */ 3344 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 3345 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 3346 } 3347 3348 static int 3349 ztest_dataset_create(char *dsname) 3350 { 3351 uint64_t zilset = ztest_random(100); 3352 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 3353 ztest_objset_create_cb, NULL); 3354 3355 if (err || zilset < 80) 3356 return (err); 3357 3358 if (ztest_opts.zo_verbose >= 6) 3359 (void) printf("Setting dataset %s to sync always\n", dsname); 3360 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 3361 ZFS_SYNC_ALWAYS, B_FALSE)); 3362 } 3363 3364 /* ARGSUSED */ 3365 static int 3366 ztest_objset_destroy_cb(const char *name, void *arg) 3367 { 3368 objset_t *os; 3369 dmu_object_info_t doi; 3370 int error; 3371 3372 /* 3373 * Verify that the dataset contains a directory object. 3374 */ 3375 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os)); 3376 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 3377 if (error != ENOENT) { 3378 /* We could have crashed in the middle of destroying it */ 3379 ASSERT0(error); 3380 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 3381 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 3382 } 3383 dmu_objset_disown(os, FTAG); 3384 3385 /* 3386 * Destroy the dataset. 3387 */ 3388 if (strchr(name, '@') != NULL) { 3389 VERIFY0(dsl_destroy_snapshot(name, B_TRUE)); 3390 } else { 3391 error = dsl_destroy_head(name); 3392 /* There could be a hold on this dataset */ 3393 if (error != EBUSY) 3394 ASSERT0(error); 3395 } 3396 return (0); 3397 } 3398 3399 static boolean_t 3400 ztest_snapshot_create(char *osname, uint64_t id) 3401 { 3402 char snapname[ZFS_MAX_DATASET_NAME_LEN]; 3403 int error; 3404 3405 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id); 3406 3407 error = dmu_objset_snapshot_one(osname, snapname); 3408 if (error == ENOSPC) { 3409 ztest_record_enospc(FTAG); 3410 return (B_FALSE); 3411 } 3412 if (error != 0 && error != EEXIST) { 3413 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname, 3414 snapname, error); 3415 } 3416 return (B_TRUE); 3417 } 3418 3419 static boolean_t 3420 ztest_snapshot_destroy(char *osname, uint64_t id) 3421 { 3422 char snapname[ZFS_MAX_DATASET_NAME_LEN]; 3423 int error; 3424 3425 (void) snprintf(snapname, sizeof (snapname), "%s@%llu", osname, 3426 (u_longlong_t)id); 3427 3428 error = dsl_destroy_snapshot(snapname, B_FALSE); 3429 if (error != 0 && error != ENOENT) 3430 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 3431 return (B_TRUE); 3432 } 3433 3434 /* ARGSUSED */ 3435 void 3436 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 3437 { 3438 ztest_ds_t zdtmp; 3439 int iters; 3440 int error; 3441 objset_t *os, *os2; 3442 char name[ZFS_MAX_DATASET_NAME_LEN]; 3443 zilog_t *zilog; 3444 3445 rw_enter(&ztest_name_lock, RW_READER); 3446 3447 (void) snprintf(name, sizeof (name), "%s/temp_%llu", 3448 ztest_opts.zo_pool, (u_longlong_t)id); 3449 3450 /* 3451 * If this dataset exists from a previous run, process its replay log 3452 * half of the time. If we don't replay it, then dmu_objset_destroy() 3453 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 3454 */ 3455 if (ztest_random(2) == 0 && 3456 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 3457 ztest_zd_init(&zdtmp, NULL, os); 3458 zil_replay(os, &zdtmp, ztest_replay_vector); 3459 ztest_zd_fini(&zdtmp); 3460 dmu_objset_disown(os, FTAG); 3461 } 3462 3463 /* 3464 * There may be an old instance of the dataset we're about to 3465 * create lying around from a previous run. 
If so, destroy it 3466 * and all of its snapshots. 3467 */ 3468 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 3469 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 3470 3471 /* 3472 * Verify that the destroyed dataset is no longer in the namespace. 3473 */ 3474 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, 3475 FTAG, &os)); 3476 3477 /* 3478 * Verify that we can create a new dataset. 3479 */ 3480 error = ztest_dataset_create(name); 3481 if (error) { 3482 if (error == ENOSPC) { 3483 ztest_record_enospc(FTAG); 3484 rw_exit(&ztest_name_lock); 3485 return; 3486 } 3487 fatal(0, "dmu_objset_create(%s) = %d", name, error); 3488 } 3489 3490 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 3491 3492 ztest_zd_init(&zdtmp, NULL, os); 3493 3494 /* 3495 * Open the intent log for it. 3496 */ 3497 zilog = zil_open(os, ztest_get_data); 3498 3499 /* 3500 * Put some objects in there, do a little I/O to them, 3501 * and randomly take a couple of snapshots along the way. 3502 */ 3503 iters = ztest_random(5); 3504 for (int i = 0; i < iters; i++) { 3505 ztest_dmu_object_alloc_free(&zdtmp, id); 3506 if (ztest_random(iters) == 0) 3507 (void) ztest_snapshot_create(name, i); 3508 } 3509 3510 /* 3511 * Verify that we cannot create an existing dataset. 3512 */ 3513 VERIFY3U(EEXIST, ==, 3514 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 3515 3516 /* 3517 * Verify that we can hold an objset that is also owned. 3518 */ 3519 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 3520 dmu_objset_rele(os2, FTAG); 3521 3522 /* 3523 * Verify that we cannot own an objset that is already owned. 3524 */ 3525 VERIFY3U(EBUSY, ==, 3526 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3527 3528 zil_close(zilog); 3529 dmu_objset_disown(os, FTAG); 3530 ztest_zd_fini(&zdtmp); 3531 3532 rw_exit(&ztest_name_lock); 3533 } 3534 3535 /* 3536 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3537 */ 3538 void 3539 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3540 { 3541 rw_enter(&ztest_name_lock, RW_READER); 3542 (void) ztest_snapshot_destroy(zd->zd_name, id); 3543 (void) ztest_snapshot_create(zd->zd_name, id); 3544 rw_exit(&ztest_name_lock); 3545 } 3546 3547 /* 3548 * Cleanup non-standard snapshots and clones. 
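 * These are the s1/c1/s2/c2/s3 snapshots and clones created by the
 * promote test below, keyed by dataset name and id. Anything left over
 * from a previous run or an interrupted iteration is destroyed in
 * reverse creation order so that dependents go before their origins.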
3549 */ 3550 void 3551 ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3552 { 3553 char snap1name[ZFS_MAX_DATASET_NAME_LEN]; 3554 char clone1name[ZFS_MAX_DATASET_NAME_LEN]; 3555 char snap2name[ZFS_MAX_DATASET_NAME_LEN]; 3556 char clone2name[ZFS_MAX_DATASET_NAME_LEN]; 3557 char snap3name[ZFS_MAX_DATASET_NAME_LEN]; 3558 int error; 3559 3560 (void) snprintf(snap1name, sizeof (snap1name), 3561 "%s@s1_%llu", osname, id); 3562 (void) snprintf(clone1name, sizeof (clone1name), 3563 "%s/c1_%llu", osname, id); 3564 (void) snprintf(snap2name, sizeof (snap2name), 3565 "%s@s2_%llu", clone1name, id); 3566 (void) snprintf(clone2name, sizeof (clone2name), 3567 "%s/c2_%llu", osname, id); 3568 (void) snprintf(snap3name, sizeof (snap3name), 3569 "%s@s3_%llu", clone1name, id); 3570 3571 error = dsl_destroy_head(clone2name); 3572 if (error && error != ENOENT) 3573 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error); 3574 error = dsl_destroy_snapshot(snap3name, B_FALSE); 3575 if (error && error != ENOENT) 3576 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error); 3577 error = dsl_destroy_snapshot(snap2name, B_FALSE); 3578 if (error && error != ENOENT) 3579 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error); 3580 error = dsl_destroy_head(clone1name); 3581 if (error && error != ENOENT) 3582 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error); 3583 error = dsl_destroy_snapshot(snap1name, B_FALSE); 3584 if (error && error != ENOENT) 3585 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error); 3586 } 3587 3588 /* 3589 * Verify dsl_dataset_promote handles EBUSY 3590 */ 3591 void 3592 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3593 { 3594 objset_t *os; 3595 char snap1name[ZFS_MAX_DATASET_NAME_LEN]; 3596 char clone1name[ZFS_MAX_DATASET_NAME_LEN]; 3597 char snap2name[ZFS_MAX_DATASET_NAME_LEN]; 3598 char clone2name[ZFS_MAX_DATASET_NAME_LEN]; 3599 char snap3name[ZFS_MAX_DATASET_NAME_LEN]; 3600 char *osname = zd->zd_name; 3601 int error; 3602 3603 rw_enter(&ztest_name_lock, RW_READER); 3604 3605 ztest_dsl_dataset_cleanup(osname, id); 3606 3607 (void) snprintf(snap1name, sizeof (snap1name), 3608 "%s@s1_%llu", osname, id); 3609 (void) snprintf(clone1name, sizeof (clone1name), 3610 "%s/c1_%llu", osname, id); 3611 (void) snprintf(snap2name, sizeof (snap2name), 3612 "%s@s2_%llu", clone1name, id); 3613 (void) snprintf(clone2name, sizeof (clone2name), 3614 "%s/c2_%llu", osname, id); 3615 (void) snprintf(snap3name, sizeof (snap3name), 3616 "%s@s3_%llu", clone1name, id); 3617 3618 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1); 3619 if (error && error != EEXIST) { 3620 if (error == ENOSPC) { 3621 ztest_record_enospc(FTAG); 3622 goto out; 3623 } 3624 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); 3625 } 3626 3627 error = dmu_objset_clone(clone1name, snap1name); 3628 if (error) { 3629 if (error == ENOSPC) { 3630 ztest_record_enospc(FTAG); 3631 goto out; 3632 } 3633 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error); 3634 } 3635 3636 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1); 3637 if (error && error != EEXIST) { 3638 if (error == ENOSPC) { 3639 ztest_record_enospc(FTAG); 3640 goto out; 3641 } 3642 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error); 3643 } 3644 3645 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1); 3646 if (error && error != EEXIST) { 3647 if (error == ENOSPC) { 3648 ztest_record_enospc(FTAG); 3649 goto out; 3650 } 3651 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, 
error); 3652 } 3653 3654 error = dmu_objset_clone(clone2name, snap3name); 3655 if (error) { 3656 if (error == ENOSPC) { 3657 ztest_record_enospc(FTAG); 3658 goto out; 3659 } 3660 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error); 3661 } 3662 3663 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os); 3664 if (error) 3665 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error); 3666 error = dsl_dataset_promote(clone2name, NULL); 3667 if (error == ENOSPC) { 3668 dmu_objset_disown(os, FTAG); 3669 ztest_record_enospc(FTAG); 3670 goto out; 3671 } 3672 if (error != EBUSY) 3673 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3674 error); 3675 dmu_objset_disown(os, FTAG); 3676 3677 out: 3678 ztest_dsl_dataset_cleanup(osname, id); 3679 3680 rw_exit(&ztest_name_lock); 3681 } 3682 3683 /* 3684 * Verify that dmu_object_{alloc,free} work as expected. 3685 */ 3686 void 3687 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3688 { 3689 ztest_od_t od[4]; 3690 int batchsize = sizeof (od) / sizeof (od[0]); 3691 3692 for (int b = 0; b < batchsize; b++) 3693 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3694 3695 /* 3696 * Destroy the previous batch of objects, create a new batch, 3697 * and do some I/O on the new objects. 3698 */ 3699 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3700 return; 3701 3702 while (ztest_random(4 * batchsize) != 0) 3703 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3704 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3705 } 3706 3707 /* 3708 * Verify that dmu_{read,write} work as expected. 3709 */ 3710 void 3711 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3712 { 3713 objset_t *os = zd->zd_os; 3714 ztest_od_t od[2]; 3715 dmu_tx_t *tx; 3716 int i, freeit, error; 3717 uint64_t n, s, txg; 3718 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3719 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3720 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3721 uint64_t regions = 997; 3722 uint64_t stride = 123456789ULL; 3723 uint64_t width = 40; 3724 int free_percent = 5; 3725 3726 /* 3727 * This test uses two objects, packobj and bigobj, that are always 3728 * updated together (i.e. in the same tx) so that their contents are 3729 * in sync and can be compared. Their contents relate to each other 3730 * in a simple way: packobj is a dense array of 'bufwad' structures, 3731 * while bigobj is a sparse array of the same bufwads. Specifically, 3732 * for any index n, there are three bufwads that should be identical: 3733 * 3734 * packobj, at offset n * sizeof (bufwad_t) 3735 * bigobj, at the head of the nth chunk 3736 * bigobj, at the tail of the nth chunk 3737 * 3738 * The chunk size is arbitrary. It doesn't have to be a power of two, 3739 * and it doesn't have any relation to the object blocksize. 3740 * The only requirement is that it can hold at least two bufwads. 3741 * 3742 * Normally, we write the bufwad to each of these locations. 3743 * However, free_percent of the time we instead write zeroes to 3744 * packobj and perform a dmu_free_range() on bigobj. By comparing 3745 * bigobj to packobj, we can verify that the DMU is correctly 3746 * tracking which parts of an object are allocated and free, 3747 * and that the contents of the allocated blocks are correct. 3748 */ 3749 3750 /* 3751 * Read the directory info. If it's the first time, set things up. 
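	 * The desired chunk size rides in the od 'gen' field, so if these
	 * objects already exist we inherit whatever chunk size their
	 * creator originally picked.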
3752 */ 3753 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3754 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3755 3756 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3757 return; 3758 3759 bigobj = od[0].od_object; 3760 packobj = od[1].od_object; 3761 chunksize = od[0].od_gen; 3762 ASSERT(chunksize == od[1].od_gen); 3763 3764 /* 3765 * Prefetch a random chunk of the big object. 3766 * Our aim here is to get some async reads in flight 3767 * for blocks that we may free below; the DMU should 3768 * handle this race correctly. 3769 */ 3770 n = ztest_random(regions) * stride + ztest_random(width); 3771 s = 1 + ztest_random(2 * width - 1); 3772 dmu_prefetch(os, bigobj, 0, n * chunksize, s * chunksize, 3773 ZIO_PRIORITY_SYNC_READ); 3774 3775 /* 3776 * Pick a random index and compute the offsets into packobj and bigobj. 3777 */ 3778 n = ztest_random(regions) * stride + ztest_random(width); 3779 s = 1 + ztest_random(width - 1); 3780 3781 packoff = n * sizeof (bufwad_t); 3782 packsize = s * sizeof (bufwad_t); 3783 3784 bigoff = n * chunksize; 3785 bigsize = s * chunksize; 3786 3787 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3788 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3789 3790 /* 3791 * free_percent of the time, free a range of bigobj rather than 3792 * overwriting it. 3793 */ 3794 freeit = (ztest_random(100) < free_percent); 3795 3796 /* 3797 * Read the current contents of our objects. 3798 */ 3799 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3800 DMU_READ_PREFETCH); 3801 ASSERT0(error); 3802 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3803 DMU_READ_PREFETCH); 3804 ASSERT0(error); 3805 3806 /* 3807 * Get a tx for the mods to both packobj and bigobj. 3808 */ 3809 tx = dmu_tx_create(os); 3810 3811 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3812 3813 if (freeit) 3814 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3815 else 3816 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3817 3818 /* This accounts for setting the checksum/compression. */ 3819 dmu_tx_hold_bonus(tx, bigobj); 3820 3821 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3822 if (txg == 0) { 3823 umem_free(packbuf, packsize); 3824 umem_free(bigbuf, bigsize); 3825 return; 3826 } 3827 3828 enum zio_checksum cksum; 3829 do { 3830 cksum = (enum zio_checksum) 3831 ztest_random_dsl_prop(ZFS_PROP_CHECKSUM); 3832 } while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS); 3833 dmu_object_set_checksum(os, bigobj, cksum, tx); 3834 3835 enum zio_compress comp; 3836 do { 3837 comp = (enum zio_compress) 3838 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION); 3839 } while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS); 3840 dmu_object_set_compress(os, bigobj, comp, tx); 3841 3842 /* 3843 * For each index from n to n + s, verify that the existing bufwad 3844 * in packobj matches the bufwads at the head and tail of the 3845 * corresponding chunk in bigobj. Then update all three bufwads 3846 * with the new values we want to write out. 
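	 * Within the buffers just read, the three copies of bufwad i are:
	 *
	 *	pack = packbuf + i * sizeof (bufwad_t)
	 *	bigH = bigbuf + i * chunksize			(chunk head)
	 *	bigT = bigH + chunksize - sizeof (bufwad_t)	(chunk tail)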
3847 */ 3848 for (i = 0; i < s; i++) { 3849 /* LINTED */ 3850 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3851 /* LINTED */ 3852 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3853 /* LINTED */ 3854 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3855 3856 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3857 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3858 3859 if (pack->bw_txg > txg) 3860 fatal(0, "future leak: got %llx, open txg is %llx", 3861 pack->bw_txg, txg); 3862 3863 if (pack->bw_data != 0 && pack->bw_index != n + i) 3864 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3865 pack->bw_index, n, i); 3866 3867 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3868 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3869 3870 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3871 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3872 3873 if (freeit) { 3874 bzero(pack, sizeof (bufwad_t)); 3875 } else { 3876 pack->bw_index = n + i; 3877 pack->bw_txg = txg; 3878 pack->bw_data = 1 + ztest_random(-2ULL); 3879 } 3880 *bigH = *pack; 3881 *bigT = *pack; 3882 } 3883 3884 /* 3885 * We've verified all the old bufwads, and made new ones. 3886 * Now write them out. 3887 */ 3888 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3889 3890 if (freeit) { 3891 if (ztest_opts.zo_verbose >= 7) { 3892 (void) printf("freeing offset %llx size %llx" 3893 " txg %llx\n", 3894 (u_longlong_t)bigoff, 3895 (u_longlong_t)bigsize, 3896 (u_longlong_t)txg); 3897 } 3898 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3899 } else { 3900 if (ztest_opts.zo_verbose >= 7) { 3901 (void) printf("writing offset %llx size %llx" 3902 " txg %llx\n", 3903 (u_longlong_t)bigoff, 3904 (u_longlong_t)bigsize, 3905 (u_longlong_t)txg); 3906 } 3907 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3908 } 3909 3910 dmu_tx_commit(tx); 3911 3912 /* 3913 * Sanity check the stuff we just wrote. 3914 */ 3915 { 3916 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3917 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3918 3919 VERIFY(0 == dmu_read(os, packobj, packoff, 3920 packsize, packcheck, DMU_READ_PREFETCH)); 3921 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3922 bigsize, bigcheck, DMU_READ_PREFETCH)); 3923 3924 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3925 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3926 3927 umem_free(packcheck, packsize); 3928 umem_free(bigcheck, bigsize); 3929 } 3930 3931 umem_free(packbuf, packsize); 3932 umem_free(bigbuf, bigsize); 3933 } 3934 3935 void 3936 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3937 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3938 { 3939 uint64_t i; 3940 bufwad_t *pack; 3941 bufwad_t *bigH; 3942 bufwad_t *bigT; 3943 3944 /* 3945 * For each index from n to n + s, verify that the existing bufwad 3946 * in packobj matches the bufwads at the head and tail of the 3947 * corresponding chunk in bigobj. Then update all three bufwads 3948 * with the new values we want to write out. 
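	 * This is the zcopy variant of the verification loop in
	 * ztest_dmu_read_write(); the only difference is that the bufwads
	 * are always rewritten here, since the zcopy test never frees
	 * ranges of bigobj.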
3949 */ 3950 for (i = 0; i < s; i++) { 3951 /* LINTED */ 3952 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3953 /* LINTED */ 3954 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3955 /* LINTED */ 3956 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3957 3958 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3959 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3960 3961 if (pack->bw_txg > txg) 3962 fatal(0, "future leak: got %llx, open txg is %llx", 3963 pack->bw_txg, txg); 3964 3965 if (pack->bw_data != 0 && pack->bw_index != n + i) 3966 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3967 pack->bw_index, n, i); 3968 3969 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3970 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3971 3972 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3973 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3974 3975 pack->bw_index = n + i; 3976 pack->bw_txg = txg; 3977 pack->bw_data = 1 + ztest_random(-2ULL); 3978 3979 *bigH = *pack; 3980 *bigT = *pack; 3981 } 3982 } 3983 3984 void 3985 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3986 { 3987 objset_t *os = zd->zd_os; 3988 ztest_od_t od[2]; 3989 dmu_tx_t *tx; 3990 uint64_t i; 3991 int error; 3992 uint64_t n, s, txg; 3993 bufwad_t *packbuf, *bigbuf; 3994 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3995 uint64_t blocksize = ztest_random_blocksize(); 3996 uint64_t chunksize = blocksize; 3997 uint64_t regions = 997; 3998 uint64_t stride = 123456789ULL; 3999 uint64_t width = 9; 4000 dmu_buf_t *bonus_db; 4001 arc_buf_t **bigbuf_arcbufs; 4002 dmu_object_info_t doi; 4003 4004 /* 4005 * This test uses two objects, packobj and bigobj, that are always 4006 * updated together (i.e. in the same tx) so that their contents are 4007 * in sync and can be compared. Their contents relate to each other 4008 * in a simple way: packobj is a dense array of 'bufwad' structures, 4009 * while bigobj is a sparse array of the same bufwads. Specifically, 4010 * for any index n, there are three bufwads that should be identical: 4011 * 4012 * packobj, at offset n * sizeof (bufwad_t) 4013 * bigobj, at the head of the nth chunk 4014 * bigobj, at the tail of the nth chunk 4015 * 4016 * The chunk size is set equal to bigobj block size so that 4017 * dmu_assign_arcbuf() can be tested for object updates. 4018 */ 4019 4020 /* 4021 * Read the directory info. If it's the first time, set things up. 4022 */ 4023 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4024 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 4025 4026 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4027 return; 4028 4029 bigobj = od[0].od_object; 4030 packobj = od[1].od_object; 4031 blocksize = od[0].od_blocksize; 4032 chunksize = blocksize; 4033 ASSERT(chunksize == od[1].od_gen); 4034 4035 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 4036 VERIFY(ISP2(doi.doi_data_block_size)); 4037 VERIFY(chunksize == doi.doi_data_block_size); 4038 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 4039 4040 /* 4041 * Pick a random index and compute the offsets into packobj and bigobj. 
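	 * packobj offsets scale by sizeof (bufwad_t); bigobj offsets scale
	 * by chunksize, which was forced equal to bigobj's block size above
	 * so that each chunk lines up with exactly one block for
	 * dmu_assign_arcbuf().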
4042 */ 4043 n = ztest_random(regions) * stride + ztest_random(width); 4044 s = 1 + ztest_random(width - 1); 4045 4046 packoff = n * sizeof (bufwad_t); 4047 packsize = s * sizeof (bufwad_t); 4048 4049 bigoff = n * chunksize; 4050 bigsize = s * chunksize; 4051 4052 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 4053 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 4054 4055 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 4056 4057 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 4058 4059 /* 4060 * Iteration 0 test zcopy for DB_UNCACHED dbufs. 4061 * Iteration 1 test zcopy to already referenced dbufs. 4062 * Iteration 2 test zcopy to dirty dbuf in the same txg. 4063 * Iteration 3 test zcopy to dbuf dirty in previous txg. 4064 * Iteration 4 test zcopy when dbuf is no longer dirty. 4065 * Iteration 5 test zcopy when it can't be done. 4066 * Iteration 6 one more zcopy write. 4067 */ 4068 for (i = 0; i < 7; i++) { 4069 uint64_t j; 4070 uint64_t off; 4071 4072 /* 4073 * In iteration 5 (i == 5) use arcbufs 4074 * that don't match bigobj blksz to test 4075 * dmu_assign_arcbuf() when it can't directly 4076 * assign an arcbuf to a dbuf. 4077 */ 4078 for (j = 0; j < s; j++) { 4079 if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) { 4080 bigbuf_arcbufs[j] = 4081 dmu_request_arcbuf(bonus_db, chunksize); 4082 } else { 4083 bigbuf_arcbufs[2 * j] = 4084 dmu_request_arcbuf(bonus_db, chunksize / 2); 4085 bigbuf_arcbufs[2 * j + 1] = 4086 dmu_request_arcbuf(bonus_db, chunksize / 2); 4087 } 4088 } 4089 4090 /* 4091 * Get a tx for the mods to both packobj and bigobj. 4092 */ 4093 tx = dmu_tx_create(os); 4094 4095 dmu_tx_hold_write(tx, packobj, packoff, packsize); 4096 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 4097 4098 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4099 if (txg == 0) { 4100 umem_free(packbuf, packsize); 4101 umem_free(bigbuf, bigsize); 4102 for (j = 0; j < s; j++) { 4103 if (i != 5 || 4104 chunksize < (SPA_MINBLOCKSIZE * 2)) { 4105 dmu_return_arcbuf(bigbuf_arcbufs[j]); 4106 } else { 4107 dmu_return_arcbuf( 4108 bigbuf_arcbufs[2 * j]); 4109 dmu_return_arcbuf( 4110 bigbuf_arcbufs[2 * j + 1]); 4111 } 4112 } 4113 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 4114 dmu_buf_rele(bonus_db, FTAG); 4115 return; 4116 } 4117 4118 /* 4119 * 50% of the time don't read objects in the 1st iteration to 4120 * test dmu_assign_arcbuf() for the case when there're no 4121 * existing dbufs for the specified offsets. 4122 */ 4123 if (i != 0 || ztest_random(2) != 0) { 4124 error = dmu_read(os, packobj, packoff, 4125 packsize, packbuf, DMU_READ_PREFETCH); 4126 ASSERT0(error); 4127 error = dmu_read(os, bigobj, bigoff, bigsize, 4128 bigbuf, DMU_READ_PREFETCH); 4129 ASSERT0(error); 4130 } 4131 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 4132 n, chunksize, txg); 4133 4134 /* 4135 * We've verified all the old bufwads, and made new ones. 4136 * Now write them out. 
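 *
 * packobj is written with an ordinary dmu_write().  bigobj, by
 * contrast, is written by handing the pre-filled arc bufs to
 * dmu_assign_arcbuf() -- one buf per chunk, or two half-chunk bufs in
 * iteration 5 (when the chunk is large enough to split), where the arc
 * buf size deliberately mismatches the bigobj block size.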
4137 */ 4138 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 4139 if (ztest_opts.zo_verbose >= 7) { 4140 (void) printf("writing offset %llx size %llx" 4141 " txg %llx\n", 4142 (u_longlong_t)bigoff, 4143 (u_longlong_t)bigsize, 4144 (u_longlong_t)txg); 4145 } 4146 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 4147 dmu_buf_t *dbt; 4148 if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) { 4149 bcopy((caddr_t)bigbuf + (off - bigoff), 4150 bigbuf_arcbufs[j]->b_data, chunksize); 4151 } else { 4152 bcopy((caddr_t)bigbuf + (off - bigoff), 4153 bigbuf_arcbufs[2 * j]->b_data, 4154 chunksize / 2); 4155 bcopy((caddr_t)bigbuf + (off - bigoff) + 4156 chunksize / 2, 4157 bigbuf_arcbufs[2 * j + 1]->b_data, 4158 chunksize / 2); 4159 } 4160 4161 if (i == 1) { 4162 VERIFY(dmu_buf_hold(os, bigobj, off, 4163 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 4164 } 4165 if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) { 4166 dmu_assign_arcbuf(bonus_db, off, 4167 bigbuf_arcbufs[j], tx); 4168 } else { 4169 dmu_assign_arcbuf(bonus_db, off, 4170 bigbuf_arcbufs[2 * j], tx); 4171 dmu_assign_arcbuf(bonus_db, 4172 off + chunksize / 2, 4173 bigbuf_arcbufs[2 * j + 1], tx); 4174 } 4175 if (i == 1) { 4176 dmu_buf_rele(dbt, FTAG); 4177 } 4178 } 4179 dmu_tx_commit(tx); 4180 4181 /* 4182 * Sanity check the stuff we just wrote. 4183 */ 4184 { 4185 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 4186 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 4187 4188 VERIFY(0 == dmu_read(os, packobj, packoff, 4189 packsize, packcheck, DMU_READ_PREFETCH)); 4190 VERIFY(0 == dmu_read(os, bigobj, bigoff, 4191 bigsize, bigcheck, DMU_READ_PREFETCH)); 4192 4193 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 4194 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 4195 4196 umem_free(packcheck, packsize); 4197 umem_free(bigcheck, bigsize); 4198 } 4199 if (i == 2) { 4200 txg_wait_open(dmu_objset_pool(os), 0); 4201 } else if (i == 3) { 4202 txg_wait_synced(dmu_objset_pool(os), 0); 4203 } 4204 } 4205 4206 dmu_buf_rele(bonus_db, FTAG); 4207 umem_free(packbuf, packsize); 4208 umem_free(bigbuf, bigsize); 4209 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 4210 } 4211 4212 /* ARGSUSED */ 4213 void 4214 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 4215 { 4216 ztest_od_t od[1]; 4217 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 4218 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4219 4220 /* 4221 * Have multiple threads write to large offsets in an object 4222 * to verify that parallel writes to an object -- even to the 4223 * same blocks within the object -- doesn't cause any trouble. 
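 *
 * All callers share the ID_PARALLEL object, and the offset above is
 * drawn from a limited set of SPA_MAXBLOCKSIZE-aligned locations, so
 * concurrent threads can easily end up writing to the very same blocks.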
4224 */ 4225 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4226 4227 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4228 return; 4229 4230 while (ztest_random(10) != 0) 4231 ztest_io(zd, od[0].od_object, offset); 4232 } 4233 4234 void 4235 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 4236 { 4237 ztest_od_t od[1]; 4238 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 4239 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4240 uint64_t count = ztest_random(20) + 1; 4241 uint64_t blocksize = ztest_random_blocksize(); 4242 void *data; 4243 4244 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4245 4246 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4247 return; 4248 4249 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) 4250 return; 4251 4252 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); 4253 4254 data = umem_zalloc(blocksize, UMEM_NOFAIL); 4255 4256 while (ztest_random(count) != 0) { 4257 uint64_t randoff = offset + (ztest_random(count) * blocksize); 4258 if (ztest_write(zd, od[0].od_object, randoff, blocksize, 4259 data) != 0) 4260 break; 4261 while (ztest_random(4) != 0) 4262 ztest_io(zd, od[0].od_object, randoff); 4263 } 4264 4265 umem_free(data, blocksize); 4266 } 4267 4268 /* 4269 * Verify that zap_{create,destroy,add,remove,update} work as expected. 4270 */ 4271 #define ZTEST_ZAP_MIN_INTS 1 4272 #define ZTEST_ZAP_MAX_INTS 4 4273 #define ZTEST_ZAP_MAX_PROPS 1000 4274 4275 void 4276 ztest_zap(ztest_ds_t *zd, uint64_t id) 4277 { 4278 objset_t *os = zd->zd_os; 4279 ztest_od_t od[1]; 4280 uint64_t object; 4281 uint64_t txg, last_txg; 4282 uint64_t value[ZTEST_ZAP_MAX_INTS]; 4283 uint64_t zl_ints, zl_intsize, prop; 4284 int i, ints; 4285 dmu_tx_t *tx; 4286 char propname[100], txgname[100]; 4287 int error; 4288 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 4289 4290 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4291 4292 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4293 return; 4294 4295 object = od[0].od_object; 4296 4297 /* 4298 * Generate a known hash collision, and verify that 4299 * we can lookup and remove both entries. 4300 */ 4301 tx = dmu_tx_create(os); 4302 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4303 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4304 if (txg == 0) 4305 return; 4306 for (i = 0; i < 2; i++) { 4307 value[i] = i; 4308 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), 4309 1, &value[i], tx)); 4310 } 4311 for (i = 0; i < 2; i++) { 4312 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 4313 sizeof (uint64_t), 1, &value[i], tx)); 4314 VERIFY3U(0, ==, 4315 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 4316 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4317 ASSERT3U(zl_ints, ==, 1); 4318 } 4319 for (i = 0; i < 2; i++) { 4320 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx)); 4321 } 4322 dmu_tx_commit(tx); 4323 4324 /* 4325 * Generate a buch of random entries. 4326 */ 4327 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 4328 4329 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4330 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4331 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4332 bzero(value, sizeof (value)); 4333 last_txg = 0; 4334 4335 /* 4336 * If these zap entries already exist, validate their contents. 
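 *
 * The expected contents follow the invariant established below: the
 * txg_%llu entry records the txg of the last update, and the nth
 * element of the prop_%llu value is that txg + object + n.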
4337 */ 4338 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4339 if (error == 0) { 4340 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4341 ASSERT3U(zl_ints, ==, 1); 4342 4343 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 4344 zl_ints, &last_txg) == 0); 4345 4346 VERIFY(zap_length(os, object, propname, &zl_intsize, 4347 &zl_ints) == 0); 4348 4349 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4350 ASSERT3U(zl_ints, ==, ints); 4351 4352 VERIFY(zap_lookup(os, object, propname, zl_intsize, 4353 zl_ints, value) == 0); 4354 4355 for (i = 0; i < ints; i++) { 4356 ASSERT3U(value[i], ==, last_txg + object + i); 4357 } 4358 } else { 4359 ASSERT3U(error, ==, ENOENT); 4360 } 4361 4362 /* 4363 * Atomically update two entries in our zap object. 4364 * The first is named txg_%llu, and contains the txg 4365 * in which the property was last updated. The second 4366 * is named prop_%llu, and the nth element of its value 4367 * should be txg + object + n. 4368 */ 4369 tx = dmu_tx_create(os); 4370 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4371 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4372 if (txg == 0) 4373 return; 4374 4375 if (last_txg > txg) 4376 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 4377 4378 for (i = 0; i < ints; i++) 4379 value[i] = txg + object + i; 4380 4381 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 4382 1, &txg, tx)); 4383 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 4384 ints, value, tx)); 4385 4386 dmu_tx_commit(tx); 4387 4388 /* 4389 * Remove a random pair of entries. 4390 */ 4391 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4392 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4393 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4394 4395 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4396 4397 if (error == ENOENT) 4398 return; 4399 4400 ASSERT0(error); 4401 4402 tx = dmu_tx_create(os); 4403 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4404 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4405 if (txg == 0) 4406 return; 4407 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 4408 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 4409 dmu_tx_commit(tx); 4410 } 4411 4412 /* 4413 * Testcase to test the upgrading of a microzap to fatzap. 4414 */ 4415 void 4416 ztest_fzap(ztest_ds_t *zd, uint64_t id) 4417 { 4418 objset_t *os = zd->zd_os; 4419 ztest_od_t od[1]; 4420 uint64_t object, txg; 4421 4422 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4423 4424 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4425 return; 4426 4427 object = od[0].od_object; 4428 4429 /* 4430 * Add entries to this ZAP and make sure it spills over 4431 * and gets upgraded to a fatzap. Also, since we are adding 4432 * 2050 entries we should see ptrtbl growth and leaf-block split. 
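 *
 * (A microzap is confined to a single block of 64-byte entries, so it
 * tops out at roughly two thousand entries; 2050 inserts are therefore
 * more than enough to force the conversion.)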
4433 */ 4434 for (int i = 0; i < 2050; i++) { 4435 char name[ZFS_MAX_DATASET_NAME_LEN]; 4436 uint64_t value = i; 4437 dmu_tx_t *tx; 4438 int error; 4439 4440 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 4441 id, value); 4442 4443 tx = dmu_tx_create(os); 4444 dmu_tx_hold_zap(tx, object, B_TRUE, name); 4445 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4446 if (txg == 0) 4447 return; 4448 error = zap_add(os, object, name, sizeof (uint64_t), 1, 4449 &value, tx); 4450 ASSERT(error == 0 || error == EEXIST); 4451 dmu_tx_commit(tx); 4452 } 4453 } 4454 4455 /* ARGSUSED */ 4456 void 4457 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 4458 { 4459 objset_t *os = zd->zd_os; 4460 ztest_od_t od[1]; 4461 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 4462 dmu_tx_t *tx; 4463 int i, namelen, error; 4464 int micro = ztest_random(2); 4465 char name[20], string_value[20]; 4466 void *data; 4467 4468 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 4469 4470 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4471 return; 4472 4473 object = od[0].od_object; 4474 4475 /* 4476 * Generate a random name of the form 'xxx.....' where each 4477 * x is a random printable character and the dots are dots. 4478 * There are 94 such characters, and the name length goes from 4479 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 4480 */ 4481 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 4482 4483 for (i = 0; i < 3; i++) 4484 name[i] = '!' + ztest_random('~' - '!' + 1); 4485 for (; i < namelen - 1; i++) 4486 name[i] = '.'; 4487 name[i] = '\0'; 4488 4489 if ((namelen & 1) || micro) { 4490 wsize = sizeof (txg); 4491 wc = 1; 4492 data = &txg; 4493 } else { 4494 wsize = 1; 4495 wc = namelen; 4496 data = string_value; 4497 } 4498 4499 count = -1ULL; 4500 VERIFY0(zap_count(os, object, &count)); 4501 ASSERT(count != -1ULL); 4502 4503 /* 4504 * Select an operation: length, lookup, add, update, remove. 4505 */ 4506 i = ztest_random(5); 4507 4508 if (i >= 2) { 4509 tx = dmu_tx_create(os); 4510 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4511 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4512 if (txg == 0) 4513 return; 4514 bcopy(name, string_value, namelen); 4515 } else { 4516 tx = NULL; 4517 txg = 0; 4518 bzero(string_value, namelen); 4519 } 4520 4521 switch (i) { 4522 4523 case 0: 4524 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 4525 if (error == 0) { 4526 ASSERT3U(wsize, ==, zl_wsize); 4527 ASSERT3U(wc, ==, zl_wc); 4528 } else { 4529 ASSERT3U(error, ==, ENOENT); 4530 } 4531 break; 4532 4533 case 1: 4534 error = zap_lookup(os, object, name, wsize, wc, data); 4535 if (error == 0) { 4536 if (data == string_value && 4537 bcmp(name, data, namelen) != 0) 4538 fatal(0, "name '%s' != val '%s' len %d", 4539 name, data, namelen); 4540 } else { 4541 ASSERT3U(error, ==, ENOENT); 4542 } 4543 break; 4544 4545 case 2: 4546 error = zap_add(os, object, name, wsize, wc, data, tx); 4547 ASSERT(error == 0 || error == EEXIST); 4548 break; 4549 4550 case 3: 4551 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4552 break; 4553 4554 case 4: 4555 error = zap_remove(os, object, name, tx); 4556 ASSERT(error == 0 || error == ENOENT); 4557 break; 4558 } 4559 4560 if (tx != NULL) 4561 dmu_tx_commit(tx); 4562 } 4563 4564 /* 4565 * Commit callback data. 
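 *
 * Each registered callback carries one of these: zcd_txg is the txg the
 * callback was registered against (0 if the tx was never assigned),
 * zcd_added says whether it was put on the global zcl.zcl_callbacks
 * list, and zcd_called whether the callback has already fired.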
4566 */ 4567 typedef struct ztest_cb_data { 4568 list_node_t zcd_node; 4569 uint64_t zcd_txg; 4570 int zcd_expected_err; 4571 boolean_t zcd_added; 4572 boolean_t zcd_called; 4573 spa_t *zcd_spa; 4574 } ztest_cb_data_t; 4575 4576 /* This is the actual commit callback function */ 4577 static void 4578 ztest_commit_callback(void *arg, int error) 4579 { 4580 ztest_cb_data_t *data = arg; 4581 uint64_t synced_txg; 4582 4583 VERIFY(data != NULL); 4584 VERIFY3S(data->zcd_expected_err, ==, error); 4585 VERIFY(!data->zcd_called); 4586 4587 synced_txg = spa_last_synced_txg(data->zcd_spa); 4588 if (data->zcd_txg > synced_txg) 4589 fatal(0, "commit callback of txg %" PRIu64 " called prematurely" 4590 ", last synced txg = %" PRIu64 "\n", data->zcd_txg, 4591 synced_txg); 4592 4593 data->zcd_called = B_TRUE; 4594 4595 if (error == ECANCELED) { 4596 ASSERT0(data->zcd_txg); 4597 ASSERT(!data->zcd_added); 4598 4599 /* 4600 * The private callback data should be destroyed here, but 4601 * since we are going to check the zcd_called field after 4602 * dmu_tx_abort(), we will destroy it there. 4603 */ 4604 return; 4605 } 4606 4607 /* Was this callback added to the global callback list? */ 4608 if (!data->zcd_added) 4609 goto out; 4610 4611 ASSERT3U(data->zcd_txg, !=, 0); 4612 4613 /* Remove our callback from the list */ 4614 mutex_enter(&zcl.zcl_callbacks_lock); 4615 list_remove(&zcl.zcl_callbacks, data); 4616 mutex_exit(&zcl.zcl_callbacks_lock); 4617 4618 out: 4619 umem_free(data, sizeof (ztest_cb_data_t)); 4620 } 4621 4622 /* Allocate and initialize callback data structure */ 4623 static ztest_cb_data_t * 4624 ztest_create_cb_data(objset_t *os, uint64_t txg) 4625 { 4626 ztest_cb_data_t *cb_data; 4627 4628 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 4629 4630 cb_data->zcd_txg = txg; 4631 cb_data->zcd_spa = dmu_objset_spa(os); 4632 4633 return (cb_data); 4634 } 4635 4636 /* 4637 * If a number of txgs equal to this threshold have been created after a commit 4638 * callback has been registered but not called, then we assume there is an 4639 * implementation bug. 4640 */ 4641 #define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) 4642 4643 /* 4644 * Commit callback test. 4645 */ 4646 void 4647 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 4648 { 4649 objset_t *os = zd->zd_os; 4650 ztest_od_t od[1]; 4651 dmu_tx_t *tx; 4652 ztest_cb_data_t *cb_data[3], *tmp_cb; 4653 uint64_t old_txg, txg; 4654 int i, error; 4655 4656 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4657 4658 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4659 return; 4660 4661 tx = dmu_tx_create(os); 4662 4663 cb_data[0] = ztest_create_cb_data(os, 0); 4664 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 4665 4666 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); 4667 4668 /* Every once in a while, abort the transaction on purpose */ 4669 if (ztest_random(100) == 0) 4670 error = -1; 4671 4672 if (!error) 4673 error = dmu_tx_assign(tx, TXG_NOWAIT); 4674 4675 txg = error ? 0 : dmu_tx_get_txg(tx); 4676 4677 cb_data[0]->zcd_txg = txg; 4678 cb_data[1] = ztest_create_cb_data(os, txg); 4679 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 4680 4681 if (error) { 4682 /* 4683 * It's not a strict requirement to call the registered 4684 * callbacks from inside dmu_tx_abort(), but that's what 4685 * it's supposed to happen in the current implementation 4686 * so we will check for that. 
4687 */ 4688 for (i = 0; i < 2; i++) { 4689 cb_data[i]->zcd_expected_err = ECANCELED; 4690 VERIFY(!cb_data[i]->zcd_called); 4691 } 4692 4693 dmu_tx_abort(tx); 4694 4695 for (i = 0; i < 2; i++) { 4696 VERIFY(cb_data[i]->zcd_called); 4697 umem_free(cb_data[i], sizeof (ztest_cb_data_t)); 4698 } 4699 4700 return; 4701 } 4702 4703 cb_data[2] = ztest_create_cb_data(os, txg); 4704 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]); 4705 4706 /* 4707 * Read existing data to make sure there isn't a future leak. 4708 */ 4709 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t), 4710 &old_txg, DMU_READ_PREFETCH)); 4711 4712 if (old_txg > txg) 4713 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64, 4714 old_txg, txg); 4715 4716 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); 4717 4718 mutex_enter(&zcl.zcl_callbacks_lock); 4719 4720 /* 4721 * Since commit callbacks don't have any ordering requirement and since 4722 * it is theoretically possible for a commit callback to be called 4723 * after an arbitrary amount of time has elapsed since its txg has been 4724 * synced, it is difficult to reliably determine whether a commit 4725 * callback hasn't been called due to high load or due to a flawed 4726 * implementation. 4727 * 4728 * In practice, we will assume that if after a certain number of txgs a 4729 * commit callback hasn't been called, then most likely there's an 4730 * implementation bug.. 4731 */ 4732 tmp_cb = list_head(&zcl.zcl_callbacks); 4733 if (tmp_cb != NULL && 4734 (txg - ZTEST_COMMIT_CALLBACK_THRESH) > tmp_cb->zcd_txg) { 4735 fatal(0, "Commit callback threshold exceeded, oldest txg: %" 4736 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); 4737 } 4738 4739 /* 4740 * Let's find the place to insert our callbacks. 4741 * 4742 * Even though the list is ordered by txg, it is possible for the 4743 * insertion point to not be the end because our txg may already be 4744 * quiescing at this point and other callbacks in the open txg 4745 * (from other objsets) may have sneaked in. 
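 *
 * So, walk backwards from the tail until we find an entry whose txg is
 * not greater than ours, and insert our callbacks right after it (or at
 * the head of the list if no such entry exists).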
4746 */ 4747 tmp_cb = list_tail(&zcl.zcl_callbacks); 4748 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 4749 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 4750 4751 /* Add the 3 callbacks to the list */ 4752 for (i = 0; i < 3; i++) { 4753 if (tmp_cb == NULL) 4754 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 4755 else 4756 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 4757 cb_data[i]); 4758 4759 cb_data[i]->zcd_added = B_TRUE; 4760 VERIFY(!cb_data[i]->zcd_called); 4761 4762 tmp_cb = cb_data[i]; 4763 } 4764 4765 mutex_exit(&zcl.zcl_callbacks_lock); 4766 4767 dmu_tx_commit(tx); 4768 } 4769 4770 /* ARGSUSED */ 4771 void 4772 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 4773 { 4774 zfs_prop_t proplist[] = { 4775 ZFS_PROP_CHECKSUM, 4776 ZFS_PROP_COMPRESSION, 4777 ZFS_PROP_COPIES, 4778 ZFS_PROP_DEDUP 4779 }; 4780 4781 rw_enter(&ztest_name_lock, RW_READER); 4782 4783 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) 4784 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 4785 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 4786 4787 rw_exit(&ztest_name_lock); 4788 } 4789 4790 /* ARGSUSED */ 4791 void 4792 ztest_remap_blocks(ztest_ds_t *zd, uint64_t id) 4793 { 4794 rw_enter(&ztest_name_lock, RW_READER); 4795 4796 int error = dmu_objset_remap_indirects(zd->zd_name); 4797 if (error == ENOSPC) 4798 error = 0; 4799 ASSERT0(error); 4800 4801 rw_exit(&ztest_name_lock); 4802 } 4803 4804 /* ARGSUSED */ 4805 void 4806 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 4807 { 4808 nvlist_t *props = NULL; 4809 4810 rw_enter(&ztest_name_lock, RW_READER); 4811 4812 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO, 4813 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); 4814 4815 VERIFY0(spa_prop_get(ztest_spa, &props)); 4816 4817 if (ztest_opts.zo_verbose >= 6) 4818 dump_nvlist(props, 4); 4819 4820 nvlist_free(props); 4821 4822 rw_exit(&ztest_name_lock); 4823 } 4824 4825 static int 4826 user_release_one(const char *snapname, const char *holdname) 4827 { 4828 nvlist_t *snaps, *holds; 4829 int error; 4830 4831 snaps = fnvlist_alloc(); 4832 holds = fnvlist_alloc(); 4833 fnvlist_add_boolean(holds, holdname); 4834 fnvlist_add_nvlist(snaps, snapname, holds); 4835 fnvlist_free(holds); 4836 error = dsl_dataset_user_release(snaps, NULL); 4837 fnvlist_free(snaps); 4838 return (error); 4839 } 4840 4841 /* 4842 * Test snapshot hold/release and deferred destroy. 4843 */ 4844 void 4845 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 4846 { 4847 int error; 4848 objset_t *os = zd->zd_os; 4849 objset_t *origin; 4850 char snapname[100]; 4851 char fullname[100]; 4852 char clonename[100]; 4853 char tag[100]; 4854 char osname[ZFS_MAX_DATASET_NAME_LEN]; 4855 nvlist_t *holds; 4856 4857 rw_enter(&ztest_name_lock, RW_READER); 4858 4859 dmu_objset_name(os, osname); 4860 4861 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id); 4862 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname); 4863 (void) snprintf(clonename, sizeof (clonename), 4864 "%s/ch1_%llu", osname, id); 4865 (void) snprintf(tag, sizeof (tag), "tag_%llu", id); 4866 4867 /* 4868 * Clean up from any previous run. 
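 *
 * Each of these steps may legitimately fail with ENOENT (or ESRCH for
 * the hold release) if the previous run never created, or already tore
 * down, the object in question; any other error trips the ASSERT0s.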
4869 */ 4870 error = dsl_destroy_head(clonename); 4871 if (error != ENOENT) 4872 ASSERT0(error); 4873 error = user_release_one(fullname, tag); 4874 if (error != ESRCH && error != ENOENT) 4875 ASSERT0(error); 4876 error = dsl_destroy_snapshot(fullname, B_FALSE); 4877 if (error != ENOENT) 4878 ASSERT0(error); 4879 4880 /* 4881 * Create snapshot, clone it, mark snap for deferred destroy, 4882 * destroy clone, verify snap was also destroyed. 4883 */ 4884 error = dmu_objset_snapshot_one(osname, snapname); 4885 if (error) { 4886 if (error == ENOSPC) { 4887 ztest_record_enospc("dmu_objset_snapshot"); 4888 goto out; 4889 } 4890 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4891 } 4892 4893 error = dmu_objset_clone(clonename, fullname); 4894 if (error) { 4895 if (error == ENOSPC) { 4896 ztest_record_enospc("dmu_objset_clone"); 4897 goto out; 4898 } 4899 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error); 4900 } 4901 4902 error = dsl_destroy_snapshot(fullname, B_TRUE); 4903 if (error) { 4904 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4905 fullname, error); 4906 } 4907 4908 error = dsl_destroy_head(clonename); 4909 if (error) 4910 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error); 4911 4912 error = dmu_objset_hold(fullname, FTAG, &origin); 4913 if (error != ENOENT) 4914 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4915 4916 /* 4917 * Create snapshot, add temporary hold, verify that we can't 4918 * destroy a held snapshot, mark for deferred destroy, 4919 * release hold, verify snapshot was destroyed. 4920 */ 4921 error = dmu_objset_snapshot_one(osname, snapname); 4922 if (error) { 4923 if (error == ENOSPC) { 4924 ztest_record_enospc("dmu_objset_snapshot"); 4925 goto out; 4926 } 4927 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4928 } 4929 4930 holds = fnvlist_alloc(); 4931 fnvlist_add_string(holds, fullname, tag); 4932 error = dsl_dataset_user_hold(holds, 0, NULL); 4933 fnvlist_free(holds); 4934 4935 if (error == ENOSPC) { 4936 ztest_record_enospc("dsl_dataset_user_hold"); 4937 goto out; 4938 } else if (error) { 4939 fatal(0, "dsl_dataset_user_hold(%s, %s) = %u", 4940 fullname, tag, error); 4941 } 4942 4943 error = dsl_destroy_snapshot(fullname, B_FALSE); 4944 if (error != EBUSY) { 4945 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d", 4946 fullname, error); 4947 } 4948 4949 error = dsl_destroy_snapshot(fullname, B_TRUE); 4950 if (error) { 4951 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4952 fullname, error); 4953 } 4954 4955 error = user_release_one(fullname, tag); 4956 if (error) 4957 fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error); 4958 4959 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT); 4960 4961 out: 4962 rw_exit(&ztest_name_lock); 4963 } 4964 4965 /* 4966 * Inject random faults into the on-disk data. 
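 *
 * In outline: half the time we target a leaf of a regular (or slog)
 * top-level vdev, the other half an l2cache device.  We may then toggle
 * a device online/offline or make it unreadable/unwritable, and --
 * provided the pool can tolerate the damage -- we write garbage words
 * directly into the chosen leaf's backing file, at offsets picked so
 * that no single logical block is corrupted on two different leaves.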
4967 */ 4968 /* ARGSUSED */ 4969 void 4970 ztest_fault_inject(ztest_ds_t *zd, uint64_t id) 4971 { 4972 ztest_shared_t *zs = ztest_shared; 4973 spa_t *spa = ztest_spa; 4974 int fd; 4975 uint64_t offset; 4976 uint64_t leaves; 4977 uint64_t bad = 0x1990c0ffeedecade; 4978 uint64_t top, leaf; 4979 char path0[MAXPATHLEN]; 4980 char pathrand[MAXPATHLEN]; 4981 size_t fsize; 4982 int bshift = SPA_MAXBLOCKSHIFT + 2; 4983 int iters = 1000; 4984 int maxfaults; 4985 int mirror_save; 4986 vdev_t *vd0 = NULL; 4987 uint64_t guid0 = 0; 4988 boolean_t islog = B_FALSE; 4989 4990 mutex_enter(&ztest_vdev_lock); 4991 maxfaults = MAXFAULTS(); 4992 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 4993 mirror_save = zs->zs_mirrors; 4994 mutex_exit(&ztest_vdev_lock); 4995 4996 ASSERT(leaves >= 1); 4997 4998 /* 4999 * Grab the name lock as reader. There are some operations 5000 * which don't like to have their vdevs changed while 5001 * they are in progress (i.e. spa_change_guid). Those 5002 * operations will have grabbed the name lock as writer. 5003 */ 5004 rw_enter(&ztest_name_lock, RW_READER); 5005 5006 /* 5007 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd. 5008 */ 5009 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 5010 5011 if (ztest_random(2) == 0) { 5012 /* 5013 * Inject errors on a normal data device or slog device. 5014 */ 5015 top = ztest_random_vdev_top(spa, B_TRUE); 5016 leaf = ztest_random(leaves) + zs->zs_splits; 5017 5018 /* 5019 * Generate paths to the first leaf in this top-level vdev, 5020 * and to the random leaf we selected. We'll induce transient 5021 * write failures and random online/offline activity on leaf 0, 5022 * and we'll write random garbage to the randomly chosen leaf. 5023 */ 5024 (void) snprintf(path0, sizeof (path0), ztest_dev_template, 5025 ztest_opts.zo_dir, ztest_opts.zo_pool, 5026 top * leaves + zs->zs_splits); 5027 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, 5028 ztest_opts.zo_dir, ztest_opts.zo_pool, 5029 top * leaves + leaf); 5030 5031 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); 5032 if (vd0 != NULL && vd0->vdev_top->vdev_islog) 5033 islog = B_TRUE; 5034 5035 /* 5036 * If the top-level vdev needs to be resilvered 5037 * then we only allow faults on the device that is 5038 * resilvering. 5039 */ 5040 if (vd0 != NULL && maxfaults != 1 && 5041 (!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) || 5042 vd0->vdev_resilver_txg != 0)) { 5043 /* 5044 * Make vd0 explicitly claim to be unreadable, 5045 * or unwriteable, or reach behind its back 5046 * and close the underlying fd. We can do this if 5047 * maxfaults == 0 because we'll fail and reexecute, 5048 * and we can do it if maxfaults >= 2 because we'll 5049 * have enough redundancy. If maxfaults == 1, the 5050 * combination of this with injection of random data 5051 * corruption below exceeds the pool's fault tolerance. 5052 */ 5053 vdev_file_t *vf = vd0->vdev_tsd; 5054 5055 zfs_dbgmsg("injecting fault to vdev %llu; maxfaults=%d", 5056 (long long)vd0->vdev_id, (int)maxfaults); 5057 5058 if (vf != NULL && ztest_random(3) == 0) { 5059 (void) close(vf->vf_vnode->v_fd); 5060 vf->vf_vnode->v_fd = -1; 5061 } else if (ztest_random(2) == 0) { 5062 vd0->vdev_cant_read = B_TRUE; 5063 } else { 5064 vd0->vdev_cant_write = B_TRUE; 5065 } 5066 guid0 = vd0->vdev_guid; 5067 } 5068 } else { 5069 /* 5070 * Inject errors on an l2cache device. 
5071 */ 5072 spa_aux_vdev_t *sav = &spa->spa_l2cache; 5073 5074 if (sav->sav_count == 0) { 5075 spa_config_exit(spa, SCL_STATE, FTAG); 5076 rw_exit(&ztest_name_lock); 5077 return; 5078 } 5079 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; 5080 guid0 = vd0->vdev_guid; 5081 (void) strcpy(path0, vd0->vdev_path); 5082 (void) strcpy(pathrand, vd0->vdev_path); 5083 5084 leaf = 0; 5085 leaves = 1; 5086 maxfaults = INT_MAX; /* no limit on cache devices */ 5087 } 5088 5089 spa_config_exit(spa, SCL_STATE, FTAG); 5090 rw_exit(&ztest_name_lock); 5091 5092 /* 5093 * If we can tolerate two or more faults, or we're dealing 5094 * with a slog, randomly online/offline vd0. 5095 */ 5096 if ((maxfaults >= 2 || islog) && guid0 != 0) { 5097 if (ztest_random(10) < 6) { 5098 int flags = (ztest_random(2) == 0 ? 5099 ZFS_OFFLINE_TEMPORARY : 0); 5100 5101 /* 5102 * We have to grab the zs_name_lock as writer to 5103 * prevent a race between offlining a slog and 5104 * destroying a dataset. Offlining the slog will 5105 * grab a reference on the dataset which may cause 5106 * dmu_objset_destroy() to fail with EBUSY thus 5107 * leaving the dataset in an inconsistent state. 5108 */ 5109 if (islog) 5110 rw_enter(&ztest_name_lock, RW_WRITER); 5111 5112 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); 5113 5114 if (islog) 5115 rw_exit(&ztest_name_lock); 5116 } else { 5117 /* 5118 * Ideally we would like to be able to randomly 5119 * call vdev_[on|off]line without holding locks 5120 * to force unpredictable failures but the side 5121 * effects of vdev_[on|off]line prevent us from 5122 * doing so. We grab the ztest_vdev_lock here to 5123 * prevent a race between injection testing and 5124 * aux_vdev removal. 5125 */ 5126 mutex_enter(&ztest_vdev_lock); 5127 (void) vdev_online(spa, guid0, 0, NULL); 5128 mutex_exit(&ztest_vdev_lock); 5129 } 5130 } 5131 5132 if (maxfaults == 0) 5133 return; 5134 5135 /* 5136 * We have at least single-fault tolerance, so inject data corruption. 5137 */ 5138 fd = open(pathrand, O_RDWR); 5139 5140 if (fd == -1) /* we hit a gap in the device namespace */ 5141 return; 5142 5143 fsize = lseek(fd, 0, SEEK_END); 5144 5145 while (--iters != 0) { 5146 /* 5147 * The offset must be chosen carefully to ensure that 5148 * we do not inject a given logical block with errors 5149 * on two different leaf devices, because ZFS can not 5150 * tolerate that (if maxfaults==1). 5151 * 5152 * We divide each leaf into chunks of size 5153 * (# leaves * SPA_MAXBLOCKSIZE * 4). Within each chunk 5154 * there is a series of ranges to which we can inject errors. 5155 * Each range can accept errors on only a single leaf vdev. 5156 * The error injection ranges are separated by ranges 5157 * which we will not inject errors on any device (DMZs). 5158 * Each DMZ must be large enough such that a single block 5159 * can not straddle it, so that a single block can not be 5160 * a target in two different injection ranges (on different 5161 * leaf vdevs). 5162 * 5163 * For example, with 3 leaves, each chunk looks like: 5164 * 0 to 32M: injection range for leaf 0 5165 * 32M to 64M: DMZ - no injection allowed 5166 * 64M to 96M: injection range for leaf 1 5167 * 96M to 128M: DMZ - no injection allowed 5168 * 128M to 160M: injection range for leaf 2 5169 * 160M to 192M: DMZ - no injection allowed 5170 */ 5171 offset = ztest_random(fsize / (leaves << bshift)) * 5172 (leaves << bshift) + (leaf << bshift) + 5173 (ztest_random(1ULL << (bshift - 1)) & -8ULL); 5174 5175 /* 5176 * Only allow damage to the labels at one end of the vdev. 
5177 * 5178 * If all labels are damaged, the device will be totally 5179 * inaccessible, which will result in loss of data, 5180 * because we also damage (parts of) the other side of 5181 * the mirror/raidz. 5182 * 5183 * Additionally, we will always have both an even and an 5184 * odd label, so that we can handle crashes in the 5185 * middle of vdev_config_sync(). 5186 */ 5187 if ((leaf & 1) == 0 && offset < VDEV_LABEL_START_SIZE) 5188 continue; 5189 5190 /* 5191 * The two end labels are stored at the "end" of the disk, but 5192 * the end of the disk (vdev_psize) is aligned to 5193 * sizeof (vdev_label_t). 5194 */ 5195 uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t)); 5196 if ((leaf & 1) == 1 && 5197 offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE) 5198 continue; 5199 5200 mutex_enter(&ztest_vdev_lock); 5201 if (mirror_save != zs->zs_mirrors) { 5202 mutex_exit(&ztest_vdev_lock); 5203 (void) close(fd); 5204 return; 5205 } 5206 5207 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) 5208 fatal(1, "can't inject bad word at 0x%llx in %s", 5209 offset, pathrand); 5210 5211 mutex_exit(&ztest_vdev_lock); 5212 5213 if (ztest_opts.zo_verbose >= 7) 5214 (void) printf("injected bad word into %s," 5215 " offset 0x%llx\n", pathrand, (u_longlong_t)offset); 5216 } 5217 5218 (void) close(fd); 5219 } 5220 5221 /* 5222 * Verify that DDT repair works as expected. 5223 */ 5224 void 5225 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) 5226 { 5227 ztest_shared_t *zs = ztest_shared; 5228 spa_t *spa = ztest_spa; 5229 objset_t *os = zd->zd_os; 5230 ztest_od_t od[1]; 5231 uint64_t object, blocksize, txg, pattern, psize; 5232 enum zio_checksum checksum = spa_dedup_checksum(spa); 5233 dmu_buf_t *db; 5234 dmu_tx_t *tx; 5235 abd_t *abd; 5236 blkptr_t blk; 5237 int copies = 2 * ZIO_DEDUPDITTO_MIN; 5238 5239 blocksize = ztest_random_blocksize(); 5240 blocksize = MIN(blocksize, 2048); /* because we write so many */ 5241 5242 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 5243 5244 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 5245 return; 5246 5247 /* 5248 * Take the name lock as writer to prevent anyone else from changing 5249 * the pool and dataset properies we need to maintain during this test. 5250 */ 5251 rw_enter(&ztest_name_lock, RW_WRITER); 5252 5253 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, 5254 B_FALSE) != 0 || 5255 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, 5256 B_FALSE) != 0) { 5257 rw_exit(&ztest_name_lock); 5258 return; 5259 } 5260 5261 dmu_objset_stats_t dds; 5262 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 5263 dmu_objset_fast_stat(os, &dds); 5264 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 5265 5266 object = od[0].od_object; 5267 blocksize = od[0].od_blocksize; 5268 pattern = zs->zs_guid ^ dds.dds_guid; 5269 5270 ASSERT(object != 0); 5271 5272 tx = dmu_tx_create(os); 5273 dmu_tx_hold_write(tx, object, 0, copies * blocksize); 5274 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 5275 if (txg == 0) { 5276 rw_exit(&ztest_name_lock); 5277 return; 5278 } 5279 5280 /* 5281 * Write all the copies of our block. 
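 *
 * Every copy holds the same pattern, so dedup collapses them onto one
 * physical block; piling up this many references to it is what is
 * intended to earn that block the dedup-ditto replica we repair from
 * later.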
5282 */ 5283 for (int i = 0; i < copies; i++) { 5284 uint64_t offset = i * blocksize; 5285 int error = dmu_buf_hold(os, object, offset, FTAG, &db, 5286 DMU_READ_NO_PREFETCH); 5287 if (error != 0) { 5288 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u", 5289 os, (long long)object, (long long) offset, error); 5290 } 5291 ASSERT(db->db_offset == offset); 5292 ASSERT(db->db_size == blocksize); 5293 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) || 5294 ztest_pattern_match(db->db_data, db->db_size, 0ULL)); 5295 dmu_buf_will_fill(db, tx); 5296 ztest_pattern_set(db->db_data, db->db_size, pattern); 5297 dmu_buf_rele(db, FTAG); 5298 } 5299 5300 dmu_tx_commit(tx); 5301 txg_wait_synced(spa_get_dsl(spa), txg); 5302 5303 /* 5304 * Find out what block we got. 5305 */ 5306 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db, 5307 DMU_READ_NO_PREFETCH)); 5308 blk = *((dmu_buf_impl_t *)db)->db_blkptr; 5309 dmu_buf_rele(db, FTAG); 5310 5311 /* 5312 * Damage the block. Dedup-ditto will save us when we read it later. 5313 */ 5314 psize = BP_GET_PSIZE(&blk); 5315 abd = abd_alloc_linear(psize, B_TRUE); 5316 ztest_pattern_set(abd_to_buf(abd), psize, ~pattern); 5317 5318 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk, 5319 abd, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, 5320 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL)); 5321 5322 abd_free(abd); 5323 5324 rw_exit(&ztest_name_lock); 5325 } 5326 5327 /* 5328 * Scrub the pool. 5329 */ 5330 /* ARGSUSED */ 5331 void 5332 ztest_scrub(ztest_ds_t *zd, uint64_t id) 5333 { 5334 spa_t *spa = ztest_spa; 5335 5336 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5337 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */ 5338 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5339 } 5340 5341 /* 5342 * Change the guid for the pool. 5343 */ 5344 /* ARGSUSED */ 5345 void 5346 ztest_reguid(ztest_ds_t *zd, uint64_t id) 5347 { 5348 spa_t *spa = ztest_spa; 5349 uint64_t orig, load; 5350 int error; 5351 5352 orig = spa_guid(spa); 5353 load = spa_load_guid(spa); 5354 5355 rw_enter(&ztest_name_lock, RW_WRITER); 5356 error = spa_change_guid(spa); 5357 rw_exit(&ztest_name_lock); 5358 5359 if (error != 0) 5360 return; 5361 5362 if (ztest_opts.zo_verbose >= 4) { 5363 (void) printf("Changed guid old %llu -> %llu\n", 5364 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); 5365 } 5366 5367 VERIFY3U(orig, !=, spa_guid(spa)); 5368 VERIFY3U(load, ==, spa_load_guid(spa)); 5369 } 5370 5371 /* 5372 * Rename the pool to a different name and then rename it back. 5373 */ 5374 /* ARGSUSED */ 5375 void 5376 ztest_spa_rename(ztest_ds_t *zd, uint64_t id) 5377 { 5378 char *oldname, *newname; 5379 spa_t *spa; 5380 5381 rw_enter(&ztest_name_lock, RW_WRITER); 5382 5383 oldname = ztest_opts.zo_pool; 5384 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); 5385 (void) strcpy(newname, oldname); 5386 (void) strcat(newname, "_tmp"); 5387 5388 /* 5389 * Do the rename 5390 */ 5391 VERIFY3U(0, ==, spa_rename(oldname, newname)); 5392 5393 /* 5394 * Try to open it under the old name, which shouldn't exist 5395 */ 5396 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5397 5398 /* 5399 * Open it under the new name and make sure it's still the same spa_t. 
5400 */ 5401 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5402 5403 ASSERT(spa == ztest_spa); 5404 spa_close(spa, FTAG); 5405 5406 /* 5407 * Rename it back to the original 5408 */ 5409 VERIFY3U(0, ==, spa_rename(newname, oldname)); 5410 5411 /* 5412 * Make sure it can still be opened 5413 */ 5414 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5415 5416 ASSERT(spa == ztest_spa); 5417 spa_close(spa, FTAG); 5418 5419 umem_free(newname, strlen(newname) + 1); 5420 5421 rw_exit(&ztest_name_lock); 5422 } 5423 5424 /* 5425 * Verify pool integrity by running zdb. 5426 */ 5427 static void 5428 ztest_run_zdb(char *pool) 5429 { 5430 int status; 5431 char zdb[MAXPATHLEN + MAXNAMELEN + 20]; 5432 char zbuf[1024]; 5433 char *bin; 5434 char *ztest; 5435 char *isa; 5436 int isalen; 5437 FILE *fp; 5438 5439 (void) realpath(getexecname(), zdb); 5440 5441 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ 5442 bin = strstr(zdb, "/usr/bin/"); 5443 ztest = strstr(bin, "/ztest"); 5444 isa = bin + 8; 5445 isalen = ztest - isa; 5446 isa = strdup(isa); 5447 /* LINTED */ 5448 (void) sprintf(bin, 5449 "/usr/sbin%.*s/zdb -bcc%s%s -G -d -U %s %s", 5450 isalen, 5451 isa, 5452 ztest_opts.zo_verbose >= 3 ? "s" : "", 5453 ztest_opts.zo_verbose >= 4 ? "v" : "", 5454 spa_config_path, 5455 pool); 5456 free(isa); 5457 5458 if (ztest_opts.zo_verbose >= 5) 5459 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 5460 5461 fp = popen(zdb, "r"); 5462 5463 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 5464 if (ztest_opts.zo_verbose >= 3) 5465 (void) printf("%s", zbuf); 5466 5467 status = pclose(fp); 5468 5469 if (status == 0) 5470 return; 5471 5472 ztest_dump_core = 0; 5473 if (WIFEXITED(status)) 5474 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 5475 else 5476 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 5477 } 5478 5479 static void 5480 ztest_walk_pool_directory(char *header) 5481 { 5482 spa_t *spa = NULL; 5483 5484 if (ztest_opts.zo_verbose >= 6) 5485 (void) printf("%s\n", header); 5486 5487 mutex_enter(&spa_namespace_lock); 5488 while ((spa = spa_next(spa)) != NULL) 5489 if (ztest_opts.zo_verbose >= 6) 5490 (void) printf("\t%s\n", spa_name(spa)); 5491 mutex_exit(&spa_namespace_lock); 5492 } 5493 5494 static void 5495 ztest_spa_import_export(char *oldname, char *newname) 5496 { 5497 nvlist_t *config, *newconfig; 5498 uint64_t pool_guid; 5499 spa_t *spa; 5500 int error; 5501 5502 if (ztest_opts.zo_verbose >= 4) { 5503 (void) printf("import/export: old = %s, new = %s\n", 5504 oldname, newname); 5505 } 5506 5507 /* 5508 * Clean up from previous runs. 5509 */ 5510 (void) spa_destroy(newname); 5511 5512 /* 5513 * Get the pool's configuration and guid. 5514 */ 5515 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5516 5517 /* 5518 * Kick off a scrub to tickle scrub/export races. 5519 */ 5520 if (ztest_random(2) == 0) 5521 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5522 5523 pool_guid = spa_guid(spa); 5524 spa_close(spa, FTAG); 5525 5526 ztest_walk_pool_directory("pools before export"); 5527 5528 /* 5529 * Export it. 5530 */ 5531 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 5532 5533 ztest_walk_pool_directory("pools after export"); 5534 5535 /* 5536 * Try to import it. 5537 */ 5538 newconfig = spa_tryimport(config); 5539 ASSERT(newconfig != NULL); 5540 nvlist_free(newconfig); 5541 5542 /* 5543 * Import it under the new name. 
5544 */ 5545 error = spa_import(newname, config, NULL, 0); 5546 if (error != 0) { 5547 dump_nvlist(config, 0); 5548 fatal(B_FALSE, "couldn't import pool %s as %s: error %u", 5549 oldname, newname, error); 5550 } 5551 5552 ztest_walk_pool_directory("pools after import"); 5553 5554 /* 5555 * Try to import it again -- should fail with EEXIST. 5556 */ 5557 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 5558 5559 /* 5560 * Try to import it under a different name -- should fail with EEXIST. 5561 */ 5562 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 5563 5564 /* 5565 * Verify that the pool is no longer visible under the old name. 5566 */ 5567 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5568 5569 /* 5570 * Verify that we can open and close the pool using the new name. 5571 */ 5572 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5573 ASSERT(pool_guid == spa_guid(spa)); 5574 spa_close(spa, FTAG); 5575 5576 nvlist_free(config); 5577 } 5578 5579 static void 5580 ztest_resume(spa_t *spa) 5581 { 5582 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6) 5583 (void) printf("resuming from suspended state\n"); 5584 spa_vdev_state_enter(spa, SCL_NONE); 5585 vdev_clear(spa, NULL); 5586 (void) spa_vdev_state_exit(spa, NULL, 0); 5587 (void) zio_resume(spa); 5588 } 5589 5590 static void * 5591 ztest_resume_thread(void *arg) 5592 { 5593 spa_t *spa = arg; 5594 5595 while (!ztest_exiting) { 5596 if (spa_suspended(spa)) 5597 ztest_resume(spa); 5598 (void) poll(NULL, 0, 100); 5599 5600 /* 5601 * Periodically change the zfs_compressed_arc_enabled setting. 5602 */ 5603 if (ztest_random(10) == 0) 5604 zfs_compressed_arc_enabled = ztest_random(2); 5605 5606 /* 5607 * Periodically change the zfs_abd_scatter_enabled setting. 5608 */ 5609 if (ztest_random(10) == 0) 5610 zfs_abd_scatter_enabled = ztest_random(2); 5611 } 5612 return (NULL); 5613 } 5614 5615 static void * 5616 ztest_deadman_thread(void *arg) 5617 { 5618 ztest_shared_t *zs = arg; 5619 spa_t *spa = ztest_spa; 5620 hrtime_t delta, total = 0; 5621 5622 for (;;) { 5623 delta = zs->zs_thread_stop - zs->zs_thread_start + 5624 MSEC2NSEC(zfs_deadman_synctime_ms); 5625 5626 (void) poll(NULL, 0, (int)NSEC2MSEC(delta)); 5627 5628 /* 5629 * If the pool is suspended then fail immediately. Otherwise, 5630 * check to see if the pool is making any progress. If 5631 * vdev_deadman() discovers that there hasn't been any recent 5632 * I/Os then it will end up aborting the tests. 
5633 */ 5634 if (spa_suspended(spa) || spa->spa_root_vdev == NULL) { 5635 fatal(0, "aborting test after %llu seconds because " 5636 "pool has transitioned to a suspended state.", 5637 zfs_deadman_synctime_ms / 1000); 5638 return (NULL); 5639 } 5640 vdev_deadman(spa->spa_root_vdev); 5641 5642 total += zfs_deadman_synctime_ms/1000; 5643 (void) printf("ztest has been running for %lld seconds\n", 5644 total); 5645 } 5646 } 5647 5648 static void 5649 ztest_execute(int test, ztest_info_t *zi, uint64_t id) 5650 { 5651 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 5652 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 5653 hrtime_t functime = gethrtime(); 5654 5655 for (int i = 0; i < zi->zi_iters; i++) 5656 zi->zi_func(zd, id); 5657 5658 functime = gethrtime() - functime; 5659 5660 atomic_add_64(&zc->zc_count, 1); 5661 atomic_add_64(&zc->zc_time, functime); 5662 5663 if (ztest_opts.zo_verbose >= 4) { 5664 Dl_info dli; 5665 (void) dladdr((void *)zi->zi_func, &dli); 5666 (void) printf("%6.2f sec in %s\n", 5667 (double)functime / NANOSEC, dli.dli_sname); 5668 } 5669 } 5670 5671 static void * 5672 ztest_thread(void *arg) 5673 { 5674 int rand; 5675 uint64_t id = (uintptr_t)arg; 5676 ztest_shared_t *zs = ztest_shared; 5677 uint64_t call_next; 5678 hrtime_t now; 5679 ztest_info_t *zi; 5680 ztest_shared_callstate_t *zc; 5681 5682 while ((now = gethrtime()) < zs->zs_thread_stop) { 5683 /* 5684 * See if it's time to force a crash. 5685 */ 5686 if (now > zs->zs_thread_kill) 5687 ztest_kill(zs); 5688 5689 /* 5690 * If we're getting ENOSPC with some regularity, stop. 5691 */ 5692 if (zs->zs_enospc_count > 10) 5693 break; 5694 5695 /* 5696 * Pick a random function to execute. 5697 */ 5698 rand = ztest_random(ZTEST_FUNCS); 5699 zi = &ztest_info[rand]; 5700 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 5701 call_next = zc->zc_next; 5702 5703 if (now >= call_next && 5704 atomic_cas_64(&zc->zc_next, call_next, call_next + 5705 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 5706 ztest_execute(rand, zi, id); 5707 } 5708 } 5709 5710 return (NULL); 5711 } 5712 5713 static void 5714 ztest_dataset_name(char *dsname, char *pool, int d) 5715 { 5716 (void) snprintf(dsname, ZFS_MAX_DATASET_NAME_LEN, "%s/ds_%d", pool, d); 5717 } 5718 5719 static void 5720 ztest_dataset_destroy(int d) 5721 { 5722 char name[ZFS_MAX_DATASET_NAME_LEN]; 5723 5724 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5725 5726 if (ztest_opts.zo_verbose >= 3) 5727 (void) printf("Destroying %s to free up space\n", name); 5728 5729 /* 5730 * Cleanup any non-standard clones and snapshots. In general, 5731 * ztest thread t operates on dataset (t % zopt_datasets), 5732 * so there may be more than one thing to clean up. 5733 */ 5734 for (int t = d; t < ztest_opts.zo_threads; 5735 t += ztest_opts.zo_datasets) { 5736 ztest_dsl_dataset_cleanup(name, t); 5737 } 5738 5739 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 5740 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 5741 } 5742 5743 static void 5744 ztest_dataset_dirobj_verify(ztest_ds_t *zd) 5745 { 5746 uint64_t usedobjs, dirobjs, scratch; 5747 5748 /* 5749 * ZTEST_DIROBJ is the object directory for the entire dataset. 5750 * Therefore, the number of objects in use should equal the 5751 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 5752 * If not, we have an object leak. 5753 * 5754 * Note that we can only check this in ztest_dataset_open(), 5755 * when the open-context and syncing-context values agree. 
5756 * That's because zap_count() returns the open-context value, 5757 * while dmu_objset_space() returns the rootbp fill count. 5758 */ 5759 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 5760 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 5761 ASSERT3U(dirobjs + 1, ==, usedobjs); 5762 } 5763 5764 static int 5765 ztest_dataset_open(int d) 5766 { 5767 ztest_ds_t *zd = &ztest_ds[d]; 5768 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 5769 objset_t *os; 5770 zilog_t *zilog; 5771 char name[ZFS_MAX_DATASET_NAME_LEN]; 5772 int error; 5773 5774 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5775 5776 rw_enter(&ztest_name_lock, RW_READER); 5777 5778 error = ztest_dataset_create(name); 5779 if (error == ENOSPC) { 5780 rw_exit(&ztest_name_lock); 5781 ztest_record_enospc(FTAG); 5782 return (error); 5783 } 5784 ASSERT(error == 0 || error == EEXIST); 5785 5786 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os)); 5787 rw_exit(&ztest_name_lock); 5788 5789 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 5790 5791 zilog = zd->zd_zilog; 5792 5793 if (zilog->zl_header->zh_claim_lr_seq != 0 && 5794 zilog->zl_header->zh_claim_lr_seq < committed_seq) 5795 fatal(0, "missing log records: claimed %llu < committed %llu", 5796 zilog->zl_header->zh_claim_lr_seq, committed_seq); 5797 5798 ztest_dataset_dirobj_verify(zd); 5799 5800 zil_replay(os, zd, ztest_replay_vector); 5801 5802 ztest_dataset_dirobj_verify(zd); 5803 5804 if (ztest_opts.zo_verbose >= 6) 5805 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n", 5806 zd->zd_name, 5807 (u_longlong_t)zilog->zl_parse_blk_count, 5808 (u_longlong_t)zilog->zl_parse_lr_count, 5809 (u_longlong_t)zilog->zl_replaying_seq); 5810 5811 zilog = zil_open(os, ztest_get_data); 5812 5813 if (zilog->zl_replaying_seq != 0 && 5814 zilog->zl_replaying_seq < committed_seq) 5815 fatal(0, "missing log records: replayed %llu < committed %llu", 5816 zilog->zl_replaying_seq, committed_seq); 5817 5818 return (0); 5819 } 5820 5821 static void 5822 ztest_dataset_close(int d) 5823 { 5824 ztest_ds_t *zd = &ztest_ds[d]; 5825 5826 zil_close(zd->zd_zilog); 5827 dmu_objset_disown(zd->zd_os, zd); 5828 5829 ztest_zd_fini(zd); 5830 } 5831 5832 /* 5833 * Kick off threads to run tests on all datasets in parallel. 5834 */ 5835 static void 5836 ztest_run(ztest_shared_t *zs) 5837 { 5838 thread_t *tid; 5839 spa_t *spa; 5840 objset_t *os; 5841 thread_t resume_tid; 5842 int error; 5843 5844 ztest_exiting = B_FALSE; 5845 5846 /* 5847 * Initialize parent/child shared state. 5848 */ 5849 mutex_init(&ztest_checkpoint_lock, NULL, USYNC_THREAD, NULL); 5850 mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL); 5851 rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL); 5852 5853 zs->zs_thread_start = gethrtime(); 5854 zs->zs_thread_stop = 5855 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 5856 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 5857 zs->zs_thread_kill = zs->zs_thread_stop; 5858 if (ztest_random(100) < ztest_opts.zo_killrate) { 5859 zs->zs_thread_kill -= 5860 ztest_random(ztest_opts.zo_passtime * NANOSEC); 5861 } 5862 5863 mutex_init(&zcl.zcl_callbacks_lock, NULL, USYNC_THREAD, NULL); 5864 5865 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 5866 offsetof(ztest_cb_data_t, zcd_node)); 5867 5868 /* 5869 * Open our pool. 
5870 */ 5871 kernel_init(FREAD | FWRITE); 5872 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5873 spa->spa_debug = B_TRUE; 5874 metaslab_preload_limit = ztest_random(20) + 1; 5875 ztest_spa = spa; 5876 5877 dmu_objset_stats_t dds; 5878 VERIFY0(dmu_objset_own(ztest_opts.zo_pool, 5879 DMU_OST_ANY, B_TRUE, FTAG, &os)); 5880 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 5881 dmu_objset_fast_stat(os, &dds); 5882 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 5883 zs->zs_guid = dds.dds_guid; 5884 dmu_objset_disown(os, FTAG); 5885 5886 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; 5887 5888 /* 5889 * We don't expect the pool to suspend unless maxfaults == 0, 5890 * in which case ztest_fault_inject() temporarily takes away 5891 * the only valid replica. 5892 */ 5893 if (MAXFAULTS() == 0) 5894 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT; 5895 else 5896 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC; 5897 5898 /* 5899 * Create a thread to periodically resume suspended I/O. 5900 */ 5901 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, 5902 &resume_tid) == 0); 5903 5904 /* 5905 * Create a deadman thread to abort() if we hang. 5906 */ 5907 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, 5908 NULL) == 0); 5909 5910 /* 5911 * Verify that we can safely inquire about any object, 5912 * whether it's allocated or not. To make it interesting, 5913 * we probe a 5-wide window around each power of two. 5914 * This hits all edge cases, including zero and the max. 5915 */ 5916 for (int t = 0; t < 64; t++) { 5917 for (int d = -5; d <= 5; d++) { 5918 error = dmu_object_info(spa->spa_meta_objset, 5919 (1ULL << t) + d, NULL); 5920 ASSERT(error == 0 || error == ENOENT || 5921 error == EINVAL); 5922 } 5923 } 5924 5925 /* 5926 * If we got any ENOSPC errors on the previous run, destroy something. 5927 */ 5928 if (zs->zs_enospc_count != 0) { 5929 int d = ztest_random(ztest_opts.zo_datasets); 5930 ztest_dataset_destroy(d); 5931 } 5932 zs->zs_enospc_count = 0; 5933 5934 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t), 5935 UMEM_NOFAIL); 5936 5937 if (ztest_opts.zo_verbose >= 4) 5938 (void) printf("starting main threads...\n"); 5939 5940 /* 5941 * Kick off all the tests that run in parallel. 5942 */ 5943 for (int t = 0; t < ztest_opts.zo_threads; t++) { 5944 if (t < ztest_opts.zo_datasets && 5945 ztest_dataset_open(t) != 0) 5946 return; 5947 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, 5948 THR_BOUND, &tid[t]) == 0); 5949 } 5950 5951 /* 5952 * Wait for all of the tests to complete. We go in reverse order 5953 * so we don't close datasets while threads are still using them. 5954 */ 5955 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) { 5956 VERIFY(thr_join(tid[t], NULL, NULL) == 0); 5957 if (t < ztest_opts.zo_datasets) 5958 ztest_dataset_close(t); 5959 } 5960 5961 txg_wait_synced(spa_get_dsl(spa), 0); 5962 5963 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 5964 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); 5965 zfs_dbgmsg_print(FTAG); 5966 5967 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t)); 5968 5969 /* Kill the resume thread */ 5970 ztest_exiting = B_TRUE; 5971 VERIFY(thr_join(resume_tid, NULL, NULL) == 0); 5972 ztest_resume(spa); 5973 5974 /* 5975 * Right before closing the pool, kick off a bunch of async I/O; 5976 * spa_close() should wait for it to complete. 
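 *
 * The prefetches below are advisory reads of the first several dozen
 * MOS objects; their only purpose is to have asynchronous I/O in
 * flight at the moment spa_close() is called.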
5977 */ 5978 for (uint64_t object = 1; object < 50; object++) { 5979 dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20, 5980 ZIO_PRIORITY_SYNC_READ); 5981 } 5982 5983 spa_close(spa, FTAG); 5984 5985 /* 5986 * Verify that we can loop over all pools. 5987 */ 5988 mutex_enter(&spa_namespace_lock); 5989 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 5990 if (ztest_opts.zo_verbose > 3) 5991 (void) printf("spa_next: found %s\n", spa_name(spa)); 5992 mutex_exit(&spa_namespace_lock); 5993 5994 /* 5995 * Verify that we can export the pool and reimport it under a 5996 * different name. 5997 */ 5998 if (ztest_random(2) == 0) { 5999 char name[ZFS_MAX_DATASET_NAME_LEN]; 6000 (void) snprintf(name, sizeof (name), "%s_import", 6001 ztest_opts.zo_pool); 6002 ztest_spa_import_export(ztest_opts.zo_pool, name); 6003 ztest_spa_import_export(name, ztest_opts.zo_pool); 6004 } 6005 6006 kernel_fini(); 6007 6008 list_destroy(&zcl.zcl_callbacks); 6009 6010 mutex_destroy(&zcl.zcl_callbacks_lock); 6011 6012 rw_destroy(&ztest_name_lock); 6013 mutex_destroy(&ztest_vdev_lock); 6014 mutex_destroy(&ztest_checkpoint_lock); 6015 } 6016 6017 static void 6018 ztest_freeze(void) 6019 { 6020 ztest_ds_t *zd = &ztest_ds[0]; 6021 spa_t *spa; 6022 int numloops = 0; 6023 6024 if (ztest_opts.zo_verbose >= 3) 6025 (void) printf("testing spa_freeze()...\n"); 6026 6027 kernel_init(FREAD | FWRITE); 6028 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 6029 VERIFY3U(0, ==, ztest_dataset_open(0)); 6030 spa->spa_debug = B_TRUE; 6031 ztest_spa = spa; 6032 6033 /* 6034 * Force the first log block to be transactionally allocated. 6035 * We have to do this before we freeze the pool -- otherwise 6036 * the log chain won't be anchored. 6037 */ 6038 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 6039 ztest_dmu_object_alloc_free(zd, 0); 6040 zil_commit(zd->zd_zilog, 0); 6041 } 6042 6043 txg_wait_synced(spa_get_dsl(spa), 0); 6044 6045 /* 6046 * Freeze the pool. This stops spa_sync() from doing anything, 6047 * so that the only way to record changes from now on is the ZIL. 6048 */ 6049 spa_freeze(spa); 6050 6051 /* 6052 * Because it is hard to predict how much space a write will actually 6053 * require beforehand, we leave ourselves some fudge space to write over 6054 * capacity. 6055 */ 6056 uint64_t capacity = metaslab_class_get_space(spa_normal_class(spa)) / 2; 6057 6058 /* 6059 * Run tests that generate log records but don't alter the pool config 6060 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 6061 * We do a txg_wait_synced() after each iteration to force the txg 6062 * to increase well beyond the last synced value in the uberblock. 6063 * The ZIL should be OK with that. 6064 * 6065 * Run a random number of times less than zo_maxloops and ensure we do 6066 * not run out of space on the pool. 6067 */ 6068 while (ztest_random(10) != 0 && 6069 numloops++ < ztest_opts.zo_maxloops && 6070 metaslab_class_get_alloc(spa_normal_class(spa)) < capacity) { 6071 ztest_od_t od; 6072 ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 6073 VERIFY0(ztest_object_init(zd, &od, sizeof (od), B_FALSE)); 6074 ztest_io(zd, od.od_object, 6075 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 6076 txg_wait_synced(spa_get_dsl(spa), 0); 6077 } 6078 6079 /* 6080 * Commit all of the changes we just generated. 6081 */ 6082 zil_commit(zd->zd_zilog, 0); 6083 txg_wait_synced(spa_get_dsl(spa), 0); 6084 6085 /* 6086 * Close our dataset and close the pool. 
void
print_time(hrtime_t t, char *timebuf)
{
	hrtime_t s = t / NANOSEC;
	hrtime_t m = s / 60;
	hrtime_t h = m / 60;
	hrtime_t d = h / 24;

	s -= m * 60;
	m -= h * 60;
	h -= d * 24;

	timebuf[0] = '\0';

	if (d)
		(void) sprintf(timebuf,
		    "%llud%02lluh%02llum%02llus", d, h, m, s);
	else if (h)
		(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
	else if (m)
		(void) sprintf(timebuf, "%llum%02llus", m, s);
	else
		(void) sprintf(timebuf, "%llus", s);
}
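
/*
 * Build the property list handed to spa_create().  Half the time the
 * list is left empty; otherwise the "autoreplace" pool property is
 * enabled so that pool-property handling also gets exercised.
 */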
static nvlist_t *
make_random_props()
{
	nvlist_t *props;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	if (ztest_random(2) == 0)
		return (props);
	VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);

	return (props);
}

/*
 * Create a storage pool with the given name and initial vdev size.
 * Then test spa_freeze() functionality.
 */
static void
ztest_init(ztest_shared_t *zs)
{
	spa_t *spa;
	nvlist_t *nvroot, *props;

	mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL);
	mutex_init(&ztest_checkpoint_lock, NULL, USYNC_THREAD, NULL);
	rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL);

	kernel_init(FREAD | FWRITE);

	/*
	 * Create the storage pool.
	 */
	(void) spa_destroy(ztest_opts.zo_pool);
	ztest_shared->zs_vdev_next_leaf = 0;
	zs->zs_splits = 0;
	zs->zs_mirrors = ztest_opts.zo_mirrors;
	nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
	    0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
	props = make_random_props();
	for (int i = 0; i < SPA_FEATURES; i++) {
		char buf[1024];
		(void) snprintf(buf, sizeof (buf), "feature@%s",
		    spa_feature_table[i].fi_uname);
		VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
	}
	VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
	nvlist_free(nvroot);
	nvlist_free(props);

	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
	zs->zs_metaslab_sz =
	    1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;

	spa_close(spa, FTAG);

	kernel_fini();

	ztest_run_zdb(ztest_opts.zo_pool);

	ztest_freeze();

	ztest_run_zdb(ztest_opts.zo_pool);

	rw_destroy(&ztest_name_lock);
	mutex_destroy(&ztest_vdev_lock);
	mutex_destroy(&ztest_checkpoint_lock);
}

static void
setup_data_fd(void)
{
	static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";

	ztest_fd_data = mkstemp(ztest_name_data);
	ASSERT3S(ztest_fd_data, >=, 0);
	(void) unlink(ztest_name_data);
}

static int
shared_data_size(ztest_shared_hdr_t *hdr)
{
	int size;

	size = hdr->zh_hdr_size;
	size += hdr->zh_opts_size;
	size += hdr->zh_size;
	size += hdr->zh_stats_size * hdr->zh_stats_count;
	size += hdr->zh_ds_size * hdr->zh_ds_count;

	return (size);
}

static void
setup_hdr(void)
{
	int size;
	ztest_shared_hdr_t *hdr;

	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
	ASSERT(hdr != MAP_FAILED);

	VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));

	hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
	hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
	hdr->zh_size = sizeof (ztest_shared_t);
	hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
	hdr->zh_stats_count = ZTEST_FUNCS;
	hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
	hdr->zh_ds_count = ztest_opts.zo_datasets;

	size = shared_data_size(hdr);
	VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));

	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
}

static void
setup_data(void)
{
	int size, offset;
	ztest_shared_hdr_t *hdr;
	uint8_t *buf;

	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ, MAP_SHARED, ztest_fd_data, 0);
	ASSERT(hdr != MAP_FAILED);

	size = shared_data_size(hdr);

	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
	hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
	ASSERT(hdr != MAP_FAILED);
	buf = (uint8_t *)hdr;

	offset = hdr->zh_hdr_size;
	ztest_shared_opts = (void *)&buf[offset];
	offset += hdr->zh_opts_size;
	ztest_shared = (void *)&buf[offset];
	offset += hdr->zh_size;
	ztest_shared_callstate = (void *)&buf[offset];
	offset += hdr->zh_stats_size * hdr->zh_stats_count;
	ztest_shared_ds = (void *)&buf[offset];
}
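
/*
 * Fork and exec the given ztest binary (or re-exec ourselves when cmd is
 * NULL), passing the shared-data file descriptor through the ZTEST_FD_DATA
 * environment variable and pointing LD_LIBRARY_PATH at libpath when one is
 * supplied, then wait for the child.  Returns B_TRUE if the child was
 * killed by SIGKILL (tolerated only when ignorekill is set), B_FALSE if it
 * exited cleanly; any other outcome terminates the parent.
 */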
static boolean_t
exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
	pid_t pid;
	int status;
	char *cmdbuf = NULL;

	pid = fork();

	if (cmd == NULL) {
		cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
		(void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
		cmd = cmdbuf;
	}

	if (pid == -1)
		fatal(1, "fork failed");

	if (pid == 0) {	/* child */
		char *emptyargv[2] = { cmd, NULL };
		char fd_data_str[12];

		struct rlimit rl = { 1024, 1024 };
		(void) setrlimit(RLIMIT_NOFILE, &rl);

		(void) close(ztest_fd_rand);
		VERIFY3U(11, >=,
		    snprintf(fd_data_str, 12, "%d", ztest_fd_data));
		VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));

		(void) enable_extended_FILE_stdio(-1, -1);
		if (libpath != NULL)
			VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
		(void) execv(cmd, emptyargv);
		ztest_dump_core = B_FALSE;
		fatal(B_TRUE, "exec failed: %s", cmd);
	}

	if (cmdbuf != NULL) {
		umem_free(cmdbuf, MAXPATHLEN);
		cmd = NULL;
	}

	while (waitpid(pid, &status, 0) != pid)
		continue;
	if (statusp != NULL)
		*statusp = status;

	if (WIFEXITED(status)) {
		if (WEXITSTATUS(status) != 0) {
			(void) fprintf(stderr, "child exited with code %d\n",
			    WEXITSTATUS(status));
			exit(2);
		}
		return (B_FALSE);
	} else if (WIFSIGNALED(status)) {
		if (!ignorekill || WTERMSIG(status) != SIGKILL) {
			(void) fprintf(stderr, "child died with signal %d\n",
			    WTERMSIG(status));
			exit(3);
		}
		return (B_TRUE);
	} else {
		(void) fprintf(stderr, "something strange happened to child\n");
		exit(4);
		/* NOTREACHED */
	}
}

static void
ztest_run_init(void)
{
	ztest_shared_t *zs = ztest_shared;

	ASSERT(ztest_opts.zo_init != 0);

	/*
	 * Blow away any existing copy of zpool.cache
	 */
	(void) remove(spa_config_path);

	/*
	 * Create and initialize our storage pool.
	 */
	for (int i = 1; i <= ztest_opts.zo_init; i++) {
		bzero(zs, sizeof (ztest_shared_t));
		if (ztest_opts.zo_verbose >= 3 &&
		    ztest_opts.zo_init != 1) {
			(void) printf("ztest_init(), pass %d\n", i);
		}
		ztest_init(zs);
	}
}
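
/*
 * Both the top-level driver and the exec'd children enter through main().
 * A child finds ZTEST_FD_DATA in its environment, attaches to the
 * memory-mapped shared state, and runs either ztest_run_init() or
 * ztest_run() before exiting.  The top-level process parses the command
 * line, creates the shared-data file, runs the initialization pass in a
 * child, and then repeatedly exec's children (alternating with the
 * alternate ztest binary when one was specified) until the requested run
 * time expires.
 */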
int
main(int argc, char **argv)
{
	int kills = 0;
	int iters = 0;
	int older = 0;
	int newer = 0;
	ztest_shared_t *zs;
	ztest_info_t *zi;
	ztest_shared_callstate_t *zc;
	char timebuf[100];
	char numbuf[NN_NUMBUF_SZ];
	spa_t *spa;
	char *cmd;
	boolean_t hasalt;
	char *fd_data_str = getenv("ZTEST_FD_DATA");

	(void) setvbuf(stdout, NULL, _IOLBF, 0);

	dprintf_setup(&argc, argv);
	zfs_deadman_synctime_ms = 300000;
	/*
	 * As two-word space map entries may not come up often (especially
	 * if pool and vdev sizes are small), we want to force at least some
	 * of them so that the feature gets tested.
	 */
	zfs_force_some_double_word_sm_entries = B_TRUE;

	ztest_fd_rand = open("/dev/urandom", O_RDONLY);
	ASSERT3S(ztest_fd_rand, >=, 0);

	if (!fd_data_str) {
		process_options(argc, argv);

		setup_data_fd();
		setup_hdr();
		setup_data();
		bcopy(&ztest_opts, ztest_shared_opts,
		    sizeof (*ztest_shared_opts));
	} else {
		ztest_fd_data = atoi(fd_data_str);
		setup_data();
		bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
	}
	ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);

	/* Override location of zpool.cache */
	VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache",
	    ztest_opts.zo_dir), !=, -1);

	ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
	    UMEM_NOFAIL);
	zs = ztest_shared;

	if (fd_data_str) {
		metaslab_force_ganging = ztest_opts.zo_metaslab_force_ganging;
		metaslab_df_alloc_threshold =
		    zs->zs_metaslab_df_alloc_threshold;

		if (zs->zs_do_init)
			ztest_run_init();
		else
			ztest_run(zs);
		exit(0);
	}

	hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);

	if (ztest_opts.zo_verbose >= 1) {
		(void) printf("%llu vdevs, %d datasets, %d threads,"
		    " %llu seconds...\n",
		    (u_longlong_t)ztest_opts.zo_vdevs,
		    ztest_opts.zo_datasets,
		    ztest_opts.zo_threads,
		    (u_longlong_t)ztest_opts.zo_time);
	}

	cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
	(void) strlcpy(cmd, getexecname(), MAXNAMELEN);

	zs->zs_do_init = B_TRUE;
	if (strlen(ztest_opts.zo_alt_ztest) != 0) {
		if (ztest_opts.zo_verbose >= 1) {
			(void) printf("Executing older ztest for "
			    "initialization: %s\n", ztest_opts.zo_alt_ztest);
		}
		VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
		    ztest_opts.zo_alt_libpath, B_FALSE, NULL));
	} else {
		VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
	}
	zs->zs_do_init = B_FALSE;

	zs->zs_proc_start = gethrtime();
	zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;

	for (int f = 0; f < ZTEST_FUNCS; f++) {
		zi = &ztest_info[f];
		zc = ZTEST_GET_SHARED_CALLSTATE(f);
		if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
			zc->zc_next = UINT64_MAX;
		else
			zc->zc_next = zs->zs_proc_start +
			    ztest_random(2 * zi->zi_interval[0] + 1);
	}

	/*
	 * Run the tests in a loop.  These tests include fault injection
	 * to verify that self-healing data works, and forced crashes
	 * to verify that we never lose on-disk consistency.
	 */
	while (gethrtime() < zs->zs_proc_stop) {
		int status;
		boolean_t killed;

		/*
		 * Initialize the workload counters for each function.
		 */
		for (int f = 0; f < ZTEST_FUNCS; f++) {
			zc = ZTEST_GET_SHARED_CALLSTATE(f);
			zc->zc_count = 0;
			zc->zc_time = 0;
		}

		/* Set the allocation switch size */
		zs->zs_metaslab_df_alloc_threshold =
		    ztest_random(zs->zs_metaslab_sz / 4) + 1;

		if (!hasalt || ztest_random(2) == 0) {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing newer ztest: %s\n",
				    cmd);
			}
			newer++;
			killed = exec_child(cmd, NULL, B_TRUE, &status);
		} else {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing older ztest: %s\n",
				    ztest_opts.zo_alt_ztest);
			}
			older++;
			killed = exec_child(ztest_opts.zo_alt_ztest,
			    ztest_opts.zo_alt_libpath, B_TRUE, &status);
		}

		if (killed)
			kills++;
		iters++;

		if (ztest_opts.zo_verbose >= 1) {
			hrtime_t now = gethrtime();

			now = MIN(now, zs->zs_proc_stop);
			print_time(zs->zs_proc_stop - now, timebuf);
			nicenum(zs->zs_space, numbuf, sizeof (numbuf));

			(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
			    "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
			    iters,
			    WIFEXITED(status) ? "Complete" : "SIGKILL",
			    (u_longlong_t)zs->zs_enospc_count,
			    100.0 * zs->zs_alloc / zs->zs_space,
			    numbuf,
			    100.0 * (now - zs->zs_proc_start) /
			    (ztest_opts.zo_time * NANOSEC), timebuf);
		}

		if (ztest_opts.zo_verbose >= 2) {
			(void) printf("\nWorkload summary:\n\n");
			(void) printf("%7s %9s   %s\n",
			    "Calls", "Time", "Function");
			(void) printf("%7s %9s   %s\n",
			    "-----", "----", "--------");
			for (int f = 0; f < ZTEST_FUNCS; f++) {
				Dl_info dli;

				zi = &ztest_info[f];
				zc = ZTEST_GET_SHARED_CALLSTATE(f);
				print_time(zc->zc_time, timebuf);
				(void) dladdr((void *)zi->zi_func, &dli);
				(void) printf("%7llu %9s   %s\n",
				    (u_longlong_t)zc->zc_count, timebuf,
				    dli.dli_sname);
			}
			(void) printf("\n");
		}

		/*
		 * It's possible that we killed a child during a rename test,
		 * in which case we'll have a 'ztest_tmp' pool lying around
		 * instead of 'ztest'.  Do a blind rename in case this happened.
		 */
		kernel_init(FREAD);
		if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
			spa_close(spa, FTAG);
		} else {
			char tmpname[ZFS_MAX_DATASET_NAME_LEN];
			kernel_fini();
			kernel_init(FREAD | FWRITE);
			(void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
			    ztest_opts.zo_pool);
			(void) spa_rename(tmpname, ztest_opts.zo_pool);
		}
		kernel_fini();

		ztest_run_zdb(ztest_opts.zo_pool);
	}

	if (ztest_opts.zo_verbose >= 1) {
		if (hasalt) {
			(void) printf("%d runs of older ztest: %s\n", older,
			    ztest_opts.zo_alt_ztest);
			(void) printf("%d runs of newer ztest: %s\n", newer,
			    cmd);
		}
		(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
		    kills, iters - kills, (100.0 * kills) / MAX(1, iters));
	}

	umem_free(cmd, MAXNAMELEN);

	return (0);
}