1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2019 by Delphix. All rights reserved. 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 26 * Copyright 2013 Saso Kiselkov. All rights reserved. 27 * Copyright (c) 2014 Integros [integros.com] 28 * Copyright (c) 2017 Datto Inc. 29 * Copyright 2019 Joyent, Inc. 30 * Copyright (c) 2017, Intel Corporation. 31 */ 32 33 #include <sys/zfs_context.h> 34 #include <sys/spa_impl.h> 35 #include <sys/spa_boot.h> 36 #include <sys/zio.h> 37 #include <sys/zio_checksum.h> 38 #include <sys/zio_compress.h> 39 #include <sys/dmu.h> 40 #include <sys/dmu_tx.h> 41 #include <sys/zap.h> 42 #include <sys/zil.h> 43 #include <sys/vdev_impl.h> 44 #include <sys/vdev_initialize.h> 45 #include <sys/vdev_trim.h> 46 #include <sys/metaslab.h> 47 #include <sys/uberblock_impl.h> 48 #include <sys/txg.h> 49 #include <sys/avl.h> 50 #include <sys/unique.h> 51 #include <sys/dsl_pool.h> 52 #include <sys/dsl_dir.h> 53 #include <sys/dsl_prop.h> 54 #include <sys/dsl_scan.h> 55 #include <sys/fs/zfs.h> 56 #include <sys/metaslab_impl.h> 57 #include <sys/arc.h> 58 #include <sys/ddt.h> 59 #include "zfs_prop.h" 60 #include <sys/btree.h> 61 #include <sys/zfeature.h> 62 63 /* 64 * SPA locking 65 * 66 * There are three basic locks for managing spa_t structures: 67 * 68 * spa_namespace_lock (global mutex) 69 * 70 * This lock must be acquired to do any of the following: 71 * 72 * - Lookup a spa_t by name 73 * - Add or remove a spa_t from the namespace 74 * - Increase spa_refcount from non-zero 75 * - Check if spa_refcount is zero 76 * - Rename a spa_t 77 * - add/remove/attach/detach devices 78 * - Held for the duration of create/destroy/import/export 79 * 80 * It does not need to handle recursion. A create or destroy may 81 * reference objects (files or zvols) in other pools, but by 82 * definition they must have an existing reference, and will never need 83 * to lookup a spa_t by name. 84 * 85 * spa_refcount (per-spa zfs_refcount_t protected by mutex) 86 * 87 * This reference count keep track of any active users of the spa_t. The 88 * spa_t cannot be destroyed or freed while this is non-zero. Internally, 89 * the refcount is never really 'zero' - opening a pool implicitly keeps 90 * some references in the DMU. Internally we check against spa_minref, but 91 * present the image of a zero/non-zero value to consumers. 92 * 93 * spa_config_lock[] (per-spa array of rwlocks) 94 * 95 * This protects the spa_t from config changes, and must be held in 96 * the following circumstances: 97 * 98 * - RW_READER to perform I/O to the spa 99 * - RW_WRITER to change the vdev config 100 * 101 * The locking order is fairly straightforward: 102 * 103 * spa_namespace_lock -> spa_refcount 104 * 105 * The namespace lock must be acquired to increase the refcount from 0 106 * or to check if it is zero. 107 * 108 * spa_refcount -> spa_config_lock[] 109 * 110 * There must be at least one valid reference on the spa_t to acquire 111 * the config lock. 112 * 113 * spa_namespace_lock -> spa_config_lock[] 114 * 115 * The namespace lock must always be taken before the config lock. 116 * 117 * 118 * The spa_namespace_lock can be acquired directly and is globally visible. 119 * 120 * The namespace is manipulated using the following functions, all of which 121 * require the spa_namespace_lock to be held. 122 * 123 * spa_lookup() Lookup a spa_t by name. 124 * 125 * spa_add() Create a new spa_t in the namespace. 126 * 127 * spa_remove() Remove a spa_t from the namespace. This also 128 * frees up any memory associated with the spa_t. 129 * 130 * spa_next() Returns the next spa_t in the system, or the 131 * first if NULL is passed. 132 * 133 * spa_evict_all() Shutdown and remove all spa_t structures in 134 * the system. 135 * 136 * spa_guid_exists() Determine whether a pool/device guid exists. 137 * 138 * The spa_refcount is manipulated using the following functions: 139 * 140 * spa_open_ref() Adds a reference to the given spa_t. Must be 141 * called with spa_namespace_lock held if the 142 * refcount is currently zero. 143 * 144 * spa_close() Remove a reference from the spa_t. This will 145 * not free the spa_t or remove it from the 146 * namespace. No locking is required. 147 * 148 * spa_refcount_zero() Returns true if the refcount is currently 149 * zero. Must be called with spa_namespace_lock 150 * held. 151 * 152 * The spa_config_lock[] is an array of rwlocks, ordered as follows: 153 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. 154 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}(). 155 * 156 * To read the configuration, it suffices to hold one of these locks as reader. 157 * To modify the configuration, you must hold all locks as writer. To modify 158 * vdev state without altering the vdev tree's topology (e.g. online/offline), 159 * you must hold SCL_STATE and SCL_ZIO as writer. 160 * 161 * We use these distinct config locks to avoid recursive lock entry. 162 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces 163 * block allocations (SCL_ALLOC), which may require reading space maps 164 * from disk (dmu_read() -> zio_read() -> SCL_ZIO). 165 * 166 * The spa config locks cannot be normal rwlocks because we need the 167 * ability to hand off ownership. For example, SCL_ZIO is acquired 168 * by the issuing thread and later released by an interrupt thread. 169 * They do, however, obey the usual write-wanted semantics to prevent 170 * writer (i.e. system administrator) starvation. 171 * 172 * The lock acquisition rules are as follows: 173 * 174 * SCL_CONFIG 175 * Protects changes to the vdev tree topology, such as vdev 176 * add/remove/attach/detach. Protects the dirty config list 177 * (spa_config_dirty_list) and the set of spares and l2arc devices. 178 * 179 * SCL_STATE 180 * Protects changes to pool state and vdev state, such as vdev 181 * online/offline/fault/degrade/clear. Protects the dirty state list 182 * (spa_state_dirty_list) and global pool state (spa_state). 183 * 184 * SCL_ALLOC 185 * Protects changes to metaslab groups and classes. 186 * Held as reader by metaslab_alloc() and metaslab_claim(). 187 * 188 * SCL_ZIO 189 * Held by bp-level zios (those which have no io_vd upon entry) 190 * to prevent changes to the vdev tree. The bp-level zio implicitly 191 * protects all of its vdev child zios, which do not hold SCL_ZIO. 192 * 193 * SCL_FREE 194 * Protects changes to metaslab groups and classes. 195 * Held as reader by metaslab_free(). SCL_FREE is distinct from 196 * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free 197 * blocks in zio_done() while another i/o that holds either 198 * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete. 199 * 200 * SCL_VDEV 201 * Held as reader to prevent changes to the vdev tree during trivial 202 * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the 203 * other locks, and lower than all of them, to ensure that it's safe 204 * to acquire regardless of caller context. 205 * 206 * In addition, the following rules apply: 207 * 208 * (a) spa_props_lock protects pool properties, spa_config and spa_config_list. 209 * The lock ordering is SCL_CONFIG > spa_props_lock. 210 * 211 * (b) I/O operations on leaf vdevs. For any zio operation that takes 212 * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(), 213 * or zio_write_phys() -- the caller must ensure that the config cannot 214 * cannot change in the interim, and that the vdev cannot be reopened. 215 * SCL_STATE as reader suffices for both. 216 * 217 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit(). 218 * 219 * spa_vdev_enter() Acquire the namespace lock and the config lock 220 * for writing. 221 * 222 * spa_vdev_exit() Release the config lock, wait for all I/O 223 * to complete, sync the updated configs to the 224 * cache, and release the namespace lock. 225 * 226 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit(). 227 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual 228 * locking is, always, based on spa_namespace_lock and spa_config_lock[]. 229 */ 230 231 static avl_tree_t spa_namespace_avl; 232 kmutex_t spa_namespace_lock; 233 static kcondvar_t spa_namespace_cv; 234 static int spa_active_count; 235 int spa_max_replication_override = SPA_DVAS_PER_BP; 236 237 static kmutex_t spa_spare_lock; 238 static avl_tree_t spa_spare_avl; 239 static kmutex_t spa_l2cache_lock; 240 static avl_tree_t spa_l2cache_avl; 241 242 kmem_cache_t *spa_buffer_pool; 243 int spa_mode_global; 244 245 #ifdef ZFS_DEBUG 246 /* 247 * Everything except dprintf, spa, and indirect_remap is on by default 248 * in debug builds. 249 */ 250 int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_INDIRECT_REMAP); 251 #else 252 int zfs_flags = 0; 253 #endif 254 255 /* 256 * zfs_recover can be set to nonzero to attempt to recover from 257 * otherwise-fatal errors, typically caused by on-disk corruption. When 258 * set, calls to zfs_panic_recover() will turn into warning messages. 259 * This should only be used as a last resort, as it typically results 260 * in leaked space, or worse. 261 */ 262 boolean_t zfs_recover = B_FALSE; 263 264 /* 265 * If destroy encounters an EIO while reading metadata (e.g. indirect 266 * blocks), space referenced by the missing metadata can not be freed. 267 * Normally this causes the background destroy to become "stalled", as 268 * it is unable to make forward progress. While in this stalled state, 269 * all remaining space to free from the error-encountering filesystem is 270 * "temporarily leaked". Set this flag to cause it to ignore the EIO, 271 * permanently leak the space from indirect blocks that can not be read, 272 * and continue to free everything else that it can. 273 * 274 * The default, "stalling" behavior is useful if the storage partially 275 * fails (i.e. some but not all i/os fail), and then later recovers. In 276 * this case, we will be able to continue pool operations while it is 277 * partially failed, and when it recovers, we can continue to free the 278 * space, with no leaks. However, note that this case is actually 279 * fairly rare. 280 * 281 * Typically pools either (a) fail completely (but perhaps temporarily, 282 * e.g. a top-level vdev going offline), or (b) have localized, 283 * permanent errors (e.g. disk returns the wrong data due to bit flip or 284 * firmware bug). In case (a), this setting does not matter because the 285 * pool will be suspended and the sync thread will not be able to make 286 * forward progress regardless. In case (b), because the error is 287 * permanent, the best we can do is leak the minimum amount of space, 288 * which is what setting this flag will do. Therefore, it is reasonable 289 * for this flag to normally be set, but we chose the more conservative 290 * approach of not setting it, so that there is no possibility of 291 * leaking space in the "partial temporary" failure case. 292 */ 293 boolean_t zfs_free_leak_on_eio = B_FALSE; 294 295 /* 296 * Expiration time in milliseconds. This value has two meanings. First it is 297 * used to determine when the spa_deadman() logic should fire. By default the 298 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds. 299 * Secondly, the value determines if an I/O is considered "hung". Any I/O that 300 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting 301 * in a system panic. 302 */ 303 uint64_t zfs_deadman_synctime_ms = 1000000ULL; 304 305 /* 306 * Check time in milliseconds. This defines the frequency at which we check 307 * for hung I/O. 308 */ 309 uint64_t zfs_deadman_checktime_ms = 5000ULL; 310 311 /* 312 * Override the zfs deadman behavior via /etc/system. By default the 313 * deadman is enabled except on VMware and sparc deployments. 314 */ 315 int zfs_deadman_enabled = -1; 316 317 /* 318 * The worst case is single-sector max-parity RAID-Z blocks, in which 319 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1) 320 * times the size; so just assume that. Add to this the fact that 321 * we can have up to 3 DVAs per bp, and one more factor of 2 because 322 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together, 323 * the worst case is: 324 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 325 */ 326 int spa_asize_inflation = 24; 327 328 /* 329 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in 330 * the pool to be consumed. This ensures that we don't run the pool 331 * completely out of space, due to unaccounted changes (e.g. to the MOS). 332 * It also limits the worst-case time to allocate space. If we have 333 * less than this amount of free space, most ZPL operations (e.g. write, 334 * create) will return ENOSPC. 335 * 336 * Certain operations (e.g. file removal, most administrative actions) can 337 * use half the slop space. They will only return ENOSPC if less than half 338 * the slop space is free. Typically, once the pool has less than the slop 339 * space free, the user will use these operations to free up space in the pool. 340 * These are the operations that call dsl_pool_adjustedsize() with the netfree 341 * argument set to TRUE. 342 * 343 * Operations that are almost guaranteed to free up space in the absence of 344 * a pool checkpoint can use up to three quarters of the slop space 345 * (e.g zfs destroy). 346 * 347 * A very restricted set of operations are always permitted, regardless of 348 * the amount of free space. These are the operations that call 349 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net 350 * increase in the amount of space used, it is possible to run the pool 351 * completely out of space, causing it to be permanently read-only. 352 * 353 * Note that on very small pools, the slop space will be larger than 354 * 3.2%, in an effort to have it be at least spa_min_slop (128MB), 355 * but we never allow it to be more than half the pool size. 356 * 357 * See also the comments in zfs_space_check_t. 358 */ 359 int spa_slop_shift = 5; 360 uint64_t spa_min_slop = 128 * 1024 * 1024; 361 362 int spa_allocators = 4; 363 364 /*PRINTFLIKE2*/ 365 void 366 spa_load_failed(spa_t *spa, const char *fmt, ...) 367 { 368 va_list adx; 369 char buf[256]; 370 371 va_start(adx, fmt); 372 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 373 va_end(adx); 374 375 zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name, 376 spa->spa_trust_config ? "trusted" : "untrusted", buf); 377 } 378 379 /*PRINTFLIKE2*/ 380 void 381 spa_load_note(spa_t *spa, const char *fmt, ...) 382 { 383 va_list adx; 384 char buf[256]; 385 386 va_start(adx, fmt); 387 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 388 va_end(adx); 389 390 zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name, 391 spa->spa_trust_config ? "trusted" : "untrusted", buf); 392 } 393 394 /* 395 * By default dedup and user data indirects land in the special class 396 */ 397 int zfs_ddt_data_is_special = B_TRUE; 398 int zfs_user_indirect_is_special = B_TRUE; 399 400 /* 401 * The percentage of special class final space reserved for metadata only. 402 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only 403 * let metadata into the class. 404 */ 405 int zfs_special_class_metadata_reserve_pct = 25; 406 407 /* 408 * ========================================================================== 409 * SPA config locking 410 * ========================================================================== 411 */ 412 static void 413 spa_config_lock_init(spa_t *spa) 414 { 415 for (int i = 0; i < SCL_LOCKS; i++) { 416 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 417 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); 418 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); 419 zfs_refcount_create_untracked(&scl->scl_count); 420 scl->scl_writer = NULL; 421 scl->scl_write_wanted = 0; 422 } 423 } 424 425 static void 426 spa_config_lock_destroy(spa_t *spa) 427 { 428 for (int i = 0; i < SCL_LOCKS; i++) { 429 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 430 mutex_destroy(&scl->scl_lock); 431 cv_destroy(&scl->scl_cv); 432 zfs_refcount_destroy(&scl->scl_count); 433 ASSERT(scl->scl_writer == NULL); 434 ASSERT(scl->scl_write_wanted == 0); 435 } 436 } 437 438 int 439 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) 440 { 441 for (int i = 0; i < SCL_LOCKS; i++) { 442 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 443 if (!(locks & (1 << i))) 444 continue; 445 mutex_enter(&scl->scl_lock); 446 if (rw == RW_READER) { 447 if (scl->scl_writer || scl->scl_write_wanted) { 448 mutex_exit(&scl->scl_lock); 449 spa_config_exit(spa, locks & ((1 << i) - 1), 450 tag); 451 return (0); 452 } 453 } else { 454 ASSERT(scl->scl_writer != curthread); 455 if (!zfs_refcount_is_zero(&scl->scl_count)) { 456 mutex_exit(&scl->scl_lock); 457 spa_config_exit(spa, locks & ((1 << i) - 1), 458 tag); 459 return (0); 460 } 461 scl->scl_writer = curthread; 462 } 463 (void) zfs_refcount_add(&scl->scl_count, tag); 464 mutex_exit(&scl->scl_lock); 465 } 466 return (1); 467 } 468 469 void 470 spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw) 471 { 472 int wlocks_held = 0; 473 474 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); 475 476 for (int i = 0; i < SCL_LOCKS; i++) { 477 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 478 if (scl->scl_writer == curthread) 479 wlocks_held |= (1 << i); 480 if (!(locks & (1 << i))) 481 continue; 482 mutex_enter(&scl->scl_lock); 483 if (rw == RW_READER) { 484 while (scl->scl_writer || scl->scl_write_wanted) { 485 cv_wait(&scl->scl_cv, &scl->scl_lock); 486 } 487 } else { 488 ASSERT(scl->scl_writer != curthread); 489 while (!zfs_refcount_is_zero(&scl->scl_count)) { 490 scl->scl_write_wanted++; 491 cv_wait(&scl->scl_cv, &scl->scl_lock); 492 scl->scl_write_wanted--; 493 } 494 scl->scl_writer = curthread; 495 } 496 (void) zfs_refcount_add(&scl->scl_count, tag); 497 mutex_exit(&scl->scl_lock); 498 } 499 ASSERT3U(wlocks_held, <=, locks); 500 } 501 502 void 503 spa_config_exit(spa_t *spa, int locks, void *tag) 504 { 505 for (int i = SCL_LOCKS - 1; i >= 0; i--) { 506 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 507 if (!(locks & (1 << i))) 508 continue; 509 mutex_enter(&scl->scl_lock); 510 ASSERT(!zfs_refcount_is_zero(&scl->scl_count)); 511 if (zfs_refcount_remove(&scl->scl_count, tag) == 0) { 512 ASSERT(scl->scl_writer == NULL || 513 scl->scl_writer == curthread); 514 scl->scl_writer = NULL; /* OK in either case */ 515 cv_broadcast(&scl->scl_cv); 516 } 517 mutex_exit(&scl->scl_lock); 518 } 519 } 520 521 int 522 spa_config_held(spa_t *spa, int locks, krw_t rw) 523 { 524 int locks_held = 0; 525 526 for (int i = 0; i < SCL_LOCKS; i++) { 527 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 528 if (!(locks & (1 << i))) 529 continue; 530 if ((rw == RW_READER && 531 !zfs_refcount_is_zero(&scl->scl_count)) || 532 (rw == RW_WRITER && scl->scl_writer == curthread)) 533 locks_held |= 1 << i; 534 } 535 536 return (locks_held); 537 } 538 539 /* 540 * ========================================================================== 541 * SPA namespace functions 542 * ========================================================================== 543 */ 544 545 /* 546 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. 547 * Returns NULL if no matching spa_t is found. 548 */ 549 spa_t * 550 spa_lookup(const char *name) 551 { 552 static spa_t search; /* spa_t is large; don't allocate on stack */ 553 spa_t *spa; 554 avl_index_t where; 555 char *cp; 556 557 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 558 559 (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); 560 561 /* 562 * If it's a full dataset name, figure out the pool name and 563 * just use that. 564 */ 565 cp = strpbrk(search.spa_name, "/@#"); 566 if (cp != NULL) 567 *cp = '\0'; 568 569 spa = avl_find(&spa_namespace_avl, &search, &where); 570 571 return (spa); 572 } 573 574 /* 575 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms. 576 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues 577 * looking for potentially hung I/Os. 578 */ 579 void 580 spa_deadman(void *arg) 581 { 582 spa_t *spa = arg; 583 584 /* 585 * Disable the deadman timer if the pool is suspended. 586 */ 587 if (spa_suspended(spa)) { 588 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 589 return; 590 } 591 592 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu", 593 (gethrtime() - spa->spa_sync_starttime) / NANOSEC, 594 ++spa->spa_deadman_calls); 595 if (zfs_deadman_enabled) 596 vdev_deadman(spa->spa_root_vdev); 597 } 598 599 int 600 spa_log_sm_sort_by_txg(const void *va, const void *vb) 601 { 602 const spa_log_sm_t *a = va; 603 const spa_log_sm_t *b = vb; 604 605 return (TREE_CMP(a->sls_txg, b->sls_txg)); 606 } 607 608 /* 609 * Create an uninitialized spa_t with the given name. Requires 610 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already 611 * exist by calling spa_lookup() first. 612 */ 613 spa_t * 614 spa_add(const char *name, nvlist_t *config, const char *altroot) 615 { 616 spa_t *spa; 617 spa_config_dirent_t *dp; 618 cyc_handler_t hdlr; 619 cyc_time_t when; 620 621 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 622 623 spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); 624 625 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); 626 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); 627 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); 628 mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL); 629 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); 630 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL); 631 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); 632 mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL); 633 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); 634 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); 635 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL); 636 mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL); 637 mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL); 638 639 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); 640 cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL); 641 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL); 642 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); 643 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); 644 645 for (int t = 0; t < TXG_SIZE; t++) 646 bplist_create(&spa->spa_free_bplist[t]); 647 648 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); 649 spa->spa_state = POOL_STATE_UNINITIALIZED; 650 spa->spa_freeze_txg = UINT64_MAX; 651 spa->spa_final_txg = UINT64_MAX; 652 spa->spa_load_max_txg = UINT64_MAX; 653 spa->spa_proc = &p0; 654 spa->spa_proc_state = SPA_PROC_NONE; 655 spa->spa_trust_config = B_TRUE; 656 657 hdlr.cyh_func = spa_deadman; 658 hdlr.cyh_arg = spa; 659 hdlr.cyh_level = CY_LOW_LEVEL; 660 661 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms); 662 663 /* 664 * This determines how often we need to check for hung I/Os after 665 * the cyclic has already fired. Since checking for hung I/Os is 666 * an expensive operation we don't want to check too frequently. 667 * Instead wait for 5 seconds before checking again. 668 */ 669 when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms); 670 when.cyt_when = CY_INFINITY; 671 mutex_enter(&cpu_lock); 672 spa->spa_deadman_cycid = cyclic_add(&hdlr, &when); 673 mutex_exit(&cpu_lock); 674 675 zfs_refcount_create(&spa->spa_refcount); 676 spa_config_lock_init(spa); 677 678 avl_add(&spa_namespace_avl, spa); 679 680 /* 681 * Set the alternate root, if there is one. 682 */ 683 if (altroot) { 684 spa->spa_root = spa_strdup(altroot); 685 spa_active_count++; 686 } 687 688 spa->spa_alloc_count = spa_allocators; 689 spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count * 690 sizeof (kmutex_t), KM_SLEEP); 691 spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count * 692 sizeof (avl_tree_t), KM_SLEEP); 693 for (int i = 0; i < spa->spa_alloc_count; i++) { 694 mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL); 695 avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare, 696 sizeof (zio_t), offsetof(zio_t, io_alloc_node)); 697 } 698 avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed, 699 sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node)); 700 avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg, 701 sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node)); 702 list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t), 703 offsetof(log_summary_entry_t, lse_node)); 704 705 /* 706 * Every pool starts with the default cachefile 707 */ 708 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), 709 offsetof(spa_config_dirent_t, scd_link)); 710 711 dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); 712 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path); 713 list_insert_head(&spa->spa_config_list, dp); 714 715 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, 716 KM_SLEEP) == 0); 717 718 if (config != NULL) { 719 nvlist_t *features; 720 721 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ, 722 &features) == 0) { 723 VERIFY(nvlist_dup(features, &spa->spa_label_features, 724 0) == 0); 725 } 726 727 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); 728 } 729 730 if (spa->spa_label_features == NULL) { 731 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME, 732 KM_SLEEP) == 0); 733 } 734 735 spa->spa_iokstat = kstat_create("zfs", 0, name, 736 "disk", KSTAT_TYPE_IO, 1, 0); 737 if (spa->spa_iokstat) { 738 spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock; 739 kstat_install(spa->spa_iokstat); 740 } 741 742 spa->spa_min_ashift = INT_MAX; 743 spa->spa_max_ashift = 0; 744 745 /* 746 * As a pool is being created, treat all features as disabled by 747 * setting SPA_FEATURE_DISABLED for all entries in the feature 748 * refcount cache. 749 */ 750 for (int i = 0; i < SPA_FEATURES; i++) { 751 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED; 752 } 753 754 list_create(&spa->spa_leaf_list, sizeof (vdev_t), 755 offsetof(vdev_t, vdev_leaf_node)); 756 757 return (spa); 758 } 759 760 /* 761 * Removes a spa_t from the namespace, freeing up any memory used. Requires 762 * spa_namespace_lock. This is called only after the spa_t has been closed and 763 * deactivated. 764 */ 765 void 766 spa_remove(spa_t *spa) 767 { 768 spa_config_dirent_t *dp; 769 770 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 771 ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED); 772 ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0); 773 774 nvlist_free(spa->spa_config_splitting); 775 776 avl_remove(&spa_namespace_avl, spa); 777 cv_broadcast(&spa_namespace_cv); 778 779 if (spa->spa_root) { 780 spa_strfree(spa->spa_root); 781 spa_active_count--; 782 } 783 784 while ((dp = list_head(&spa->spa_config_list)) != NULL) { 785 list_remove(&spa->spa_config_list, dp); 786 if (dp->scd_path != NULL) 787 spa_strfree(dp->scd_path); 788 kmem_free(dp, sizeof (spa_config_dirent_t)); 789 } 790 791 for (int i = 0; i < spa->spa_alloc_count; i++) { 792 avl_destroy(&spa->spa_alloc_trees[i]); 793 mutex_destroy(&spa->spa_alloc_locks[i]); 794 } 795 kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count * 796 sizeof (kmutex_t)); 797 kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count * 798 sizeof (avl_tree_t)); 799 800 avl_destroy(&spa->spa_metaslabs_by_flushed); 801 avl_destroy(&spa->spa_sm_logs_by_txg); 802 list_destroy(&spa->spa_log_summary); 803 list_destroy(&spa->spa_config_list); 804 list_destroy(&spa->spa_leaf_list); 805 806 nvlist_free(spa->spa_label_features); 807 nvlist_free(spa->spa_load_info); 808 spa_config_set(spa, NULL); 809 810 mutex_enter(&cpu_lock); 811 if (spa->spa_deadman_cycid != CYCLIC_NONE) 812 cyclic_remove(spa->spa_deadman_cycid); 813 mutex_exit(&cpu_lock); 814 spa->spa_deadman_cycid = CYCLIC_NONE; 815 816 zfs_refcount_destroy(&spa->spa_refcount); 817 818 spa_config_lock_destroy(spa); 819 820 kstat_delete(spa->spa_iokstat); 821 spa->spa_iokstat = NULL; 822 823 for (int t = 0; t < TXG_SIZE; t++) 824 bplist_destroy(&spa->spa_free_bplist[t]); 825 826 zio_checksum_templates_free(spa); 827 828 cv_destroy(&spa->spa_async_cv); 829 cv_destroy(&spa->spa_evicting_os_cv); 830 cv_destroy(&spa->spa_proc_cv); 831 cv_destroy(&spa->spa_scrub_io_cv); 832 cv_destroy(&spa->spa_suspend_cv); 833 834 mutex_destroy(&spa->spa_flushed_ms_lock); 835 mutex_destroy(&spa->spa_async_lock); 836 mutex_destroy(&spa->spa_errlist_lock); 837 mutex_destroy(&spa->spa_errlog_lock); 838 mutex_destroy(&spa->spa_evicting_os_lock); 839 mutex_destroy(&spa->spa_history_lock); 840 mutex_destroy(&spa->spa_proc_lock); 841 mutex_destroy(&spa->spa_props_lock); 842 mutex_destroy(&spa->spa_cksum_tmpls_lock); 843 mutex_destroy(&spa->spa_scrub_lock); 844 mutex_destroy(&spa->spa_suspend_lock); 845 mutex_destroy(&spa->spa_vdev_top_lock); 846 mutex_destroy(&spa->spa_iokstat_lock); 847 848 kmem_free(spa, sizeof (spa_t)); 849 } 850 851 /* 852 * Given a pool, return the next pool in the namespace, or NULL if there is 853 * none. If 'prev' is NULL, return the first pool. 854 */ 855 spa_t * 856 spa_next(spa_t *prev) 857 { 858 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 859 860 if (prev) 861 return (AVL_NEXT(&spa_namespace_avl, prev)); 862 else 863 return (avl_first(&spa_namespace_avl)); 864 } 865 866 /* 867 * ========================================================================== 868 * SPA refcount functions 869 * ========================================================================== 870 */ 871 872 /* 873 * Add a reference to the given spa_t. Must have at least one reference, or 874 * have the namespace lock held. 875 */ 876 void 877 spa_open_ref(spa_t *spa, void *tag) 878 { 879 ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref || 880 MUTEX_HELD(&spa_namespace_lock)); 881 (void) zfs_refcount_add(&spa->spa_refcount, tag); 882 } 883 884 /* 885 * Remove a reference to the given spa_t. Must have at least one reference, or 886 * have the namespace lock held. 887 */ 888 void 889 spa_close(spa_t *spa, void *tag) 890 { 891 ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref || 892 MUTEX_HELD(&spa_namespace_lock)); 893 (void) zfs_refcount_remove(&spa->spa_refcount, tag); 894 } 895 896 /* 897 * Remove a reference to the given spa_t held by a dsl dir that is 898 * being asynchronously released. Async releases occur from a taskq 899 * performing eviction of dsl datasets and dirs. The namespace lock 900 * isn't held and the hold by the object being evicted may contribute to 901 * spa_minref (e.g. dataset or directory released during pool export), 902 * so the asserts in spa_close() do not apply. 903 */ 904 void 905 spa_async_close(spa_t *spa, void *tag) 906 { 907 (void) zfs_refcount_remove(&spa->spa_refcount, tag); 908 } 909 910 /* 911 * Check to see if the spa refcount is zero. Must be called with 912 * spa_namespace_lock held. We really compare against spa_minref, which is the 913 * number of references acquired when opening a pool 914 */ 915 boolean_t 916 spa_refcount_zero(spa_t *spa) 917 { 918 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 919 920 return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref); 921 } 922 923 /* 924 * ========================================================================== 925 * SPA spare and l2cache tracking 926 * ========================================================================== 927 */ 928 929 /* 930 * Hot spares and cache devices are tracked using the same code below, 931 * for 'auxiliary' devices. 932 */ 933 934 typedef struct spa_aux { 935 uint64_t aux_guid; 936 uint64_t aux_pool; 937 avl_node_t aux_avl; 938 int aux_count; 939 } spa_aux_t; 940 941 static inline int 942 spa_aux_compare(const void *a, const void *b) 943 { 944 const spa_aux_t *sa = (const spa_aux_t *)a; 945 const spa_aux_t *sb = (const spa_aux_t *)b; 946 947 return (TREE_CMP(sa->aux_guid, sb->aux_guid)); 948 } 949 950 void 951 spa_aux_add(vdev_t *vd, avl_tree_t *avl) 952 { 953 avl_index_t where; 954 spa_aux_t search; 955 spa_aux_t *aux; 956 957 search.aux_guid = vd->vdev_guid; 958 if ((aux = avl_find(avl, &search, &where)) != NULL) { 959 aux->aux_count++; 960 } else { 961 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); 962 aux->aux_guid = vd->vdev_guid; 963 aux->aux_count = 1; 964 avl_insert(avl, aux, where); 965 } 966 } 967 968 void 969 spa_aux_remove(vdev_t *vd, avl_tree_t *avl) 970 { 971 spa_aux_t search; 972 spa_aux_t *aux; 973 avl_index_t where; 974 975 search.aux_guid = vd->vdev_guid; 976 aux = avl_find(avl, &search, &where); 977 978 ASSERT(aux != NULL); 979 980 if (--aux->aux_count == 0) { 981 avl_remove(avl, aux); 982 kmem_free(aux, sizeof (spa_aux_t)); 983 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { 984 aux->aux_pool = 0ULL; 985 } 986 } 987 988 boolean_t 989 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) 990 { 991 spa_aux_t search, *found; 992 993 search.aux_guid = guid; 994 found = avl_find(avl, &search, NULL); 995 996 if (pool) { 997 if (found) 998 *pool = found->aux_pool; 999 else 1000 *pool = 0ULL; 1001 } 1002 1003 if (refcnt) { 1004 if (found) 1005 *refcnt = found->aux_count; 1006 else 1007 *refcnt = 0; 1008 } 1009 1010 return (found != NULL); 1011 } 1012 1013 void 1014 spa_aux_activate(vdev_t *vd, avl_tree_t *avl) 1015 { 1016 spa_aux_t search, *found; 1017 avl_index_t where; 1018 1019 search.aux_guid = vd->vdev_guid; 1020 found = avl_find(avl, &search, &where); 1021 ASSERT(found != NULL); 1022 ASSERT(found->aux_pool == 0ULL); 1023 1024 found->aux_pool = spa_guid(vd->vdev_spa); 1025 } 1026 1027 /* 1028 * Spares are tracked globally due to the following constraints: 1029 * 1030 * - A spare may be part of multiple pools. 1031 * - A spare may be added to a pool even if it's actively in use within 1032 * another pool. 1033 * - A spare in use in any pool can only be the source of a replacement if 1034 * the target is a spare in the same pool. 1035 * 1036 * We keep track of all spares on the system through the use of a reference 1037 * counted AVL tree. When a vdev is added as a spare, or used as a replacement 1038 * spare, then we bump the reference count in the AVL tree. In addition, we set 1039 * the 'vdev_isspare' member to indicate that the device is a spare (active or 1040 * inactive). When a spare is made active (used to replace a device in the 1041 * pool), we also keep track of which pool its been made a part of. 1042 * 1043 * The 'spa_spare_lock' protects the AVL tree. These functions are normally 1044 * called under the spa_namespace lock as part of vdev reconfiguration. The 1045 * separate spare lock exists for the status query path, which does not need to 1046 * be completely consistent with respect to other vdev configuration changes. 1047 */ 1048 1049 /* 1050 * Poll the spare vdevs to make sure they are not faulty. 1051 * 1052 * The probe operation will raise an ENXIO error and create an FM ereport if the 1053 * probe fails. 1054 */ 1055 void 1056 spa_spare_poll(spa_t *spa) 1057 { 1058 boolean_t async_request = B_FALSE; 1059 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 1060 for (int i = 0; i < spa->spa_spares.sav_count; i++) { 1061 spa_aux_t search, *found; 1062 vdev_t *vd = spa->spa_spares.sav_vdevs[i]; 1063 1064 search.aux_guid = vd->vdev_guid; 1065 1066 mutex_enter(&spa_spare_lock); 1067 found = avl_find(&spa_spare_avl, &search, NULL); 1068 /* This spare is in use by a pool. */ 1069 if (found != NULL && found->aux_pool != 0) { 1070 mutex_exit(&spa_spare_lock); 1071 continue; 1072 } 1073 mutex_exit(&spa_spare_lock); 1074 1075 vd->vdev_probe_wanted = B_TRUE; 1076 async_request = B_TRUE; 1077 } 1078 if (async_request) 1079 spa_async_request(spa, SPA_ASYNC_PROBE); 1080 1081 spa_config_exit(spa, SCL_STATE, FTAG); 1082 } 1083 1084 static int 1085 spa_spare_compare(const void *a, const void *b) 1086 { 1087 return (spa_aux_compare(a, b)); 1088 } 1089 1090 void 1091 spa_spare_add(vdev_t *vd) 1092 { 1093 mutex_enter(&spa_spare_lock); 1094 ASSERT(!vd->vdev_isspare); 1095 spa_aux_add(vd, &spa_spare_avl); 1096 vd->vdev_isspare = B_TRUE; 1097 mutex_exit(&spa_spare_lock); 1098 } 1099 1100 void 1101 spa_spare_remove(vdev_t *vd) 1102 { 1103 mutex_enter(&spa_spare_lock); 1104 ASSERT(vd->vdev_isspare); 1105 spa_aux_remove(vd, &spa_spare_avl); 1106 vd->vdev_isspare = B_FALSE; 1107 mutex_exit(&spa_spare_lock); 1108 } 1109 1110 boolean_t 1111 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) 1112 { 1113 boolean_t found; 1114 1115 mutex_enter(&spa_spare_lock); 1116 found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); 1117 mutex_exit(&spa_spare_lock); 1118 1119 return (found); 1120 } 1121 1122 void 1123 spa_spare_activate(vdev_t *vd) 1124 { 1125 mutex_enter(&spa_spare_lock); 1126 ASSERT(vd->vdev_isspare); 1127 spa_aux_activate(vd, &spa_spare_avl); 1128 mutex_exit(&spa_spare_lock); 1129 } 1130 1131 /* 1132 * Level 2 ARC devices are tracked globally for the same reasons as spares. 1133 * Cache devices currently only support one pool per cache device, and so 1134 * for these devices the aux reference count is currently unused beyond 1. 1135 */ 1136 1137 static int 1138 spa_l2cache_compare(const void *a, const void *b) 1139 { 1140 return (spa_aux_compare(a, b)); 1141 } 1142 1143 void 1144 spa_l2cache_add(vdev_t *vd) 1145 { 1146 mutex_enter(&spa_l2cache_lock); 1147 ASSERT(!vd->vdev_isl2cache); 1148 spa_aux_add(vd, &spa_l2cache_avl); 1149 vd->vdev_isl2cache = B_TRUE; 1150 mutex_exit(&spa_l2cache_lock); 1151 } 1152 1153 void 1154 spa_l2cache_remove(vdev_t *vd) 1155 { 1156 mutex_enter(&spa_l2cache_lock); 1157 ASSERT(vd->vdev_isl2cache); 1158 spa_aux_remove(vd, &spa_l2cache_avl); 1159 vd->vdev_isl2cache = B_FALSE; 1160 mutex_exit(&spa_l2cache_lock); 1161 } 1162 1163 boolean_t 1164 spa_l2cache_exists(uint64_t guid, uint64_t *pool) 1165 { 1166 boolean_t found; 1167 1168 mutex_enter(&spa_l2cache_lock); 1169 found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); 1170 mutex_exit(&spa_l2cache_lock); 1171 1172 return (found); 1173 } 1174 1175 void 1176 spa_l2cache_activate(vdev_t *vd) 1177 { 1178 mutex_enter(&spa_l2cache_lock); 1179 ASSERT(vd->vdev_isl2cache); 1180 spa_aux_activate(vd, &spa_l2cache_avl); 1181 mutex_exit(&spa_l2cache_lock); 1182 } 1183 1184 /* 1185 * ========================================================================== 1186 * SPA vdev locking 1187 * ========================================================================== 1188 */ 1189 1190 /* 1191 * Lock the given spa_t for the purpose of adding or removing a vdev. 1192 * Grabs the global spa_namespace_lock plus the spa config lock for writing. 1193 * It returns the next transaction group for the spa_t. 1194 */ 1195 uint64_t 1196 spa_vdev_enter(spa_t *spa) 1197 { 1198 mutex_enter(&spa->spa_vdev_top_lock); 1199 mutex_enter(&spa_namespace_lock); 1200 1201 vdev_autotrim_stop_all(spa); 1202 1203 return (spa_vdev_config_enter(spa)); 1204 } 1205 1206 /* 1207 * Internal implementation for spa_vdev_enter(). Used when a vdev 1208 * operation requires multiple syncs (i.e. removing a device) while 1209 * keeping the spa_namespace_lock held. 1210 */ 1211 uint64_t 1212 spa_vdev_config_enter(spa_t *spa) 1213 { 1214 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1215 1216 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1217 1218 return (spa_last_synced_txg(spa) + 1); 1219 } 1220 1221 /* 1222 * Used in combination with spa_vdev_config_enter() to allow the syncing 1223 * of multiple transactions without releasing the spa_namespace_lock. 1224 */ 1225 void 1226 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag) 1227 { 1228 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1229 1230 int config_changed = B_FALSE; 1231 1232 ASSERT(txg > spa_last_synced_txg(spa)); 1233 1234 spa->spa_pending_vdev = NULL; 1235 1236 /* 1237 * Reassess the DTLs. 1238 */ 1239 vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE); 1240 1241 if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { 1242 config_changed = B_TRUE; 1243 spa->spa_config_generation++; 1244 } 1245 1246 /* 1247 * Verify the metaslab classes. 1248 */ 1249 ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0); 1250 ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0); 1251 ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0); 1252 ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0); 1253 1254 spa_config_exit(spa, SCL_ALL, spa); 1255 1256 /* 1257 * Panic the system if the specified tag requires it. This 1258 * is useful for ensuring that configurations are updated 1259 * transactionally. 1260 */ 1261 if (zio_injection_enabled) 1262 zio_handle_panic_injection(spa, tag, 0); 1263 1264 /* 1265 * Note: this txg_wait_synced() is important because it ensures 1266 * that there won't be more than one config change per txg. 1267 * This allows us to use the txg as the generation number. 1268 */ 1269 if (error == 0) 1270 txg_wait_synced(spa->spa_dsl_pool, txg); 1271 1272 if (vd != NULL) { 1273 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL); 1274 if (vd->vdev_ops->vdev_op_leaf) { 1275 mutex_enter(&vd->vdev_initialize_lock); 1276 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, 1277 NULL); 1278 mutex_exit(&vd->vdev_initialize_lock); 1279 1280 mutex_enter(&vd->vdev_trim_lock); 1281 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL); 1282 mutex_exit(&vd->vdev_trim_lock); 1283 } 1284 1285 /* 1286 * The vdev may be both a leaf and top-level device. 1287 */ 1288 vdev_autotrim_stop_wait(vd); 1289 1290 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1291 vdev_free(vd); 1292 spa_config_exit(spa, SCL_ALL, spa); 1293 } 1294 1295 /* 1296 * If the config changed, update the config cache. 1297 */ 1298 if (config_changed) 1299 spa_write_cachefile(spa, B_FALSE, B_TRUE); 1300 } 1301 1302 /* 1303 * Unlock the spa_t after adding or removing a vdev. Besides undoing the 1304 * locking of spa_vdev_enter(), we also want make sure the transactions have 1305 * synced to disk, and then update the global configuration cache with the new 1306 * information. 1307 */ 1308 int 1309 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) 1310 { 1311 vdev_autotrim_restart(spa); 1312 1313 spa_vdev_config_exit(spa, vd, txg, error, FTAG); 1314 mutex_exit(&spa_namespace_lock); 1315 mutex_exit(&spa->spa_vdev_top_lock); 1316 1317 return (error); 1318 } 1319 1320 /* 1321 * Lock the given spa_t for the purpose of changing vdev state. 1322 */ 1323 void 1324 spa_vdev_state_enter(spa_t *spa, int oplocks) 1325 { 1326 int locks = SCL_STATE_ALL | oplocks; 1327 1328 /* 1329 * Root pools may need to read of the underlying devfs filesystem 1330 * when opening up a vdev. Unfortunately if we're holding the 1331 * SCL_ZIO lock it will result in a deadlock when we try to issue 1332 * the read from the root filesystem. Instead we "prefetch" 1333 * the associated vnodes that we need prior to opening the 1334 * underlying devices and cache them so that we can prevent 1335 * any I/O when we are doing the actual open. 1336 */ 1337 if (spa_is_root(spa)) { 1338 int low = locks & ~(SCL_ZIO - 1); 1339 int high = locks & ~low; 1340 1341 spa_config_enter(spa, high, spa, RW_WRITER); 1342 vdev_hold(spa->spa_root_vdev); 1343 spa_config_enter(spa, low, spa, RW_WRITER); 1344 } else { 1345 spa_config_enter(spa, locks, spa, RW_WRITER); 1346 } 1347 spa->spa_vdev_locks = locks; 1348 } 1349 1350 int 1351 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) 1352 { 1353 boolean_t config_changed = B_FALSE; 1354 1355 if (vd != NULL || error == 0) 1356 vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev, 1357 0, 0, B_FALSE); 1358 1359 if (vd != NULL) { 1360 vdev_state_dirty(vd->vdev_top); 1361 config_changed = B_TRUE; 1362 spa->spa_config_generation++; 1363 } 1364 1365 if (spa_is_root(spa)) 1366 vdev_rele(spa->spa_root_vdev); 1367 1368 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); 1369 spa_config_exit(spa, spa->spa_vdev_locks, spa); 1370 1371 /* 1372 * If anything changed, wait for it to sync. This ensures that, 1373 * from the system administrator's perspective, zpool(1M) commands 1374 * are synchronous. This is important for things like zpool offline: 1375 * when the command completes, you expect no further I/O from ZFS. 1376 */ 1377 if (vd != NULL) 1378 txg_wait_synced(spa->spa_dsl_pool, 0); 1379 1380 /* 1381 * If the config changed, update the config cache. 1382 */ 1383 if (config_changed) { 1384 mutex_enter(&spa_namespace_lock); 1385 spa_write_cachefile(spa, B_FALSE, B_TRUE); 1386 mutex_exit(&spa_namespace_lock); 1387 } 1388 1389 return (error); 1390 } 1391 1392 /* 1393 * ========================================================================== 1394 * Miscellaneous functions 1395 * ========================================================================== 1396 */ 1397 1398 void 1399 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) 1400 { 1401 if (!nvlist_exists(spa->spa_label_features, feature)) { 1402 fnvlist_add_boolean(spa->spa_label_features, feature); 1403 /* 1404 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't 1405 * dirty the vdev config because lock SCL_CONFIG is not held. 1406 * Thankfully, in this case we don't need to dirty the config 1407 * because it will be written out anyway when we finish 1408 * creating the pool. 1409 */ 1410 if (tx->tx_txg != TXG_INITIAL) 1411 vdev_config_dirty(spa->spa_root_vdev); 1412 } 1413 } 1414 1415 void 1416 spa_deactivate_mos_feature(spa_t *spa, const char *feature) 1417 { 1418 if (nvlist_remove_all(spa->spa_label_features, feature) == 0) 1419 vdev_config_dirty(spa->spa_root_vdev); 1420 } 1421 1422 /* 1423 * Return the spa_t associated with given pool_guid, if it exists. If 1424 * device_guid is non-zero, determine whether the pool exists *and* contains 1425 * a device with the specified device_guid. 1426 */ 1427 spa_t * 1428 spa_by_guid(uint64_t pool_guid, uint64_t device_guid) 1429 { 1430 spa_t *spa; 1431 avl_tree_t *t = &spa_namespace_avl; 1432 1433 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1434 1435 for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1436 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1437 continue; 1438 if (spa->spa_root_vdev == NULL) 1439 continue; 1440 if (spa_guid(spa) == pool_guid) { 1441 if (device_guid == 0) 1442 break; 1443 1444 if (vdev_lookup_by_guid(spa->spa_root_vdev, 1445 device_guid) != NULL) 1446 break; 1447 1448 /* 1449 * Check any devices we may be in the process of adding. 1450 */ 1451 if (spa->spa_pending_vdev) { 1452 if (vdev_lookup_by_guid(spa->spa_pending_vdev, 1453 device_guid) != NULL) 1454 break; 1455 } 1456 } 1457 } 1458 1459 return (spa); 1460 } 1461 1462 /* 1463 * Determine whether a pool with the given pool_guid exists. 1464 */ 1465 boolean_t 1466 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) 1467 { 1468 return (spa_by_guid(pool_guid, device_guid) != NULL); 1469 } 1470 1471 char * 1472 spa_strdup(const char *s) 1473 { 1474 size_t len; 1475 char *new; 1476 1477 len = strlen(s); 1478 new = kmem_alloc(len + 1, KM_SLEEP); 1479 bcopy(s, new, len); 1480 new[len] = '\0'; 1481 1482 return (new); 1483 } 1484 1485 void 1486 spa_strfree(char *s) 1487 { 1488 kmem_free(s, strlen(s) + 1); 1489 } 1490 1491 uint64_t 1492 spa_get_random(uint64_t range) 1493 { 1494 uint64_t r; 1495 1496 ASSERT(range != 0); 1497 1498 if (range == 1) 1499 return (0); 1500 1501 (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t)); 1502 1503 return (r % range); 1504 } 1505 1506 uint64_t 1507 spa_generate_guid(spa_t *spa) 1508 { 1509 uint64_t guid = spa_get_random(-1ULL); 1510 1511 if (spa != NULL) { 1512 while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)) 1513 guid = spa_get_random(-1ULL); 1514 } else { 1515 while (guid == 0 || spa_guid_exists(guid, 0)) 1516 guid = spa_get_random(-1ULL); 1517 } 1518 1519 return (guid); 1520 } 1521 1522 void 1523 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) 1524 { 1525 char type[256]; 1526 char *checksum = NULL; 1527 char *compress = NULL; 1528 1529 if (bp != NULL) { 1530 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { 1531 dmu_object_byteswap_t bswap = 1532 DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 1533 (void) snprintf(type, sizeof (type), "bswap %s %s", 1534 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? 1535 "metadata" : "data", 1536 dmu_ot_byteswap[bswap].ob_name); 1537 } else { 1538 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1539 sizeof (type)); 1540 } 1541 if (!BP_IS_EMBEDDED(bp)) { 1542 checksum = 1543 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1544 } 1545 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1546 } 1547 1548 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, 1549 compress); 1550 } 1551 1552 void 1553 spa_freeze(spa_t *spa) 1554 { 1555 uint64_t freeze_txg = 0; 1556 1557 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1558 if (spa->spa_freeze_txg == UINT64_MAX) { 1559 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1560 spa->spa_freeze_txg = freeze_txg; 1561 } 1562 spa_config_exit(spa, SCL_ALL, FTAG); 1563 if (freeze_txg != 0) 1564 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1565 } 1566 1567 void 1568 zfs_panic_recover(const char *fmt, ...) 1569 { 1570 va_list adx; 1571 1572 va_start(adx, fmt); 1573 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1574 va_end(adx); 1575 } 1576 1577 /* 1578 * This is a stripped-down version of strtoull, suitable only for converting 1579 * lowercase hexadecimal numbers that don't overflow. 1580 */ 1581 uint64_t 1582 zfs_strtonum(const char *str, char **nptr) 1583 { 1584 uint64_t val = 0; 1585 char c; 1586 int digit; 1587 1588 while ((c = *str) != '\0') { 1589 if (c >= '0' && c <= '9') 1590 digit = c - '0'; 1591 else if (c >= 'a' && c <= 'f') 1592 digit = 10 + c - 'a'; 1593 else 1594 break; 1595 1596 val *= 16; 1597 val += digit; 1598 1599 str++; 1600 } 1601 1602 if (nptr) 1603 *nptr = (char *)str; 1604 1605 return (val); 1606 } 1607 1608 void 1609 spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx) 1610 { 1611 /* 1612 * We bump the feature refcount for each special vdev added to the pool 1613 */ 1614 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)); 1615 spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx); 1616 } 1617 1618 /* 1619 * ========================================================================== 1620 * Accessor functions 1621 * ========================================================================== 1622 */ 1623 1624 boolean_t 1625 spa_shutting_down(spa_t *spa) 1626 { 1627 return (spa->spa_async_suspended); 1628 } 1629 1630 dsl_pool_t * 1631 spa_get_dsl(spa_t *spa) 1632 { 1633 return (spa->spa_dsl_pool); 1634 } 1635 1636 boolean_t 1637 spa_is_initializing(spa_t *spa) 1638 { 1639 return (spa->spa_is_initializing); 1640 } 1641 1642 boolean_t 1643 spa_indirect_vdevs_loaded(spa_t *spa) 1644 { 1645 return (spa->spa_indirect_vdevs_loaded); 1646 } 1647 1648 blkptr_t * 1649 spa_get_rootblkptr(spa_t *spa) 1650 { 1651 return (&spa->spa_ubsync.ub_rootbp); 1652 } 1653 1654 void 1655 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1656 { 1657 spa->spa_uberblock.ub_rootbp = *bp; 1658 } 1659 1660 void 1661 spa_altroot(spa_t *spa, char *buf, size_t buflen) 1662 { 1663 if (spa->spa_root == NULL) 1664 buf[0] = '\0'; 1665 else 1666 (void) strncpy(buf, spa->spa_root, buflen); 1667 } 1668 1669 int 1670 spa_sync_pass(spa_t *spa) 1671 { 1672 return (spa->spa_sync_pass); 1673 } 1674 1675 char * 1676 spa_name(spa_t *spa) 1677 { 1678 return (spa->spa_name); 1679 } 1680 1681 uint64_t 1682 spa_guid(spa_t *spa) 1683 { 1684 dsl_pool_t *dp = spa_get_dsl(spa); 1685 uint64_t guid; 1686 1687 /* 1688 * If we fail to parse the config during spa_load(), we can go through 1689 * the error path (which posts an ereport) and end up here with no root 1690 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1691 * this case. 1692 */ 1693 if (spa->spa_root_vdev == NULL) 1694 return (spa->spa_config_guid); 1695 1696 guid = spa->spa_last_synced_guid != 0 ? 1697 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1698 1699 /* 1700 * Return the most recently synced out guid unless we're 1701 * in syncing context. 1702 */ 1703 if (dp && dsl_pool_sync_context(dp)) 1704 return (spa->spa_root_vdev->vdev_guid); 1705 else 1706 return (guid); 1707 } 1708 1709 uint64_t 1710 spa_load_guid(spa_t *spa) 1711 { 1712 /* 1713 * This is a GUID that exists solely as a reference for the 1714 * purposes of the arc. It is generated at load time, and 1715 * is never written to persistent storage. 1716 */ 1717 return (spa->spa_load_guid); 1718 } 1719 1720 uint64_t 1721 spa_last_synced_txg(spa_t *spa) 1722 { 1723 return (spa->spa_ubsync.ub_txg); 1724 } 1725 1726 uint64_t 1727 spa_first_txg(spa_t *spa) 1728 { 1729 return (spa->spa_first_txg); 1730 } 1731 1732 uint64_t 1733 spa_syncing_txg(spa_t *spa) 1734 { 1735 return (spa->spa_syncing_txg); 1736 } 1737 1738 /* 1739 * Return the last txg where data can be dirtied. The final txgs 1740 * will be used to just clear out any deferred frees that remain. 1741 */ 1742 uint64_t 1743 spa_final_dirty_txg(spa_t *spa) 1744 { 1745 return (spa->spa_final_txg - TXG_DEFER_SIZE); 1746 } 1747 1748 pool_state_t 1749 spa_state(spa_t *spa) 1750 { 1751 return (spa->spa_state); 1752 } 1753 1754 spa_load_state_t 1755 spa_load_state(spa_t *spa) 1756 { 1757 return (spa->spa_load_state); 1758 } 1759 1760 uint64_t 1761 spa_freeze_txg(spa_t *spa) 1762 { 1763 return (spa->spa_freeze_txg); 1764 } 1765 1766 /* ARGSUSED */ 1767 uint64_t 1768 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize) 1769 { 1770 return (lsize * spa_asize_inflation); 1771 } 1772 1773 /* 1774 * Return the amount of slop space in bytes. It is 1/32 of the pool (3.2%), 1775 * or at least 128MB, unless that would cause it to be more than half the 1776 * pool size. 1777 * 1778 * See the comment above spa_slop_shift for details. 1779 */ 1780 uint64_t 1781 spa_get_slop_space(spa_t *spa) 1782 { 1783 uint64_t space = spa_get_dspace(spa); 1784 return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop))); 1785 } 1786 1787 uint64_t 1788 spa_get_dspace(spa_t *spa) 1789 { 1790 return (spa->spa_dspace); 1791 } 1792 1793 uint64_t 1794 spa_get_checkpoint_space(spa_t *spa) 1795 { 1796 return (spa->spa_checkpoint_info.sci_dspace); 1797 } 1798 1799 void 1800 spa_update_dspace(spa_t *spa) 1801 { 1802 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + 1803 ddt_get_dedup_dspace(spa); 1804 if (spa->spa_vdev_removal != NULL) { 1805 /* 1806 * We can't allocate from the removing device, so 1807 * subtract its size. This prevents the DMU/DSL from 1808 * filling up the (now smaller) pool while we are in the 1809 * middle of removing the device. 1810 * 1811 * Note that the DMU/DSL doesn't actually know or care 1812 * how much space is allocated (it does its own tracking 1813 * of how much space has been logically used). So it 1814 * doesn't matter that the data we are moving may be 1815 * allocated twice (on the old device and the new 1816 * device). 1817 */ 1818 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 1819 vdev_t *vd = 1820 vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id); 1821 spa->spa_dspace -= spa_deflate(spa) ? 1822 vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; 1823 spa_config_exit(spa, SCL_VDEV, FTAG); 1824 } 1825 } 1826 1827 /* 1828 * Return the failure mode that has been set to this pool. The default 1829 * behavior will be to block all I/Os when a complete failure occurs. 1830 */ 1831 uint8_t 1832 spa_get_failmode(spa_t *spa) 1833 { 1834 return (spa->spa_failmode); 1835 } 1836 1837 boolean_t 1838 spa_suspended(spa_t *spa) 1839 { 1840 return (spa->spa_suspended != ZIO_SUSPEND_NONE); 1841 } 1842 1843 uint64_t 1844 spa_version(spa_t *spa) 1845 { 1846 return (spa->spa_ubsync.ub_version); 1847 } 1848 1849 boolean_t 1850 spa_deflate(spa_t *spa) 1851 { 1852 return (spa->spa_deflate); 1853 } 1854 1855 metaslab_class_t * 1856 spa_normal_class(spa_t *spa) 1857 { 1858 return (spa->spa_normal_class); 1859 } 1860 1861 metaslab_class_t * 1862 spa_log_class(spa_t *spa) 1863 { 1864 return (spa->spa_log_class); 1865 } 1866 1867 metaslab_class_t * 1868 spa_special_class(spa_t *spa) 1869 { 1870 return (spa->spa_special_class); 1871 } 1872 1873 metaslab_class_t * 1874 spa_dedup_class(spa_t *spa) 1875 { 1876 return (spa->spa_dedup_class); 1877 } 1878 1879 /* 1880 * Locate an appropriate allocation class 1881 */ 1882 metaslab_class_t * 1883 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype, 1884 uint_t level, uint_t special_smallblk) 1885 { 1886 if (DMU_OT_IS_ZIL(objtype)) { 1887 if (spa->spa_log_class->mc_groups != 0) 1888 return (spa_log_class(spa)); 1889 else 1890 return (spa_normal_class(spa)); 1891 } 1892 1893 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0; 1894 1895 if (DMU_OT_IS_DDT(objtype)) { 1896 if (spa->spa_dedup_class->mc_groups != 0) 1897 return (spa_dedup_class(spa)); 1898 else if (has_special_class && zfs_ddt_data_is_special) 1899 return (spa_special_class(spa)); 1900 else 1901 return (spa_normal_class(spa)); 1902 } 1903 1904 /* Indirect blocks for user data can land in special if allowed */ 1905 if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) { 1906 if (has_special_class && zfs_user_indirect_is_special) 1907 return (spa_special_class(spa)); 1908 else 1909 return (spa_normal_class(spa)); 1910 } 1911 1912 if (DMU_OT_IS_METADATA(objtype) || level > 0) { 1913 if (has_special_class) 1914 return (spa_special_class(spa)); 1915 else 1916 return (spa_normal_class(spa)); 1917 } 1918 1919 /* 1920 * Allow small file blocks in special class in some cases (like 1921 * for the dRAID vdev feature). But always leave a reserve of 1922 * zfs_special_class_metadata_reserve_pct exclusively for metadata. 1923 */ 1924 if (DMU_OT_IS_FILE(objtype) && 1925 has_special_class && size <= special_smallblk) { 1926 metaslab_class_t *special = spa_special_class(spa); 1927 uint64_t alloc = metaslab_class_get_alloc(special); 1928 uint64_t space = metaslab_class_get_space(special); 1929 uint64_t limit = 1930 (space * (100 - zfs_special_class_metadata_reserve_pct)) 1931 / 100; 1932 1933 if (alloc < limit) 1934 return (special); 1935 } 1936 1937 return (spa_normal_class(spa)); 1938 } 1939 1940 void 1941 spa_evicting_os_register(spa_t *spa, objset_t *os) 1942 { 1943 mutex_enter(&spa->spa_evicting_os_lock); 1944 list_insert_head(&spa->spa_evicting_os_list, os); 1945 mutex_exit(&spa->spa_evicting_os_lock); 1946 } 1947 1948 void 1949 spa_evicting_os_deregister(spa_t *spa, objset_t *os) 1950 { 1951 mutex_enter(&spa->spa_evicting_os_lock); 1952 list_remove(&spa->spa_evicting_os_list, os); 1953 cv_broadcast(&spa->spa_evicting_os_cv); 1954 mutex_exit(&spa->spa_evicting_os_lock); 1955 } 1956 1957 void 1958 spa_evicting_os_wait(spa_t *spa) 1959 { 1960 mutex_enter(&spa->spa_evicting_os_lock); 1961 while (!list_is_empty(&spa->spa_evicting_os_list)) 1962 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 1963 mutex_exit(&spa->spa_evicting_os_lock); 1964 1965 dmu_buf_user_evict_wait(); 1966 } 1967 1968 int 1969 spa_max_replication(spa_t *spa) 1970 { 1971 /* 1972 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 1973 * handle BPs with more than one DVA allocated. Set our max 1974 * replication level accordingly. 1975 */ 1976 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 1977 return (1); 1978 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 1979 } 1980 1981 int 1982 spa_prev_software_version(spa_t *spa) 1983 { 1984 return (spa->spa_prev_software_version); 1985 } 1986 1987 uint64_t 1988 spa_deadman_synctime(spa_t *spa) 1989 { 1990 return (spa->spa_deadman_synctime); 1991 } 1992 1993 spa_autotrim_t 1994 spa_get_autotrim(spa_t *spa) 1995 { 1996 return (spa->spa_autotrim); 1997 } 1998 1999 uint64_t 2000 dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 2001 { 2002 uint64_t asize = DVA_GET_ASIZE(dva); 2003 uint64_t dsize = asize; 2004 2005 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 2006 2007 if (asize != 0 && spa->spa_deflate) { 2008 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 2009 dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio; 2010 } 2011 2012 return (dsize); 2013 } 2014 2015 uint64_t 2016 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 2017 { 2018 uint64_t dsize = 0; 2019 2020 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2021 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2022 2023 return (dsize); 2024 } 2025 2026 uint64_t 2027 bp_get_dsize(spa_t *spa, const blkptr_t *bp) 2028 { 2029 uint64_t dsize = 0; 2030 2031 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2032 2033 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2034 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2035 2036 spa_config_exit(spa, SCL_VDEV, FTAG); 2037 2038 return (dsize); 2039 } 2040 2041 uint64_t 2042 spa_dirty_data(spa_t *spa) 2043 { 2044 return (spa->spa_dsl_pool->dp_dirty_total); 2045 } 2046 2047 /* 2048 * ========================================================================== 2049 * Initialization and Termination 2050 * ========================================================================== 2051 */ 2052 2053 static int 2054 spa_name_compare(const void *a1, const void *a2) 2055 { 2056 const spa_t *s1 = a1; 2057 const spa_t *s2 = a2; 2058 int s; 2059 2060 s = strcmp(s1->spa_name, s2->spa_name); 2061 2062 return (TREE_ISIGN(s)); 2063 } 2064 2065 int 2066 spa_busy(void) 2067 { 2068 return (spa_active_count); 2069 } 2070 2071 void 2072 spa_boot_init() 2073 { 2074 spa_config_load(); 2075 } 2076 2077 void 2078 spa_init(int mode) 2079 { 2080 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 2081 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 2082 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 2083 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 2084 2085 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 2086 offsetof(spa_t, spa_avl)); 2087 2088 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 2089 offsetof(spa_aux_t, aux_avl)); 2090 2091 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 2092 offsetof(spa_aux_t, aux_avl)); 2093 2094 spa_mode_global = mode; 2095 2096 #ifdef _KERNEL 2097 spa_arch_init(); 2098 #else 2099 if (spa_mode_global != FREAD && dprintf_find_string("watch")) { 2100 arc_procfd = open("/proc/self/ctl", O_WRONLY); 2101 if (arc_procfd == -1) { 2102 perror("could not enable watchpoints: " 2103 "opening /proc/self/ctl failed: "); 2104 } else { 2105 arc_watch = B_TRUE; 2106 } 2107 } 2108 #endif 2109 2110 zfs_refcount_init(); 2111 unique_init(); 2112 zfs_btree_init(); 2113 metaslab_stat_init(); 2114 zio_init(); 2115 dmu_init(); 2116 zil_init(); 2117 vdev_cache_stat_init(); 2118 vdev_mirror_stat_init(); 2119 zfs_prop_init(); 2120 zpool_prop_init(); 2121 zpool_feature_init(); 2122 spa_config_load(); 2123 l2arc_start(); 2124 scan_init(); 2125 } 2126 2127 void 2128 spa_fini(void) 2129 { 2130 l2arc_stop(); 2131 2132 spa_evict_all(); 2133 2134 vdev_cache_stat_fini(); 2135 vdev_mirror_stat_fini(); 2136 zil_fini(); 2137 dmu_fini(); 2138 zio_fini(); 2139 metaslab_stat_fini(); 2140 zfs_btree_fini(); 2141 unique_fini(); 2142 zfs_refcount_fini(); 2143 scan_fini(); 2144 2145 avl_destroy(&spa_namespace_avl); 2146 avl_destroy(&spa_spare_avl); 2147 avl_destroy(&spa_l2cache_avl); 2148 2149 cv_destroy(&spa_namespace_cv); 2150 mutex_destroy(&spa_namespace_lock); 2151 mutex_destroy(&spa_spare_lock); 2152 mutex_destroy(&spa_l2cache_lock); 2153 } 2154 2155 /* 2156 * Return whether this pool has slogs. No locking needed. 2157 * It's not a problem if the wrong answer is returned as it's only for 2158 * performance and not correctness 2159 */ 2160 boolean_t 2161 spa_has_slogs(spa_t *spa) 2162 { 2163 return (spa->spa_log_class->mc_rotor != NULL); 2164 } 2165 2166 spa_log_state_t 2167 spa_get_log_state(spa_t *spa) 2168 { 2169 return (spa->spa_log_state); 2170 } 2171 2172 void 2173 spa_set_log_state(spa_t *spa, spa_log_state_t state) 2174 { 2175 spa->spa_log_state = state; 2176 } 2177 2178 boolean_t 2179 spa_is_root(spa_t *spa) 2180 { 2181 return (spa->spa_is_root); 2182 } 2183 2184 boolean_t 2185 spa_writeable(spa_t *spa) 2186 { 2187 return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config); 2188 } 2189 2190 /* 2191 * Returns true if there is a pending sync task in any of the current 2192 * syncing txg, the current quiescing txg, or the current open txg. 2193 */ 2194 boolean_t 2195 spa_has_pending_synctask(spa_t *spa) 2196 { 2197 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) || 2198 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks)); 2199 } 2200 2201 int 2202 spa_mode(spa_t *spa) 2203 { 2204 return (spa->spa_mode); 2205 } 2206 2207 uint64_t 2208 spa_bootfs(spa_t *spa) 2209 { 2210 return (spa->spa_bootfs); 2211 } 2212 2213 uint64_t 2214 spa_delegation(spa_t *spa) 2215 { 2216 return (spa->spa_delegation); 2217 } 2218 2219 objset_t * 2220 spa_meta_objset(spa_t *spa) 2221 { 2222 return (spa->spa_meta_objset); 2223 } 2224 2225 enum zio_checksum 2226 spa_dedup_checksum(spa_t *spa) 2227 { 2228 return (spa->spa_dedup_checksum); 2229 } 2230 2231 /* 2232 * Reset pool scan stat per scan pass (or reboot). 2233 */ 2234 void 2235 spa_scan_stat_init(spa_t *spa) 2236 { 2237 /* data not stored on disk */ 2238 spa->spa_scan_pass_start = gethrestime_sec(); 2239 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan)) 2240 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start; 2241 else 2242 spa->spa_scan_pass_scrub_pause = 0; 2243 spa->spa_scan_pass_scrub_spent_paused = 0; 2244 spa->spa_scan_pass_exam = 0; 2245 spa->spa_scan_pass_issued = 0; 2246 vdev_scan_stat_init(spa->spa_root_vdev); 2247 } 2248 2249 /* 2250 * Get scan stats for zpool status reports 2251 */ 2252 int 2253 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 2254 { 2255 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 2256 2257 if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE) 2258 return (SET_ERROR(ENOENT)); 2259 bzero(ps, sizeof (pool_scan_stat_t)); 2260 2261 /* data stored on disk */ 2262 ps->pss_func = scn->scn_phys.scn_func; 2263 ps->pss_state = scn->scn_phys.scn_state; 2264 ps->pss_start_time = scn->scn_phys.scn_start_time; 2265 ps->pss_end_time = scn->scn_phys.scn_end_time; 2266 ps->pss_to_examine = scn->scn_phys.scn_to_examine; 2267 ps->pss_to_process = scn->scn_phys.scn_to_process; 2268 ps->pss_processed = scn->scn_phys.scn_processed; 2269 ps->pss_errors = scn->scn_phys.scn_errors; 2270 ps->pss_examined = scn->scn_phys.scn_examined; 2271 ps->pss_issued = 2272 scn->scn_issued_before_pass + spa->spa_scan_pass_issued; 2273 ps->pss_state = scn->scn_phys.scn_state; 2274 2275 /* data not stored on disk */ 2276 ps->pss_pass_start = spa->spa_scan_pass_start; 2277 ps->pss_pass_exam = spa->spa_scan_pass_exam; 2278 ps->pss_pass_issued = spa->spa_scan_pass_issued; 2279 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause; 2280 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused; 2281 2282 return (0); 2283 } 2284 2285 int 2286 spa_maxblocksize(spa_t *spa) 2287 { 2288 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) 2289 return (SPA_MAXBLOCKSIZE); 2290 else 2291 return (SPA_OLD_MAXBLOCKSIZE); 2292 } 2293 2294 int 2295 spa_maxdnodesize(spa_t *spa) 2296 { 2297 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) 2298 return (DNODE_MAX_SIZE); 2299 else 2300 return (DNODE_MIN_SIZE); 2301 } 2302 2303 boolean_t 2304 spa_multihost(spa_t *spa) 2305 { 2306 return (spa->spa_multihost ? B_TRUE : B_FALSE); 2307 } 2308 2309 unsigned long 2310 spa_get_hostid(void) 2311 { 2312 unsigned long myhostid; 2313 2314 #ifdef _KERNEL 2315 myhostid = zone_get_hostid(NULL); 2316 #else /* _KERNEL */ 2317 /* 2318 * We're emulating the system's hostid in userland, so 2319 * we can't use zone_get_hostid(). 2320 */ 2321 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 2322 #endif /* _KERNEL */ 2323 2324 return (myhostid); 2325 } 2326 2327 /* 2328 * Returns the txg that the last device removal completed. No indirect mappings 2329 * have been added since this txg. 2330 */ 2331 uint64_t 2332 spa_get_last_removal_txg(spa_t *spa) 2333 { 2334 uint64_t vdevid; 2335 uint64_t ret = -1ULL; 2336 2337 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2338 /* 2339 * sr_prev_indirect_vdev is only modified while holding all the 2340 * config locks, so it is sufficient to hold SCL_VDEV as reader when 2341 * examining it. 2342 */ 2343 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev; 2344 2345 while (vdevid != -1ULL) { 2346 vdev_t *vd = vdev_lookup_top(spa, vdevid); 2347 vdev_indirect_births_t *vib = vd->vdev_indirect_births; 2348 2349 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2350 2351 /* 2352 * If the removal did not remap any data, we don't care. 2353 */ 2354 if (vdev_indirect_births_count(vib) != 0) { 2355 ret = vdev_indirect_births_last_entry_txg(vib); 2356 break; 2357 } 2358 2359 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev; 2360 } 2361 spa_config_exit(spa, SCL_VDEV, FTAG); 2362 2363 IMPLY(ret != -1ULL, 2364 spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 2365 2366 return (ret); 2367 } 2368 2369 boolean_t 2370 spa_trust_config(spa_t *spa) 2371 { 2372 return (spa->spa_trust_config); 2373 } 2374 2375 uint64_t 2376 spa_missing_tvds_allowed(spa_t *spa) 2377 { 2378 return (spa->spa_missing_tvds_allowed); 2379 } 2380 2381 space_map_t * 2382 spa_syncing_log_sm(spa_t *spa) 2383 { 2384 return (spa->spa_syncing_log_sm); 2385 } 2386 2387 void 2388 spa_set_missing_tvds(spa_t *spa, uint64_t missing) 2389 { 2390 spa->spa_missing_tvds = missing; 2391 } 2392 2393 boolean_t 2394 spa_top_vdevs_spacemap_addressable(spa_t *spa) 2395 { 2396 vdev_t *rvd = spa->spa_root_vdev; 2397 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2398 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c])) 2399 return (B_FALSE); 2400 } 2401 return (B_TRUE); 2402 } 2403 2404 boolean_t 2405 spa_has_checkpoint(spa_t *spa) 2406 { 2407 return (spa->spa_checkpoint_txg != 0); 2408 } 2409 2410 boolean_t 2411 spa_importing_readonly_checkpoint(spa_t *spa) 2412 { 2413 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) && 2414 spa->spa_mode == FREAD); 2415 } 2416 2417 uint64_t 2418 spa_min_claim_txg(spa_t *spa) 2419 { 2420 uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg; 2421 2422 if (checkpoint_txg != 0) 2423 return (checkpoint_txg + 1); 2424 2425 return (spa->spa_first_txg); 2426 } 2427 2428 /* 2429 * If there is a checkpoint, async destroys may consume more space from 2430 * the pool instead of freeing it. In an attempt to save the pool from 2431 * getting suspended when it is about to run out of space, we stop 2432 * processing async destroys. 2433 */ 2434 boolean_t 2435 spa_suspend_async_destroy(spa_t *spa) 2436 { 2437 dsl_pool_t *dp = spa_get_dsl(spa); 2438 2439 uint64_t unreserved = dsl_pool_unreserved_space(dp, 2440 ZFS_SPACE_CHECK_EXTRA_RESERVED); 2441 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes; 2442 uint64_t avail = (unreserved > used) ? (unreserved - used) : 0; 2443 2444 if (spa_has_checkpoint(spa) && avail == 0) 2445 return (B_TRUE); 2446 2447 return (B_FALSE); 2448 } 2449