/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *	spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *	spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *	spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
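/*
 * Illustrative sketch (not compiled; a hypothetical caller): the common
 * read-side pattern described above is to take one of the config locks as
 * reader around the work and release it with the same tag:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... walk or inspect the vdev tree ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * Topology changes instead go through spa_vdev_enter()/spa_vdev_exit(),
 * which take all of the config locks (SCL_ALL) as writer on the caller's
 * behalf.
 */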

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, spa, and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA |
    ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *    (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;
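/*
 * Worked example (illustrative): with the default spa_slop_shift of 5,
 * a 1TB pool reserves MAX(1TB >> 5, MIN(1TB >> 1, 128MB)) = 32GB of slop,
 * while a 1GB pool reserves MAX(32MB, MIN(512MB, 128MB)) = 128MB; the
 * MIN(space >> 1, ...) term keeps the 128MB floor from exceeding half of
 * a very small pool.  See spa_get_slop_space() below.
 */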

int spa_allocators = 4;

/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER &&
		    !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
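/*
 * Illustrative sketch (hypothetical caller): spa_config_held() is typically
 * used in assertions rather than to take locks, e.g.:
 *
 *	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
 *
 * as dva_get_dsize_sync() does later in this file.
 */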

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name. Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	spa->spa_alloc_count = spa_allocators;
	spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (kmutex_t), KM_SLEEP);
	spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (avl_tree_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
		avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_alloc_trees[i]);
		mutex_destroy(&spa->spa_alloc_locks[i]);
	}
	kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
	    sizeof (kmutex_t));
	kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
	    sizeof (avl_tree_t));

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
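/*
 * Illustrative sketch (hypothetical caller): a consumer normally pairs the
 * namespace lookup with a hold, and drops the hold when finished:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup(name);
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	...
 *	spa_close(spa, FTAG);	(no locking required, per the comment above)
 */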

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}
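/*
 * Illustrative sketch (hypothetical caller): a vdev add/remove/attach style
 * operation brackets its work with spa_vdev_enter()/spa_vdev_exit():
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree ...
 *	return (spa_vdev_exit(spa, vd_to_free, txg, error));
 *
 * where vd_to_free (possibly NULL) is a vdev the operation wants freed once
 * the transaction group has synced.
 */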

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
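/*
 * Illustrative sketch (hypothetical caller): state-only changes such as
 * online/offline use the lighter-weight pair defined below, e.g.:
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... change vdev state ...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */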

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because lock SCL_CONFIG is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_write_cachefile(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
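/*
 * For example (illustrative), zfs_strtonum("1a2b", &end) returns 0x1a2b and
 * leaves 'end' pointing at the terminating NUL; parsing stops at the first
 * character outside [0-9a-f].
 */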

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied. The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 128MB, unless that would cause it to be more than half the
 * pool size.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
	return (spa->spa_checkpoint_info.sci_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
	if (spa->spa_vdev_removal != NULL) {
		/*
		 * We can't allocate from the removing device, so
		 * subtract its size.  This prevents the DMU/DSL from
		 * filling up the (now smaller) pool while we are in the
		 * middle of removing the device.
		 *
		 * Note that the DMU/DSL doesn't actually know or care
		 * how much space is allocated (it does its own tracking
		 * of how much space has been logically used).  So it
		 * doesn't matter that the data we are moving may be
		 * allocated twice (on the old device and the new
		 * device).
		 */
		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
		vdev_t *vd =
		    vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
		spa->spa_dspace -= spa_deflate(spa) ?
		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
		spa_config_exit(spa, SCL_VDEV, FTAG);
	}
}

/*
 * Return the failure mode that has been set for this pool. The default
 * behavior will be to block all I/Os when a complete failure occurs.
1776 */ 1777 uint8_t 1778 spa_get_failmode(spa_t *spa) 1779 { 1780 return (spa->spa_failmode); 1781 } 1782 1783 boolean_t 1784 spa_suspended(spa_t *spa) 1785 { 1786 return (spa->spa_suspended); 1787 } 1788 1789 uint64_t 1790 spa_version(spa_t *spa) 1791 { 1792 return (spa->spa_ubsync.ub_version); 1793 } 1794 1795 boolean_t 1796 spa_deflate(spa_t *spa) 1797 { 1798 return (spa->spa_deflate); 1799 } 1800 1801 metaslab_class_t * 1802 spa_normal_class(spa_t *spa) 1803 { 1804 return (spa->spa_normal_class); 1805 } 1806 1807 metaslab_class_t * 1808 spa_log_class(spa_t *spa) 1809 { 1810 return (spa->spa_log_class); 1811 } 1812 1813 void 1814 spa_evicting_os_register(spa_t *spa, objset_t *os) 1815 { 1816 mutex_enter(&spa->spa_evicting_os_lock); 1817 list_insert_head(&spa->spa_evicting_os_list, os); 1818 mutex_exit(&spa->spa_evicting_os_lock); 1819 } 1820 1821 void 1822 spa_evicting_os_deregister(spa_t *spa, objset_t *os) 1823 { 1824 mutex_enter(&spa->spa_evicting_os_lock); 1825 list_remove(&spa->spa_evicting_os_list, os); 1826 cv_broadcast(&spa->spa_evicting_os_cv); 1827 mutex_exit(&spa->spa_evicting_os_lock); 1828 } 1829 1830 void 1831 spa_evicting_os_wait(spa_t *spa) 1832 { 1833 mutex_enter(&spa->spa_evicting_os_lock); 1834 while (!list_is_empty(&spa->spa_evicting_os_list)) 1835 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 1836 mutex_exit(&spa->spa_evicting_os_lock); 1837 1838 dmu_buf_user_evict_wait(); 1839 } 1840 1841 int 1842 spa_max_replication(spa_t *spa) 1843 { 1844 /* 1845 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 1846 * handle BPs with more than one DVA allocated. Set our max 1847 * replication level accordingly. 1848 */ 1849 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 1850 return (1); 1851 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 1852 } 1853 1854 int 1855 spa_prev_software_version(spa_t *spa) 1856 { 1857 return (spa->spa_prev_software_version); 1858 } 1859 1860 uint64_t 1861 spa_deadman_synctime(spa_t *spa) 1862 { 1863 return (spa->spa_deadman_synctime); 1864 } 1865 1866 uint64_t 1867 dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 1868 { 1869 uint64_t asize = DVA_GET_ASIZE(dva); 1870 uint64_t dsize = asize; 1871 1872 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1873 1874 if (asize != 0 && spa->spa_deflate) { 1875 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 1876 dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio; 1877 } 1878 1879 return (dsize); 1880 } 1881 1882 uint64_t 1883 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 1884 { 1885 uint64_t dsize = 0; 1886 1887 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 1888 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 1889 1890 return (dsize); 1891 } 1892 1893 uint64_t 1894 bp_get_dsize(spa_t *spa, const blkptr_t *bp) 1895 { 1896 uint64_t dsize = 0; 1897 1898 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 1899 1900 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 1901 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 1902 1903 spa_config_exit(spa, SCL_VDEV, FTAG); 1904 1905 return (dsize); 1906 } 1907 1908 /* 1909 * ========================================================================== 1910 * Initialization and Termination 1911 * ========================================================================== 1912 */ 1913 1914 static int 1915 spa_name_compare(const void *a1, const void *a2) 1916 { 1917 const spa_t *s1 = a1; 1918 const spa_t *s2 = a2; 1919 int s; 1920 1921 s = strcmp(s1->spa_name, s2->spa_name); 1922 if (s > 0) 1923 return (1); 1924 if 
/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
        const spa_t *s1 = a1;
        const spa_t *s2 = a2;
        int s;

        s = strcmp(s1->spa_name, s2->spa_name);
        if (s > 0)
                return (1);
        if (s < 0)
                return (-1);
        return (0);
}

int
spa_busy(void)
{
        return (spa_active_count);
}

void
spa_boot_init()
{
        spa_config_load();
}

void
spa_init(int mode)
{
        mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

        avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
            offsetof(spa_t, spa_avl));

        avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
            offsetof(spa_aux_t, aux_avl));

        avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
            offsetof(spa_aux_t, aux_avl));

        spa_mode_global = mode;

#ifdef _KERNEL
        spa_arch_init();
#else
        if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
                arc_procfd = open("/proc/self/ctl", O_WRONLY);
                if (arc_procfd == -1) {
                        perror("could not enable watchpoints: "
                            "opening /proc/self/ctl failed: ");
                } else {
                        arc_watch = B_TRUE;
                }
        }
#endif

        refcount_init();
        unique_init();
        range_tree_init();
        metaslab_alloc_trace_init();
        zio_init();
        dmu_init();
        zil_init();
        vdev_cache_stat_init();
        zfs_prop_init();
        zpool_prop_init();
        zpool_feature_init();
        spa_config_load();
        l2arc_start();
}

void
spa_fini(void)
{
        l2arc_stop();

        spa_evict_all();

        vdev_cache_stat_fini();
        zil_fini();
        dmu_fini();
        zio_fini();
        metaslab_alloc_trace_fini();
        range_tree_fini();
        unique_fini();
        refcount_fini();

        avl_destroy(&spa_namespace_avl);
        avl_destroy(&spa_spare_avl);
        avl_destroy(&spa_l2cache_avl);

        cv_destroy(&spa_namespace_cv);
        mutex_destroy(&spa_namespace_lock);
        mutex_destroy(&spa_spare_lock);
        mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
        return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
        return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
        spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
        return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
        return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
}
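
/*
 * Illustrative sketch (hypothetical helper, not part of the SPA interface):
 * spa_writeable() is the usual guard before dirtying pool state; a pool
 * opened without FWRITE, or one whose config is not yet trusted, must not
 * be modified.
 */
static int
example_modify_pool(spa_t *spa)
{
        if (!spa_writeable(spa))
                return (SET_ERROR(EROFS));

        /* ... proceed to dirty state in an open txg ... */
        return (0);
}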
/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
        return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
            !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
        return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
        return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
        return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
        return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
        return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
        /* data not stored on disk */
        spa->spa_scan_pass_start = gethrestime_sec();
        if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
                spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
        else
                spa->spa_scan_pass_scrub_pause = 0;
        spa->spa_scan_pass_scrub_spent_paused = 0;
        spa->spa_scan_pass_exam = 0;
        vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
        dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

        if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
                return (SET_ERROR(ENOENT));
        bzero(ps, sizeof (pool_scan_stat_t));

        /* data stored on disk */
        ps->pss_func = scn->scn_phys.scn_func;
        ps->pss_start_time = scn->scn_phys.scn_start_time;
        ps->pss_end_time = scn->scn_phys.scn_end_time;
        ps->pss_to_examine = scn->scn_phys.scn_to_examine;
        ps->pss_examined = scn->scn_phys.scn_examined;
        ps->pss_to_process = scn->scn_phys.scn_to_process;
        ps->pss_processed = scn->scn_phys.scn_processed;
        ps->pss_errors = scn->scn_phys.scn_errors;
        ps->pss_state = scn->scn_phys.scn_state;

        /* data not stored on disk */
        ps->pss_pass_start = spa->spa_scan_pass_start;
        ps->pss_pass_exam = spa->spa_scan_pass_exam;
        ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
        ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;

        return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
        return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
        if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
                return (SPA_MAXBLOCKSIZE);
        else
                return (SPA_OLD_MAXBLOCKSIZE);
}
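
/*
 * Illustrative sketch (hypothetical helper, not part of the SPA interface):
 * consumers of the scan statistics (e.g. the code behind "zpool status")
 * call spa_scan_get_stats() with a caller-supplied pool_scan_stat_t and
 * must be prepared for ENOENT when no scan has ever been requested.
 */
static boolean_t
example_scan_in_progress(spa_t *spa)
{
        pool_scan_stat_t ps;

        if (spa_scan_get_stats(spa, &ps) != 0)
                return (B_FALSE);       /* no scan has been requested */

        return (ps.pss_state == DSS_SCANNING);
}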
/*
 * Returns the txg in which the last device removal completed.  No indirect
 * mappings have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
        uint64_t vdevid;
        uint64_t ret = -1ULL;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
        /*
         * sr_prev_indirect_vdev is only modified while holding all the
         * config locks, so it is sufficient to hold SCL_VDEV as reader when
         * examining it.
         */
        vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

        while (vdevid != -1ULL) {
                vdev_t *vd = vdev_lookup_top(spa, vdevid);
                vdev_indirect_births_t *vib = vd->vdev_indirect_births;

                ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

                /*
                 * If the removal did not remap any data, we don't care.
                 */
                if (vdev_indirect_births_count(vib) != 0) {
                        ret = vdev_indirect_births_last_entry_txg(vib);
                        break;
                }

                vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
        }
        spa_config_exit(spa, SCL_VDEV, FTAG);

        IMPLY(ret != -1ULL,
            spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

        return (ret);
}

boolean_t
spa_trust_config(spa_t *spa)
{
        return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
        return (spa->spa_missing_tvds_allowed);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
        spa->spa_missing_tvds = missing;
}

boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
        vdev_t *rvd = spa->spa_root_vdev;
        for (uint64_t c = 0; c < rvd->vdev_children; c++) {
                if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
                        return (B_FALSE);
        }
        return (B_TRUE);
}

boolean_t
spa_has_checkpoint(spa_t *spa)
{
        return (spa->spa_checkpoint_txg != 0);
}

boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
        return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
            spa->spa_mode == FREAD);
}

uint64_t
spa_min_claim_txg(spa_t *spa)
{
        uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

        if (checkpoint_txg != 0)
                return (checkpoint_txg + 1);

        return (spa->spa_first_txg);
}

/*
 * If there is a checkpoint, async destroys may consume more space from
 * the pool instead of freeing it.  In an attempt to save the pool from
 * getting suspended when it is about to run out of space, we stop
 * processing async destroys.
 */
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
        dsl_pool_t *dp = spa_get_dsl(spa);

        uint64_t unreserved = dsl_pool_unreserved_space(dp,
            ZFS_SPACE_CHECK_EXTRA_RESERVED);
        uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
        uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;

        if (spa_has_checkpoint(spa) && avail == 0)
                return (B_TRUE);

        return (B_FALSE);
}
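
/*
 * Illustrative sketch (hypothetical helper, not part of the SPA interface):
 * spa_min_claim_txg() above exists so that log claiming never touches blocks
 * born at or before a checkpoint; the uberblock's checkpoint txg plus one is
 * the earliest birth txg a claimer may act on.  A claim-time filter might
 * look like this.
 */
static boolean_t
example_bp_claimable(spa_t *spa, const blkptr_t *bp)
{
        /* Blocks born before the minimum claim txg belong to the checkpoint. */
        return (bp->blk_birth >= spa_min_claim_txg(spa));
}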