/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright 2020 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to look up a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is always based on spa_namespace_lock and spa_config_lock[].
 */
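
/*
 * Illustrative only: a typical reader of the vdev tree brackets its access
 * with one of the config locks, e.g.
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... walk spa->spa_root_vdev, without modifying it ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * spa_update_dspace() below uses exactly this pattern.
 */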

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First,
 * it is used to determine when the spa_deadman() logic should fire.  By
 * default the spa_deadman() will fire if spa_sync() has not completed in
 * 1000 seconds.  Second, the value determines if an I/O is considered
 * "hung".  Any I/O that has not completed in zfs_deadman_synctime_ms is
 * considered "hung", resulting in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system.  By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

#if defined(__amd64__) || defined(__i386__)
/*
 * Should we allow the use of mechanisms that depend on saving and restoring
 * the FPU state?  This was disabled initially due to stability issues in
 * the kernel FPU routines; see bug 13717.  As of the fixes for 13902 and
 * 13915, it has once again been enabled.
 */
int zfs_fpu_enabled = 1;
#endif

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *	(VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
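
/*
 * Given VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3, the expression
 * above works out to (3 + 1) * 3 * 2 == 24, which is how the default was
 * derived.
 */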

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the
 * pool.  These are the operations that call dsl_pool_adjustedsize() with
 * the netfree argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;
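
/*
 * Worked example for spa_get_slop_space() below: with spa_slop_shift == 5,
 * a 1 TB pool reserves 1 TB / 32 == 32 GB of slop.  A 1 GB pool would
 * compute only 32 MB, so it is raised to spa_min_slop (128 MB); a 100 MB
 * pool is capped at half its size, 50 MB.
 */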

int spa_allocators = 4;

/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*
 * By default dedup and user data indirects land in the special class.
 */
int zfs_ddt_data_is_special = B_TRUE;
int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of the special class's final space that is reserved for
 * metadata only.  Once we have allocated
 * 100 - zfs_special_class_metadata_reserve_pct percent of the class,
 * we only let metadata into it.
 */
int zfs_special_class_metadata_reserve_pct = 25;
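
/*
 * For example, with the default of 25, user data may be placed in the
 * special class only until the class is 75% full; the remaining 25% is
 * held in reserve for metadata.
 */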

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		zfs_refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		zfs_refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!zfs_refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!zfs_refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
		if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER &&
		    !zfs_refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
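
/*
 * Illustrative only: spa_config_held() is typically used in assertions
 * that document a function's locking requirements at entry, e.g.
 *
 *	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) != 0);
 */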

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_imp_kstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead we wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	spa->spa_alloc_count = spa_allocators;
	spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (kmutex_t), KM_SLEEP);
	spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (avl_tree_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
		avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}
	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile.
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_alloc_trees[i]);
		mutex_destroy(&spa->spa_alloc_locks[i]);
	}
	kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
	    sizeof (kmutex_t));
	kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
	    sizeof (avl_tree_t));

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);
	mutex_destroy(&spa->spa_imp_kstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement
 *	  if the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a
 * replacement spare, we bump the reference count in the AVL tree.  In
 * addition, we set the 'vdev_isspare' member to indicate that the device is
 * a spare (active or inactive).  When a spare is made active (used to
 * replace a device in the pool), we also keep track of which pool it's been
 * made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need
 * to be completely consistent with respect to other vdev configuration
 * changes.
 */

/*
 * Poll the spare vdevs to make sure they are not faulty.
 *
 * The probe operation will raise an ENXIO error and create an FM ereport if
 * the probe fails.
 */
void
spa_spare_poll(spa_t *spa)
{
	boolean_t async_request = B_FALSE;
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	for (int i = 0; i < spa->spa_spares.sav_count; i++) {
		spa_aux_t search, *found;
		vdev_t *vd = spa->spa_spares.sav_vdevs[i];

		search.aux_guid = vd->vdev_guid;

		mutex_enter(&spa_spare_lock);
		found = avl_find(&spa_spare_avl, &search, NULL);
		/* This spare is in use by a pool. */
		if (found != NULL && found->aux_pool != 0) {
			mutex_exit(&spa_spare_lock);
			continue;
		}
		mutex_exit(&spa_spare_lock);

		vd->vdev_probe_wanted = B_TRUE;
		async_request = B_TRUE;
	}
	if (async_request)
		spa_async_request(spa, SPA_ASYNC_PROBE);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	return (spa_vdev_config_enter(spa));
}
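
/*
 * Illustrative only: a vdev add/remove operation is typically bracketed as
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */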

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}
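
/*
 * Illustrative only: vdev state changes (e.g. online/offline) are
 * typically bracketed as
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... change the vdev's state ...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */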

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(8) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we
		 * can't dirty the vdev config because the SCL_CONFIG lock is
		 * not held.  Thankfully, in this case we don't need to dirty
		 * the config because it will be written out anyway when we
		 * finish creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	if (range == 1)
		return (0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
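
/*
 * For example, zfs_strtonum("1a2b", NULL) returns 0x1a2b; parsing stops at
 * the first character that is not in [0-9a-f].
 */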

void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
	/*
	 * We bump the feature refcount for each special vdev added to the
	 * pool.
	 */
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}
1632
1633 /*
1634 * ==========================================================================
1635 * Accessor functions
1636 * ==========================================================================
1637 */
1638
1639 boolean_t
spa_shutting_down(spa_t * spa)1640 spa_shutting_down(spa_t *spa)
1641 {
1642 return (spa->spa_async_suspended);
1643 }
1644
1645 dsl_pool_t *
spa_get_dsl(spa_t * spa)1646 spa_get_dsl(spa_t *spa)
1647 {
1648 return (spa->spa_dsl_pool);
1649 }
1650
1651 boolean_t
spa_is_initializing(spa_t * spa)1652 spa_is_initializing(spa_t *spa)
1653 {
1654 return (spa->spa_is_initializing);
1655 }
1656
1657 boolean_t
spa_indirect_vdevs_loaded(spa_t * spa)1658 spa_indirect_vdevs_loaded(spa_t *spa)
1659 {
1660 return (spa->spa_indirect_vdevs_loaded);
1661 }
1662
1663 blkptr_t *
spa_get_rootblkptr(spa_t * spa)1664 spa_get_rootblkptr(spa_t *spa)
1665 {
1666 return (&spa->spa_ubsync.ub_rootbp);
1667 }
1668
1669 void
spa_set_rootblkptr(spa_t * spa,const blkptr_t * bp)1670 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1671 {
1672 spa->spa_uberblock.ub_rootbp = *bp;
1673 }
1674
1675 void
spa_altroot(spa_t * spa,char * buf,size_t buflen)1676 spa_altroot(spa_t *spa, char *buf, size_t buflen)
1677 {
1678 if (spa->spa_root == NULL)
1679 buf[0] = '\0';
1680 else
1681 (void) strncpy(buf, spa->spa_root, buflen);
1682 }
1683
1684 int
spa_sync_pass(spa_t * spa)1685 spa_sync_pass(spa_t *spa)
1686 {
1687 return (spa->spa_sync_pass);
1688 }
1689
1690 char *
spa_name(spa_t * spa)1691 spa_name(spa_t *spa)
1692 {
1693 return (spa->spa_name);
1694 }
1695
1696 uint64_t
spa_guid(spa_t * spa)1697 spa_guid(spa_t *spa)
1698 {
1699 dsl_pool_t *dp = spa_get_dsl(spa);
1700 uint64_t guid;
1701
1702 /*
1703 * If we fail to parse the config during spa_load(), we can go through
1704 * the error path (which posts an ereport) and end up here with no root
1705 * vdev. We stash the original pool guid in 'spa_config_guid' to handle
1706 * this case.
1707 */
1708 if (spa->spa_root_vdev == NULL)
1709 return (spa->spa_config_guid);
1710
1711 guid = spa->spa_last_synced_guid != 0 ?
1712 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1713
1714 /*
1715 * Return the most recently synced out guid unless we're
1716 * in syncing context.
1717 */
1718 if (dp && dsl_pool_sync_context(dp))
1719 return (spa->spa_root_vdev->vdev_guid);
1720 else
1721 return (guid);
1722 }
1723
1724 uint64_t
spa_load_guid(spa_t * spa)1725 spa_load_guid(spa_t *spa)
1726 {
1727 /*
1728 * This is a GUID that exists solely as a reference for the
1729 * purposes of the arc. It is generated at load time, and
1730 * is never written to persistent storage.
1731 */
1732 return (spa->spa_load_guid);
1733 }
1734
1735 uint64_t
1736 spa_last_synced_txg(spa_t *spa)
1737 {
1738 return (spa->spa_ubsync.ub_txg);
1739 }
1740
1741 uint64_t
1742 spa_first_txg(spa_t *spa)
1743 {
1744 return (spa->spa_first_txg);
1745 }
1746
1747 uint64_t
1748 spa_syncing_txg(spa_t *spa)
1749 {
1750 return (spa->spa_syncing_txg);
1751 }
1752
1753 /*
1754  * Return the last txg in which data can be dirtied. The final txgs
1755  * are used only to clear out any deferred frees that remain.
1756 */
1757 uint64_t
1758 spa_final_dirty_txg(spa_t *spa)
1759 {
1760 return (spa->spa_final_txg - TXG_DEFER_SIZE);
1761 }
1762
1763 pool_state_t
1764 spa_state(spa_t *spa)
1765 {
1766 return (spa->spa_state);
1767 }
1768
1769 spa_load_state_t
1770 spa_load_state(spa_t *spa)
1771 {
1772 return (spa->spa_load_state);
1773 }
1774
1775 uint64_t
1776 spa_freeze_txg(spa_t *spa)
1777 {
1778 return (spa->spa_freeze_txg);
1779 }
1780
1781 /* ARGSUSED */
1782 uint64_t
1783 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
1784 {
1785 return (lsize * spa_asize_inflation);
1786 }
1787
1788 /*
1789  * Return the amount of slop space in bytes. It is 1/32 of the pool (3.1%),
1790 * or at least 128MB, unless that would cause it to be more than half the
1791 * pool size.
1792 *
1793 * See the comment above spa_slop_shift for details.
1794 */
1795 uint64_t
1796 spa_get_slop_space(spa_t *spa)
1797 {
1798 uint64_t space = spa_get_dspace(spa);
1799 return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
1800 }
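/*
 * A worked example of the computation above, assuming the default
 * tunables (spa_slop_shift == 5, spa_min_slop == 128MB):
 *
 *	10GB pool:  MAX(10GB >> 5, MIN(5GB, 128MB))   = MAX(320MB, 128MB) = 320MB
 *	1GB pool:   MAX(1GB >> 5, MIN(512MB, 128MB))  = MAX(32MB, 128MB)  = 128MB
 *	100MB pool: MAX(100MB >> 5, MIN(50MB, 128MB)) = MAX(~3MB, 50MB)   = 50MB
 *
 * so small pools fall back to the 128MB floor, capped at half the pool.
 */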
1801
1802 uint64_t
1803 spa_get_dspace(spa_t *spa)
1804 {
1805 return (spa->spa_dspace);
1806 }
1807
1808 uint64_t
1809 spa_get_checkpoint_space(spa_t *spa)
1810 {
1811 return (spa->spa_checkpoint_info.sci_dspace);
1812 }
1813
1814 void
1815 spa_update_dspace(spa_t *spa)
1816 {
1817 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1818 ddt_get_dedup_dspace(spa);
1819 if (spa->spa_vdev_removal != NULL) {
1820 /*
1821 * We can't allocate from the removing device, so
1822 * subtract its size. This prevents the DMU/DSL from
1823 * filling up the (now smaller) pool while we are in the
1824 * middle of removing the device.
1825 *
1826 * Note that the DMU/DSL doesn't actually know or care
1827 * how much space is allocated (it does its own tracking
1828 * of how much space has been logically used). So it
1829 * doesn't matter that the data we are moving may be
1830 * allocated twice (on the old device and the new
1831 * device).
1832 */
1833 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1834 vdev_t *vd =
1835 vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
1836 spa->spa_dspace -= spa_deflate(spa) ?
1837 vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
1838 spa_config_exit(spa, SCL_VDEV, FTAG);
1839 }
1840 }
1841
1842 /*
1843  * Return the failure mode that has been set for this pool. The default
1844  * behavior is to block all I/Os when a complete failure occurs.
1845 */
1846 uint8_t
1847 spa_get_failmode(spa_t *spa)
1848 {
1849 return (spa->spa_failmode);
1850 }
1851
1852 boolean_t
1853 spa_suspended(spa_t *spa)
1854 {
1855 return (spa->spa_suspended != ZIO_SUSPEND_NONE);
1856 }
1857
1858 uint64_t
1859 spa_version(spa_t *spa)
1860 {
1861 return (spa->spa_ubsync.ub_version);
1862 }
1863
1864 boolean_t
1865 spa_deflate(spa_t *spa)
1866 {
1867 return (spa->spa_deflate);
1868 }
1869
1870 metaslab_class_t *
1871 spa_normal_class(spa_t *spa)
1872 {
1873 return (spa->spa_normal_class);
1874 }
1875
1876 metaslab_class_t *
1877 spa_log_class(spa_t *spa)
1878 {
1879 return (spa->spa_log_class);
1880 }
1881
1882 metaslab_class_t *
1883 spa_special_class(spa_t *spa)
1884 {
1885 return (spa->spa_special_class);
1886 }
1887
1888 metaslab_class_t *
1889 spa_dedup_class(spa_t *spa)
1890 {
1891 return (spa->spa_dedup_class);
1892 }
1893
1894 /*
1895 * Locate an appropriate allocation class
1896 */
1897 metaslab_class_t *
1898 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
1899 uint_t level, uint_t special_smallblk)
1900 {
1901 if (DMU_OT_IS_ZIL(objtype)) {
1902 if (spa->spa_log_class->mc_groups != 0)
1903 return (spa_log_class(spa));
1904 else
1905 return (spa_normal_class(spa));
1906 }
1907
1908 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
1909
1910 if (DMU_OT_IS_DDT(objtype)) {
1911 if (spa->spa_dedup_class->mc_groups != 0)
1912 return (spa_dedup_class(spa));
1913 else if (has_special_class && zfs_ddt_data_is_special)
1914 return (spa_special_class(spa));
1915 else
1916 return (spa_normal_class(spa));
1917 }
1918
1919 /* Indirect blocks for user data can land in special if allowed */
1920 if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
1921 if (has_special_class && zfs_user_indirect_is_special)
1922 return (spa_special_class(spa));
1923 else
1924 return (spa_normal_class(spa));
1925 }
1926
1927 if (DMU_OT_IS_METADATA(objtype) || level > 0) {
1928 if (has_special_class)
1929 return (spa_special_class(spa));
1930 else
1931 return (spa_normal_class(spa));
1932 }
1933
1934 /*
1935  * Allow small file blocks in the special class in some cases (e.g.
1936  * for the dRAID vdev feature), but always leave a reserve of
1937 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
1938 */
1939 if (DMU_OT_IS_FILE(objtype) &&
1940 has_special_class && size <= special_smallblk) {
1941 metaslab_class_t *special = spa_special_class(spa);
1942 uint64_t alloc = metaslab_class_get_alloc(special);
1943 uint64_t space = metaslab_class_get_space(special);
1944 uint64_t limit =
1945 (space * (100 - zfs_special_class_metadata_reserve_pct))
1946 / 100;
1947
1948 if (alloc < limit)
1949 return (special);
1950 }
1951
1952 return (spa_normal_class(spa));
1953 }
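/*
 * A worked example of the small-block reserve check above, assuming the
 * default zfs_special_class_metadata_reserve_pct of 25: with 100GB of
 * special-class space, limit = 100GB * (100 - 25) / 100 = 75GB, so small
 * file blocks are redirected to the normal class once special-class
 * allocations reach 75GB, leaving the remaining 25GB for metadata only.
 */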
1954
1955 void
1956 spa_evicting_os_register(spa_t *spa, objset_t *os)
1957 {
1958 mutex_enter(&spa->spa_evicting_os_lock);
1959 list_insert_head(&spa->spa_evicting_os_list, os);
1960 mutex_exit(&spa->spa_evicting_os_lock);
1961 }
1962
1963 void
1964 spa_evicting_os_deregister(spa_t *spa, objset_t *os)
1965 {
1966 mutex_enter(&spa->spa_evicting_os_lock);
1967 list_remove(&spa->spa_evicting_os_list, os);
1968 cv_broadcast(&spa->spa_evicting_os_cv);
1969 mutex_exit(&spa->spa_evicting_os_lock);
1970 }
1971
1972 void
1973 spa_evicting_os_wait(spa_t *spa)
1974 {
1975 mutex_enter(&spa->spa_evicting_os_lock);
1976 while (!list_is_empty(&spa->spa_evicting_os_list))
1977 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
1978 mutex_exit(&spa->spa_evicting_os_lock);
1979
1980 dmu_buf_user_evict_wait();
1981 }
1982
1983 int
1984 spa_max_replication(spa_t *spa)
1985 {
1986 /*
1987 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
1988 * handle BPs with more than one DVA allocated. Set our max
1989 * replication level accordingly.
1990 */
1991 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
1992 return (1);
1993 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
1994 }
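/*
 * For example, on a pool at or past SPA_VERSION_DITTO_BLOCKS, and assuming
 * spa_max_replication_override is left at its default of SPA_DVAS_PER_BP,
 * a block may carry up to SPA_DVAS_PER_BP (3) DVAs; older pools are
 * limited to a single copy regardless of the override.
 */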
1995
1996 int
1997 spa_prev_software_version(spa_t *spa)
1998 {
1999 return (spa->spa_prev_software_version);
2000 }
2001
2002 uint64_t
2003 spa_deadman_synctime(spa_t *spa)
2004 {
2005 return (spa->spa_deadman_synctime);
2006 }
2007
2008 spa_autotrim_t
2009 spa_get_autotrim(spa_t *spa)
2010 {
2011 return (spa->spa_autotrim);
2012 }
2013
2014 uint64_t
2015 dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
2016 {
2017 uint64_t asize = DVA_GET_ASIZE(dva);
2018 uint64_t dsize = asize;
2019
2020 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2021
2022 if (asize != 0 && spa->spa_deflate) {
2023 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
2024 dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
2025 }
2026
2027 return (dsize);
2028 }
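/*
 * To illustrate the deflation above: vdev_deflate_ratio is precomputed so
 * that 128K of allocated space maps back to its deflated size in 512-byte
 * units.  For a hypothetical raidz top-level vdev where 128K of psize
 * allocates 256K of asize, the ratio is 131072 / (262144 >> 9) = 256, and
 * a DVA with asize 256K deflates to (262144 >> 9) * 256 = 128K of dsize.
 */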
2029
2030 uint64_t
2031 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
2032 {
2033 uint64_t dsize = 0;
2034
2035 for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2036 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2037
2038 return (dsize);
2039 }
2040
2041 uint64_t
2042 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
2043 {
2044 uint64_t dsize = 0;
2045
2046 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2047
2048 for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2049 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2050
2051 spa_config_exit(spa, SCL_VDEV, FTAG);
2052
2053 return (dsize);
2054 }
2055
2056 uint64_t
2057 spa_dirty_data(spa_t *spa)
2058 {
2059 return (spa->spa_dsl_pool->dp_dirty_total);
2060 }
2061
2062 /*
2063 * ==========================================================================
2064 * SPA Import Progress Routines
2065  * The illumos implementation of these differs from OpenZFS's. OpenZFS
2066  * uses the Linux /proc fs, whereas we use a kstat on the spa.
2067 * ==========================================================================
2068 */
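/*
 * As a sketch of the userland view (the pool name "tank" is hypothetical),
 * the per-pool kstat published below can be read while an import is in
 * flight with something like:
 *
 *	kstat -m zfs_import -n tank
 *
 * which reports the spa_load_state, mmp_sec_remaining and spa_load_max_txg
 * values maintained by the setters that follow.
 */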
2069
2070 typedef struct spa_import_progress {
2071 kstat_named_t sip_load_state;
2072 kstat_named_t sip_mmp_sec_remaining; /* MMP activity check */
2073 kstat_named_t sip_load_max_txg; /* rewind txg */
2074 } spa_import_progress_t;
2075
2076 static void
2077 spa_import_progress_init(void)
2078 {
2079 }
2080
2081 static void
2082 spa_import_progress_destroy(void)
2083 {
2084 }
2085
2086 void spa_import_progress_add(spa_t *);
2087
2088 int
2089 spa_import_progress_set_state(spa_t *spa, spa_load_state_t load_state)
2090 {
2091 if (spa->spa_imp_kstat == NULL)
2092 spa_import_progress_add(spa);
2093
2094 mutex_enter(&spa->spa_imp_kstat_lock);
2095 if (spa->spa_imp_kstat != NULL) {
2096 spa_import_progress_t *sip = spa->spa_imp_kstat->ks_data;
2097 if (sip != NULL)
2098 sip->sip_load_state.value.ui64 = (uint64_t)load_state;
2099 }
2100 mutex_exit(&spa->spa_imp_kstat_lock);
2101
2102 return (0);
2103 }
2104
2105 int
2106 spa_import_progress_set_max_txg(spa_t *spa, uint64_t load_max_txg)
2107 {
2108 if (spa->spa_imp_kstat == NULL)
2109 spa_import_progress_add(spa);
2110
2111 mutex_enter(&spa->spa_imp_kstat_lock);
2112 if (spa->spa_imp_kstat != NULL) {
2113 spa_import_progress_t *sip = spa->spa_imp_kstat->ks_data;
2114 if (sip != NULL)
2115 sip->sip_load_max_txg.value.ui64 = load_max_txg;
2116 }
2117 mutex_exit(&spa->spa_imp_kstat_lock);
2118
2119 return (0);
2120 }
2121
2122 int
2123 spa_import_progress_set_mmp_check(spa_t *spa, uint64_t mmp_sec_remaining)
2124 {
2125 if (spa->spa_imp_kstat == NULL)
2126 spa_import_progress_add(spa);
2127
2128 mutex_enter(&spa->spa_imp_kstat_lock);
2129 if (spa->spa_imp_kstat != NULL) {
2130 spa_import_progress_t *sip = spa->spa_imp_kstat->ks_data;
2131 if (sip != NULL)
2132 sip->sip_mmp_sec_remaining.value.ui64 =
2133 mmp_sec_remaining;
2134 }
2135 mutex_exit(&spa->spa_imp_kstat_lock);
2136
2137 return (0);
2138 }
2139
2140 /*
2141 * A new import is in progress. Add an entry.
2142 */
2143 void
2144 spa_import_progress_add(spa_t *spa)
2145 {
2146 char *poolname = NULL;
2147 spa_import_progress_t *sip;
2148
2149 mutex_enter(&spa->spa_imp_kstat_lock);
2150 if (spa->spa_imp_kstat != NULL) {
2151 sip = spa->spa_imp_kstat->ks_data;
2152 sip->sip_load_state.value.ui64 = (uint64_t)spa_load_state(spa);
2153 mutex_exit(&spa->spa_imp_kstat_lock);
2154 return;
2155 }
2156
2157 (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
2158 &poolname);
2159 if (poolname == NULL)
2160 poolname = spa_name(spa);
2161
2162 spa->spa_imp_kstat = kstat_create("zfs_import", 0, poolname,
2163 "zfs_misc", KSTAT_TYPE_NAMED,
2164 sizeof (spa_import_progress_t) / sizeof (kstat_named_t),
2165 KSTAT_FLAG_VIRTUAL);
2166 if (spa->spa_imp_kstat != NULL) {
2167 sip = kmem_alloc(sizeof (spa_import_progress_t), KM_SLEEP);
2168 spa->spa_imp_kstat->ks_data = sip;
2169
2170 sip->sip_load_state.value.ui64 = (uint64_t)spa_load_state(spa);
2171
2172 kstat_named_init(&sip->sip_load_state,
2173 "spa_load_state", KSTAT_DATA_UINT64);
2174 kstat_named_init(&sip->sip_mmp_sec_remaining,
2175 "mmp_sec_remaining", KSTAT_DATA_UINT64);
2176 kstat_named_init(&sip->sip_load_max_txg,
2177 "spa_load_max_txg", KSTAT_DATA_UINT64);
2178 spa->spa_imp_kstat->ks_lock = &spa->spa_imp_kstat_lock;
2179 kstat_install(spa->spa_imp_kstat);
2180 }
2181 mutex_exit(&spa->spa_imp_kstat_lock);
2182 }
2183
2184 void
2185 spa_import_progress_remove(spa_t *spa)
2186 {
2187 if (spa->spa_imp_kstat != NULL) {
2188 void *data = spa->spa_imp_kstat->ks_data;
2189
2190 kstat_delete(spa->spa_imp_kstat);
2191 spa->spa_imp_kstat = NULL;
2192 kmem_free(data, sizeof (spa_import_progress_t));
2193 }
2194 }
2195
2196 /*
2197 * ==========================================================================
2198 * Initialization and Termination
2199 * ==========================================================================
2200 */
2201
2202 static int
2203 spa_name_compare(const void *a1, const void *a2)
2204 {
2205 const spa_t *s1 = a1;
2206 const spa_t *s2 = a2;
2207 int s;
2208
2209 s = strcmp(s1->spa_name, s2->spa_name);
2210
2211 return (TREE_ISIGN(s));
2212 }
2213
2214 int
2215 spa_busy(void)
2216 {
2217 return (spa_active_count);
2218 }
2219
2220 void
2221 spa_boot_init()
2222 {
2223 spa_config_load();
2224 }
2225
2226 void
2227 spa_init(int mode)
2228 {
2229 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
2230 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
2231 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
2232 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
2233
2234 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
2235 offsetof(spa_t, spa_avl));
2236
2237 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
2238 offsetof(spa_aux_t, aux_avl));
2239
2240 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
2241 offsetof(spa_aux_t, aux_avl));
2242
2243 spa_mode_global = mode;
2244
2245 #ifdef _KERNEL
2246 spa_arch_init();
2247 #else
2248 if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
2249 arc_procfd = open("/proc/self/ctl", O_WRONLY);
2250 if (arc_procfd == -1) {
2251 perror("could not enable watchpoints: "
2252 "opening /proc/self/ctl failed: ");
2253 } else {
2254 arc_watch = B_TRUE;
2255 }
2256 }
2257 #endif
2258
2259 zfs_refcount_init();
2260 unique_init();
2261 zfs_btree_init();
2262 metaslab_stat_init();
2263 zio_init();
2264 dmu_init();
2265 zil_init();
2266 vdev_cache_stat_init();
2267 vdev_mirror_stat_init();
2268 vdev_raidz_math_init();
2269 zfs_prop_init();
2270 zpool_prop_init();
2271 zpool_feature_init();
2272 spa_config_load();
2273 l2arc_start();
2274 scan_init();
2275 spa_import_progress_init();
2276 }
2277
2278 void
2279 spa_fini(void)
2280 {
2281 l2arc_stop();
2282
2283 spa_evict_all();
2284
2285 vdev_cache_stat_fini();
2286 vdev_mirror_stat_fini();
2287 vdev_raidz_math_fini();
2288 zil_fini();
2289 dmu_fini();
2290 zio_fini();
2291 metaslab_stat_fini();
2292 zfs_btree_fini();
2293 unique_fini();
2294 zfs_refcount_fini();
2295 scan_fini();
2296 spa_import_progress_destroy();
2297
2298 avl_destroy(&spa_namespace_avl);
2299 avl_destroy(&spa_spare_avl);
2300 avl_destroy(&spa_l2cache_avl);
2301
2302 cv_destroy(&spa_namespace_cv);
2303 mutex_destroy(&spa_namespace_lock);
2304 mutex_destroy(&spa_spare_lock);
2305 mutex_destroy(&spa_l2cache_lock);
2306 }
2307
2308 /*
2309 * Return whether this pool has slogs. No locking needed.
2310  * It's not a problem if the wrong answer is returned, as it's only used
2311  * for performance and not correctness.
2312 */
2313 boolean_t
2314 spa_has_slogs(spa_t *spa)
2315 {
2316 return (spa->spa_log_class->mc_rotor != NULL);
2317 }
2318
2319 spa_log_state_t
2320 spa_get_log_state(spa_t *spa)
2321 {
2322 return (spa->spa_log_state);
2323 }
2324
2325 void
2326 spa_set_log_state(spa_t *spa, spa_log_state_t state)
2327 {
2328 spa->spa_log_state = state;
2329 }
2330
2331 boolean_t
2332 spa_is_root(spa_t *spa)
2333 {
2334 return (spa->spa_is_root);
2335 }
2336
2337 boolean_t
2338 spa_writeable(spa_t *spa)
2339 {
2340 return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
2341 }
2342
2343 /*
2344 * Returns true if there is a pending sync task in any of the current
2345 * syncing txg, the current quiescing txg, or the current open txg.
2346 */
2347 boolean_t
2348 spa_has_pending_synctask(spa_t *spa)
2349 {
2350 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
2351 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
2352 }
2353
2354 int
2355 spa_mode(spa_t *spa)
2356 {
2357 return (spa->spa_mode);
2358 }
2359
2360 uint64_t
2361 spa_bootfs(spa_t *spa)
2362 {
2363 return (spa->spa_bootfs);
2364 }
2365
2366 uint64_t
2367 spa_delegation(spa_t *spa)
2368 {
2369 return (spa->spa_delegation);
2370 }
2371
2372 objset_t *
2373 spa_meta_objset(spa_t *spa)
2374 {
2375 return (spa->spa_meta_objset);
2376 }
2377
2378 enum zio_checksum
2379 spa_dedup_checksum(spa_t *spa)
2380 {
2381 return (spa->spa_dedup_checksum);
2382 }
2383
2384 /*
2385  * Reset pool scan stats per scan pass (or reboot).
2386 */
2387 void
2388 spa_scan_stat_init(spa_t *spa)
2389 {
2390 /* data not stored on disk */
2391 spa->spa_scan_pass_start = gethrestime_sec();
2392 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
2393 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
2394 else
2395 spa->spa_scan_pass_scrub_pause = 0;
2396 spa->spa_scan_pass_scrub_spent_paused = 0;
2397 spa->spa_scan_pass_exam = 0;
2398 spa->spa_scan_pass_issued = 0;
2399 vdev_scan_stat_init(spa->spa_root_vdev);
2400 }
2401
2402 /*
2403  * Get scan stats for 'zpool status' reports.
2404 */
2405 int
2406 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2407 {
2408 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2409
2410 if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
2411 return (SET_ERROR(ENOENT));
2412 bzero(ps, sizeof (pool_scan_stat_t));
2413
2414 /* data stored on disk */
2415 ps->pss_func = scn->scn_phys.scn_func;
2416 ps->pss_state = scn->scn_phys.scn_state;
2417 ps->pss_start_time = scn->scn_phys.scn_start_time;
2418 ps->pss_end_time = scn->scn_phys.scn_end_time;
2419 ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2420 ps->pss_to_process = scn->scn_phys.scn_to_process;
2421 ps->pss_processed = scn->scn_phys.scn_processed;
2422 ps->pss_errors = scn->scn_phys.scn_errors;
2423 ps->pss_examined = scn->scn_phys.scn_examined;
2424 ps->pss_issued =
2425 scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
2427
2428 /* data not stored on disk */
2429 ps->pss_pass_start = spa->spa_scan_pass_start;
2430 ps->pss_pass_exam = spa->spa_scan_pass_exam;
2431 ps->pss_pass_issued = spa->spa_scan_pass_issued;
2432 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
2433 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
2434
2435 return (0);
2436 }
2437
2438 int
2439 spa_maxblocksize(spa_t *spa)
2440 {
2441 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
2442 return (SPA_MAXBLOCKSIZE);
2443 else
2444 return (SPA_OLD_MAXBLOCKSIZE);
2445 }
2446
2447 int
2448 spa_maxdnodesize(spa_t *spa)
2449 {
2450 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
2451 return (DNODE_MAX_SIZE);
2452 else
2453 return (DNODE_MIN_SIZE);
2454 }
2455
2456 boolean_t
2457 spa_multihost(spa_t *spa)
2458 {
2459 return (spa->spa_multihost ? B_TRUE : B_FALSE);
2460 }
2461
2462 unsigned long
2463 spa_get_hostid(void)
2464 {
2465 unsigned long myhostid;
2466
2467 #ifdef _KERNEL
2468 myhostid = zone_get_hostid(NULL);
2469 #else /* _KERNEL */
2470 /*
2471 * We're emulating the system's hostid in userland, so
2472 * we can't use zone_get_hostid().
2473 */
2474 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
2475 #endif /* _KERNEL */
2476
2477 return (myhostid);
2478 }
2479
2480 /*
2481 * Returns the txg that the last device removal completed. No indirect mappings
2482  * Returns the txg in which the last device removal completed. No indirect
2483  * mappings have been added since that txg.
2484 uint64_t
2485 spa_get_last_removal_txg(spa_t *spa)
2486 {
2487 uint64_t vdevid;
2488 uint64_t ret = -1ULL;
2489
2490 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2491 /*
2492 * sr_prev_indirect_vdev is only modified while holding all the
2493 * config locks, so it is sufficient to hold SCL_VDEV as reader when
2494 * examining it.
2495 */
2496 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
2497
2498 while (vdevid != -1ULL) {
2499 vdev_t *vd = vdev_lookup_top(spa, vdevid);
2500 vdev_indirect_births_t *vib = vd->vdev_indirect_births;
2501
2502 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2503
2504 /*
2505 * If the removal did not remap any data, we don't care.
2506 */
2507 if (vdev_indirect_births_count(vib) != 0) {
2508 ret = vdev_indirect_births_last_entry_txg(vib);
2509 break;
2510 }
2511
2512 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
2513 }
2514 spa_config_exit(spa, SCL_VDEV, FTAG);
2515
2516 IMPLY(ret != -1ULL,
2517 spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
2518
2519 return (ret);
2520 }
2521
2522 boolean_t
2523 spa_trust_config(spa_t *spa)
2524 {
2525 return (spa->spa_trust_config);
2526 }
2527
2528 uint64_t
2529 spa_missing_tvds_allowed(spa_t *spa)
2530 {
2531 return (spa->spa_missing_tvds_allowed);
2532 }
2533
2534 space_map_t *
2535 spa_syncing_log_sm(spa_t *spa)
2536 {
2537 return (spa->spa_syncing_log_sm);
2538 }
2539
2540 void
2541 spa_set_missing_tvds(spa_t *spa, uint64_t missing)
2542 {
2543 spa->spa_missing_tvds = missing;
2544 }
2545
2546 boolean_t
2547 spa_top_vdevs_spacemap_addressable(spa_t *spa)
2548 {
2549 vdev_t *rvd = spa->spa_root_vdev;
2550 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2551 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
2552 return (B_FALSE);
2553 }
2554 return (B_TRUE);
2555 }
2556
2557 boolean_t
2558 spa_has_checkpoint(spa_t *spa)
2559 {
2560 return (spa->spa_checkpoint_txg != 0);
2561 }
2562
2563 boolean_t
2564 spa_importing_readonly_checkpoint(spa_t *spa)
2565 {
2566 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
2567 spa->spa_mode == FREAD);
2568 }
2569
2570 uint64_t
2571 spa_min_claim_txg(spa_t *spa)
2572 {
2573 uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
2574
2575 if (checkpoint_txg != 0)
2576 return (checkpoint_txg + 1);
2577
2578 return (spa->spa_first_txg);
2579 }
2580
2581 /*
2582 * If there is a checkpoint, async destroys may consume more space from
2583 * the pool instead of freeing it. In an attempt to save the pool from
2584 * getting suspended when it is about to run out of space, we stop
2585 * processing async destroys.
2586 */
2587 boolean_t
2588 spa_suspend_async_destroy(spa_t *spa)
2589 {
2590 dsl_pool_t *dp = spa_get_dsl(spa);
2591
2592 uint64_t unreserved = dsl_pool_unreserved_space(dp,
2593 ZFS_SPACE_CHECK_EXTRA_RESERVED);
2594 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
2595 uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
2596
2597 if (spa_has_checkpoint(spa) && avail == 0)
2598 return (B_TRUE);
2599
2600 return (B_FALSE);
2601 }
2602
2603 /*
2604 * Generate a LUN expansion event. This routine does not use
2605 * ddi_log_sysevent() because that would require a dev_info_t, and we may not
2606 * have one available.
2607 */
2608 void
2609 zfs_post_dle_sysevent(const char *physpath)
2610 {
2611 #ifdef _KERNEL
2612 sysevent_t *ev = sysevent_alloc(EC_DEV_STATUS, ESC_DEV_DLE,
2613 SUNW_KERN_PUB "zfs", SE_SLEEP);
2614 sysevent_attr_list_t *attr = NULL;
2615 sysevent_id_t eid;
2616
2617 VERIFY(ev != NULL);
2618
2619 /*
2620 * The only attribute is the /devices path of the expanding device:
2621 */
2622 sysevent_value_t value = {
2623 .value_type = SE_DATA_TYPE_STRING,
2624 .value = {
2625 .sv_string = (char *)physpath,
2626 },
2627 };
2628 if (sysevent_add_attr(&attr, DEV_PHYS_PATH, &value, SE_SLEEP) != 0) {
2629 sysevent_free(ev);
2630 return;
2631 }
2632
2633 if (sysevent_attach_attributes(ev, attr) != 0) {
2634 sysevent_free_attr(attr);
2635 sysevent_free(ev);
2636 return;
2637 }
2638
2639 (void) log_sysevent(ev, SE_SLEEP, &eid);
2640 sysevent_free(ev);
2641 #endif
2642 }
2643