Lines Matching refs:spa

161 static boolean_t spa_has_active_shared_spare(spa_t *spa);
162 static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
163 static void spa_vdev_resilver_done(spa_t *spa);
278 spa_prop_get_config(spa_t *spa, nvlist_t **nvp) in spa_prop_get_config() argument
280 vdev_t *rvd = spa->spa_root_vdev; in spa_prop_get_config()
281 dsl_pool_t *pool = spa->spa_dsl_pool; in spa_prop_get_config()
285 metaslab_class_t *mc = spa_normal_class(spa); in spa_prop_get_config()
287 ASSERT(MUTEX_HELD(&spa->spa_props_lock)); in spa_prop_get_config()
291 alloc += metaslab_class_get_alloc(spa_special_class(spa)); in spa_prop_get_config()
292 alloc += metaslab_class_get_alloc(spa_dedup_class(spa)); in spa_prop_get_config()
295 size += metaslab_class_get_space(spa_special_class(spa)); in spa_prop_get_config()
296 size += metaslab_class_get_space(spa_dedup_class(spa)); in spa_prop_get_config()
298 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src); in spa_prop_get_config()
304 spa->spa_checkpoint_info.sci_dspace, src); in spa_prop_get_config()
311 (spa_mode(spa) == FREAD), src); in spa_prop_get_config()
317 ddt_get_pool_dedup_ratio(spa), src); in spa_prop_get_config()
322 version = spa_version(spa); in spa_prop_get_config()
354 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); in spa_prop_get_config()
356 if (spa->spa_comment != NULL) { in spa_prop_get_config()
357 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment, in spa_prop_get_config()
361 if (spa->spa_root != NULL) in spa_prop_get_config()
362 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root, in spa_prop_get_config()
365 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) { in spa_prop_get_config()
373 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) { in spa_prop_get_config()
381 if ((dp = list_head(&spa->spa_config_list)) != NULL) { in spa_prop_get_config()
396 spa_prop_get(spa_t *spa, nvlist_t **nvp) in spa_prop_get() argument
398 objset_t *mos = spa->spa_meta_objset; in spa_prop_get()
405 mutex_enter(&spa->spa_props_lock); in spa_prop_get()
410 spa_prop_get_config(spa, nvp); in spa_prop_get()
413 if (mos == NULL || spa->spa_pool_props_object == 0) { in spa_prop_get()
414 mutex_exit(&spa->spa_props_lock); in spa_prop_get()
421 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); in spa_prop_get()
443 dp = spa_get_dsl(spa); in spa_prop_get()
472 err = zap_lookup(mos, spa->spa_pool_props_object, in spa_prop_get()
487 mutex_exit(&spa->spa_props_lock); in spa_prop_get()
503 spa_prop_validate(spa_t *spa, nvlist_t *props) in spa_prop_validate() argument
554 (intval < spa_version(spa) || in spa_prop_validate()
586 if (spa_version(spa) < SPA_VERSION_BOOTFS) { in spa_prop_validate()
594 if (!vdev_is_bootable(spa->spa_root_vdev)) { in spa_prop_validate()
653 if (!error && spa_suspended(spa)) { in spa_prop_validate()
654 spa->spa_failmode = intval; in spa_prop_validate()
702 if (spa_version(spa) < SPA_VERSION_DEDUP) in spa_prop_validate()
730 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync) in spa_configfile_set() argument
749 list_insert_head(&spa->spa_config_list, dp); in spa_configfile_set()
751 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); in spa_configfile_set()
755 spa_prop_set(spa_t *spa, nvlist_t *nvp) in spa_prop_set() argument
761 if ((error = spa_prop_validate(spa, nvp)) != 0) in spa_prop_set()
784 if (ver == spa_version(spa)) in spa_prop_set()
793 error = dsl_sync_task(spa->spa_name, NULL, in spa_prop_set()
806 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props, in spa_prop_set()
817 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) in spa_prop_clear_bootfs() argument
819 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) { in spa_prop_clear_bootfs()
820 VERIFY(zap_remove(spa->spa_meta_objset, in spa_prop_clear_bootfs()
821 spa->spa_pool_props_object, in spa_prop_clear_bootfs()
823 spa->spa_bootfs = 0; in spa_prop_clear_bootfs()
832 spa_t *spa = dmu_tx_pool(tx)->dp_spa; in spa_change_guid_check() local
833 vdev_t *rvd = spa->spa_root_vdev; in spa_change_guid_check()
836 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { in spa_change_guid_check()
837 int error = (spa_has_checkpoint(spa)) ? in spa_change_guid_check()
842 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); in spa_change_guid_check()
844 spa_config_exit(spa, SCL_STATE, FTAG); in spa_change_guid_check()
849 ASSERT3U(spa_guid(spa), !=, *newguid); in spa_change_guid_check()
858 spa_t *spa = dmu_tx_pool(tx)->dp_spa; in spa_change_guid_sync() local
860 vdev_t *rvd = spa->spa_root_vdev; in spa_change_guid_sync()
862 oldguid = spa_guid(spa); in spa_change_guid_sync()
864 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); in spa_change_guid_sync()
868 spa_config_exit(spa, SCL_STATE, FTAG); in spa_change_guid_sync()
870 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu", in spa_change_guid_sync()
884 spa_change_guid(spa_t *spa) in spa_change_guid() argument
889 mutex_enter(&spa->spa_vdev_top_lock); in spa_change_guid()
893 error = dsl_sync_task(spa->spa_name, spa_change_guid_check, in spa_change_guid()
897 spa_write_cachefile(spa, B_FALSE, B_TRUE); in spa_change_guid()
898 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID); in spa_change_guid()
902 mutex_exit(&spa->spa_vdev_top_lock); in spa_change_guid()
931 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) in spa_get_errlists() argument
933 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock)); in spa_get_errlists()
935 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t)); in spa_get_errlists()
936 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t)); in spa_get_errlists()
938 avl_create(&spa->spa_errlist_scrub, in spa_get_errlists()
941 avl_create(&spa->spa_errlist_last, in spa_get_errlists()
947 spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q) in spa_taskqs_init() argument
953 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; in spa_taskqs_init()
999 if (zio_taskq_sysdc && spa->spa_proc != &p0) { in spa_taskqs_init()
1004 spa->spa_proc, zio_taskq_basedc, flags); in spa_taskqs_init()
1016 INT_MAX, spa->spa_proc, flags); in spa_taskqs_init()
1024 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q) in spa_taskqs_fini() argument
1026 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; in spa_taskqs_fini()
1049 spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q, in spa_taskq_dispatch_ent() argument
1052 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; in spa_taskq_dispatch_ent()
1068 spa_create_zio_taskqs(spa_t *spa) in spa_create_zio_taskqs() argument
1072 spa_taskqs_init(spa, t, q); in spa_create_zio_taskqs()
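
The spa_taskqs_init()/spa_taskqs_fini()/spa_taskq_dispatch_ent() lines above all index the same two-dimensional bank of task queues, spa_zio_taskq[t][q], by zio type and queue type. Below is a minimal standalone sketch of that indexing pattern only; the enums, struct, and functions in it are invented stand-ins, not the real spa.c or taskq API.

#include <stdio.h>

/* Illustrative, trimmed-down enums; the real zio/taskq types differ. */
enum zio_type { ZIO_TYPE_READ, ZIO_TYPE_WRITE, ZIO_TYPE_FREE, ZIO_NTYPES };
enum ztq_type { ZTQ_ISSUE, ZTQ_INTERRUPT, ZTQ_NQUEUES };

struct taskqs {
	int created;		/* stands in for the real taskq handles */
	int dispatched;
};

static struct taskqs zio_taskq[ZIO_NTYPES][ZTQ_NQUEUES];

static void
taskqs_init(int t, int q)
{
	zio_taskq[t][q].created = 1;		/* like spa_taskqs_init() */
}

static void
taskq_dispatch(int t, int q)
{
	struct taskqs *tqs = &zio_taskq[t][q];	/* like spa_taskq_dispatch_ent() */
	tqs->dispatched++;
}

int
main(void)
{
	/* like spa_create_zio_taskqs(): initialize every (type, queue) pair */
	for (int t = 0; t < ZIO_NTYPES; t++)
		for (int q = 0; q < ZTQ_NQUEUES; q++)
			taskqs_init(t, q);

	taskq_dispatch(ZIO_TYPE_READ, ZTQ_INTERRUPT);
	taskq_dispatch(ZIO_TYPE_WRITE, ZTQ_ISSUE);

	for (int t = 0; t < ZIO_NTYPES; t++)
		for (int q = 0; q < ZTQ_NQUEUES; q++)
			printf("taskq[%d][%d]: created=%d dispatched=%d\n",
			    t, q, zio_taskq[t][q].created,
			    zio_taskq[t][q].dispatched);
	return (0);
}
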
1082 spa_t *spa = arg; in spa_thread() local
1087 spa); in spa_thread()
1089 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr, in spa_thread()
1094 "zpool-%s", spa->spa_name); in spa_thread()
1110 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind); in spa_thread()
1123 spa->spa_proc = curproc; in spa_thread()
1124 spa->spa_did = curthread->t_did; in spa_thread()
1126 spa_create_zio_taskqs(spa); in spa_thread()
1128 mutex_enter(&spa->spa_proc_lock); in spa_thread()
1129 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED); in spa_thread()
1131 spa->spa_proc_state = SPA_PROC_ACTIVE; in spa_thread()
1132 cv_broadcast(&spa->spa_proc_cv); in spa_thread()
1135 while (spa->spa_proc_state == SPA_PROC_ACTIVE) in spa_thread()
1136 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); in spa_thread()
1137 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock); in spa_thread()
1139 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE); in spa_thread()
1140 spa->spa_proc_state = SPA_PROC_GONE; in spa_thread()
1141 spa->spa_proc = &p0; in spa_thread()
1142 cv_broadcast(&spa->spa_proc_cv); in spa_thread()
1154 spa_activate(spa_t *spa, int mode) in spa_activate() argument
1156 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); in spa_activate()
1158 spa->spa_state = POOL_STATE_ACTIVE; in spa_activate()
1159 spa->spa_mode = mode; in spa_activate()
1161 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops); in spa_activate()
1162 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops); in spa_activate()
1163 spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops); in spa_activate()
1164 spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops); in spa_activate()
1167 mutex_enter(&spa->spa_proc_lock); in spa_activate()
1168 ASSERT(spa->spa_proc_state == SPA_PROC_NONE); in spa_activate()
1169 ASSERT(spa->spa_proc == &p0); in spa_activate()
1170 spa->spa_did = 0; in spa_activate()
1173 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) { in spa_activate()
1174 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri, in spa_activate()
1176 spa->spa_proc_state = SPA_PROC_CREATED; in spa_activate()
1177 while (spa->spa_proc_state == SPA_PROC_CREATED) { in spa_activate()
1178 cv_wait(&spa->spa_proc_cv, in spa_activate()
1179 &spa->spa_proc_lock); in spa_activate()
1181 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); in spa_activate()
1182 ASSERT(spa->spa_proc != &p0); in spa_activate()
1183 ASSERT(spa->spa_did != 0); in spa_activate()
1188 spa->spa_name); in spa_activate()
1192 mutex_exit(&spa->spa_proc_lock); in spa_activate()
1195 if (spa->spa_proc == &p0) { in spa_activate()
1196 spa_create_zio_taskqs(spa); in spa_activate()
1200 spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, in spa_activate()
1204 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t), in spa_activate()
1206 list_create(&spa->spa_evicting_os_list, sizeof (objset_t), in spa_activate()
1208 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t), in spa_activate()
1211 txg_list_create(&spa->spa_vdev_txg_list, spa, in spa_activate()
1214 avl_create(&spa->spa_errlist_scrub, in spa_activate()
1217 avl_create(&spa->spa_errlist_last, in spa_activate()
1221 spa_keystore_init(&spa->spa_keystore); in spa_activate()
1227 spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus, in spa_activate()
1235 spa_deactivate(spa_t *spa) in spa_deactivate() argument
1237 ASSERT(spa->spa_sync_on == B_FALSE); in spa_deactivate()
1238 ASSERT(spa->spa_dsl_pool == NULL); in spa_deactivate()
1239 ASSERT(spa->spa_root_vdev == NULL); in spa_deactivate()
1240 ASSERT(spa->spa_async_zio_root == NULL); in spa_deactivate()
1241 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); in spa_deactivate()
1243 spa_evicting_os_wait(spa); in spa_deactivate()
1245 if (spa->spa_upgrade_taskq) { in spa_deactivate()
1246 taskq_destroy(spa->spa_upgrade_taskq); in spa_deactivate()
1247 spa->spa_upgrade_taskq = NULL; in spa_deactivate()
1250 txg_list_destroy(&spa->spa_vdev_txg_list); in spa_deactivate()
1252 list_destroy(&spa->spa_config_dirty_list); in spa_deactivate()
1253 list_destroy(&spa->spa_evicting_os_list); in spa_deactivate()
1254 list_destroy(&spa->spa_state_dirty_list); in spa_deactivate()
1258 spa_taskqs_fini(spa, t, q); in spa_deactivate()
1263 ASSERT3P(spa->spa_txg_zio[i], !=, NULL); in spa_deactivate()
1264 VERIFY0(zio_wait(spa->spa_txg_zio[i])); in spa_deactivate()
1265 spa->spa_txg_zio[i] = NULL; in spa_deactivate()
1268 metaslab_class_destroy(spa->spa_normal_class); in spa_deactivate()
1269 spa->spa_normal_class = NULL; in spa_deactivate()
1271 metaslab_class_destroy(spa->spa_log_class); in spa_deactivate()
1272 spa->spa_log_class = NULL; in spa_deactivate()
1274 metaslab_class_destroy(spa->spa_special_class); in spa_deactivate()
1275 spa->spa_special_class = NULL; in spa_deactivate()
1277 metaslab_class_destroy(spa->spa_dedup_class); in spa_deactivate()
1278 spa->spa_dedup_class = NULL; in spa_deactivate()
1284 spa_errlog_drain(spa); in spa_deactivate()
1285 avl_destroy(&spa->spa_errlist_scrub); in spa_deactivate()
1286 avl_destroy(&spa->spa_errlist_last); in spa_deactivate()
1288 spa_keystore_fini(&spa->spa_keystore); in spa_deactivate()
1290 spa->spa_state = POOL_STATE_UNINITIALIZED; in spa_deactivate()
1292 mutex_enter(&spa->spa_proc_lock); in spa_deactivate()
1293 if (spa->spa_proc_state != SPA_PROC_NONE) { in spa_deactivate()
1294 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); in spa_deactivate()
1295 spa->spa_proc_state = SPA_PROC_DEACTIVATE; in spa_deactivate()
1296 cv_broadcast(&spa->spa_proc_cv); in spa_deactivate()
1297 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) { in spa_deactivate()
1298 ASSERT(spa->spa_proc != &p0); in spa_deactivate()
1299 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); in spa_deactivate()
1301 ASSERT(spa->spa_proc_state == SPA_PROC_GONE); in spa_deactivate()
1302 spa->spa_proc_state = SPA_PROC_NONE; in spa_deactivate()
1304 ASSERT(spa->spa_proc == &p0); in spa_deactivate()
1305 mutex_exit(&spa->spa_proc_lock); in spa_deactivate()
1312 if (spa->spa_did != 0) { in spa_deactivate()
1313 thread_join(spa->spa_did); in spa_deactivate()
1314 spa->spa_did = 0; in spa_deactivate()
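
spa_thread(), spa_activate(), and spa_deactivate() above hand the pool process through the states SPA_PROC_CREATED -> ACTIVE -> DEACTIVATE -> GONE, with both sides looping on cv_wait() under spa_proc_lock and announcing each transition with cv_broadcast(). The following is a minimal user-space pthreads analogue of that handshake; every identifier in it is illustrative and none of it is ZFS code.

#include <pthread.h>
#include <stdio.h>

enum proc_state { PROC_NONE, PROC_CREATED, PROC_ACTIVE, PROC_DEACTIVATE, PROC_GONE };

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t proc_cv = PTHREAD_COND_INITIALIZER;
static enum proc_state state = PROC_NONE;

static void *
pool_thread(void *arg)
{
	pthread_mutex_lock(&proc_lock);
	/* like spa_thread(): announce SPA_PROC_CREATED -> SPA_PROC_ACTIVE */
	state = PROC_ACTIVE;
	pthread_cond_broadcast(&proc_cv);
	/* park until asked to go away, like the CALLB_CPR_SAFE wait loop */
	while (state == PROC_ACTIVE)
		pthread_cond_wait(&proc_cv, &proc_lock);
	/* acknowledge shutdown: SPA_PROC_DEACTIVATE -> SPA_PROC_GONE */
	state = PROC_GONE;
	pthread_cond_broadcast(&proc_cv);
	pthread_mutex_unlock(&proc_lock);
	return (arg);
}

int
main(void)
{
	pthread_t tid;

	/* like spa_activate(): create the worker and wait for it to come up */
	pthread_mutex_lock(&proc_lock);
	state = PROC_CREATED;
	pthread_create(&tid, NULL, pool_thread, NULL);
	while (state == PROC_CREATED)
		pthread_cond_wait(&proc_cv, &proc_lock);
	printf("worker active\n");

	/* like spa_deactivate(): request shutdown and wait for the ack */
	state = PROC_DEACTIVATE;
	pthread_cond_broadcast(&proc_cv);
	while (state == PROC_DEACTIVATE)
		pthread_cond_wait(&proc_cv, &proc_lock);
	pthread_mutex_unlock(&proc_lock);

	pthread_join(tid, NULL);	/* like thread_join(spa->spa_did) */
	printf("worker gone\n");
	return (0);
}
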
1325 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, in spa_config_parse() argument
1332 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) in spa_config_parse()
1352 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, in spa_config_parse()
1366 spa_should_flush_logs_on_unload(spa_t *spa) in spa_should_flush_logs_on_unload() argument
1368 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) in spa_should_flush_logs_on_unload()
1371 if (!spa_writeable(spa)) in spa_should_flush_logs_on_unload()
1374 if (!spa->spa_sync_on) in spa_should_flush_logs_on_unload()
1377 if (spa_state(spa) != POOL_STATE_EXPORTED) in spa_should_flush_logs_on_unload()
1391 spa_unload_log_sm_flush_all(spa_t *spa) in spa_unload_log_sm_flush_all() argument
1393 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); in spa_unload_log_sm_flush_all()
1397 ASSERT3U(spa->spa_log_flushall_txg, ==, 0); in spa_unload_log_sm_flush_all()
1398 spa->spa_log_flushall_txg = dmu_tx_get_txg(tx); in spa_unload_log_sm_flush_all()
1401 txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg); in spa_unload_log_sm_flush_all()
1405 spa_unload_log_sm_metadata(spa_t *spa) in spa_unload_log_sm_metadata() argument
1410 while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg, in spa_unload_log_sm_metadata()
1416 for (log_summary_entry_t *e = list_head(&spa->spa_log_summary); in spa_unload_log_sm_metadata()
1417 e != NULL; e = list_head(&spa->spa_log_summary)) { in spa_unload_log_sm_metadata()
1419 list_remove(&spa->spa_log_summary, e); in spa_unload_log_sm_metadata()
1423 spa->spa_unflushed_stats.sus_nblocks = 0; in spa_unload_log_sm_metadata()
1424 spa->spa_unflushed_stats.sus_memused = 0; in spa_unload_log_sm_metadata()
1425 spa->spa_unflushed_stats.sus_blocklimit = 0; in spa_unload_log_sm_metadata()
1432 spa_unload(spa_t *spa) in spa_unload() argument
1435 ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED); in spa_unload()
1437 spa_import_progress_remove(spa); in spa_unload()
1438 spa_load_note(spa, "UNLOADING"); in spa_unload()
1446 if (spa_should_flush_logs_on_unload(spa)) in spa_unload()
1447 spa_unload_log_sm_flush_all(spa); in spa_unload()
1452 spa_async_suspend(spa); in spa_unload()
1454 if (spa->spa_root_vdev) { in spa_unload()
1455 vdev_t *root_vdev = spa->spa_root_vdev; in spa_unload()
1458 vdev_autotrim_stop_all(spa); in spa_unload()
1464 if (spa->spa_sync_on) { in spa_unload()
1465 txg_sync_stop(spa->spa_dsl_pool); in spa_unload()
1466 spa->spa_sync_on = B_FALSE; in spa_unload()
1473 if (spa->spa_root_vdev != NULL) { in spa_unload()
1474 for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) { in spa_unload()
1475 vdev_t *vc = spa->spa_root_vdev->vdev_child[c]; in spa_unload()
1481 if (spa->spa_mmp.mmp_thread) in spa_unload()
1482 mmp_thread_stop(spa); in spa_unload()
1487 if (spa->spa_async_zio_root != NULL) { in spa_unload()
1489 (void) zio_wait(spa->spa_async_zio_root[i]); in spa_unload()
1490 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *)); in spa_unload()
1491 spa->spa_async_zio_root = NULL; in spa_unload()
1494 if (spa->spa_vdev_removal != NULL) { in spa_unload()
1495 spa_vdev_removal_destroy(spa->spa_vdev_removal); in spa_unload()
1496 spa->spa_vdev_removal = NULL; in spa_unload()
1499 if (spa->spa_condense_zthr != NULL) { in spa_unload()
1500 zthr_destroy(spa->spa_condense_zthr); in spa_unload()
1501 spa->spa_condense_zthr = NULL; in spa_unload()
1504 if (spa->spa_checkpoint_discard_zthr != NULL) { in spa_unload()
1505 zthr_destroy(spa->spa_checkpoint_discard_zthr); in spa_unload()
1506 spa->spa_checkpoint_discard_zthr = NULL; in spa_unload()
1509 spa_condense_fini(spa); in spa_unload()
1511 bpobj_close(&spa->spa_deferred_bpobj); in spa_unload()
1513 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); in spa_unload()
1518 if (spa->spa_root_vdev) in spa_unload()
1519 vdev_free(spa->spa_root_vdev); in spa_unload()
1520 ASSERT(spa->spa_root_vdev == NULL); in spa_unload()
1525 if (spa->spa_dsl_pool) { in spa_unload()
1526 dsl_pool_close(spa->spa_dsl_pool); in spa_unload()
1527 spa->spa_dsl_pool = NULL; in spa_unload()
1528 spa->spa_meta_objset = NULL; in spa_unload()
1531 ddt_unload(spa); in spa_unload()
1532 spa_unload_log_sm_metadata(spa); in spa_unload()
1537 spa_l2cache_drop(spa); in spa_unload()
1539 for (int i = 0; i < spa->spa_spares.sav_count; i++) in spa_unload()
1540 vdev_free(spa->spa_spares.sav_vdevs[i]); in spa_unload()
1541 if (spa->spa_spares.sav_vdevs) { in spa_unload()
1542 kmem_free(spa->spa_spares.sav_vdevs, in spa_unload()
1543 spa->spa_spares.sav_count * sizeof (void *)); in spa_unload()
1544 spa->spa_spares.sav_vdevs = NULL; in spa_unload()
1546 if (spa->spa_spares.sav_config) { in spa_unload()
1547 nvlist_free(spa->spa_spares.sav_config); in spa_unload()
1548 spa->spa_spares.sav_config = NULL; in spa_unload()
1550 spa->spa_spares.sav_count = 0; in spa_unload()
1552 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { in spa_unload()
1553 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]); in spa_unload()
1554 vdev_free(spa->spa_l2cache.sav_vdevs[i]); in spa_unload()
1556 if (spa->spa_l2cache.sav_vdevs) { in spa_unload()
1557 kmem_free(spa->spa_l2cache.sav_vdevs, in spa_unload()
1558 spa->spa_l2cache.sav_count * sizeof (void *)); in spa_unload()
1559 spa->spa_l2cache.sav_vdevs = NULL; in spa_unload()
1561 if (spa->spa_l2cache.sav_config) { in spa_unload()
1562 nvlist_free(spa->spa_l2cache.sav_config); in spa_unload()
1563 spa->spa_l2cache.sav_config = NULL; in spa_unload()
1565 spa->spa_l2cache.sav_count = 0; in spa_unload()
1567 spa->spa_async_suspended = 0; in spa_unload()
1569 spa->spa_indirect_vdevs_loaded = B_FALSE; in spa_unload()
1571 if (spa->spa_comment != NULL) { in spa_unload()
1572 spa_strfree(spa->spa_comment); in spa_unload()
1573 spa->spa_comment = NULL; in spa_unload()
1576 spa_config_exit(spa, SCL_ALL, spa); in spa_unload()
1586 spa_load_spares(spa_t *spa) in spa_load_spares() argument
1601 if (!spa_writeable(spa)) in spa_load_spares()
1605 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); in spa_load_spares()
1610 for (i = 0; i < spa->spa_spares.sav_count; i++) { in spa_load_spares()
1611 vd = spa->spa_spares.sav_vdevs[i]; in spa_load_spares()
1614 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, in spa_load_spares()
1621 if (spa->spa_spares.sav_vdevs) in spa_load_spares()
1622 kmem_free(spa->spa_spares.sav_vdevs, in spa_load_spares()
1623 spa->spa_spares.sav_count * sizeof (void *)); in spa_load_spares()
1625 if (spa->spa_spares.sav_config == NULL) in spa_load_spares()
1628 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, in spa_load_spares()
1631 spa->spa_spares.sav_count = (int)nspares; in spa_load_spares()
1632 spa->spa_spares.sav_vdevs = NULL; in spa_load_spares()
1646 spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *), in spa_load_spares()
1648 for (i = 0; i < spa->spa_spares.sav_count; i++) { in spa_load_spares()
1649 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0, in spa_load_spares()
1653 spa->spa_spares.sav_vdevs[i] = vd; in spa_load_spares()
1655 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, in spa_load_spares()
1678 vd->vdev_aux = &spa->spa_spares; in spa_load_spares()
1691 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, in spa_load_spares()
1694 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), in spa_load_spares()
1696 for (i = 0; i < spa->spa_spares.sav_count; i++) in spa_load_spares()
1697 spares[i] = vdev_config_generate(spa, in spa_load_spares()
1698 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE); in spa_load_spares()
1699 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, in spa_load_spares()
1700 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0); in spa_load_spares()
1701 for (i = 0; i < spa->spa_spares.sav_count; i++) in spa_load_spares()
1703 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); in spa_load_spares()
1715 spa_load_l2cache(spa_t *spa) in spa_load_l2cache() argument
1722 spa_aux_vdev_t *sav = &spa->spa_l2cache; in spa_load_l2cache()
1733 if (!spa_writeable(spa)) in spa_load_l2cache()
1737 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); in spa_load_l2cache()
1779 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, in spa_load_l2cache()
1801 l2arc_add_vdev(spa, vd); in spa_load_l2cache()
1845 l2cache[i] = vdev_config_generate(spa, in spa_load_l2cache()
1857 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) in load_nvlist() argument
1865 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); in load_nvlist()
1873 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, in load_nvlist()
1887 spa_healthy_core_tvds(spa_t *spa) in spa_healthy_core_tvds() argument
1889 vdev_t *rvd = spa->spa_root_vdev; in spa_healthy_core_tvds()
1921 spa_check_for_missing_logs(spa_t *spa) in spa_check_for_missing_logs() argument
1923 vdev_t *rvd = spa->spa_root_vdev; in spa_check_for_missing_logs()
1930 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { in spa_check_for_missing_logs()
1948 child[idx++] = vdev_config_generate(spa, tvd, in spa_check_for_missing_logs()
1956 fnvlist_add_nvlist(spa->spa_load_info, in spa_check_for_missing_logs()
1966 spa_load_failed(spa, "some log devices are missing"); in spa_check_for_missing_logs()
1976 spa_set_log_state(spa, SPA_LOG_CLEAR); in spa_check_for_missing_logs()
1977 spa_load_note(spa, "some log devices are " in spa_check_for_missing_logs()
1992 spa_check_logs(spa_t *spa) in spa_check_logs() argument
1995 dsl_pool_t *dp = spa_get_dsl(spa); in spa_check_logs()
1997 switch (spa->spa_log_state) { in spa_check_logs()
2004 spa_set_log_state(spa, SPA_LOG_MISSING); in spa_check_logs()
2011 spa_passivate_log(spa_t *spa) in spa_passivate_log() argument
2013 vdev_t *rvd = spa->spa_root_vdev; in spa_passivate_log()
2016 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); in spa_passivate_log()
2018 if (!spa_has_slogs(spa)) in spa_passivate_log()
2035 spa_activate_log(spa_t *spa) in spa_activate_log() argument
2037 vdev_t *rvd = spa->spa_root_vdev; in spa_activate_log()
2039 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); in spa_activate_log()
2051 spa_reset_logs(spa_t *spa) in spa_reset_logs() argument
2055 error = dmu_objset_find(spa_name(spa), zil_reset, in spa_reset_logs()
2063 txg_wait_synced(spa->spa_dsl_pool, 0); in spa_reset_logs()
2078 spa_t *spa = zio->io_spa; in spa_claim_notify() local
2083 mutex_enter(&spa->spa_props_lock); /* any mutex will do */ in spa_claim_notify()
2084 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth) in spa_claim_notify()
2085 spa->spa_claim_max_txg = zio->io_bp->blk_birth; in spa_claim_notify()
2086 mutex_exit(&spa->spa_props_lock); in spa_claim_notify()
2101 spa_t *spa = zio->io_spa; in spa_load_verify_done() local
2112 mutex_enter(&spa->spa_scrub_lock); in spa_load_verify_done()
2113 spa->spa_load_verify_ios--; in spa_load_verify_done()
2114 cv_broadcast(&spa->spa_scrub_io_cv); in spa_load_verify_done()
2115 mutex_exit(&spa->spa_scrub_lock); in spa_load_verify_done()
2128 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, in spa_load_verify_cb() argument
2146 mutex_enter(&spa->spa_scrub_lock); in spa_load_verify_cb()
2147 while (spa->spa_load_verify_ios >= spa_load_verify_maxinflight) in spa_load_verify_cb()
2148 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); in spa_load_verify_cb()
2149 spa->spa_load_verify_ios++; in spa_load_verify_cb()
2150 mutex_exit(&spa->spa_scrub_lock); in spa_load_verify_cb()
2152 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size, in spa_load_verify_cb()
2170 spa_load_verify(spa_t *spa) in spa_load_verify() argument
2178 zpool_get_load_policy(spa->spa_config, &policy); in spa_load_verify()
2183 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); in spa_load_verify()
2184 error = dmu_objset_find_dp(spa->spa_dsl_pool, in spa_load_verify()
2185 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL, in spa_load_verify()
2187 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); in spa_load_verify()
2191 rio = zio_root(spa, NULL, &sle, in spa_load_verify()
2195 if (spa->spa_extreme_rewind) { in spa_load_verify()
2196 spa_load_note(spa, "performing a complete scan of the " in spa_load_verify()
2202 error = traverse_pool(spa, spa->spa_verify_min_txg, in spa_load_verify()
2209 spa->spa_load_meta_errors = sle.sle_meta_count; in spa_load_verify()
2210 spa->spa_load_data_errors = sle.sle_data_count; in spa_load_verify()
2213 spa_load_note(spa, "spa_load_verify found %llu metadata errors " in spa_load_verify()
2224 spa->spa_load_txg = spa->spa_uberblock.ub_txg; in spa_load_verify()
2225 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; in spa_load_verify()
2227 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; in spa_load_verify()
2228 VERIFY(nvlist_add_uint64(spa->spa_load_info, in spa_load_verify()
2229 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0); in spa_load_verify()
2230 VERIFY(nvlist_add_int64(spa->spa_load_info, in spa_load_verify()
2232 VERIFY(nvlist_add_uint64(spa->spa_load_info, in spa_load_verify()
2235 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; in spa_load_verify()
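
spa_load_verify_cb() above throttles verification reads by sleeping on spa_scrub_io_cv while spa_load_verify_ios is at the limit, and spa_load_verify_done() wakes the issuer as each read completes. Below is a small pthreads sketch of that in-flight counter pattern, with made-up names and limits rather than the real zio/spa interfaces.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define	MAX_INFLIGHT	4	/* stands in for spa_load_verify_maxinflight */
#define	TOTAL_IOS	16

static pthread_mutex_t scrub_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t scrub_io_cv = PTHREAD_COND_INITIALIZER;
static int load_verify_ios;

/* completion side, like spa_load_verify_done() */
static void *
io_done(void *arg)
{
	usleep(10000);		/* pretend the read takes a while */
	pthread_mutex_lock(&scrub_lock);
	load_verify_ios--;
	pthread_cond_broadcast(&scrub_io_cv);
	pthread_mutex_unlock(&scrub_lock);
	return (arg);
}

int
main(void)
{
	pthread_t tids[TOTAL_IOS];

	for (int i = 0; i < TOTAL_IOS; i++) {
		/* issue side, like spa_load_verify_cb(): wait for a free slot */
		pthread_mutex_lock(&scrub_lock);
		while (load_verify_ios >= MAX_INFLIGHT)
			pthread_cond_wait(&scrub_io_cv, &scrub_lock);
		load_verify_ios++;
		pthread_mutex_unlock(&scrub_lock);
		pthread_create(&tids[i], NULL, io_done, NULL);
	}
	for (int i = 0; i < TOTAL_IOS; i++)
		pthread_join(tids[i], NULL);
	printf("issued %d reads, %d still in flight\n", TOTAL_IOS,
	    load_verify_ios);
	return (0);
}
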
2254 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) in spa_prop_find() argument
2256 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, in spa_prop_find()
2264 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent) in spa_dir_prop() argument
2266 int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, in spa_dir_prop()
2270 spa_load_failed(spa, "couldn't get '%s' value in MOS directory " in spa_dir_prop()
2285 spa_spawn_aux_threads(spa_t *spa) in spa_spawn_aux_threads() argument
2287 ASSERT(spa_writeable(spa)); in spa_spawn_aux_threads()
2291 spa_start_indirect_condensing_thread(spa); in spa_spawn_aux_threads()
2293 ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL); in spa_spawn_aux_threads()
2294 spa->spa_checkpoint_discard_zthr = in spa_spawn_aux_threads()
2296 spa_checkpoint_discard_thread, spa); in spa_spawn_aux_threads()
2316 spa_try_repair(spa_t *spa, nvlist_t *config) in spa_try_repair() argument
2341 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); in spa_try_repair()
2355 vdev_reopen(spa->spa_root_vdev); in spa_try_repair()
2375 vdev_reopen(spa->spa_root_vdev); in spa_try_repair()
2382 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) in spa_load() argument
2387 spa->spa_load_state = state; in spa_load()
2388 (void) spa_import_progress_set_state(spa, spa_load_state(spa)); in spa_load()
2390 gethrestime(&spa->spa_loaded_ts); in spa_load()
2391 error = spa_load_impl(spa, type, &ereport); in spa_load()
2397 spa_evicting_os_wait(spa); in spa_load()
2398 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); in spa_load()
2401 spa->spa_loaded_ts.tv_sec = 0; in spa_load()
2402 spa->spa_loaded_ts.tv_nsec = 0; in spa_load()
2405 (void) zfs_ereport_post(ereport, spa, in spa_load()
2409 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; in spa_load()
2410 spa->spa_ena = 0; in spa_load()
2412 (void) spa_import_progress_set_state(spa, spa_load_state(spa)); in spa_load()
2425 spa_t *spa = vd->vdev_spa; in vdev_count_verify_zaps() local
2429 ASSERT0(zap_lookup_int(spa->spa_meta_objset, in vdev_count_verify_zaps()
2430 spa->spa_all_vdev_zaps, vd->vdev_top_zap)); in vdev_count_verify_zaps()
2434 ASSERT0(zap_lookup_int(spa->spa_meta_objset, in vdev_count_verify_zaps()
2435 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); in vdev_count_verify_zaps()
2449 spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, in spa_activity_check_required() argument
2475 if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) in spa_activity_check_required()
2520 spa_activity_check_duration(spa_t *spa, uberblock_t *ub) in spa_activity_check_duration() argument
2574 vdev_count_leaves(spa)); in spa_activity_check_duration()
2591 spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) in spa_activity_check() argument
2600 vdev_t *rvd = spa->spa_root_vdev; in spa_activity_check()
2629 import_delay = spa_activity_check_duration(spa, ub); in spa_activity_check()
2637 (void) spa_import_progress_set_mmp_check(spa, in spa_activity_check()
2691 fnvlist_add_string(spa->spa_load_info, in spa_activity_check()
2698 fnvlist_add_uint64(spa->spa_load_info, in spa_activity_check()
2703 fnvlist_add_uint64(spa->spa_load_info, in spa_activity_check()
2705 fnvlist_add_uint64(spa->spa_load_info, in spa_activity_check()
2718 spa_verify_host(spa_t *spa, nvlist_t *mos_config) in spa_verify_host() argument
2724 if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config, in spa_verify_host()
2736 spa_name(spa), hostname, (u_longlong_t)hostid); in spa_verify_host()
2737 spa_load_failed(spa, "hostid verification failed: pool " in spa_verify_host()
2748 spa_ld_parse_config(spa_t *spa, spa_import_type_t type) argument
2751 nvlist_t *nvtree, *nvl, *config = spa->spa_config;
2762 &spa->spa_ubsync.ub_version) != 0)
2763 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2766 spa_load_failed(spa, "invalid config provided: '%s' missing",
2781 if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
2782 spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
2785 if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
2786 spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
2788 !spa_importing_readonly_checkpoint(spa)) {
2790 spa_load_failed(spa, "a pool with guid %llu is already open",
2795 spa->spa_config_guid = pool_guid;
2797 nvlist_free(spa->spa_load_info);
2798 spa->spa_load_info = fnvlist_alloc();
2800 ASSERT(spa->spa_comment == NULL);
2802 spa->spa_comment = spa_strdup(comment);
2805 &spa->spa_config_txg);
2808 spa->spa_config_splitting = fnvlist_dup(nvl);
2811 spa_load_failed(spa, "invalid config provided: '%s' missing",
2819 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
2822 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
2832 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2835 error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
2836 spa_config_exit(spa, SCL_ALL, FTAG);
2839 spa_load_failed(spa, "unable to parse config [error=%d]",
2844 ASSERT(spa->spa_root_vdev == rvd);
2845 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
2846 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
2849 ASSERT(spa_guid(spa) == pool_guid);
2860 spa_ld_open_vdevs(spa_t *spa) argument
2868 if (spa->spa_trust_config) {
2869 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
2870 } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
2871 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
2872 } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
2873 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
2875 spa->spa_missing_tvds_allowed = 0;
2878 spa->spa_missing_tvds_allowed =
2879 MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
2881 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2882 error = vdev_open(spa->spa_root_vdev);
2883 spa_config_exit(spa, SCL_ALL, FTAG);
2885 if (spa->spa_missing_tvds != 0) {
2886 spa_load_note(spa, "vdev tree has %lld missing top-level "
2887 "vdevs.", (u_longlong_t)spa->spa_missing_tvds);
2888 if (spa->spa_trust_config && (spa->spa_mode & FWRITE)) {
2898 spa_load_note(spa, "pools with missing top-level "
2902 spa_load_note(spa, "current settings allow for maximum "
2904 (u_longlong_t)spa->spa_missing_tvds_allowed);
2908 spa_load_failed(spa, "unable to open vdev tree [error=%d]",
2911 if (spa->spa_missing_tvds != 0 || error != 0)
2912 vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
2924 spa_ld_validate_vdevs(spa_t *spa) argument
2927 vdev_t *rvd = spa->spa_root_vdev;
2929 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2931 spa_config_exit(spa, SCL_ALL, FTAG);
2934 spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
2939 spa_load_failed(spa, "cannot open vdev tree after invalidating "
2949 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub) argument
2951 spa->spa_state = POOL_STATE_ACTIVE;
2952 spa->spa_ubsync = spa->spa_uberblock;
2953 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2954 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2955 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2956 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2957 spa->spa_claim_max_txg = spa->spa_first_txg;
2958 spa->spa_prev_software_version = ub->ub_software_version;
2962 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) argument
2964 vdev_t *rvd = spa->spa_root_vdev;
2966 uberblock_t *ub = &spa->spa_uberblock;
2987 spa_importing_readonly_checkpoint(spa)) {
2988 spa_ld_select_uberblock_done(spa, ub);
3002 spa_load_failed(spa, "no valid uberblock found");
3006 if (spa->spa_load_max_txg != UINT64_MAX) {
3007 (void) spa_import_progress_set_max_txg(spa,
3008 (u_longlong_t)spa->spa_load_max_txg);
3010 spa_load_note(spa, "using uberblock with txg=%llu",
3018 activity_check = spa_activity_check_required(spa, ub, label,
3019 spa->spa_config);
3024 fnvlist_add_uint64(spa->spa_load_info,
3029 int error = spa_activity_check(spa, ub, spa->spa_config);
3035 fnvlist_add_uint64(spa->spa_load_info,
3037 fnvlist_add_uint64(spa->spa_load_info,
3039 fnvlist_add_uint16(spa->spa_load_info,
3049 spa_load_failed(spa, "version %llu is not supported",
3062 spa_load_failed(spa, "label config unavailable");
3070 spa_load_failed(spa, "invalid label: '%s' missing",
3080 nvlist_free(spa->spa_label_features);
3081 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
3097 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
3099 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
3107 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
3110 spa_load_failed(spa, "some features are unsupported");
3118 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
3119 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3120 spa_try_repair(spa, spa->spa_config);
3121 spa_config_exit(spa, SCL_ALL, FTAG);
3122 nvlist_free(spa->spa_config_splitting);
3123 spa->spa_config_splitting = NULL;
3129 spa_ld_select_uberblock_done(spa, ub);
3135 spa_ld_open_rootbp(spa_t *spa) argument
3138 vdev_t *rvd = spa->spa_root_vdev;
3140 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
3142 spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
3146 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
3152 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type, argument
3155 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
3161 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
3172 healthy_tvds = spa_healthy_core_tvds(spa);
3174 if (load_nvlist(spa, spa->spa_config_object, &mos_config)
3176 spa_load_failed(spa, "unable to retrieve MOS config");
3184 if (spa->spa_load_state == SPA_LOAD_OPEN) {
3185 error = spa_verify_host(spa, mos_config);
3194 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3199 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
3212 spa_load_note(spa, "provided vdev tree:");
3214 spa_load_note(spa, "MOS vdev tree:");
3218 spa_load_note(spa, "vdev_copy_path_strict failed, falling "
3225 spa->spa_root_vdev = mrvd;
3227 spa_config_exit(spa, SCL_ALL, FTAG);
3239 mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
3240 if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
3243 spa_config_set(spa, mos_config);
3244 spa->spa_config_source = SPA_CONFIG_SRC_MOS;
3252 spa->spa_trust_config = B_TRUE;
3257 error = spa_ld_open_vdevs(spa);
3261 error = spa_ld_validate_vdevs(spa);
3266 spa_load_note(spa, "final vdev tree:");
3270 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
3271 !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
3279 healthy_tvds_mos = spa_healthy_core_tvds(spa);
3282 spa_load_note(spa, "config provided misses too many "
3286 spa_load_note(spa, "vdev tree:");
3289 spa_load_failed(spa, "config was already "
3294 spa_load_note(spa, "spa must be reloaded using MOS "
3300 error = spa_check_for_missing_logs(spa);
3304 if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
3305 spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
3307 (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
3317 spa_ld_open_indirect_vdev_metadata(spa_t *spa) argument
3320 vdev_t *rvd = spa->spa_root_vdev;
3326 error = spa_remove_init(spa);
3328 spa_load_failed(spa, "spa_remove_init failed [error=%d]",
3336 error = spa_condense_init(spa);
3338 spa_load_failed(spa, "spa_condense_init failed [error=%d]",
3347 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep) argument
3350 vdev_t *rvd = spa->spa_root_vdev;
3352 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
3356 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
3357 &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
3361 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
3362 &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
3366 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
3367 &spa->spa_feat_desc_obj, B_TRUE) != 0) {
3374 if (!spa_features_check(spa, B_FALSE,
3378 if (spa_writeable(spa) ||
3379 spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
3380 if (!spa_features_check(spa, B_TRUE,
3386 fnvlist_add_nvlist(spa->spa_load_info,
3390 fnvlist_add_nvlist(spa->spa_load_info,
3398 fnvlist_add_boolean(spa->spa_load_info,
3422 spa_writeable(spa))) {
3423 spa_load_failed(spa, "pool uses unsupported features");
3435 error = feature_get_refcount_from_disk(spa,
3438 spa->spa_feat_refcount_cache[i] = refcount;
3440 spa->spa_feat_refcount_cache[i] =
3443 spa_load_failed(spa, "error getting refcount "
3452 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
3453 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
3454 &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
3463 if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
3464 !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
3465 spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
3472 spa_ld_load_special_directories(spa_t *spa) argument
3475 vdev_t *rvd = spa->spa_root_vdev;
3477 spa->spa_is_initializing = B_TRUE;
3478 error = dsl_pool_open(spa->spa_dsl_pool);
3479 spa->spa_is_initializing = B_FALSE;
3481 spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
3489 spa_ld_get_props(spa_t *spa) argument
3493 vdev_t *rvd = spa->spa_root_vdev;
3496 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3498 sizeof (spa->spa_cksum_salt.zcs_bytes),
3499 spa->spa_cksum_salt.zcs_bytes);
3502 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
3503 sizeof (spa->spa_cksum_salt.zcs_bytes));
3505 spa_load_failed(spa, "unable to retrieve checksum salt from "
3510 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
3512 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
3514 spa_load_failed(spa, "error opening deferred-frees bpobj "
3524 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
3528 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
3529 &spa->spa_creation_version, B_FALSE);
3537 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
3542 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
3543 &spa->spa_errlog_scrub, B_FALSE);
3551 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
3564 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
3565 spa_load_failed(spa, "unable to retrieve MOS config");
3569 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
3570 &spa->spa_all_vdev_zaps, B_FALSE);
3575 spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
3576 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
3585 spa->spa_avz_action = AVZ_ACTION_DESTROY;
3590 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
3594 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3596 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
3604 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
3605 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
3606 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
3607 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
3608 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
3609 spa_prop_find(spa, ZPOOL_PROP_BOOTSIZE, &spa->spa_bootsize);
3610 spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
3611 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
3612 &spa->spa_dedup_ditto);
3613 spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
3614 spa->spa_autoreplace = (autoreplace != 0);
3622 if (spa->spa_missing_tvds > 0 &&
3623 spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
3624 spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
3625 spa_load_note(spa, "forcing failmode to 'continue' "
3627 spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
3634 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type) argument
3637 vdev_t *rvd = spa->spa_root_vdev;
3648 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
3653 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
3654 if (load_nvlist(spa, spa->spa_spares.sav_object,
3655 &spa->spa_spares.sav_config) != 0) {
3656 spa_load_failed(spa, "error loading spares nvlist");
3660 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3661 spa_load_spares(spa);
3662 spa_config_exit(spa, SCL_ALL, FTAG);
3664 spa->spa_spares.sav_sync = B_TRUE;
3670 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
3671 &spa->spa_l2cache.sav_object, B_FALSE);
3675 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
3676 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
3677 &spa->spa_l2cache.sav_config) != 0) {
3678 spa_load_failed(spa, "error loading l2cache nvlist");
3682 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3683 spa_load_l2cache(spa);
3684 spa_config_exit(spa, SCL_ALL, FTAG);
3686 spa->spa_l2cache.sav_sync = B_TRUE;
3693 spa_ld_load_vdev_metadata(spa_t *spa) argument
3696 vdev_t *rvd = spa->spa_root_vdev;
3703 if (spa_multihost(spa) && spa_get_hostid() == 0 &&
3704 (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
3705 fnvlist_add_uint64(spa->spa_load_info,
3717 if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
3718 spa_check_removed(spa->spa_root_vdev);
3724 if (spa->spa_load_state != SPA_LOAD_IMPORT) {
3725 spa_aux_check_removed(&spa->spa_spares);
3726 spa_aux_check_removed(&spa->spa_l2cache);
3735 spa_load_failed(spa, "vdev_load failed [error=%d]", error);
3739 error = spa_ld_log_spacemaps(spa);
3741 spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]",
3749 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3751 spa_config_exit(spa, SCL_ALL, FTAG);
3757 spa_ld_load_dedup_tables(spa_t *spa) argument
3760 vdev_t *rvd = spa->spa_root_vdev;
3762 error = ddt_load(spa);
3764 spa_load_failed(spa, "ddt_load failed [error=%d]", error);
3772 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport) argument
3774 vdev_t *rvd = spa->spa_root_vdev;
3776 if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
3777 boolean_t missing = spa_check_logs(spa);
3779 if (spa->spa_missing_tvds != 0) {
3780 spa_load_note(spa, "spa_check_logs failed "
3784 spa_load_failed(spa, "spa_check_logs failed");
3795 spa_ld_verify_pool_data(spa_t *spa) argument
3798 vdev_t *rvd = spa->spa_root_vdev;
3804 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
3805 error = spa_load_verify(spa);
3807 spa_load_failed(spa, "spa_load_verify failed "
3818 spa_ld_claim_log_blocks(spa_t *spa) argument
3821 dsl_pool_t *dp = spa_get_dsl(spa);
3830 spa->spa_claiming = B_TRUE;
3832 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
3837 spa->spa_claiming = B_FALSE;
3839 spa_set_log_state(spa, SPA_LOG_GOOD);
3843 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg, argument
3846 vdev_t *rvd = spa->spa_root_vdev;
3856 if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
3857 spa->spa_load_state == SPA_LOAD_IMPORT ||
3858 spa->spa_load_state == SPA_LOAD_RECOVER ||
3859 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
3871 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
3875 spa_ld_prepare_for_reload(spa_t *spa) argument
3877 int mode = spa->spa_mode;
3878 int async_suspended = spa->spa_async_suspended;
3880 spa_unload(spa);
3881 spa_deactivate(spa);
3882 spa_activate(spa, mode);
3889 spa->spa_async_suspended = async_suspended;
3893 spa_ld_read_checkpoint_txg(spa_t *spa) argument
3898 ASSERT0(spa->spa_checkpoint_txg);
3901 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3914 spa->spa_checkpoint_txg = checkpoint.ub_txg;
3915 spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
3921 spa_ld_mos_init(spa_t *spa, spa_import_type_t type) argument
3926 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
3937 spa->spa_trust_config = B_FALSE;
3942 error = spa_ld_parse_config(spa, type);
3946 spa_import_progress_add(spa);
3955 error = spa_ld_open_vdevs(spa);
3967 error = spa_ld_validate_vdevs(spa);
3979 error = spa_ld_select_uberblock(spa, type);
3988 error = spa_ld_open_rootbp(spa);
3996 spa_ld_checkpoint_rewind(spa_t *spa) argument
4002 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4004 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4009 spa_load_failed(spa, "unable to retrieve checkpointed "
4018 ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
4028 checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
4034 spa->spa_uberblock = checkpoint;
4048 if (spa_writeable(spa)) {
4049 vdev_t *rvd = spa->spa_root_vdev;
4051 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4072 error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
4074 spa->spa_last_synced_guid = rvd->vdev_guid;
4075 spa_config_exit(spa, SCL_ALL, FTAG);
4078 spa_load_failed(spa, "failed to write checkpointed "
4088 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type, argument
4098 error = spa_ld_mos_init(spa, type);
4106 error = spa_ld_trusted_config(spa, type, B_FALSE);
4115 spa_ld_prepare_for_reload(spa);
4116 spa_load_note(spa, "RELOADING");
4117 error = spa_ld_mos_init(spa, type);
4121 error = spa_ld_trusted_config(spa, type, B_TRUE);
4139 spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport) argument
4144 (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4148 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
4150 spa_load_note(spa, "LOADING");
4152 error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
4175 error = spa_ld_checkpoint_rewind(spa);
4183 spa_ld_prepare_for_reload(spa);
4184 spa_load_note(spa, "LOADING checkpointed uberblock");
4185 error = spa_ld_mos_with_trusted_config(spa, type, NULL);
4193 error = spa_ld_read_checkpoint_txg(spa);
4205 error = spa_ld_open_indirect_vdev_metadata(spa);
4213 error = spa_ld_check_features(spa, &missing_feat_write);
4221 error = spa_ld_load_special_directories(spa);
4228 error = spa_ld_get_props(spa);
4236 error = spa_ld_open_aux_vdevs(spa, type);
4244 error = spa_ld_load_vdev_metadata(spa);
4248 error = spa_ld_load_dedup_tables(spa);
4256 error = spa_ld_verify_logs(spa, type, ereport);
4261 ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
4268 return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
4277 error = spa_ld_verify_pool_data(spa);
4286 spa_update_dspace(spa);
4293 if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
4294 spa->spa_load_max_txg == UINT64_MAX)) {
4295 uint64_t config_cache_txg = spa->spa_config_txg;
4297 ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
4304 spa_history_log_internal(spa, "checkpoint rewind",
4306 (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
4312 spa_ld_claim_log_blocks(spa);
4317 spa->spa_sync_on = B_TRUE;
4318 txg_sync_start(spa->spa_dsl_pool);
4319 mmp_thread_start(spa);
4328 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
4335 spa_ld_check_for_config_update(spa, config_cache_txg,
4341 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
4342 vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
4343 spa_async_request(spa, SPA_ASYNC_RESILVER);
4349 spa_history_log_version(spa, "open");
4351 spa_restart_removal(spa);
4352 spa_spawn_aux_threads(spa);
4363 (void) dmu_objset_find(spa_name(spa),
4369 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
4371 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4372 vdev_initialize_restart(spa->spa_root_vdev);
4373 vdev_trim_restart(spa->spa_root_vdev);
4374 vdev_autotrim_restart(spa);
4375 spa_config_exit(spa, SCL_CONFIG, FTAG);
4378 spa_import_progress_remove(spa);
4379 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
4381 spa_load_note(spa, "LOADED");
4387 spa_load_retry(spa_t *spa, spa_load_state_t state) argument
4389 int mode = spa->spa_mode;
4391 spa_unload(spa);
4392 spa_deactivate(spa);
4394 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
4396 spa_activate(spa, mode);
4397 spa_async_suspend(spa);
4399 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
4400 (u_longlong_t)spa->spa_load_max_txg);
4402 return (spa_load(spa, state, SPA_IMPORT_EXISTING));
4413 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, argument
4422 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
4423 spa->spa_load_max_txg = spa->spa_load_txg;
4424 spa_set_log_state(spa, SPA_LOG_CLEAR);
4426 spa->spa_load_max_txg = max_request;
4428 spa->spa_extreme_rewind = B_TRUE;
4431 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
4440 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4441 spa_import_progress_remove(spa);
4445 if (spa->spa_root_vdev != NULL)
4446 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4448 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
4449 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
4453 spa_import_progress_remove(spa);
4459 spa_set_log_state(spa, SPA_LOG_CLEAR);
4466 loadinfo = spa->spa_load_info;
4467 spa->spa_load_info = fnvlist_alloc();
4470 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
4471 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
4479 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
4480 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
4481 if (spa->spa_load_max_txg < safe_rewind_txg)
4482 spa->spa_extreme_rewind = B_TRUE;
4483 rewind_error = spa_load_retry(spa, state);
4486 spa->spa_extreme_rewind = B_FALSE;
4487 spa->spa_load_max_txg = UINT64_MAX;
4490 spa_config_set(spa, config);
4496 spa_import_progress_remove(spa);
4501 spa->spa_load_info);
4504 fnvlist_free(spa->spa_load_info);
4505 spa->spa_load_info = loadinfo;
4507 spa_import_progress_remove(spa);
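
spa_load_best() above retries the load with progressively older uberblocks (spa_load_retry() lowers spa_load_max_txg by one each time) until the load succeeds or the txg drops below the safe rewind point. The self-contained sketch below mimics that rewind loop with made-up txg numbers and a fake try_load(); it is not the real rewind logic.

#include <stdio.h>
#include <stdint.h>

#define	TXG_DEFER_SIZE	2

/* fake load: pretend only uberblocks at txg <= 95 are usable */
static int
try_load(uint64_t max_txg)
{
	return (max_txg > 95 ? 5 : 0);	/* nonzero means the load failed */
}

int
main(void)
{
	uint64_t last_synced_txg = 100;
	uint64_t safe_rewind_txg = last_synced_txg - TXG_DEFER_SIZE;
	uint64_t min_txg = safe_rewind_txg - 3;	/* how far back we may rewind */
	uint64_t max_txg = UINT64_MAX;		/* first attempt: no limit */
	int error = try_load(max_txg);

	max_txg = last_synced_txg;
	while (error != 0 && max_txg >= min_txg) {
		printf("rewind, max txg: %llu\n", (unsigned long long)max_txg);
		error = try_load(max_txg);
		max_txg--;
	}

	if (error == 0)
		printf("loaded at txg %llu\n",
		    (unsigned long long)(max_txg + 1));
	else
		printf("rewind failed\n");
	return (error);
}
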
4528 spa_t *spa; local
4546 if ((spa = spa_lookup(pool)) == NULL) {
4552 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
4555 zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
4560 spa_activate(spa, spa_mode_global);
4563 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
4564 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
4567 error = spa_load_best(spa, state, policy.zlp_txg,
4578 spa_unload(spa);
4579 spa_deactivate(spa);
4580 spa_write_cachefile(spa, B_TRUE, B_TRUE);
4581 spa_remove(spa);
4593 if (config != NULL && spa->spa_config) {
4594 VERIFY(nvlist_dup(spa->spa_config, config,
4598 spa->spa_load_info) == 0);
4600 spa_unload(spa);
4601 spa_deactivate(spa);
4602 spa->spa_last_open_failed = error;
4610 spa_open_ref(spa, tag);
4613 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4621 spa->spa_load_info) == 0);
4625 spa->spa_last_open_failed = 0;
4626 spa->spa_last_ubsync_txg = 0;
4627 spa->spa_load_txg = 0;
4631 *spapp = spa;
4656 spa_t *spa; local
4659 if ((spa = spa_lookup(name)) == NULL) {
4663 spa->spa_inject_ref++;
4666 return (spa);
4670 spa_inject_delref(spa_t *spa) argument
4673 spa->spa_inject_ref--;
4681 spa_add_spares(spa_t *spa, nvlist_t *config) argument
4691 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
4693 if (spa->spa_spares.sav_count == 0)
4698 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
4730 spa_add_l2cache(spa_t *spa, nvlist_t *config) argument
4740 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
4742 if (spa->spa_l2cache.sav_count == 0)
4747 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
4764 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
4766 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
4767 vd = spa->spa_l2cache.sav_vdevs[j];
4784 spa_add_feature_stats(spa_t *spa, nvlist_t *config) argument
4790 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
4793 if (spa->spa_feat_for_read_obj != 0) {
4794 for (zap_cursor_init(&zc, spa->spa_meta_objset,
4795 spa->spa_feat_for_read_obj);
4806 if (spa->spa_feat_for_write_obj != 0) {
4807 for (zap_cursor_init(&zc, spa->spa_meta_objset,
4808 spa->spa_feat_for_write_obj);
4829 spa_t *spa; local
4832 error = spa_open_common(name, &spa, FTAG, NULL, config);
4834 if (spa != NULL) {
4840 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4845 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
4846 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
4852 spa_get_errlog_size(spa)) == 0);
4854 if (spa_suspended(spa)) {
4857 spa->spa_failmode) == 0);
4860 spa->spa_suspended) == 0);
4863 spa_add_spares(spa, *config);
4864 spa_add_l2cache(spa, *config);
4865 spa_add_feature_stats(spa, *config);
4874 if (spa == NULL) {
4876 spa = spa_lookup(name);
4877 if (spa)
4878 spa_altroot(spa, altroot, buflen);
4881 spa = NULL;
4884 spa_altroot(spa, altroot, buflen);
4888 if (spa != NULL) {
4889 spa_config_exit(spa, SCL_CONFIG, FTAG);
4890 spa_close(spa, FTAG);
4903 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, argument
4912 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4927 if (spa_version(spa) < version)
4938 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
4972 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) argument
4976 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4978 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
4979 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
4984 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
4985 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
5039 spa_l2cache_drop(spa_t *spa) argument
5043 spa_aux_vdev_t *sav = &spa->spa_l2cache;
5080 spa_t *spa; local
5118 spa = spa_add(poolname, nvl, altroot);
5120 spa_activate(spa, spa_mode_global);
5122 if (props && (error = spa_prop_validate(spa, props))) {
5123 spa_deactivate(spa);
5124 spa_remove(spa);
5133 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
5152 spa_deactivate(spa);
5153 spa_remove(spa);
5165 spa->spa_first_txg = txg;
5166 spa->spa_uberblock.ub_txg = txg - 1;
5167 spa->spa_uberblock.ub_version = version;
5168 spa->spa_ubsync = spa->spa_uberblock;
5169 spa->spa_load_state = SPA_LOAD_CREATE;
5170 spa->spa_removing_phys.sr_state = DSS_NONE;
5171 spa->spa_removing_phys.sr_removing_vdev = -1;
5172 spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
5173 spa->spa_indirect_vdevs_loaded = B_TRUE;
5178 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
5181 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
5189 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5191 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
5194 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
5201 (error = spa_validate_aux(spa, nvroot, txg,
5215 spa_config_exit(spa, SCL_ALL, FTAG);
5218 spa_unload(spa);
5219 spa_deactivate(spa);
5220 spa_remove(spa);
5230 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
5232 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
5234 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5235 spa_load_spares(spa);
5236 spa_config_exit(spa, SCL_ALL, FTAG);
5237 spa->spa_spares.sav_sync = B_TRUE;
5245 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
5247 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
5249 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5250 spa_load_l2cache(spa);
5251 spa_config_exit(spa, SCL_ALL, FTAG);
5252 spa->spa_l2cache.sav_sync = B_TRUE;
5255 spa->spa_is_initializing = B_TRUE;
5256 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
5257 spa->spa_is_initializing = B_FALSE;
5262 ddt_create(spa);
5264 spa_update_dspace(spa);
5271 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
5275 if (zap_add(spa->spa_meta_objset,
5277 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
5281 if (zap_add(spa->spa_meta_objset,
5289 spa->spa_deflate = TRUE;
5290 if (zap_add(spa->spa_meta_objset,
5292 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
5302 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
5303 dmu_object_set_compress(spa->spa_meta_objset, obj,
5305 if (zap_add(spa->spa_meta_objset,
5310 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
5311 spa->spa_meta_objset, obj));
5317 spa_history_create_obj(spa, tx);
5322 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
5323 sizeof (spa->spa_cksum_salt.zcs_bytes));
5328 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
5329 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
5330 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
5331 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
5332 spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
5333 spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
5336 spa_configfile_set(spa, props, B_FALSE);
5342 spa->spa_sync_on = B_TRUE;
5343 txg_sync_start(spa->spa_dsl_pool);
5344 mmp_thread_start(spa);
5350 txg_wait_synced(spa->spa_dsl_pool, txg);
5352 spa_spawn_aux_threads(spa);
5354 spa_write_cachefile(spa, B_FALSE, B_TRUE);
5355 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
5357 spa_history_log_version(spa, "create");
5363 spa_evicting_os_wait(spa);
5364 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
5365 spa->spa_load_state = SPA_LOAD_NONE;
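
The spa_create() hits above (5118-5365) follow a staged-teardown discipline: a failure before the vdev tree is parsed undoes only spa_activate() via spa_deactivate() and spa_remove(), while a failure after spa_config_parse() also runs spa_unload() first. Below is a minimal user-space sketch of that unwind order; the pool_* names and the three stages are invented for the example, and only the teardown ordering is taken from the listing.

#include <stdlib.h>

/*
 * Hypothetical stand-ins for spa_add()/spa_activate()/spa_config_parse()/
 * spa_unload()/spa_deactivate()/spa_remove(); only the teardown order
 * mirrors the listing above.
 */
struct pool { int activated; int loaded; };

static struct pool *pool_add(void)            { return (calloc(1, sizeof (struct pool))); }
static void pool_remove(struct pool *p)       { free(p); }
static int  pool_activate(struct pool *p)     { p->activated = 1; return (0); }
static void pool_deactivate(struct pool *p)   { p->activated = 0; }
static int  pool_parse_config(struct pool *p) { p->loaded = 1; return (0); }
static void pool_unload(struct pool *p)       { p->loaded = 0; }
static int  pool_setup_aux(struct pool *p)    { (void) p; return (0); }

/* Create a pool, unwinding exactly as much as was built when a step fails. */
static int
pool_create(struct pool **pp)
{
	struct pool *p = pool_add();
	int error;

	if ((error = pool_activate(p)) != 0)
		goto out_remove;		/* only pool_add() to undo */

	if ((error = pool_parse_config(p)) != 0)
		goto out_deactivate;		/* activated, nothing loaded */

	if ((error = pool_setup_aux(p)) != 0)
		goto out_unload;		/* loaded: unload before deactivating */

	*pp = p;
	return (0);

out_unload:
	pool_unload(p);
out_deactivate:
	pool_deactivate(p);
out_remove:
	pool_remove(p);
	return (error);
}

int
main(void)
{
	struct pool *p;

	return (pool_create(&p) == 0 ? 0 : 1);
}
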
5479 spa_t *spa; local
5549 if ((spa = spa_lookup(pname)) != NULL) {
5554 spa_remove(spa);
5557 spa = spa_add(pname, config, NULL);
5558 spa->spa_is_root = B_TRUE;
5559 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
5561 &spa->spa_ubsync.ub_version) != 0)
5562 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
5569 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5570 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
5572 spa_config_exit(spa, SCL_ALL, FTAG);
5621 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
5625 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5627 spa_config_exit(spa, SCL_ALL, FTAG);
5642 spa_t *spa; local
5671 spa = spa_add(pool, config, altroot);
5672 spa->spa_import_flags = flags;
5678 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
5680 spa_configfile_set(spa, props, B_FALSE);
5682 spa_write_cachefile(spa, B_FALSE, B_TRUE);
5683 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
5689 spa_activate(spa, mode);
5694 spa_async_suspend(spa);
5700 spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
5703 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
5709 error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
5716 spa->spa_load_info) == 0);
5718 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5723 if (spa->spa_spares.sav_config) {
5724 nvlist_free(spa->spa_spares.sav_config);
5725 spa->spa_spares.sav_config = NULL;
5726 spa_load_spares(spa);
5728 if (spa->spa_l2cache.sav_config) {
5729 nvlist_free(spa->spa_l2cache.sav_config);
5730 spa->spa_l2cache.sav_config = NULL;
5731 spa_load_l2cache(spa);
5737 error = spa_validate_aux(spa, nvroot, -1ULL,
5740 error = spa_validate_aux(spa, nvroot, -1ULL,
5742 spa_config_exit(spa, SCL_ALL, FTAG);
5745 spa_configfile_set(spa, props, B_FALSE);
5747 if (error != 0 || (props && spa_writeable(spa) &&
5748 (error = spa_prop_set(spa, props)))) {
5749 spa_unload(spa);
5750 spa_deactivate(spa);
5751 spa_remove(spa);
5756 spa_async_resume(spa);
5764 if (spa->spa_spares.sav_config)
5765 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
5768 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
5770 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
5772 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5773 spa_load_spares(spa);
5774 spa_config_exit(spa, SCL_ALL, FTAG);
5775 spa->spa_spares.sav_sync = B_TRUE;
5779 if (spa->spa_l2cache.sav_config)
5780 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
5783 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
5785 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
5787 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5788 spa_load_l2cache(spa);
5789 spa_config_exit(spa, SCL_ALL, FTAG);
5790 spa->spa_l2cache.sav_sync = B_TRUE;
5796 if (spa->spa_autoreplace) {
5797 spa_aux_check_removed(&spa->spa_spares);
5798 spa_aux_check_removed(&spa->spa_l2cache);
5801 if (spa_writeable(spa)) {
5805 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
5812 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
5814 spa_history_log_version(spa, "import");
5816 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
5828 spa_t *spa; local
5843 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
5844 spa_activate(spa, FREAD);
5849 zpool_get_load_policy(spa->spa_config, &policy);
5851 spa->spa_load_max_txg = policy.zlp_txg;
5852 spa->spa_extreme_rewind = B_TRUE;
5862 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
5864 spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
5867 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
5872 if (spa->spa_root_vdev != NULL) {
5873 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5879 spa->spa_uberblock.ub_timestamp) == 0);
5881 spa->spa_load_info) == 0);
5888 if ((!error || error == EEXIST) && spa->spa_bootfs) {
5895 if (dsl_dsobj_to_dsname(spa_name(spa),
5896 spa->spa_bootfs, tmpname) == 0) {
5918 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5919 spa_add_spares(spa, config);
5920 spa_add_l2cache(spa, config);
5921 spa_config_exit(spa, SCL_CONFIG, FTAG);
5924 spa_unload(spa);
5925 spa_deactivate(spa);
5926 spa_remove(spa);
5945 spa_t *spa; local
5954 if ((spa = spa_lookup(pool)) == NULL) {
5963 spa_open_ref(spa, FTAG);
5965 spa_async_suspend(spa);
5967 spa_close(spa, FTAG);
5973 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
5979 txg_wait_synced(spa->spa_dsl_pool, 0);
5980 spa_evicting_os_wait(spa);
5987 if (!spa_refcount_zero(spa) ||
5988 (spa->spa_inject_ref != 0 &&
5990 spa_async_resume(spa);
6002 spa_has_active_shared_spare(spa)) {
6003 spa_async_resume(spa);
6015 if (spa->spa_root_vdev != NULL) {
6016 vdev_t *rvd = spa->spa_root_vdev;
6019 vdev_autotrim_stop_all(spa);
6028 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6029 spa->spa_state = new_state;
6030 spa->spa_final_txg = spa_last_synced_txg(spa) +
6032 vdev_config_dirty(spa->spa_root_vdev);
6033 spa_config_exit(spa, SCL_ALL, FTAG);
6037 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
6039 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6040 spa_unload(spa);
6041 spa_deactivate(spa);
6044 if (oldconfig && spa->spa_config)
6045 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
6049 spa_write_cachefile(spa, B_TRUE, B_TRUE);
6050 spa_remove(spa);
6099 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) argument
6103 vdev_t *rvd = spa->spa_root_vdev;
6108 ASSERT(spa_writeable(spa));
6110 txg = spa_vdev_enter(spa);
6112 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
6114 return (spa_vdev_exit(spa, NULL, txg, error));
6116 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
6127 return (spa_vdev_exit(spa, vd, txg, EINVAL));
6131 return (spa_vdev_exit(spa, vd, txg, error));
6137 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
6138 return (spa_vdev_exit(spa, vd, txg, error));
6146 if (spa->spa_vdev_removal != NULL ||
6147 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
6150 if (spa->spa_vdev_removal != NULL &&
6151 tvd->vdev_ashift != spa->spa_max_ashift) {
6152 return (spa_vdev_exit(spa, vd, txg, EINVAL));
6156 return (spa_vdev_exit(spa, vd, txg, EINVAL));
6167 return (spa_vdev_exit(spa, vd,
6184 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
6186 spa_load_spares(spa);
6187 spa->spa_spares.sav_sync = B_TRUE;
6191 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
6193 spa_load_l2cache(spa);
6194 spa->spa_l2cache.sav_sync = B_TRUE;
6210 (void) spa_vdev_exit(spa, vd, txg, 0);
6213 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6214 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
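
Every return in the spa_vdev_add() hits above goes through spa_vdev_exit(spa, vd, txg, ...), which drops the namespace lock taken by spa_vdev_enter(), frees the temporary vdev tree, and hands the error straight back to the caller. A rough user-space analogue of that single-exit bracket follows; edit_enter(), edit_exit(), and pool_add_device() are invented names, and the txg handling is reduced to a counter.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Begin a topology edit: take the pool lock and hand back a txg-like ticket. */
static unsigned long long
edit_enter(void)
{
	static unsigned long long next_txg = 1;

	pthread_mutex_lock(&pool_lock);
	return (next_txg++);
}

/*
 * Single exit point for every path, success or failure: release any leftover
 * temporary state, drop the lock, and pass the error back so call sites can
 * write "return (edit_exit(tmp, txg, error));".
 */
static int
edit_exit(void *tmp, unsigned long long txg, int error)
{
	(void) txg;			/* a real pool would wait for this txg */
	free(tmp);
	pthread_mutex_unlock(&pool_lock);
	return (error);
}

static int
pool_add_device(int bad_config)
{
	unsigned long long txg = edit_enter();
	void *newdev = malloc(64);	/* stand-in for the parsed vdev tree */

	if (newdev == NULL)
		return (edit_exit(NULL, txg, ENOMEM));
	if (bad_config)
		return (edit_exit(newdev, txg, EINVAL));

	/*
	 * A commit would transfer the new children into the live tree here,
	 * leaving only the emptied temporary wrapper for edit_exit() to free.
	 */
	return (edit_exit(newdev, txg, 0));
}

int
main(void)
{
	return (pool_add_device(0));
}
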
6234 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) argument
6237 vdev_t *rvd = spa->spa_root_vdev;
6244 ASSERT(spa_writeable(spa));
6246 txg = spa_vdev_enter(spa);
6248 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
6251 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6252 error = (spa_has_checkpoint(spa)) ?
6254 return (spa_vdev_exit(spa, NULL, txg, error));
6257 if (spa->spa_vdev_removal != NULL)
6258 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6261 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6264 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6268 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
6270 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
6273 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6278 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6281 return (spa_vdev_exit(spa, newrootvd, txg, error));
6287 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6296 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6306 !spa_has_spare(spa, newvd->vdev_guid))
6307 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6318 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
6319 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6322 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6335 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
6342 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
6400 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
6418 if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
6419 spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
6422 dsl_scan_restart_resilver(spa->spa_dsl_pool, dtl_max_txg);
6424 if (spa->spa_bootfs)
6425 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
6427 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
6432 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
6434 spa_history_log_internal(spa, "vdev attach", NULL,
6453 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) argument
6457 vdev_t *rvd = spa->spa_root_vdev;
6463 ASSERT(spa_writeable(spa));
6465 txg = spa_vdev_enter(spa);
6467 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
6484 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6485 error = (spa_has_checkpoint(spa)) ?
6487 return (spa_vdev_exit(spa, NULL, txg, error));
6491 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6494 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6512 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6519 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6522 spa_version(spa) >= SPA_VERSION_SPARES);
6530 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6537 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6609 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
6642 if (spa->spa_autoexpand) {
6661 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
6664 spa_open_ref(spa, FTAG);
6666 error = spa_vdev_exit(spa, vd, txg, 0);
6668 spa_history_log_internal(spa, "detach", NULL,
6683 altspa == spa)
6695 spa_vdev_resilver_done(spa);
6700 spa_close(spa, FTAG);
6707 spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, argument
6712 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6715 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
6717 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6720 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6723 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6727 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6771 spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, argument
6793 int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
6806 vdev_initialize_stop_wait(spa, &vd_list);
6809 txg_wait_synced(spa->spa_dsl_pool, 0);
6818 spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, argument
6823 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6826 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
6828 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6831 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6834 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6837 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6840 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6844 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6891 spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate, argument
6913 int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
6926 vdev_trim_stop_wait(spa, &vd_list);
6929 txg_wait_synced(spa->spa_dsl_pool, 0);
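
spa_vdev_initialize() and spa_vdev_trim() both drive the request per device through their *_impl() helpers and only then block in vdev_initialize_stop_wait()/vdev_trim_stop_wait() and txg_wait_synced(). A simplified pthreads model of that fan-out/fan-in shape; thread-per-device is an assumption of the sketch (the real code signals per-vdev threads that already exist).

#include <pthread.h>
#include <stdio.h>

#define	NDEVS	4

/* Per-device work: stands in for kicking TRIM/initialize on one vdev. */
static void *
dev_work(void *arg)
{
	int id = *(int *)arg;

	printf("device %d: operation complete\n", id);
	return (NULL);
}

int
main(void)
{
	pthread_t tids[NDEVS];
	int ids[NDEVS];

	/* Fan out: start the operation on every device in the request. */
	for (int i = 0; i < NDEVS; i++) {
		ids[i] = i;
		pthread_create(&tids[i], NULL, dev_work, &ids[i]);
	}

	/* Fan in: the *_stop_wait() analogue -- block until all are done. */
	for (int i = 0; i < NDEVS; i++)
		pthread_join(tids[i], NULL);

	/* A txg_wait_synced() analogue would go here: wait for state to persist. */
	return (0);
}
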
6941 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, argument
6954 ASSERT(spa_writeable(spa));
6956 txg = spa_vdev_enter(spa);
6959 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6960 error = (spa_has_checkpoint(spa)) ?
6962 return (spa_vdev_exit(spa, NULL, txg, error));
6966 activate_slog = spa_passivate_log(spa);
6967 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
6968 error = spa_reset_logs(spa);
6969 txg = spa_vdev_config_enter(spa);
6972 spa_activate_log(spa);
6975 return (spa_vdev_exit(spa, NULL, txg, error));
6979 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
6987 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
6990 rvd = spa->spa_root_vdev;
7005 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7010 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7023 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
7024 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
7040 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
7055 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
7089 return (spa_vdev_exit(spa, NULL, txg, error));
7097 vdev_reopen(spa->spa_root_vdev);
7108 mutex_enter(&spa->spa_props_lock);
7109 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
7111 mutex_exit(&spa->spa_props_lock);
7112 spa->spa_config_splitting = nvl;
7113 vdev_config_dirty(spa->spa_root_vdev);
7120 spa_version(spa)) == 0);
7122 spa->spa_config_txg) == 0);
7132 newspa->spa_config_txg = spa->spa_config_txg;
7136 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
7139 zio_handle_panic_injection(spa, FTAG, 1);
7170 vdev_initialize_stop_wait(spa, &vd_initialize_list);
7171 vdev_trim_stop_wait(spa, &vd_trim_list);
7188 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
7207 zio_handle_panic_injection(spa, FTAG, 2);
7212 txg = spa_vdev_config_enter(spa);
7213 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
7221 spa_history_log_internal(spa, "detach", tx,
7227 spa->spa_avz_action = AVZ_ACTION_REBUILD;
7228 vdev_config_dirty(spa->spa_root_vdev);
7229 spa->spa_config_splitting = NULL;
7233 (void) spa_vdev_exit(spa, NULL, txg, 0);
7236 zio_handle_panic_injection(spa, FTAG, 3);
7240 "from pool %s", spa_name(spa));
7256 txg = spa_vdev_config_enter(spa);
7265 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
7266 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
7267 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
7269 vdev_reopen(spa->spa_root_vdev);
7271 nvlist_free(spa->spa_config_splitting);
7272 spa->spa_config_splitting = NULL;
7273 (void) spa_vdev_exit(spa, NULL, txg, error);
7362 spa_vdev_resilver_done(spa_t *spa) argument
7367 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7369 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
7388 spa_config_exit(spa, SCL_ALL, FTAG);
7389 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
7391 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
7393 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7396 spa_config_exit(spa, SCL_ALL, FTAG);
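
spa_vdev_resilver_done() hunts for a finished resilver while holding SCL_ALL, drops the lock to run spa_vdev_detach() (which takes heavier locks of its own), then re-enters and hunts again. A small sketch of that "find under the lock, process outside it" loop; the queue_* names and data are made up.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int q_items[] = { 3, 1, 4, 1, 5 };
static int q_len = 5;

/* Pop one item while holding the lock; return 0 once the queue is empty. */
static int
queue_take(int *out)
{
	if (q_len == 0)
		return (0);
	*out = q_items[--q_len];
	return (1);
}

int
main(void)
{
	int item;

	pthread_mutex_lock(&q_lock);
	while (queue_take(&item)) {
		/*
		 * Drop the lock for the expensive step, as the resilver-done
		 * loop exits SCL_ALL before detaching, then re-enter and
		 * look for the next finished device.
		 */
		pthread_mutex_unlock(&q_lock);
		printf("processing %d outside the lock\n", item);
		pthread_mutex_lock(&q_lock);
	}
	pthread_mutex_unlock(&q_lock);
	return (0);
}
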
7403 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, argument
7409 ASSERT(spa_writeable(spa));
7411 spa_vdev_state_enter(spa, SCL_ALL);
7413 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
7414 return (spa_vdev_state_exit(spa, NULL, ENOENT));
7417 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
7436 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
7440 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) argument
7442 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
7446 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) argument
7448 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
7457 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd) argument
7459 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7461 if (dsl_scan_resilvering(spa->spa_dsl_pool))
7464 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
7468 spa_scan_stop(spa_t *spa) argument
7470 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7471 if (dsl_scan_resilvering(spa->spa_dsl_pool))
7473 return (dsl_scan_cancel(spa->spa_dsl_pool));
7477 spa_scan(spa_t *spa, pool_scan_func_t func) argument
7479 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7485 !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
7493 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
7494 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
7498 return (dsl_scan(spa->spa_dsl_pool, func));
7508 spa_async_remove(spa_t *spa, vdev_t *vd) argument
7529 spa_async_remove(spa, vd->vdev_child[c]);
7533 spa_async_probe(spa_t *spa, vdev_t *vd) argument
7541 spa_async_probe(spa, vd->vdev_child[c]);
7545 spa_async_autoexpand(spa_t *spa, vdev_t *vd) argument
7549 if (!spa->spa_autoexpand)
7554 spa_async_autoexpand(spa, cvd);
7571 spa_t *spa = (spa_t *)arg; local
7572 dsl_pool_t *dp = spa->spa_dsl_pool;
7575 ASSERT(spa->spa_sync_on);
7577 mutex_enter(&spa->spa_async_lock);
7578 tasks = spa->spa_async_tasks;
7579 spa->spa_async_tasks = 0;
7580 mutex_exit(&spa->spa_async_lock);
7589 old_space = metaslab_class_get_space(spa_normal_class(spa));
7590 old_space += metaslab_class_get_space(spa_special_class(spa));
7591 old_space += metaslab_class_get_space(spa_dedup_class(spa));
7593 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
7595 new_space = metaslab_class_get_space(spa_normal_class(spa));
7596 new_space += metaslab_class_get_space(spa_special_class(spa));
7597 new_space += metaslab_class_get_space(spa_dedup_class(spa));
7605 spa_history_log_internal(spa, "vdev online", NULL,
7607 spa_name(spa), new_space, new_space - old_space);
7615 spa_vdev_state_enter(spa, SCL_NONE);
7616 spa_async_remove(spa, spa->spa_root_vdev);
7617 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
7618 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
7619 for (int i = 0; i < spa->spa_spares.sav_count; i++)
7620 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
7621 (void) spa_vdev_state_exit(spa, NULL, 0);
7624 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
7625 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
7626 spa_async_autoexpand(spa, spa->spa_root_vdev);
7627 spa_config_exit(spa, SCL_CONFIG, FTAG);
7634 spa_vdev_state_enter(spa, SCL_NONE);
7635 spa_async_probe(spa, spa->spa_root_vdev);
7636 for (int i = 0; i < spa->spa_spares.sav_count; i++)
7637 spa_async_probe(spa, spa->spa_spares.sav_vdevs[i]);
7638 (void) spa_vdev_state_exit(spa, NULL, 0);
7645 spa_vdev_resilver_done(spa);
7657 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
7658 vdev_initialize_restart(spa->spa_root_vdev);
7659 spa_config_exit(spa, SCL_CONFIG, FTAG);
7665 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
7666 vdev_trim_restart(spa->spa_root_vdev);
7667 spa_config_exit(spa, SCL_CONFIG, FTAG);
7673 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
7674 vdev_autotrim_restart(spa);
7675 spa_config_exit(spa, SCL_CONFIG, FTAG);
7684 spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
7685 l2arc_spa_rebuild_start(spa);
7686 spa_config_exit(spa, SCL_L2ARC, FTAG);
7693 mutex_enter(&spa->spa_async_lock);
7694 spa->spa_async_thread = NULL;
7695 cv_broadcast(&spa->spa_async_cv);
7696 mutex_exit(&spa->spa_async_lock);
7701 spa_async_suspend(spa_t *spa) argument
7703 mutex_enter(&spa->spa_async_lock);
7704 spa->spa_async_suspended++;
7705 while (spa->spa_async_thread != NULL)
7706 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
7707 mutex_exit(&spa->spa_async_lock);
7709 spa_vdev_remove_suspend(spa);
7711 zthr_t *condense_thread = spa->spa_condense_zthr;
7715 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
7721 spa_async_resume(spa_t *spa) argument
7723 mutex_enter(&spa->spa_async_lock);
7724 ASSERT(spa->spa_async_suspended != 0);
7725 spa->spa_async_suspended--;
7726 mutex_exit(&spa->spa_async_lock);
7727 spa_restart_removal(spa);
7729 zthr_t *condense_thread = spa->spa_condense_zthr;
7733 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
7739 spa_async_tasks_pending(spa_t *spa) argument
7745 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
7746 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
7747 if (spa->spa_ccw_fail_time == 0) {
7751 (gethrtime() - spa->spa_ccw_fail_time) <
7759 spa_async_dispatch(spa_t *spa) argument
7761 mutex_enter(&spa->spa_async_lock);
7762 if (spa_async_tasks_pending(spa) &&
7763 !spa->spa_async_suspended &&
7764 spa->spa_async_thread == NULL &&
7766 spa->spa_async_thread = thread_create(NULL, 0,
7767 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
7768 mutex_exit(&spa->spa_async_lock);
7772 spa_async_request(spa_t *spa, int task) argument
7774 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
7775 mutex_enter(&spa->spa_async_lock);
7776 spa->spa_async_tasks |= task;
7777 mutex_exit(&spa->spa_async_lock);
7781 spa_async_tasks(spa_t *spa) argument
7783 return (spa->spa_async_tasks);
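
Taken together, the spa_async_* hits describe a deferred-work scheme: spa_async_request() ORs a task bit into spa_async_tasks under spa_async_lock, spa_async_dispatch() creates a worker only when tasks are pending, nothing is suspended, and no worker is already running, the worker snapshots and clears the mask before acting on it, and spa_async_suspend() bumps a counter and waits on spa_async_cv until the worker exits. A compact user-space model of that pattern with pthreads; the task bits, names, and (omitted) error handling are simplifications.

#include <pthread.h>
#include <stdio.h>

#define	TASK_CONFIG_UPDATE	0x1
#define	TASK_PROBE		0x2

static pthread_mutex_t async_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t async_cv = PTHREAD_COND_INITIALIZER;
static unsigned async_tasks;
static int async_suspended;
static int worker_running;

static void *
async_worker(void *arg)
{
	unsigned tasks;

	(void) arg;
	pthread_mutex_lock(&async_lock);
	tasks = async_tasks;		/* snapshot the pending mask ... */
	async_tasks = 0;		/* ... and clear it before working */
	pthread_mutex_unlock(&async_lock);

	if (tasks & TASK_CONFIG_UPDATE)
		printf("worker: config update\n");
	if (tasks & TASK_PROBE)
		printf("worker: probe devices\n");

	pthread_mutex_lock(&async_lock);
	worker_running = 0;
	pthread_cond_broadcast(&async_cv);	/* wake any suspender */
	pthread_mutex_unlock(&async_lock);
	return (NULL);
}

static void
async_request(unsigned task)
{
	pthread_mutex_lock(&async_lock);
	async_tasks |= task;
	pthread_mutex_unlock(&async_lock);
}

static void
async_dispatch(void)
{
	pthread_t tid;

	pthread_mutex_lock(&async_lock);
	if (async_tasks != 0 && async_suspended == 0 && !worker_running) {
		worker_running = 1;
		pthread_create(&tid, NULL, async_worker, NULL);
		pthread_detach(tid);
	}
	pthread_mutex_unlock(&async_lock);
}

static void
async_suspend(void)
{
	pthread_mutex_lock(&async_lock);
	async_suspended++;
	while (worker_running)			/* wait for the worker to drain */
		pthread_cond_wait(&async_cv, &async_lock);
	pthread_mutex_unlock(&async_lock);
}

int
main(void)
{
	async_request(TASK_CONFIG_UPDATE | TASK_PROBE);
	async_dispatch();
	async_suspend();		/* returns once the worker has finished */
	return (0);
}
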
7815 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) argument
7817 zio_t *zio = zio_root(spa, NULL, NULL, 0);
7827 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) argument
7829 if (spa_sync_pass(spa) != 1)
7844 zio_t *zio = zio_root(spa, NULL, NULL, 0);
7845 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
7852 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) argument
7873 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
7877 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
7884 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, argument
7900 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
7903 VERIFY(zap_update(spa->spa_meta_objset,
7914 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
7923 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
7936 spa_t *spa = vd->vdev_spa; local
7938 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
7942 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
7951 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) argument
7961 if (list_is_empty(&spa->spa_config_dirty_list) &&
7962 spa->spa_avz_action == AVZ_ACTION_NONE)
7965 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
7967 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
7968 spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
7969 spa->spa_all_vdev_zaps != 0);
7971 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
7973 uint64_t new_avz = zap_create(spa->spa_meta_objset,
7975 spa_avz_build(spa->spa_root_vdev, new_avz, tx);
7981 for (zap_cursor_init(&zc, spa->spa_meta_objset,
7982 spa->spa_all_vdev_zaps);
7986 if (zap_lookup_int(spa->spa_meta_objset, new_avz,
7992 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
8000 VERIFY0(zap_destroy(spa->spa_meta_objset,
8001 spa->spa_all_vdev_zaps, tx));
8004 VERIFY0(zap_update(spa->spa_meta_objset,
8008 spa->spa_all_vdev_zaps = new_avz;
8009 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
8014 for (zap_cursor_init(&zc, spa->spa_meta_objset,
8015 spa->spa_all_vdev_zaps);
8019 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
8025 VERIFY0(zap_destroy(spa->spa_meta_objset,
8026 spa->spa_all_vdev_zaps, tx));
8027 VERIFY0(zap_remove(spa->spa_meta_objset,
8029 spa->spa_all_vdev_zaps = 0;
8032 if (spa->spa_all_vdev_zaps == 0) {
8033 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
8037 spa->spa_avz_action = AVZ_ACTION_NONE;
8040 vdev_construct_zaps(spa->spa_root_vdev, tx);
8042 config = spa_config_generate(spa, spa->spa_root_vdev,
8049 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
8051 spa->spa_uberblock.ub_version);
8053 spa_config_exit(spa, SCL_STATE, FTAG);
8055 nvlist_free(spa->spa_config_syncing);
8056 spa->spa_config_syncing = config;
8058 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
8066 spa_t *spa = dmu_tx_pool(tx)->dp_spa; local
8074 ASSERT(version >= spa_version(spa));
8076 spa->spa_uberblock.ub_version = version;
8077 vdev_config_dirty(spa->spa_root_vdev);
8078 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
8088 spa_t *spa = dmu_tx_pool(tx)->dp_spa; local
8089 objset_t *mos = spa->spa_meta_objset;
8092 mutex_enter(&spa->spa_props_lock);
8112 spa_feature_enable(spa, fid, tx);
8113 spa_history_log_internal(spa, "set", tx,
8123 ASSERT3U(spa_version(spa), >=, intval);
8131 ASSERT(spa->spa_root != NULL);
8143 if (spa->spa_comment != NULL)
8144 spa_strfree(spa->spa_comment);
8145 spa->spa_comment = spa_strdup(strval);
8153 vdev_config_dirty(spa->spa_root_vdev);
8154 spa_history_log_internal(spa, "set", tx,
8161 if (spa->spa_pool_props_object == 0) {
8162 spa->spa_pool_props_object =
8176 spa->spa_pool_props_object, propname,
8178 spa_history_log_internal(spa, "set", tx,
8189 spa->spa_pool_props_object, propname,
8191 spa_history_log_internal(spa, "set", tx,
8199 spa->spa_delegation = intval;
8202 spa->spa_bootfs = intval;
8205 spa->spa_failmode = intval;
8208 spa->spa_autotrim = intval;
8209 spa_async_request(spa,
8213 spa->spa_autoexpand = intval;
8215 spa_async_request(spa,
8219 spa->spa_multihost = intval;
8222 spa->spa_dedup_ditto = intval;
8231 mutex_exit(&spa->spa_props_lock);
8242 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx) argument
8244 if (spa_sync_pass(spa) != 1)
8247 dsl_pool_t *dp = spa->spa_dsl_pool;
8250 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
8251 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
8255 spa->spa_minref += 3;
8258 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
8259 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
8263 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
8264 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
8268 spa->spa_minref += 3;
8271 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
8272 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
8273 spa_feature_create_zap_objects(spa, tx);
8282 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
8283 boolean_t lz4_en = spa_feature_is_enabled(spa,
8285 boolean_t lz4_ac = spa_feature_is_active(spa,
8289 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
8297 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
8299 VERIFY0(zap_add(spa->spa_meta_objset,
8301 sizeof (spa->spa_cksum_salt.zcs_bytes),
8302 spa->spa_cksum_salt.zcs_bytes, tx));
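
spa_sync_upgrades() performs each upgrade exactly once, on the sync where the already-synced version (spa_ubsync) is still below a threshold but the in-core version (spa_uberblock) has reached it. A tiny table-driven sketch of that "crossed the threshold this sync" test; the version numbers and hook bodies are purely illustrative.

#include <stdio.h>

struct upgrade {
	unsigned version;		/* threshold that triggers the hook */
	void (*hook)(void);
};

static void upgrade_origin(void)   { printf("create origin dataset\n"); }
static void upgrade_features(void) { printf("create feature ZAP objects\n"); }

static const struct upgrade upgrades[] = {
	{ 11, upgrade_origin },
	{ 28, upgrade_features },
};

/*
 * Run every upgrade whose threshold lies in (synced_version, current_version]:
 * the pool crossed it during this sync, mirroring the
 * "ub_version < X && uberblock >= X" tests above.
 */
static void
sync_upgrades(unsigned synced_version, unsigned current_version)
{
	for (unsigned i = 0; i < sizeof (upgrades) / sizeof (upgrades[0]); i++) {
		if (synced_version < upgrades[i].version &&
		    current_version >= upgrades[i].version)
			upgrades[i].hook();
	}
}

int
main(void)
{
	sync_upgrades(10, 30);		/* both hooks fire exactly once */
	sync_upgrades(30, 30);		/* nothing left to do */
	return (0);
}
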
8347 spa_sync_adjust_vdev_max_queue_depth(spa_t *spa) argument
8349 ASSERT(spa_writeable(spa));
8351 vdev_t *rvd = spa->spa_root_vdev;
8354 metaslab_class_t *normal = spa_normal_class(spa);
8355 metaslab_class_t *special = spa_special_class(spa);
8356 metaslab_class_t *dedup = spa_dedup_class(spa);
8375 for (int i = 0; i < spa->spa_alloc_count; i++)
8380 for (int i = 0; i < spa->spa_alloc_count; i++) {
8387 for (int i = 0; i < spa->spa_alloc_count; i++) {
8401 spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx) argument
8403 ASSERT(spa_writeable(spa));
8405 vdev_t *rvd = spa->spa_root_vdev;
8418 spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx) argument
8420 objset_t *mos = spa->spa_meta_objset;
8421 dsl_pool_t *dp = spa->spa_dsl_pool;
8423 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
8426 int pass = ++spa->spa_sync_pass;
8428 spa_sync_config_object(spa, tx);
8429 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
8431 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
8433 spa_errlog_sync(spa, txg);
8437 spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
8444 spa_sync_frees(spa, free_bpl, tx);
8452 &spa->spa_deferred_bpobj, tx);
8455 ddt_sync(spa, txg);
8457 svr_sync(spa, tx);
8458 spa_sync_upgrades(spa, tx);
8460 spa_flush_metaslabs(spa, tx);
8463 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
8477 spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
8491 spa_sync_deferred_frees(spa, tx);
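
spa_sync_iterate_to_convergence() increments spa_sync_pass and repeats the per-pass work (config, frees, DDT, dirty vdevs) until the txg has nothing left dirty. A bare-bones model of that convergence loop; the halving of remaining work is only a placeholder for "each pass dirties a little more state, but less than the last".

#include <stdio.h>

int
main(void)
{
	int dirty = 37;		/* units of dirty state left in this txg */
	int pass = 0;

	while (dirty != 0) {
		pass++;		/* spa->spa_sync_pass analogue */
		printf("pass %d: syncing %d dirty units\n", pass, dirty);
		dirty /= 2;	/* syncing generates more work, but less each pass */
	}
	printf("converged after %d passes\n", pass);
	return (0);
}
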
8505 spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx) argument
8507 vdev_t *rvd = spa->spa_root_vdev;
8517 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
8519 if (list_is_empty(&spa->spa_config_dirty_list)) {
8549 spa->spa_last_synced_guid = rvd->vdev_guid;
8551 spa_config_exit(spa, SCL_STATE, FTAG);
8555 zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
8556 zio_resume_wait(spa);
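
When the label/uberblock write in spa_sync_rewrite_vdev_config() fails, the pool is suspended with zio_suspend() and the sync blocks in zio_resume_wait() before trying again rather than surfacing the error. A stripped-down model of that suspend-and-retry shape; try_write_labels() and wait_for_resume() are invented stand-ins, and the retry loop itself is inferred from the suspend/resume-wait pair shown above.

#include <stdio.h>

/* Pretend the first two attempts fail, then the write succeeds. */
static int
try_write_labels(void)
{
	static int attempts;

	return (++attempts >= 3 ? 0 : -1);
}

static void
wait_for_resume(void)
{
	/* The real code blocks here until the pool is resumed. */
	printf("pool suspended; waiting for resume\n");
}

int
main(void)
{
	for (;;) {
		if (try_write_labels() == 0)
			break;		/* labels and uberblocks are on disk */
		wait_for_resume();	/* suspend, wait, then try again */
	}
	printf("config sync complete\n");
	return (0);
}
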
8565 spa_sync(spa_t *spa, uint64_t txg) argument
8569 VERIFY(spa_writeable(spa));
8575 (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
8576 spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
8582 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8584 spa->spa_syncing_txg = txg;
8585 spa->spa_sync_pass = 0;
8587 for (int i = 0; i < spa->spa_alloc_count; i++) {
8588 mutex_enter(&spa->spa_alloc_locks[i]);
8589 VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i]));
8590 mutex_exit(&spa->spa_alloc_locks[i]);
8597 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
8598 while (list_head(&spa->spa_state_dirty_list) != NULL) {
8606 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8607 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
8608 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
8612 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8613 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
8615 spa_config_exit(spa, SCL_STATE, FTAG);
8617 dsl_pool_t *dp = spa->spa_dsl_pool;
8620 spa->spa_sync_starttime = gethrtime();
8621 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
8622 spa->spa_sync_starttime + spa->spa_deadman_synctime));
8628 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
8629 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
8630 vdev_t *rvd = spa->spa_root_vdev;
8639 spa->spa_deflate = TRUE;
8640 VERIFY0(zap_add(spa->spa_meta_objset,
8642 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
8646 spa_sync_adjust_vdev_max_queue_depth(spa);
8648 spa_sync_condense_indirect(spa, tx);
8650 spa_sync_iterate_to_convergence(spa, tx);
8653 if (!list_is_empty(&spa->spa_config_dirty_list)) {
8662 ASSERT0(zap_count(spa->spa_meta_objset,
8663 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
8664 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
8669 if (spa->spa_vdev_removal != NULL) {
8670 ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
8673 spa_sync_rewrite_vdev_config(spa, tx);
8676 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
8681 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
8688 if (spa->spa_config_syncing != NULL) {
8689 spa_config_set(spa, spa->spa_config_syncing);
8690 spa->spa_config_txg = txg;
8691 spa->spa_config_syncing = NULL;
8696 for (int i = 0; i < spa->spa_alloc_count; i++) {
8697 mutex_enter(&spa->spa_alloc_locks[i]);
8698 VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i]));
8699 mutex_exit(&spa->spa_alloc_locks[i]);
8705 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
8709 metaslab_class_evict_old(spa->spa_normal_class, txg);
8710 metaslab_class_evict_old(spa->spa_log_class, txg);
8712 spa_sync_close_syncing_log_sm(spa);
8714 spa_update_dspace(spa);
8722 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
8727 spa->spa_sync_pass = 0;
8735 spa->spa_ubsync = spa->spa_uberblock;
8736 spa_config_exit(spa, SCL_CONFIG, FTAG);
8738 spa_handle_ignored_writes(spa);
8742 NSEC2SEC(gethrtime() - spa->spa_spares_last_polled) >
8744 spa_spare_poll(spa);
8745 spa->spa_spares_last_polled = gethrtime();
8751 spa_async_dispatch(spa);
8762 spa_t *spa = NULL; local
8764 while ((spa = spa_next(spa)) != NULL) {
8765 if (spa_state(spa) != POOL_STATE_ACTIVE ||
8766 !spa_writeable(spa) || spa_suspended(spa))
8768 spa_open_ref(spa, FTAG);
8770 txg_wait_synced(spa_get_dsl(spa), 0);
8772 spa_close(spa, FTAG);
8789 spa_t *spa; local
8796 while ((spa = spa_next(NULL)) != NULL) {
8802 spa_open_ref(spa, FTAG);
8804 spa_async_suspend(spa);
8806 spa_close(spa, FTAG);
8808 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
8809 spa_unload(spa);
8810 spa_deactivate(spa);
8812 spa_remove(spa);
8818 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) argument
8823 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
8827 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
8828 vd = spa->spa_l2cache.sav_vdevs[i];
8833 for (i = 0; i < spa->spa_spares.sav_count; i++) {
8834 vd = spa->spa_spares.sav_vdevs[i];
8844 spa_upgrade(spa_t *spa, uint64_t version) argument
8846 ASSERT(spa_writeable(spa));
8848 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
8855 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
8856 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
8858 spa->spa_uberblock.ub_version = version;
8859 vdev_config_dirty(spa->spa_root_vdev);
8861 spa_config_exit(spa, SCL_ALL, FTAG);
8863 txg_wait_synced(spa_get_dsl(spa), 0);
8867 spa_has_spare(spa_t *spa, uint64_t guid) argument
8871 spa_aux_vdev_t *sav = &spa->spa_spares;
8891 spa_has_active_shared_spare(spa_t *spa) argument
8895 spa_aux_vdev_t *sav = &spa->spa_spares;
8899 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
8908 spa_total_metaslabs(spa_t *spa) argument
8910 vdev_t *rvd = spa->spa_root_vdev;
8923 spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) argument
8935 value.value.sv_string = spa_name(spa);
8940 value.value.sv_uint64 = spa_guid(spa);
9003 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) argument
9005 spa_event_post(spa_event_create(spa, vd, hist_nvl, name));