/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>

/*
 * This file contains the necessary logic to remove vdevs from a
 * storage pool.  Currently, the only devices that can be removed
 * are log, cache, and spare devices; and top level vdevs from a pool
 * w/o raidz.  (Note that members of a mirror can also be removed
 * by the detach operation.)
 *
 * Log vdevs are removed by evacuating them and then turning the vdev
 * into a hole vdev while holding spa config locks.
 *
 * Top level vdevs are removed and converted into an indirect vdev via
 * a multi-step process:
 *
 *  - Disable allocations from this device (spa_vdev_remove_top).
 *
 *  - From a new thread (spa_vdev_remove_thread), copy data from
 *    the removing vdev to a different vdev.  The copy happens in open
 *    context (spa_vdev_copy_impl) and issues a sync task
 *    (vdev_mapping_sync) so the sync thread can update the partial
 *    indirect mappings in core and on disk.
 *
 *  - If a free happens during a removal, it is freed from the
 *    removing vdev, and if it has already been copied, from the new
 *    location as well (free_from_removing_vdev).
 *
 *  - After the removal is completed, the copy thread converts the vdev
 *    into an indirect vdev (vdev_remove_complete) before instructing
 *    the sync thread to destroy the space maps and finish the removal
 *    (spa_finish_removal).
 */

typedef struct vdev_copy_arg {
    metaslab_t *vca_msp;
    uint64_t vca_outstanding_bytes;
    kcondvar_t vca_cv;
    kmutex_t vca_lock;
} vdev_copy_arg_t;

typedef struct vdev_copy_seg_arg {
    vdev_copy_arg_t *vcsa_copy_arg;
    uint64_t vcsa_txg;
    dva_t *vcsa_dest_dva;
    blkptr_t *vcsa_dest_bp;
} vdev_copy_seg_arg_t;

/*
 * The maximum amount of data we're allowed to copy from a device
 * at a time when removing it.
 */
int zfs_remove_max_copy_bytes = 8 * 1024 * 1024;

/*
 * The largest contiguous segment that we will attempt to allocate when
 * removing a device.  This can be no larger than SPA_MAXBLOCKSIZE.  If
 * there is a performance problem with attempting to allocate large blocks,
 * consider decreasing this.
 *
 * Note: we will issue I/Os of up to this size.  The mpt driver does not
 * respond well to I/Os larger than 1MB, so we set this to 1MB.  (When
 * mpt processes an I/O larger than 1MB, it needs to do an allocation of
 * 2 physically contiguous pages; if this allocation fails, mpt will drop
 * the I/O and hang the device.)
 */
int zfs_remove_max_segment = 1024 * 1024;
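
/*
 * Illustration only: on illumos-based systems these tunables are plain
 * module globals, so a value like the above could be adjusted at boot time
 * from /etc/system (the number below is just an example, not a
 * recommendation):
 *
 *     set zfs:zfs_remove_max_segment = 0x80000
 *
 * or patched on a live system with mdb -kw.
 */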

/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a removal.
 */
uint64_t zfs_remove_max_bytes_pause = UINT64_MAX;

#define VDEV_REMOVAL_ZAP_OBJS "lzap"

static void spa_vdev_remove_thread(void *arg);

static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
    VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_REMOVING, sizeof (uint64_t),
        sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
        &spa->spa_removing_phys, tx));
}

static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
    for (int i = 0; i < count; i++) {
        uint64_t guid =
            fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);

        if (guid == target_guid)
            return (nvpp[i]);
    }

    return (NULL);
}

static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
    nvlist_t *dev_to_remove)
{
    nvlist_t **newdev = NULL;

    if (count > 1)
        newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);

    for (int i = 0, j = 0; i < count; i++) {
        if (dev[i] == dev_to_remove)
            continue;
        VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
    }

    VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
    VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);

    for (int i = 0; i < count - 1; i++)
        nvlist_free(newdev[i]);

    if (count > 1)
        kmem_free(newdev, (count - 1) * sizeof (void *));
}

static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
    spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
    mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
    svr->svr_allocd_segs = range_tree_create(NULL, NULL);
    svr->svr_vdev = vd;

    for (int i = 0; i < TXG_SIZE; i++) {
        svr->svr_frees[i] = range_tree_create(NULL, NULL);
        list_create(&svr->svr_new_segments[i],
            sizeof (vdev_indirect_mapping_entry_t),
            offsetof(vdev_indirect_mapping_entry_t, vime_node));
    }

    return (svr);
}

void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
    for (int i = 0; i < TXG_SIZE; i++) {
        ASSERT0(svr->svr_bytes_done[i]);
        ASSERT0(svr->svr_max_offset_to_sync[i]);
        range_tree_destroy(svr->svr_frees[i]);
        list_destroy(&svr->svr_new_segments[i]);
    }

    range_tree_destroy(svr->svr_allocd_segs);
    mutex_destroy(&svr->svr_lock);
    cv_destroy(&svr->svr_cv);
    kmem_free(svr, sizeof (*svr));
}

/*
 * This is called as a synctask in the txg in which we will mark this vdev
 * as removing (in the config stored in the MOS).
 *
 * It begins the evacuation of a toplevel vdev by:
 * - initializing the spa_removing_phys which tracks this removal
 * - computing the amount of space to remove for accounting purposes
 * - dirtying all dbufs in the spa_config_object
 * - creating the spa_vdev_removal
 * - starting the spa_vdev_remove_thread
 */
static void
vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
{
    vdev_t *vd = arg;
    vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
    spa_t *spa = vd->vdev_spa;
    objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
    spa_vdev_removal_t *svr = NULL;
    uint64_t txg = dmu_tx_get_txg(tx);

    ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
    svr = spa_vdev_removal_create(vd);

    ASSERT(vd->vdev_removing);
    ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

    spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
    if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
        /*
         * By activating the OBSOLETE_COUNTS feature, we prevent
         * the pool from being downgraded and ensure that the
         * refcounts are precise.
         */
        spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
        uint64_t one = 1;
        VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
            VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
            &one, tx));
        ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0);
    }

    vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
    vd->vdev_indirect_mapping =
        vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
    vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
    vd->vdev_indirect_births =
        vdev_indirect_births_open(mos, vic->vic_births_object);
    spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
    spa->spa_removing_phys.sr_start_time = gethrestime_sec();
    spa->spa_removing_phys.sr_end_time = 0;
    spa->spa_removing_phys.sr_state = DSS_SCANNING;
    spa->spa_removing_phys.sr_to_copy = 0;
    spa->spa_removing_phys.sr_copied = 0;

    /*
     * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
     * there may be space in the defer tree, which is free, but still
     * counted in vs_alloc.
     */
    for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
        metaslab_t *ms = vd->vdev_ms[i];
        if (ms->ms_sm == NULL)
            continue;

        /*
         * Sync tasks happen before metaslab_sync(), therefore
         * smp_alloc and sm_alloc must be the same.
         */
        ASSERT3U(space_map_allocated(ms->ms_sm), ==,
            ms->ms_sm->sm_phys->smp_alloc);

        spa->spa_removing_phys.sr_to_copy +=
            space_map_allocated(ms->ms_sm);

        /*
         * Space which we are freeing this txg does not need to
         * be copied.
         */
        spa->spa_removing_phys.sr_to_copy -=
            range_tree_space(ms->ms_freeing);

        ASSERT0(range_tree_space(ms->ms_freed));
        for (int t = 0; t < TXG_SIZE; t++)
            ASSERT0(range_tree_space(ms->ms_allocating[t]));
    }

    /*
     * Sync tasks are called before metaslab_sync(), so there should
     * be no already-synced metaslabs in the TXG_CLEAN list.
     */
    ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);

    spa_sync_removing_state(spa, tx);

    /*
     * All blocks that we need to read the most recent mapping must be
     * stored on concrete vdevs.  Therefore, we must dirty anything that
     * is read before spa_remove_init().  Specifically, the
     * spa_config_object.  (Note that although we already modified the
     * spa_config_object in spa_sync_removing_state, that may not have
     * modified all blocks of the object.)
     */
    dmu_object_info_t doi;
    VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
    for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
        dmu_buf_t *dbuf;
        VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
            offset, FTAG, &dbuf, 0));
        dmu_buf_will_dirty(dbuf, tx);
        offset += dbuf->db_size;
        dmu_buf_rele(dbuf, FTAG);
    }

    /*
     * Now that we've allocated the im_object, dirty the vdev to ensure
     * that the object gets written to the config on disk.
     */
    vdev_config_dirty(vd);

    zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
        "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
        vic->vic_mapping_object);

    spa_history_log_internal(spa, "vdev remove started", tx,
        "%s vdev %llu %s", spa_name(spa), vd->vdev_id,
        (vd->vdev_path != NULL) ? vd->vdev_path : "-");
    /*
     * Setting spa_vdev_removal causes subsequent frees to call
     * free_from_removing_vdev().  Note that we don't need any locking
     * because we are the sync thread, and metaslab_free_impl() is only
     * called from syncing context (potentially from a zio taskq thread,
     * but in any case only when there are outstanding free i/os, which
     * there are not).
     */
    ASSERT3P(spa->spa_vdev_removal, ==, NULL);
    spa->spa_vdev_removal = svr;
    svr->svr_thread = thread_create(NULL, 0,
        spa_vdev_remove_thread, vd, 0, &p0, TS_RUN, minclsyspri);
}

/*
 * When we are opening a pool, we must read the mapping for each
 * indirect vdev in order from most recently removed to least
 * recently removed.  We do this because the blocks for the mapping
 * of older indirect vdevs may be stored on more recently removed vdevs.
 * In order to read each indirect mapping object, we must have
 * initialized all more recently removed vdevs.
 */
int
spa_remove_init(spa_t *spa)
{
    int error;

    error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_REMOVING, sizeof (uint64_t),
        sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
        &spa->spa_removing_phys);

    if (error == ENOENT) {
        spa->spa_removing_phys.sr_state = DSS_NONE;
        spa->spa_removing_phys.sr_removing_vdev = -1;
        spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
        return (0);
    } else if (error != 0) {
        return (error);
    }

    if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
        /*
         * We are currently removing a vdev.  Create and
         * initialize a spa_vdev_removal_t from the bonus
         * buffer of the removing vdev's vdev_im_object, and
         * initialize its partial mapping.
         */
        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
        vdev_t *vd = vdev_lookup_top(spa,
            spa->spa_removing_phys.sr_removing_vdev);
        spa_config_exit(spa, SCL_STATE, FTAG);

        if (vd == NULL)
            return (EINVAL);

        vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

        ASSERT(vdev_is_concrete(vd));
        spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
        ASSERT(svr->svr_vdev->vdev_removing);

        vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
            spa->spa_meta_objset, vic->vic_mapping_object);
        vd->vdev_indirect_births = vdev_indirect_births_open(
            spa->spa_meta_objset, vic->vic_births_object);

        spa->spa_vdev_removal = svr;
    }

    spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
    uint64_t indirect_vdev_id =
        spa->spa_removing_phys.sr_prev_indirect_vdev;
    while (indirect_vdev_id != UINT64_MAX) {
        vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
        vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

        ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
        vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
            spa->spa_meta_objset, vic->vic_mapping_object);
        vd->vdev_indirect_births = vdev_indirect_births_open(
            spa->spa_meta_objset, vic->vic_births_object);

        indirect_vdev_id = vic->vic_prev_indirect_vdev;
    }
    spa_config_exit(spa, SCL_STATE, FTAG);

    /*
     * Now that we've loaded all the indirect mappings, we can allow
     * reads from other blocks (e.g. via predictive prefetch).
     */
    spa->spa_indirect_vdevs_loaded = B_TRUE;
    return (0);
}

void
spa_restart_removal(spa_t *spa)
{
    spa_vdev_removal_t *svr = spa->spa_vdev_removal;

    if (svr == NULL)
        return;

    /*
     * In general when this function is called there is no
     * removal thread running.  The only scenario where this
     * is not true is during spa_import(), where this function
     * is called twice [once from spa_import_impl() and once from
     * spa_async_resume()].  Thus, in the scenario where we
     * import a pool that has an ongoing removal we don't
     * want to spawn a second thread.
     */
    if (svr->svr_thread != NULL)
        return;

    if (!spa_writeable(spa))
        return;

    vdev_t *vd = svr->svr_vdev;
    vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

    ASSERT3P(vd, !=, NULL);
    ASSERT(vd->vdev_removing);

    zfs_dbgmsg("restarting removal of %llu at count=%llu",
        vd->vdev_id, vdev_indirect_mapping_num_entries(vim));
    svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, vd,
        0, &p0, TS_RUN, minclsyspri);
}

/*
 * Process freeing from a device which is in the middle of being removed.
 * We must handle this carefully so that we attempt to copy freed data,
 * and we correctly free already-copied data.
 */
void
free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
    spa_t *spa = vd->vdev_spa;
    spa_vdev_removal_t *svr = spa->spa_vdev_removal;
    vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
    uint64_t txg = spa_syncing_txg(spa);
    uint64_t max_offset_yet = 0;

    ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
    ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
        vdev_indirect_mapping_object(vim));
    ASSERT3P(vd, ==, svr->svr_vdev);

    mutex_enter(&svr->svr_lock);

    /*
     * Remove the segment from the removing vdev's spacemap.  This
     * ensures that we will not attempt to copy this space (if the
     * removal thread has not yet visited it), and also ensures
     * that we know what is actually allocated on the new vdevs
     * (needed if we cancel the removal).
     *
     * Note: we must do the metaslab_free_concrete() with the svr_lock
     * held, so that the remove_thread can not load this metaslab and then
     * visit this offset between the time that we metaslab_free_concrete()
     * and when we check to see if it has been visited.
     *
     * Note: The checkpoint flag is set to false as having/taking
     * a checkpoint and removing a device can't happen at the same
     * time.
     */
    ASSERT(!spa_has_checkpoint(spa));
    metaslab_free_concrete(vd, offset, size, B_FALSE);

    uint64_t synced_size = 0;
    uint64_t synced_offset = 0;
    uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
    if (offset < max_offset_synced) {
        /*
         * The mapping for this offset is already on disk.
         * Free from the new location.
         *
         * Note that we use svr_max_synced_offset because it is
         * updated atomically with respect to the in-core mapping.
         * By contrast, vim_max_offset is not.
         *
         * This block may be split between a synced entry and an
         * in-flight or unvisited entry.  Only process the synced
         * portion of it here.
         */
        synced_size = MIN(size, max_offset_synced - offset);
        synced_offset = offset;

        ASSERT3U(max_offset_yet, <=, max_offset_synced);
        max_offset_yet = max_offset_synced;

        DTRACE_PROBE3(remove__free__synced,
            spa_t *, spa,
            uint64_t, offset,
            uint64_t, synced_size);

        size -= synced_size;
        offset += synced_size;
    }

    /*
     * Look at all in-flight txgs starting from the currently syncing one
     * and see if a section of this free is being copied.  By starting from
     * this txg and iterating forward, we might find that this region
     * was copied in two different txgs and handle it appropriately.
     */
    for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
        int txgoff = (txg + i) & TXG_MASK;
        if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
            /*
             * The mapping for this offset is in flight, and
             * will be synced in txg+i.
             */
            uint64_t inflight_size = MIN(size,
                svr->svr_max_offset_to_sync[txgoff] - offset);

            DTRACE_PROBE4(remove__free__inflight,
                spa_t *, spa,
                uint64_t, offset,
                uint64_t, inflight_size,
                uint64_t, txg + i);

            /*
             * We copy data in order of increasing offset.
             * Therefore the max_offset_to_sync[] must increase
             * (or be zero, indicating that nothing is being
             * copied in that txg).
             */
            if (svr->svr_max_offset_to_sync[txgoff] != 0) {
                ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
                    >=, max_offset_yet);
                max_offset_yet =
                    svr->svr_max_offset_to_sync[txgoff];
            }

            /*
             * We've already committed to copying this segment:
             * we have allocated space elsewhere in the pool for
             * it and have an IO outstanding to copy the data.  We
             * cannot free the space before the copy has
             * completed, or else the copy IO might overwrite any
             * new data.  To free that space, we record the
             * segment in the appropriate svr_frees tree and free
             * the mapped space later, in the txg where we have
             * completed the copy and synced the mapping (see
             * vdev_mapping_sync).
             */
            range_tree_add(svr->svr_frees[txgoff],
                offset, inflight_size);
            size -= inflight_size;
            offset += inflight_size;

            /*
             * This space is already accounted for as being
             * done, because it is being copied in txg+i.
             * However, if i!=0, then it is being copied in
             * a future txg.  If we crash after this txg
             * syncs but before txg+i syncs, then the space
             * will be free.  Therefore we must account
             * for the space being done in *this* txg
             * (when it is freed) rather than the future txg
             * (when it will be copied).
             */
            ASSERT3U(svr->svr_bytes_done[txgoff], >=,
                inflight_size);
            svr->svr_bytes_done[txgoff] -= inflight_size;
            svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
        }
    }
    ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);

    if (size > 0) {
        /*
         * The copy thread has not yet visited this offset.  Ensure
         * that it doesn't.
         */

        DTRACE_PROBE3(remove__free__unvisited,
            spa_t *, spa,
            uint64_t, offset,
            uint64_t, size);

        if (svr->svr_allocd_segs != NULL)
            range_tree_clear(svr->svr_allocd_segs, offset, size);

        /*
         * Since we now do not need to copy this data, for
         * accounting purposes we have done our job and can count
         * it as completed.
         */
        svr->svr_bytes_done[txg & TXG_MASK] += size;
    }
    mutex_exit(&svr->svr_lock);

    /*
     * Now that we have dropped svr_lock, process the synced portion
     * of this free.
     */
    if (synced_size > 0) {
        vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);

        /*
         * Note: this can only be called from syncing context,
         * and the vdev_indirect_mapping is only changed from the
         * sync thread, so we don't need svr_lock while doing
         * metaslab_free_impl_cb.
         */
        boolean_t checkpoint = B_FALSE;
        vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
            metaslab_free_impl_cb, &checkpoint);
    }
}
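
/*
 * Worked example for free_from_removing_vdev() above (made-up numbers):
 * suppose the on-disk mapping currently covers offsets [0, 100M) of the
 * removing vdev, the syncing txg is copying [100M, 120M) (reflected in
 * svr_max_offset_to_sync[]), and a free arrives for [90M, 130M).  The first
 * 10M is "synced": it is freed from its new location right away.  The next
 * 20M is "in flight": it is recorded in svr_frees[] and freed only once the
 * mapping for it has been synced (see vdev_mapping_sync()).  The final 10M
 * is "unvisited": it is removed from svr_allocd_segs so the copy thread
 * never copies it, and it is counted as done immediately.
 */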

/*
 * Stop an active removal and update the spa_removing_phys.
 */
static void
spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
{
    spa_vdev_removal_t *svr = spa->spa_vdev_removal;
    ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));

    /* Ensure the removal thread has completed before we free the svr. */
    spa_vdev_remove_suspend(spa);

    ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);

    if (state == DSS_FINISHED) {
        spa_removing_phys_t *srp = &spa->spa_removing_phys;
        vdev_t *vd = svr->svr_vdev;
        vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

        if (srp->sr_prev_indirect_vdev != UINT64_MAX) {
            vdev_t *pvd = vdev_lookup_top(spa,
                srp->sr_prev_indirect_vdev);
            ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
        }

        vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
        srp->sr_prev_indirect_vdev = vd->vdev_id;
    }
    spa->spa_removing_phys.sr_state = state;
    spa->spa_removing_phys.sr_end_time = gethrestime_sec();

    spa->spa_vdev_removal = NULL;
    spa_vdev_removal_destroy(svr);

    spa_sync_removing_state(spa, tx);

    vdev_config_dirty(spa->spa_root_vdev);
}

static void
free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
    vdev_t *vd = arg;
    vdev_indirect_mark_obsolete(vd, offset, size);
    boolean_t checkpoint = B_FALSE;
    vdev_indirect_ops.vdev_op_remap(vd, offset, size,
        metaslab_free_impl_cb, &checkpoint);
}

/*
 * On behalf of the removal thread, syncs an incremental bit more of
 * the indirect mapping to disk and updates the in-memory mapping.
 * Called as a sync task in every txg that the removal thread makes progress.
 */
static void
vdev_mapping_sync(void *arg, dmu_tx_t *tx)
{
    spa_vdev_removal_t *svr = arg;
    spa_t *spa = dmu_tx_pool(tx)->dp_spa;
    vdev_t *vd = svr->svr_vdev;
    vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
    uint64_t txg = dmu_tx_get_txg(tx);
    vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

    ASSERT(vic->vic_mapping_object != 0);
    ASSERT3U(txg, ==, spa_syncing_txg(spa));

    vdev_indirect_mapping_add_entries(vim,
        &svr->svr_new_segments[txg & TXG_MASK], tx);
    vdev_indirect_births_add_entry(vd->vdev_indirect_births,
        vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);

    /*
     * Free the copied data for anything that was freed while the
     * mapping entries were in flight.
     */
    mutex_enter(&svr->svr_lock);
    range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
        free_mapped_segment_cb, vd);
    ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
        vdev_indirect_mapping_max_offset(vim));
    svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
    mutex_exit(&svr->svr_lock);

    spa_sync_removing_state(spa, tx);
}

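/*
 * Each segment copy is a two-stage zio chain: spa_vdev_copy_segment() issues
 * a logical read of the old location; its done callback
 * (spa_vdev_copy_segment_read_done) builds a block pointer for the newly
 * allocated location and issues a zio_rewrite() of the same data; and the
 * write's done callback (spa_vdev_copy_segment_write_done) drops the
 * SCL_STATE hold and credits vca_outstanding_bytes so the removal thread can
 * issue more copies.
 */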
static void
spa_vdev_copy_segment_write_done(zio_t *zio)
{
    vdev_copy_seg_arg_t *vcsa = zio->io_private;
    vdev_copy_arg_t *vca = vcsa->vcsa_copy_arg;
    spa_config_exit(zio->io_spa, SCL_STATE, FTAG);
    abd_free(zio->io_abd);

    mutex_enter(&vca->vca_lock);
    vca->vca_outstanding_bytes -= zio->io_size;
    cv_signal(&vca->vca_cv);
    mutex_exit(&vca->vca_lock);

    ASSERT0(zio->io_error);
    kmem_free(vcsa->vcsa_dest_bp, sizeof (blkptr_t));
    kmem_free(vcsa, sizeof (vdev_copy_seg_arg_t));
}

static void
spa_vdev_copy_segment_read_done(zio_t *zio)
{
    vdev_copy_seg_arg_t *vcsa = zio->io_private;
    dva_t *dest_dva = vcsa->vcsa_dest_dva;
    uint64_t txg = vcsa->vcsa_txg;
    spa_t *spa = zio->io_spa;
    vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(dest_dva));
    blkptr_t *bp = NULL;
    dva_t *dva = NULL;
    uint64_t size = zio->io_size;

    ASSERT3P(dest_vd, !=, NULL);
    ASSERT0(zio->io_error);

    vcsa->vcsa_dest_bp = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
    bp = vcsa->vcsa_dest_bp;
    dva = bp->blk_dva;

    BP_ZERO(bp);

    /* initialize with dest_dva */
    bcopy(dest_dva, dva, sizeof (dva_t));
    BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);

    BP_SET_LSIZE(bp, size);
    BP_SET_PSIZE(bp, size);
    BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
    BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
    BP_SET_TYPE(bp, DMU_OT_NONE);
    BP_SET_LEVEL(bp, 0);
    BP_SET_DEDUP(bp, 0);
    BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

    zio_nowait(zio_rewrite(spa->spa_txg_zio[txg & TXG_MASK], spa,
        txg, bp, zio->io_abd, size,
        spa_vdev_copy_segment_write_done, vcsa,
        ZIO_PRIORITY_REMOVAL, 0, NULL));
}

static int
spa_vdev_copy_segment(vdev_t *vd, uint64_t start, uint64_t size, uint64_t txg,
    vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
{
    metaslab_group_t *mg = vd->vdev_mg;
    spa_t *spa = vd->vdev_spa;
    spa_vdev_removal_t *svr = spa->spa_vdev_removal;
    vdev_indirect_mapping_entry_t *entry;
    vdev_copy_seg_arg_t *private;
    dva_t dst = { 0 };
    blkptr_t blk, *bp = &blk;
    dva_t *dva = bp->blk_dva;

    ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);

    int error = metaslab_alloc_dva(spa, mg->mg_class, size,
        &dst, 0, NULL, txg, 0, zal);
    if (error != 0)
        return (error);

    /*
     * We can't have any padding of the allocated size, otherwise we will
     * misunderstand what's allocated, and the size of the mapping.
     * The caller ensures this will be true by passing in a size that is
     * aligned to the worst (highest) ashift in the pool.
     */
    ASSERT3U(DVA_GET_ASIZE(&dst), ==, size);

    mutex_enter(&vca->vca_lock);
    vca->vca_outstanding_bytes += size;
    mutex_exit(&vca->vca_lock);

    entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
    DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
    entry->vime_mapping.vimep_dst = dst;

    private = kmem_alloc(sizeof (vdev_copy_seg_arg_t), KM_SLEEP);
    private->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
    private->vcsa_txg = txg;
    private->vcsa_copy_arg = vca;

    /*
     * This lock is eventually released by the donefunc for the
     * zio_write_phys that finishes copying the data.
     */
    spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

    /*
     * Do logical I/O, letting the redundancy vdevs (like mirror)
     * handle their own I/O instead of duplicating that code here.
     */
    BP_ZERO(bp);

    DVA_SET_VDEV(&dva[0], vd->vdev_id);
    DVA_SET_OFFSET(&dva[0], start);
    DVA_SET_GANG(&dva[0], 0);
    DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, size));

    BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);

    BP_SET_LSIZE(bp, size);
    BP_SET_PSIZE(bp, size);
    BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
    BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
    BP_SET_TYPE(bp, DMU_OT_NONE);
    BP_SET_LEVEL(bp, 0);
    BP_SET_DEDUP(bp, 0);
    BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

    zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa,
        bp, abd_alloc_for_io(size, B_FALSE), size,
        spa_vdev_copy_segment_read_done, private,
        ZIO_PRIORITY_REMOVAL, 0, NULL));

    list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
    ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
    vdev_dirty(vd, 0, NULL, txg);

    return (0);
}

/*
 * Complete the removal of a toplevel vdev.  This is called as a
 * synctask in the same txg that we will sync out the new config (to the
 * MOS object) which indicates that this vdev is indirect.
 */
static void
vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
{
    spa_vdev_removal_t *svr = arg;
    vdev_t *vd = svr->svr_vdev;
    spa_t *spa = vd->vdev_spa;

    ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

    for (int i = 0; i < TXG_SIZE; i++) {
        ASSERT0(svr->svr_bytes_done[i]);
    }

    ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
        spa->spa_removing_phys.sr_to_copy);

    vdev_destroy_spacemaps(vd, tx);

    /* destroy leaf zaps, if any */
    ASSERT3P(svr->svr_zaplist, !=, NULL);
    for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
        pair != NULL;
        pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
        vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
    }
    fnvlist_free(svr->svr_zaplist);

    spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
    /* vd->vdev_path is not available here */
    spa_history_log_internal(spa, "vdev remove completed", tx,
        "%s vdev %llu", spa_name(spa), vd->vdev_id);
}

static void
vdev_indirect_state_transfer(vdev_t *ivd, vdev_t *vd)
{
    ivd->vdev_indirect_config = vd->vdev_indirect_config;

    ASSERT3P(ivd->vdev_indirect_mapping, ==, NULL);
    ASSERT(vd->vdev_indirect_mapping != NULL);
    ivd->vdev_indirect_mapping = vd->vdev_indirect_mapping;
    vd->vdev_indirect_mapping = NULL;

    ASSERT3P(ivd->vdev_indirect_births, ==, NULL);
    ASSERT(vd->vdev_indirect_births != NULL);
    ivd->vdev_indirect_births = vd->vdev_indirect_births;
    vd->vdev_indirect_births = NULL;

    ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
    ASSERT0(range_tree_space(ivd->vdev_obsolete_segments));

    if (vd->vdev_obsolete_sm != NULL) {
        ASSERT3U(ivd->vdev_asize, ==, vd->vdev_asize);

        /*
         * We cannot use space_map_{open,close} because we hold all
         * the config locks as writer.
         */
        ASSERT3P(ivd->vdev_obsolete_sm, ==, NULL);
        ivd->vdev_obsolete_sm = vd->vdev_obsolete_sm;
        vd->vdev_obsolete_sm = NULL;
    }
}

static void
vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
{
    ASSERT3P(zlist, !=, NULL);
    ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);

    if (vd->vdev_leaf_zap != 0) {
        char zkey[32];
        (void) snprintf(zkey, sizeof (zkey), "%s-%"PRIu64,
            VDEV_REMOVAL_ZAP_OBJS, vd->vdev_leaf_zap);
        fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
    }

    for (uint64_t id = 0; id < vd->vdev_children; id++) {
        vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
    }
}

static void
vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
{
    vdev_t *ivd;
    dmu_tx_t *tx;
    spa_t *spa = vd->vdev_spa;
    spa_vdev_removal_t *svr = spa->spa_vdev_removal;

    /*
     * First, build a list of leaf zaps to be destroyed.
     * This is passed to the sync context thread,
     * which does the actual unlinking.
     */
    svr->svr_zaplist = fnvlist_alloc();
    vdev_remove_enlist_zaps(vd, svr->svr_zaplist);

    ivd = vdev_add_parent(vd, &vdev_indirect_ops);

    vd->vdev_leaf_zap = 0;

    vdev_remove_child(ivd, vd);
    vdev_compact_children(ivd);

    vdev_indirect_state_transfer(ivd, vd);

    svr->svr_vdev = ivd;

    ASSERT(!ivd->vdev_removing);
    ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

    tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
    dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
        0, ZFS_SPACE_CHECK_NONE, tx);
    dmu_tx_commit(tx);

    /*
     * Indicate that this thread has exited.
     * After this, we can not use svr.
     */
    mutex_enter(&svr->svr_lock);
    svr->svr_thread = NULL;
    cv_broadcast(&svr->svr_cv);
    mutex_exit(&svr->svr_lock);
}

/*
 * Complete the removal of a toplevel vdev.  This is called in open
 * context by the removal thread after we have copied all vdev's data.
 */
static void
vdev_remove_complete(vdev_t *vd)
{
    spa_t *spa = vd->vdev_spa;
    uint64_t txg;

    /*
     * Wait for any deferred frees to be synced before we call
     * vdev_metaslab_fini()
     */
    txg_wait_synced(spa->spa_dsl_pool, 0);

    txg = spa_vdev_enter(spa);
    zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
        vd->vdev_id, txg);

    /*
     * Discard allocation state.
     */
    if (vd->vdev_mg != NULL) {
        vdev_metaslab_fini(vd);
        metaslab_group_destroy(vd->vdev_mg);
        vd->vdev_mg = NULL;
    }
    ASSERT0(vd->vdev_stat.vs_space);
    ASSERT0(vd->vdev_stat.vs_dspace);

    vdev_remove_replace_with_indirect(vd, txg);

    /*
     * We now release the locks, allowing spa_sync to run and finish the
     * removal via vdev_remove_complete_sync in syncing context.
     */
    (void) spa_vdev_exit(spa, NULL, txg, 0);

    /*
     * Top ZAP should have been transferred to the indirect vdev in
     * vdev_remove_replace_with_indirect.
     */
    ASSERT0(vd->vdev_top_zap);

    /*
     * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
     */
    ASSERT0(vd->vdev_leaf_zap);

    txg = spa_vdev_enter(spa);
    (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
    /*
     * Request to update the config and the config cachefile.
     */
    vdev_config_dirty(spa->spa_root_vdev);
    (void) spa_vdev_exit(spa, vd, txg, 0);
}

/*
 * Evacuates a segment of size at most max_alloc from the vdev
 * via repeated calls to spa_vdev_copy_segment.  If an allocation
 * fails, the pool is probably too fragmented to handle such a
 * large size, so decrease max_alloc so that the caller will not try
 * this size again this txg.
 */
static void
spa_vdev_copy_impl(spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
    uint64_t *max_alloc, dmu_tx_t *tx)
{
    uint64_t txg = dmu_tx_get_txg(tx);
    spa_t *spa = dmu_tx_pool(tx)->dp_spa;

    mutex_enter(&svr->svr_lock);

    range_seg_t *rs = avl_first(&svr->svr_allocd_segs->rt_root);
    if (rs == NULL) {
        mutex_exit(&svr->svr_lock);
        return;
    }
    uint64_t offset = rs->rs_start;
    uint64_t length = MIN(rs->rs_end - rs->rs_start, *max_alloc);

    range_tree_remove(svr->svr_allocd_segs, offset, length);

    if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
        dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
            svr, 0, ZFS_SPACE_CHECK_NONE, tx);
    }

    svr->svr_max_offset_to_sync[txg & TXG_MASK] = offset + length;

    /*
     * Note: this is the amount of *allocated* space
     * that we are taking care of each txg.
     */
    svr->svr_bytes_done[txg & TXG_MASK] += length;

    mutex_exit(&svr->svr_lock);

    zio_alloc_list_t zal;
    metaslab_trace_init(&zal);
    uint64_t thismax = *max_alloc;
    while (length > 0) {
        uint64_t mylen = MIN(length, thismax);

        int error = spa_vdev_copy_segment(svr->svr_vdev,
            offset, mylen, txg, vca, &zal);

        if (error == ENOSPC) {
            /*
             * Cut our segment in half, and don't try this
             * segment size again this txg.  Note that the
             * allocation size must be aligned to the highest
             * ashift in the pool, so that the allocation will
             * not be padded out to a multiple of the ashift,
             * which could cause us to think that this mapping
             * is larger than we intended.
             */
            ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
            ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
            thismax = P2ROUNDUP(mylen / 2,
                1 << spa->spa_max_ashift);
            ASSERT3U(thismax, <, mylen);
            /*
             * The minimum-size allocation can not fail.
             */
            ASSERT3U(mylen, >, 1 << spa->spa_max_ashift);
            *max_alloc = mylen - (1 << spa->spa_max_ashift);
        } else {
            ASSERT0(error);
            length -= mylen;
            offset += mylen;

            /*
             * We've performed an allocation, so reset the
             * alloc trace list.
             */
            metaslab_trace_fini(&zal);
            metaslab_trace_init(&zal);
        }
    }
    metaslab_trace_fini(&zal);
}
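
/*
 * A sketch of the ENOSPC backoff in spa_vdev_copy_impl() above, assuming
 * ashift 12 (4KB sectors): if a 1MB allocation fails, thismax becomes
 * P2ROUNDUP(512KB, 4KB) = 512KB and *max_alloc drops to 1MB - 4KB, so that
 * size is not attempted again this txg; repeated failures keep halving the
 * attempt down to a single 4KB sector, which the code asserts cannot fail.
 */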

/*
 * The removal thread operates in open context.  It iterates over all
 * allocated space in the vdev, by loading each metaslab's spacemap.
 * For each contiguous segment of allocated space (capping the segment
 * size at SPA_MAXBLOCKSIZE), we:
 *	- Allocate space for it on another vdev.
 *	- Create a new mapping from the old location to the new location
 *	  (as a record in svr_new_segments).
 *	- Initiate a logical read zio to get the data off the removing disk.
 *	- In the read zio's done callback, initiate a logical write zio to
 *	  write it to the new vdev.
 * Note that all of this will take effect when a particular TXG syncs.
 * The sync thread ensures that all the phys reads and writes for the syncing
 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
 * (see vdev_mapping_sync()).
 */
static void
spa_vdev_remove_thread(void *arg)
{
    vdev_t *vd = arg;
    spa_t *spa = vd->vdev_spa;
    spa_vdev_removal_t *svr = spa->spa_vdev_removal;
    vdev_copy_arg_t vca;
    uint64_t max_alloc = zfs_remove_max_segment;
    uint64_t last_txg = 0;
    vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
    uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);

    ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
    ASSERT(vdev_is_concrete(vd));
    ASSERT(vd->vdev_removing);
    ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
    ASSERT3P(svr->svr_vdev, ==, vd);
    ASSERT(vim != NULL);

    mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
    vca.vca_outstanding_bytes = 0;

    mutex_enter(&svr->svr_lock);

    /*
     * Start from vim_max_offset so we pick up where we left off
     * if we are restarting the removal after opening the pool.
     */
    uint64_t msi;
    for (msi = start_offset >> vd->vdev_ms_shift;
        msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
        metaslab_t *msp = vd->vdev_ms[msi];
        ASSERT3U(msi, <=, vd->vdev_ms_count);

        ASSERT0(range_tree_space(svr->svr_allocd_segs));

        mutex_enter(&msp->ms_sync_lock);
        mutex_enter(&msp->ms_lock);

        /*
         * Assert nothing in flight -- ms_*tree is empty.
         */
        for (int i = 0; i < TXG_SIZE; i++) {
            ASSERT0(range_tree_space(msp->ms_allocating[i]));
        }

        /*
         * If the metaslab has ever been allocated from (ms_sm!=NULL),
         * read the allocated segments from the space map object
         * into svr_allocd_segs.  Since we do this while holding
         * svr_lock and ms_sync_lock, concurrent frees (which
         * would have modified the space map) will wait for us
         * to finish loading the spacemap, and then take the
         * appropriate action (see free_from_removing_vdev()).
         */
        if (msp->ms_sm != NULL) {
            space_map_t *sm = NULL;

            /*
             * We have to open a new space map here, because
             * ms_sm's sm_length and sm_alloc may not reflect
             * what's in the object contents, if we are in between
             * metaslab_sync() and metaslab_sync_done().
             */
            VERIFY0(space_map_open(&sm,
                spa->spa_dsl_pool->dp_meta_objset,
                msp->ms_sm->sm_object, msp->ms_sm->sm_start,
                msp->ms_sm->sm_size, msp->ms_sm->sm_shift));
            space_map_update(sm);
            VERIFY0(space_map_load(sm, svr->svr_allocd_segs,
                SM_ALLOC));
            space_map_close(sm);

            range_tree_walk(msp->ms_freeing,
                range_tree_remove, svr->svr_allocd_segs);

            /*
             * When we are resuming from a paused removal (i.e.
             * when importing a pool with a removal in progress),
             * discard any state that we have already processed.
             */
            range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
        }
        mutex_exit(&msp->ms_lock);
        mutex_exit(&msp->ms_sync_lock);

        vca.vca_msp = msp;
        zfs_dbgmsg("copying %llu segments for metaslab %llu",
            avl_numnodes(&svr->svr_allocd_segs->rt_root),
            msp->ms_id);

        while (!svr->svr_thread_exit &&
            !range_tree_is_empty(svr->svr_allocd_segs)) {

            mutex_exit(&svr->svr_lock);

            /*
             * This delay will pause the removal around the point
             * specified by zfs_remove_max_bytes_pause.  We do this
             * solely from the test suite or during debugging.
             */
            uint64_t bytes_copied =
                spa->spa_removing_phys.sr_copied;
            for (int i = 0; i < TXG_SIZE; i++)
                bytes_copied += svr->svr_bytes_done[i];
            while (zfs_remove_max_bytes_pause <= bytes_copied &&
                !svr->svr_thread_exit)
                delay(hz);

            mutex_enter(&vca.vca_lock);
            while (vca.vca_outstanding_bytes >
                zfs_remove_max_copy_bytes) {
                cv_wait(&vca.vca_cv, &vca.vca_lock);
            }
            mutex_exit(&vca.vca_lock);

            dmu_tx_t *tx =
                dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);

            VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
            uint64_t txg = dmu_tx_get_txg(tx);

            if (txg != last_txg)
                max_alloc = zfs_remove_max_segment;
            last_txg = txg;

            spa_vdev_copy_impl(svr, &vca, &max_alloc, tx);

            dmu_tx_commit(tx);
            mutex_enter(&svr->svr_lock);
        }
    }

    mutex_exit(&svr->svr_lock);
    /*
     * Wait for all copies to finish before cleaning up the vca.
     */
    txg_wait_synced(spa->spa_dsl_pool, 0);
    ASSERT0(vca.vca_outstanding_bytes);

    mutex_destroy(&vca.vca_lock);
    cv_destroy(&vca.vca_cv);

    if (svr->svr_thread_exit) {
        mutex_enter(&svr->svr_lock);
        range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
        svr->svr_thread = NULL;
        cv_broadcast(&svr->svr_cv);
        mutex_exit(&svr->svr_lock);
    } else {
        ASSERT0(range_tree_space(svr->svr_allocd_segs));
        vdev_remove_complete(vd);
    }
}
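
/*
 * Ask the removal thread to exit and wait until it has done so.  The
 * partially-complete state is left on disk, so the removal can be resumed
 * later (see spa_restart_removal()) or torn down by the caller.
 */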
void
spa_vdev_remove_suspend(spa_t *spa)
{
    spa_vdev_removal_t *svr = spa->spa_vdev_removal;

    if (svr == NULL)
        return;

    mutex_enter(&svr->svr_lock);
    svr->svr_thread_exit = B_TRUE;
    while (svr->svr_thread != NULL)
        cv_wait(&svr->svr_cv, &svr->svr_lock);
    svr->svr_thread_exit = B_FALSE;
    mutex_exit(&svr->svr_lock);
}

/* ARGSUSED */
static int
spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
{
    spa_t *spa = dmu_tx_pool(tx)->dp_spa;

    if (spa->spa_vdev_removal == NULL)
        return (ENOTACTIVE);
    return (0);
}

/*
 * Cancel a removal by freeing all entries from the partial mapping
 * and marking the vdev as no longer removing.
 */
/* ARGSUSED */
static void
spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
{
    spa_t *spa = dmu_tx_pool(tx)->dp_spa;
    spa_vdev_removal_t *svr = spa->spa_vdev_removal;
    vdev_t *vd = svr->svr_vdev;
    vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
    vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
    objset_t *mos = spa->spa_meta_objset;

    ASSERT3P(svr->svr_thread, ==, NULL);

    spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
    if (vdev_obsolete_counts_are_precise(vd)) {
        spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
        VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
            VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
    }

    if (vdev_obsolete_sm_object(vd) != 0) {
        ASSERT(vd->vdev_obsolete_sm != NULL);
        ASSERT3U(vdev_obsolete_sm_object(vd), ==,
            space_map_object(vd->vdev_obsolete_sm));

        space_map_free(vd->vdev_obsolete_sm, tx);
        VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
            VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
        space_map_close(vd->vdev_obsolete_sm);
        vd->vdev_obsolete_sm = NULL;
        spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
    }
    for (int i = 0; i < TXG_SIZE; i++) {
        ASSERT(list_is_empty(&svr->svr_new_segments[i]));
        ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
            vdev_indirect_mapping_max_offset(vim));
    }

    for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
        metaslab_t *msp = vd->vdev_ms[msi];

        if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
            break;

        ASSERT0(range_tree_space(svr->svr_allocd_segs));

        mutex_enter(&msp->ms_lock);

        /*
         * Assert nothing in flight -- ms_*tree is empty.
         */
        for (int i = 0; i < TXG_SIZE; i++)
            ASSERT0(range_tree_space(msp->ms_allocating[i]));
        for (int i = 0; i < TXG_DEFER_SIZE; i++)
            ASSERT0(range_tree_space(msp->ms_defer[i]));
        ASSERT0(range_tree_space(msp->ms_freed));

        if (msp->ms_sm != NULL) {
            /*
             * Assert that the in-core spacemap has the same
             * length as the on-disk one, so we can use the
             * existing in-core spacemap to load it from disk.
             */
            ASSERT3U(msp->ms_sm->sm_alloc, ==,
                msp->ms_sm->sm_phys->smp_alloc);
            ASSERT3U(msp->ms_sm->sm_length, ==,
                msp->ms_sm->sm_phys->smp_objsize);

            mutex_enter(&svr->svr_lock);
            VERIFY0(space_map_load(msp->ms_sm,
                svr->svr_allocd_segs, SM_ALLOC));
            range_tree_walk(msp->ms_freeing,
                range_tree_remove, svr->svr_allocd_segs);

            /*
             * Clear everything past what has been synced,
             * because we have not allocated mappings for it yet.
             */
            uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
            range_tree_clear(svr->svr_allocd_segs, syncd,
                msp->ms_sm->sm_start + msp->ms_sm->sm_size - syncd);

            mutex_exit(&svr->svr_lock);
        }
        mutex_exit(&msp->ms_lock);

        mutex_enter(&svr->svr_lock);
        range_tree_vacate(svr->svr_allocd_segs,
            free_mapped_segment_cb, vd);
        mutex_exit(&svr->svr_lock);
    }

    /*
     * Note: this must happen after we invoke free_mapped_segment_cb,
     * because it adds to the obsolete_segments.
     */
    range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);

    ASSERT3U(vic->vic_mapping_object, ==,
        vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
    vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
    vd->vdev_indirect_mapping = NULL;
    vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
    vic->vic_mapping_object = 0;

    ASSERT3U(vic->vic_births_object, ==,
        vdev_indirect_births_object(vd->vdev_indirect_births));
    vdev_indirect_births_close(vd->vdev_indirect_births);
    vd->vdev_indirect_births = NULL;
    vdev_indirect_births_free(mos, vic->vic_births_object, tx);
    vic->vic_births_object = 0;

    /*
     * We may have processed some frees from the removing vdev in this
     * txg, thus increasing svr_bytes_done; discard that here to
     * satisfy the assertions in spa_vdev_removal_destroy().
     * Note that future txg's can not have any bytes_done, because
     * future TXG's are only modified from open context, and we have
     * already shut down the copying thread.
     */
    svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
    spa_finish_removal(spa, DSS_CANCELED, tx);

    vd->vdev_removing = B_FALSE;
    vdev_config_dirty(vd);

    zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
        vd->vdev_id, dmu_tx_get_txg(tx));
    spa_history_log_internal(spa, "vdev remove canceled", tx,
        "%s vdev %llu %s", spa_name(spa),
        vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-");
}

int
spa_vdev_remove_cancel(spa_t *spa)
{
    spa_vdev_remove_suspend(spa);

    if (spa->spa_vdev_removal == NULL)
        return (ENOTACTIVE);

    uint64_t vdid = spa->spa_vdev_removal->svr_vdev->vdev_id;

    int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
        spa_vdev_remove_cancel_sync, NULL, 0,
        ZFS_SPACE_CHECK_EXTRA_RESERVED);

    if (error == 0) {
        spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
        vdev_t *vd = vdev_lookup_top(spa, vdid);
        metaslab_group_activate(vd->vdev_mg);
        spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
    }

    return (error);
}

/*
 * Called every sync pass of every txg if there's a svr.
 */
void
svr_sync(spa_t *spa, dmu_tx_t *tx)
{
    spa_vdev_removal_t *svr = spa->spa_vdev_removal;
    int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

    /*
     * This check is necessary so that we do not dirty the
     * DIRECTORY_OBJECT via spa_sync_removing_state() when there
     * is nothing to do.  Dirtying it every time would prevent us
     * from syncing-to-convergence.
     */
    if (svr->svr_bytes_done[txgoff] == 0)
        return;

    /*
     * Update progress accounting.
     */
    spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
    svr->svr_bytes_done[txgoff] = 0;

    spa_sync_removing_state(spa, tx);
}

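/*
 * Free the given top-level vdev and fix up the root vdev's child array:
 * if it was the last child, compact the array; otherwise install a hole
 * vdev in its slot so that the remaining vdev ids are preserved.  Finally,
 * reopen the root vdev to reassess its health.
 */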
static void
vdev_remove_make_hole_and_free(vdev_t *vd)
{
    uint64_t id = vd->vdev_id;
    spa_t *spa = vd->vdev_spa;
    vdev_t *rvd = spa->spa_root_vdev;
    boolean_t last_vdev = (id == (rvd->vdev_children - 1));

    ASSERT(MUTEX_HELD(&spa_namespace_lock));
    ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

    vdev_free(vd);

    if (last_vdev) {
        vdev_compact_children(rvd);
    } else {
        vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
        vdev_add_child(rvd, vd);
    }
    vdev_config_dirty(rvd);

    /*
     * Reassess the health of our root vdev.
     */
    vdev_reopen(rvd);
}

/*
 * Remove a log device.  The config lock is held for the specified TXG.
 */
static int
spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
{
    metaslab_group_t *mg = vd->vdev_mg;
    spa_t *spa = vd->vdev_spa;
    int error = 0;

    ASSERT(vd->vdev_islog);
    ASSERT(vd == vd->vdev_top);

    /*
     * Stop allocating from this vdev.
     */
    metaslab_group_passivate(mg);

    /*
     * Wait for the youngest allocations and frees to sync,
     * and then wait for the deferral of those frees to finish.
     */
    spa_vdev_config_exit(spa, NULL,
        *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

    /*
     * Evacuate the device.  We don't hold the config lock as writer
     * since we need to do I/O but we do keep the
     * spa_namespace_lock held.  Once this completes the device
     * should no longer have any blocks allocated on it.
     */
    if (vd->vdev_islog) {
        if (vd->vdev_stat.vs_alloc != 0)
            error = spa_reset_logs(spa);
    }

    *txg = spa_vdev_config_enter(spa);

    if (error != 0) {
        metaslab_group_activate(mg);
        return (error);
    }
    ASSERT0(vd->vdev_stat.vs_alloc);

    /*
     * The evacuation succeeded.  Remove any remaining MOS metadata
     * associated with this vdev, and wait for these changes to sync.
     */
    vd->vdev_removing = B_TRUE;

    vdev_dirty_leaves(vd, VDD_DTL, *txg);
    vdev_config_dirty(vd);

    spa_history_log_internal(spa, "vdev remove", NULL,
        "%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id,
        (vd->vdev_path != NULL) ? vd->vdev_path : "-");

    /* Make sure these changes are sync'ed */
    spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);

    *txg = spa_vdev_config_enter(spa);

    sysevent_t *ev = spa_event_create(spa, vd, NULL,
        ESC_ZFS_VDEV_REMOVE_DEV);
    ASSERT(MUTEX_HELD(&spa_namespace_lock));
    ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

    /* The top ZAP should have been destroyed by vdev_remove_empty. */
    ASSERT0(vd->vdev_top_zap);
    /* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
    ASSERT0(vd->vdev_leaf_zap);

    (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

    if (list_link_active(&vd->vdev_state_dirty_node))
        vdev_state_clean(vd);
    if (list_link_active(&vd->vdev_config_dirty_node))
        vdev_config_clean(vd);

    /*
     * Clean up the vdev namespace.
     */
    vdev_remove_make_hole_and_free(vd);

    if (ev != NULL)
        spa_event_post(ev);

    return (0);
}

static int
spa_vdev_remove_top_check(vdev_t *vd)
{
    spa_t *spa = vd->vdev_spa;

    if (vd != vd->vdev_top)
        return (SET_ERROR(ENOTSUP));

    if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
        return (SET_ERROR(ENOTSUP));

    /*
     * There has to be enough free space to remove the
     * device and leave double the "slop" space (i.e. we
     * must leave at least 3% of the pool free, in addition to
     * the normal slop space).
     */
    if (dsl_dir_space_available(spa->spa_dsl_pool->dp_root_dir,
        NULL, 0, B_TRUE) <
        vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
        return (SET_ERROR(ENOSPC));
    }

    /*
     * There can not be a removal in progress.
     */
    if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
        return (SET_ERROR(EBUSY));

    /*
     * The device must have all its data.
     */
    if (!vdev_dtl_empty(vd, DTL_MISSING) ||
        !vdev_dtl_empty(vd, DTL_OUTAGE))
        return (SET_ERROR(EBUSY));

    /*
     * The device must be healthy.
     */
    if (!vdev_readable(vd))
        return (SET_ERROR(EIO));

    /*
     * All vdevs in normal class must have the same ashift.
     */
    if (spa->spa_max_ashift != spa->spa_min_ashift) {
        return (SET_ERROR(EINVAL));
    }

    /*
     * All vdevs in normal class must have the same ashift
     * and not be raidz.
     */
    vdev_t *rvd = spa->spa_root_vdev;
    int num_indirect = 0;
    for (uint64_t id = 0; id < rvd->vdev_children; id++) {
        vdev_t *cvd = rvd->vdev_child[id];
        if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
            ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
        if (cvd->vdev_ops == &vdev_indirect_ops)
            num_indirect++;
        if (!vdev_is_concrete(cvd))
            continue;
        if (cvd->vdev_ops == &vdev_raidz_ops)
            return (SET_ERROR(EINVAL));
        /*
         * Need the mirror to be mirror of leaf vdevs only
         */
        if (cvd->vdev_ops == &vdev_mirror_ops) {
            for (uint64_t cid = 0;
                cid < cvd->vdev_children; cid++) {
                vdev_t *tmp = cvd->vdev_child[cid];
                if (!tmp->vdev_ops->vdev_op_leaf)
                    return (SET_ERROR(EINVAL));
            }
        }
    }

    return (0);
}

/*
 * Initiate removal of a top-level vdev, reducing the total space in the pool.
 * The config lock is held for the specified TXG.  Once initiated,
 * evacuation of all allocated space (copying it to other vdevs) happens
 * in the background (see spa_vdev_remove_thread()), and can be canceled
 * (see spa_vdev_remove_cancel()).  If successful, the vdev will
 * be transformed to an indirect vdev (see spa_vdev_remove_complete()).
 */
static int
spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
{
    spa_t *spa = vd->vdev_spa;
    int error;

    /*
     * Check for errors up-front, so that we don't waste time
     * passivating the metaslab group and clearing the ZIL if there
     * are errors.
     */
    error = spa_vdev_remove_top_check(vd);
    if (error != 0)
        return (error);

    /*
     * Stop allocating from this vdev.  Note that we must check
     * that this is not the only device in the pool before
     * passivating, otherwise we will not be able to make
     * progress because we can't allocate from any vdevs.
     * The above check for sufficient free space serves this
     * purpose.
     */
    metaslab_group_t *mg = vd->vdev_mg;
    metaslab_group_passivate(mg);

    /*
     * Wait for the youngest allocations and frees to sync,
     * and then wait for the deferral of those frees to finish.
     */
    spa_vdev_config_exit(spa, NULL,
        *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

    /*
     * We must ensure that no "stubby" log blocks are allocated
     * on the device to be removed.  These blocks could be
     * written at any time, including while we are in the middle
     * of copying them.
     */
    error = spa_reset_logs(spa);

    *txg = spa_vdev_config_enter(spa);

    /*
     * Things might have changed while the config lock was dropped
     * (e.g. space usage).  Check for errors again.
     */
    if (error == 0)
        error = spa_vdev_remove_top_check(vd);

    if (error != 0) {
        metaslab_group_activate(mg);
        return (error);
    }

    vd->vdev_removing = B_TRUE;

    vdev_dirty_leaves(vd, VDD_DTL, *txg);
    vdev_config_dirty(vd);
    dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
    dsl_sync_task_nowait(spa->spa_dsl_pool,
        vdev_remove_initiate_sync,
        vd, 0, ZFS_SPACE_CHECK_NONE, tx);
    dmu_tx_commit(tx);

    return (0);
}

/*
 * Remove a device from the pool.
 *
 * Removing a device from the vdev namespace requires several steps
 * and can take a significant amount of time.  As a result we use
 * the spa_vdev_config_[enter/exit] functions which allow us to
 * grab and release the spa_config_lock while still holding the namespace
 * lock.  During each step the configuration is synced out.
 */
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
    vdev_t *vd;
    nvlist_t **spares, **l2cache, *nv;
    uint64_t txg = 0;
    uint_t nspares, nl2cache;
    int error = 0;
    boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
    sysevent_t *ev = NULL;

    ASSERT(spa_writeable(spa));

    if (!locked)
        txg = spa_vdev_enter(spa);

    ASSERT(MUTEX_HELD(&spa_namespace_lock));
    if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
        error = (spa_has_checkpoint(spa)) ?
            ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;

        if (!locked)
            return (spa_vdev_exit(spa, NULL, txg, error));

        return (error);
    }

    vd = spa_lookup_by_guid(spa, guid, B_FALSE);

    if (spa->spa_spares.sav_vdevs != NULL &&
        nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
        ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
        (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
        /*
         * Only remove the hot spare if it's not currently in use
         * in this pool.
         */
        if (vd == NULL || unspare) {
            char *nvstr = fnvlist_lookup_string(nv,
                ZPOOL_CONFIG_PATH);
            spa_history_log_internal(spa, "vdev remove", NULL,
                "%s vdev (%s) %s", spa_name(spa),
                VDEV_TYPE_SPARE, nvstr);
            if (vd == NULL)
                vd = spa_lookup_by_guid(spa, guid, B_TRUE);
            ev = spa_event_create(spa, vd, NULL,
                ESC_ZFS_VDEV_REMOVE_AUX);
            spa_vdev_remove_aux(spa->spa_spares.sav_config,
                ZPOOL_CONFIG_SPARES, spares, nspares, nv);
            spa_load_spares(spa);
            spa->spa_spares.sav_sync = B_TRUE;
        } else {
            error = SET_ERROR(EBUSY);
        }
    } else if (spa->spa_l2cache.sav_vdevs != NULL &&
        nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
        ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
        (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
        char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
        spa_history_log_internal(spa, "vdev remove", NULL,
            "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr);
        /*
         * Cache devices can always be removed.
         */
        vd = spa_lookup_by_guid(spa, guid, B_TRUE);
        ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
        spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
            ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
        spa_load_l2cache(spa);
        spa->spa_l2cache.sav_sync = B_TRUE;
    } else if (vd != NULL && vd->vdev_islog) {
        ASSERT(!locked);
        error = spa_vdev_remove_log(vd, &txg);
    } else if (vd != NULL) {
        ASSERT(!locked);
        error = spa_vdev_remove_top(vd, &txg);
    } else {
        /*
         * There is no vdev of any kind with the specified guid.
         */
        error = SET_ERROR(ENOENT);
    }

    if (!locked)
        error = spa_vdev_exit(spa, NULL, txg, error);

    if (ev != NULL) {
        if (error != 0) {
            spa_event_discard(ev);
        } else {
            spa_event_post(ev);
        }
    }

    return (error);
}

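/*
 * Report removal progress to userland (e.g. for "zpool status"): the
 * persistent state from spa_removing_phys, plus any bytes copied in txgs
 * that have not synced yet, plus the in-core memory used by the indirect
 * mappings of all previously removed vdevs.
 */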
1858 */ 1859 if (vd == NULL || unspare) { 1860 char *nvstr = fnvlist_lookup_string(nv, 1861 ZPOOL_CONFIG_PATH); 1862 spa_history_log_internal(spa, "vdev remove", NULL, 1863 "%s vdev (%s) %s", spa_name(spa), 1864 VDEV_TYPE_SPARE, nvstr); 1865 if (vd == NULL) 1866 vd = spa_lookup_by_guid(spa, guid, B_TRUE); 1867 ev = spa_event_create(spa, vd, NULL, 1868 ESC_ZFS_VDEV_REMOVE_AUX); 1869 spa_vdev_remove_aux(spa->spa_spares.sav_config, 1870 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 1871 spa_load_spares(spa); 1872 spa->spa_spares.sav_sync = B_TRUE; 1873 } else { 1874 error = SET_ERROR(EBUSY); 1875 } 1876 } else if (spa->spa_l2cache.sav_vdevs != NULL && 1877 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 1878 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 1879 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 1880 char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH); 1881 spa_history_log_internal(spa, "vdev remove", NULL, 1882 "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr); 1883 /* 1884 * Cache devices can always be removed. 1885 */ 1886 vd = spa_lookup_by_guid(spa, guid, B_TRUE); 1887 ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX); 1888 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 1889 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 1890 spa_load_l2cache(spa); 1891 spa->spa_l2cache.sav_sync = B_TRUE; 1892 } else if (vd != NULL && vd->vdev_islog) { 1893 ASSERT(!locked); 1894 error = spa_vdev_remove_log(vd, &txg); 1895 } else if (vd != NULL) { 1896 ASSERT(!locked); 1897 error = spa_vdev_remove_top(vd, &txg); 1898 } else { 1899 /* 1900 * There is no vdev of any kind with the specified guid. 1901 */ 1902 error = SET_ERROR(ENOENT); 1903 } 1904 1905 if (!locked) 1906 error = spa_vdev_exit(spa, NULL, txg, error); 1907 1908 if (ev != NULL) { 1909 if (error != 0) { 1910 spa_event_discard(ev); 1911 } else { 1912 spa_event_post(ev); 1913 } 1914 } 1915 1916 return (error); 1917 } 1918 1919 int 1920 spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs) 1921 { 1922 prs->prs_state = spa->spa_removing_phys.sr_state; 1923 1924 if (prs->prs_state == DSS_NONE) 1925 return (SET_ERROR(ENOENT)); 1926 1927 prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev; 1928 prs->prs_start_time = spa->spa_removing_phys.sr_start_time; 1929 prs->prs_end_time = spa->spa_removing_phys.sr_end_time; 1930 prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy; 1931 prs->prs_copied = spa->spa_removing_phys.sr_copied; 1932 1933 if (spa->spa_vdev_removal != NULL) { 1934 for (int i = 0; i < TXG_SIZE; i++) { 1935 prs->prs_copied += 1936 spa->spa_vdev_removal->svr_bytes_done[i]; 1937 } 1938 } 1939 1940 prs->prs_mapping_memory = 0; 1941 uint64_t indirect_vdev_id = 1942 spa->spa_removing_phys.sr_prev_indirect_vdev; 1943 while (indirect_vdev_id != -1) { 1944 vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id]; 1945 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 1946 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1947 1948 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 1949 prs->prs_mapping_memory += vdev_indirect_mapping_size(vim); 1950 indirect_vdev_id = vic->vic_prev_indirect_vdev; 1951 } 1952 1953 return (0); 1954 } 1955