1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2018 by Delphix. All rights reserved. 25 */ 26 27 #include <sys/zfs_context.h> 28 #include <sys/spa_impl.h> 29 #include <sys/dmu.h> 30 #include <sys/dmu_tx.h> 31 #include <sys/zap.h> 32 #include <sys/vdev_impl.h> 33 #include <sys/metaslab.h> 34 #include <sys/metaslab_impl.h> 35 #include <sys/uberblock_impl.h> 36 #include <sys/txg.h> 37 #include <sys/avl.h> 38 #include <sys/bpobj.h> 39 #include <sys/dsl_pool.h> 40 #include <sys/dsl_synctask.h> 41 #include <sys/dsl_dir.h> 42 #include <sys/arc.h> 43 #include <sys/zfeature.h> 44 #include <sys/vdev_indirect_births.h> 45 #include <sys/vdev_indirect_mapping.h> 46 #include <sys/abd.h> 47 #include <sys/vdev_initialize.h> 48 #include <sys/vdev_trim.h> 49 50 /* 51 * This file contains the necessary logic to remove vdevs from a 52 * storage pool. Currently, the only devices that can be removed 53 * are log, cache, and spare devices; and top level vdevs from a pool 54 * w/o raidz. (Note that members of a mirror can also be removed 55 * by the detach operation.) 56 * 57 * Log vdevs are removed by evacuating them and then turning the vdev 58 * into a hole vdev while holding spa config locks. 59 * 60 * Top level vdevs are removed and converted into an indirect vdev via 61 * a multi-step process: 62 * 63 * - Disable allocations from this device (spa_vdev_remove_top). 64 * 65 * - From a new thread (spa_vdev_remove_thread), copy data from 66 * the removing vdev to a different vdev. The copy happens in open 67 * context (spa_vdev_copy_impl) and issues a sync task 68 * (vdev_mapping_sync) so the sync thread can update the partial 69 * indirect mappings in core and on disk. 70 * 71 * - If a free happens during a removal, it is freed from the 72 * removing vdev, and if it has already been copied, from the new 73 * location as well (free_from_removing_vdev). 74 * 75 * - After the removal is completed, the copy thread converts the vdev 76 * into an indirect vdev (vdev_remove_complete) before instructing 77 * the sync thread to destroy the space maps and finish the removal 78 * (spa_finish_removal). 79 */ 80 81 typedef struct vdev_copy_arg { 82 metaslab_t *vca_msp; 83 uint64_t vca_outstanding_bytes; 84 kcondvar_t vca_cv; 85 kmutex_t vca_lock; 86 } vdev_copy_arg_t; 87 88 /* 89 * The maximum amount of memory we can use for outstanding i/o while 90 * doing a device removal. This determines how much i/o we can have 91 * in flight concurrently. 92 */ 93 int zfs_remove_max_copy_bytes = 64 * 1024 * 1024; 94 95 /* 96 * The largest contiguous segment that we will attempt to allocate when 97 * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If 98 * there is a performance problem with attempting to allocate large blocks, 99 * consider decreasing this. 100 * 101 * Note: we will issue I/Os of up to this size. The mpt driver does not 102 * respond well to I/Os larger than 1MB, so we set this to 1MB. (When 103 * mpt processes an I/O larger than 1MB, it needs to do an allocation of 104 * 2 physically contiguous pages; if this allocation fails, mpt will drop 105 * the I/O and hang the device.) 106 */ 107 int zfs_remove_max_segment = 1024 * 1024; 108 109 /* 110 * Allow a remap segment to span free chunks of at most this size. The main 111 * impact of a larger span is that we will read and write larger, more 112 * contiguous chunks, with more "unnecessary" data -- trading off bandwidth 113 * for iops. The value here was chosen to align with 114 * zfs_vdev_read_gap_limit, which is a similar concept when doing regular 115 * reads (but there's no reason it has to be the same). 116 * 117 * Additionally, a higher span will have the following relatively minor 118 * effects: 119 * - the mapping will be smaller, since one entry can cover more allocated 120 * segments 121 * - more of the fragmentation in the removing device will be preserved 122 * - we'll do larger allocations, which may fail and fall back on smaller 123 * allocations 124 */ 125 int vdev_removal_max_span = 32 * 1024; 126 127 /* 128 * This is used by the test suite so that it can ensure that certain 129 * actions happen while in the middle of a removal. 130 */ 131 int zfs_removal_suspend_progress = 0; 132 133 #define VDEV_REMOVAL_ZAP_OBJS "lzap" 134 135 static void spa_vdev_remove_thread(void *arg); 136 137 static void 138 spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx) 139 { 140 VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset, 141 DMU_POOL_DIRECTORY_OBJECT, 142 DMU_POOL_REMOVING, sizeof (uint64_t), 143 sizeof (spa->spa_removing_phys) / sizeof (uint64_t), 144 &spa->spa_removing_phys, tx)); 145 } 146 147 static nvlist_t * 148 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 149 { 150 for (int i = 0; i < count; i++) { 151 uint64_t guid = 152 fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID); 153 154 if (guid == target_guid) 155 return (nvpp[i]); 156 } 157 158 return (NULL); 159 } 160 161 static void 162 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 163 nvlist_t *dev_to_remove) 164 { 165 nvlist_t **newdev = NULL; 166 167 if (count > 1) 168 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 169 170 for (int i = 0, j = 0; i < count; i++) { 171 if (dev[i] == dev_to_remove) 172 continue; 173 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 174 } 175 176 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 177 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 178 179 for (int i = 0; i < count - 1; i++) 180 nvlist_free(newdev[i]); 181 182 if (count > 1) 183 kmem_free(newdev, (count - 1) * sizeof (void *)); 184 } 185 186 static spa_vdev_removal_t * 187 spa_vdev_removal_create(vdev_t *vd) 188 { 189 spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP); 190 mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL); 191 cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL); 192 svr->svr_allocd_segs = range_tree_create(NULL, NULL); 193 svr->svr_vdev_id = vd->vdev_id; 194 195 for (int i = 0; i < TXG_SIZE; i++) { 196 svr->svr_frees[i] = range_tree_create(NULL, NULL); 197 list_create(&svr->svr_new_segments[i], 198 sizeof (vdev_indirect_mapping_entry_t), 199 offsetof(vdev_indirect_mapping_entry_t, vime_node)); 200 } 201 202 return (svr); 203 } 204 205 void 206 spa_vdev_removal_destroy(spa_vdev_removal_t *svr) 207 { 208 for (int i = 0; i < TXG_SIZE; i++) { 209 ASSERT0(svr->svr_bytes_done[i]); 210 ASSERT0(svr->svr_max_offset_to_sync[i]); 211 range_tree_destroy(svr->svr_frees[i]); 212 list_destroy(&svr->svr_new_segments[i]); 213 } 214 215 range_tree_destroy(svr->svr_allocd_segs); 216 mutex_destroy(&svr->svr_lock); 217 cv_destroy(&svr->svr_cv); 218 kmem_free(svr, sizeof (*svr)); 219 } 220 221 /* 222 * This is called as a synctask in the txg in which we will mark this vdev 223 * as removing (in the config stored in the MOS). 224 * 225 * It begins the evacuation of a toplevel vdev by: 226 * - initializing the spa_removing_phys which tracks this removal 227 * - computing the amount of space to remove for accounting purposes 228 * - dirtying all dbufs in the spa_config_object 229 * - creating the spa_vdev_removal 230 * - starting the spa_vdev_remove_thread 231 */ 232 static void 233 vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx) 234 { 235 int vdev_id = (uintptr_t)arg; 236 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 237 vdev_t *vd = vdev_lookup_top(spa, vdev_id); 238 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 239 objset_t *mos = spa->spa_dsl_pool->dp_meta_objset; 240 spa_vdev_removal_t *svr = NULL; 241 uint64_t txg = dmu_tx_get_txg(tx); 242 243 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops); 244 svr = spa_vdev_removal_create(vd); 245 246 ASSERT(vd->vdev_removing); 247 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL); 248 249 spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx); 250 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) { 251 /* 252 * By activating the OBSOLETE_COUNTS feature, we prevent 253 * the pool from being downgraded and ensure that the 254 * refcounts are precise. 255 */ 256 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 257 uint64_t one = 1; 258 VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap, 259 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1, 260 &one, tx)); 261 ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0); 262 } 263 264 vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx); 265 vd->vdev_indirect_mapping = 266 vdev_indirect_mapping_open(mos, vic->vic_mapping_object); 267 vic->vic_births_object = vdev_indirect_births_alloc(mos, tx); 268 vd->vdev_indirect_births = 269 vdev_indirect_births_open(mos, vic->vic_births_object); 270 spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id; 271 spa->spa_removing_phys.sr_start_time = gethrestime_sec(); 272 spa->spa_removing_phys.sr_end_time = 0; 273 spa->spa_removing_phys.sr_state = DSS_SCANNING; 274 spa->spa_removing_phys.sr_to_copy = 0; 275 spa->spa_removing_phys.sr_copied = 0; 276 277 /* 278 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because 279 * there may be space in the defer tree, which is free, but still 280 * counted in vs_alloc. 281 */ 282 for (uint64_t i = 0; i < vd->vdev_ms_count; i++) { 283 metaslab_t *ms = vd->vdev_ms[i]; 284 if (ms->ms_sm == NULL) 285 continue; 286 287 spa->spa_removing_phys.sr_to_copy += 288 metaslab_allocated_space(ms); 289 290 /* 291 * Space which we are freeing this txg does not need to 292 * be copied. 293 */ 294 spa->spa_removing_phys.sr_to_copy -= 295 range_tree_space(ms->ms_freeing); 296 297 ASSERT0(range_tree_space(ms->ms_freed)); 298 for (int t = 0; t < TXG_SIZE; t++) 299 ASSERT0(range_tree_space(ms->ms_allocating[t])); 300 } 301 302 /* 303 * Sync tasks are called before metaslab_sync(), so there should 304 * be no already-synced metaslabs in the TXG_CLEAN list. 305 */ 306 ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL); 307 308 spa_sync_removing_state(spa, tx); 309 310 /* 311 * All blocks that we need to read the most recent mapping must be 312 * stored on concrete vdevs. Therefore, we must dirty anything that 313 * is read before spa_remove_init(). Specifically, the 314 * spa_config_object. (Note that although we already modified the 315 * spa_config_object in spa_sync_removing_state, that may not have 316 * modified all blocks of the object.) 317 */ 318 dmu_object_info_t doi; 319 VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi)); 320 for (uint64_t offset = 0; offset < doi.doi_max_offset; ) { 321 dmu_buf_t *dbuf; 322 VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT, 323 offset, FTAG, &dbuf, 0)); 324 dmu_buf_will_dirty(dbuf, tx); 325 offset += dbuf->db_size; 326 dmu_buf_rele(dbuf, FTAG); 327 } 328 329 /* 330 * Now that we've allocated the im_object, dirty the vdev to ensure 331 * that the object gets written to the config on disk. 332 */ 333 vdev_config_dirty(vd); 334 335 zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu " 336 "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx), 337 vic->vic_mapping_object); 338 339 spa_history_log_internal(spa, "vdev remove started", tx, 340 "%s vdev %llu %s", spa_name(spa), vd->vdev_id, 341 (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 342 /* 343 * Setting spa_vdev_removal causes subsequent frees to call 344 * free_from_removing_vdev(). Note that we don't need any locking 345 * because we are the sync thread, and metaslab_free_impl() is only 346 * called from syncing context (potentially from a zio taskq thread, 347 * but in any case only when there are outstanding free i/os, which 348 * there are not). 349 */ 350 ASSERT3P(spa->spa_vdev_removal, ==, NULL); 351 spa->spa_vdev_removal = svr; 352 svr->svr_thread = thread_create(NULL, 0, 353 spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri); 354 } 355 356 /* 357 * When we are opening a pool, we must read the mapping for each 358 * indirect vdev in order from most recently removed to least 359 * recently removed. We do this because the blocks for the mapping 360 * of older indirect vdevs may be stored on more recently removed vdevs. 361 * In order to read each indirect mapping object, we must have 362 * initialized all more recently removed vdevs. 363 */ 364 int 365 spa_remove_init(spa_t *spa) 366 { 367 int error; 368 369 error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset, 370 DMU_POOL_DIRECTORY_OBJECT, 371 DMU_POOL_REMOVING, sizeof (uint64_t), 372 sizeof (spa->spa_removing_phys) / sizeof (uint64_t), 373 &spa->spa_removing_phys); 374 375 if (error == ENOENT) { 376 spa->spa_removing_phys.sr_state = DSS_NONE; 377 spa->spa_removing_phys.sr_removing_vdev = -1; 378 spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 379 spa->spa_indirect_vdevs_loaded = B_TRUE; 380 return (0); 381 } else if (error != 0) { 382 return (error); 383 } 384 385 if (spa->spa_removing_phys.sr_state == DSS_SCANNING) { 386 /* 387 * We are currently removing a vdev. Create and 388 * initialize a spa_vdev_removal_t from the bonus 389 * buffer of the removing vdevs vdev_im_object, and 390 * initialize its partial mapping. 391 */ 392 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 393 vdev_t *vd = vdev_lookup_top(spa, 394 spa->spa_removing_phys.sr_removing_vdev); 395 396 if (vd == NULL) { 397 spa_config_exit(spa, SCL_STATE, FTAG); 398 return (EINVAL); 399 } 400 401 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 402 403 ASSERT(vdev_is_concrete(vd)); 404 spa_vdev_removal_t *svr = spa_vdev_removal_create(vd); 405 ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id); 406 ASSERT(vd->vdev_removing); 407 408 vd->vdev_indirect_mapping = vdev_indirect_mapping_open( 409 spa->spa_meta_objset, vic->vic_mapping_object); 410 vd->vdev_indirect_births = vdev_indirect_births_open( 411 spa->spa_meta_objset, vic->vic_births_object); 412 spa_config_exit(spa, SCL_STATE, FTAG); 413 414 spa->spa_vdev_removal = svr; 415 } 416 417 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 418 uint64_t indirect_vdev_id = 419 spa->spa_removing_phys.sr_prev_indirect_vdev; 420 while (indirect_vdev_id != UINT64_MAX) { 421 vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id); 422 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 423 424 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 425 vd->vdev_indirect_mapping = vdev_indirect_mapping_open( 426 spa->spa_meta_objset, vic->vic_mapping_object); 427 vd->vdev_indirect_births = vdev_indirect_births_open( 428 spa->spa_meta_objset, vic->vic_births_object); 429 430 indirect_vdev_id = vic->vic_prev_indirect_vdev; 431 } 432 spa_config_exit(spa, SCL_STATE, FTAG); 433 434 /* 435 * Now that we've loaded all the indirect mappings, we can allow 436 * reads from other blocks (e.g. via predictive prefetch). 437 */ 438 spa->spa_indirect_vdevs_loaded = B_TRUE; 439 return (0); 440 } 441 442 void 443 spa_restart_removal(spa_t *spa) 444 { 445 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 446 447 if (svr == NULL) 448 return; 449 450 /* 451 * In general when this function is called there is no 452 * removal thread running. The only scenario where this 453 * is not true is during spa_import() where this function 454 * is called twice [once from spa_import_impl() and 455 * spa_async_resume()]. Thus, in the scenario where we 456 * import a pool that has an ongoing removal we don't 457 * want to spawn a second thread. 458 */ 459 if (svr->svr_thread != NULL) 460 return; 461 462 if (!spa_writeable(spa)) 463 return; 464 465 zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id); 466 svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa, 467 0, &p0, TS_RUN, minclsyspri); 468 } 469 470 /* 471 * Process freeing from a device which is in the middle of being removed. 472 * We must handle this carefully so that we attempt to copy freed data, 473 * and we correctly free already-copied data. 474 */ 475 void 476 free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size) 477 { 478 spa_t *spa = vd->vdev_spa; 479 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 480 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 481 uint64_t txg = spa_syncing_txg(spa); 482 uint64_t max_offset_yet = 0; 483 484 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0); 485 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==, 486 vdev_indirect_mapping_object(vim)); 487 ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id); 488 489 mutex_enter(&svr->svr_lock); 490 491 /* 492 * Remove the segment from the removing vdev's spacemap. This 493 * ensures that we will not attempt to copy this space (if the 494 * removal thread has not yet visited it), and also ensures 495 * that we know what is actually allocated on the new vdevs 496 * (needed if we cancel the removal). 497 * 498 * Note: we must do the metaslab_free_concrete() with the svr_lock 499 * held, so that the remove_thread can not load this metaslab and then 500 * visit this offset between the time that we metaslab_free_concrete() 501 * and when we check to see if it has been visited. 502 * 503 * Note: The checkpoint flag is set to false as having/taking 504 * a checkpoint and removing a device can't happen at the same 505 * time. 506 */ 507 ASSERT(!spa_has_checkpoint(spa)); 508 metaslab_free_concrete(vd, offset, size, B_FALSE); 509 510 uint64_t synced_size = 0; 511 uint64_t synced_offset = 0; 512 uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim); 513 if (offset < max_offset_synced) { 514 /* 515 * The mapping for this offset is already on disk. 516 * Free from the new location. 517 * 518 * Note that we use svr_max_synced_offset because it is 519 * updated atomically with respect to the in-core mapping. 520 * By contrast, vim_max_offset is not. 521 * 522 * This block may be split between a synced entry and an 523 * in-flight or unvisited entry. Only process the synced 524 * portion of it here. 525 */ 526 synced_size = MIN(size, max_offset_synced - offset); 527 synced_offset = offset; 528 529 ASSERT3U(max_offset_yet, <=, max_offset_synced); 530 max_offset_yet = max_offset_synced; 531 532 DTRACE_PROBE3(remove__free__synced, 533 spa_t *, spa, 534 uint64_t, offset, 535 uint64_t, synced_size); 536 537 size -= synced_size; 538 offset += synced_size; 539 } 540 541 /* 542 * Look at all in-flight txgs starting from the currently syncing one 543 * and see if a section of this free is being copied. By starting from 544 * this txg and iterating forward, we might find that this region 545 * was copied in two different txgs and handle it appropriately. 546 */ 547 for (int i = 0; i < TXG_CONCURRENT_STATES; i++) { 548 int txgoff = (txg + i) & TXG_MASK; 549 if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) { 550 /* 551 * The mapping for this offset is in flight, and 552 * will be synced in txg+i. 553 */ 554 uint64_t inflight_size = MIN(size, 555 svr->svr_max_offset_to_sync[txgoff] - offset); 556 557 DTRACE_PROBE4(remove__free__inflight, 558 spa_t *, spa, 559 uint64_t, offset, 560 uint64_t, inflight_size, 561 uint64_t, txg + i); 562 563 /* 564 * We copy data in order of increasing offset. 565 * Therefore the max_offset_to_sync[] must increase 566 * (or be zero, indicating that nothing is being 567 * copied in that txg). 568 */ 569 if (svr->svr_max_offset_to_sync[txgoff] != 0) { 570 ASSERT3U(svr->svr_max_offset_to_sync[txgoff], 571 >=, max_offset_yet); 572 max_offset_yet = 573 svr->svr_max_offset_to_sync[txgoff]; 574 } 575 576 /* 577 * We've already committed to copying this segment: 578 * we have allocated space elsewhere in the pool for 579 * it and have an IO outstanding to copy the data. We 580 * cannot free the space before the copy has 581 * completed, or else the copy IO might overwrite any 582 * new data. To free that space, we record the 583 * segment in the appropriate svr_frees tree and free 584 * the mapped space later, in the txg where we have 585 * completed the copy and synced the mapping (see 586 * vdev_mapping_sync). 587 */ 588 range_tree_add(svr->svr_frees[txgoff], 589 offset, inflight_size); 590 size -= inflight_size; 591 offset += inflight_size; 592 593 /* 594 * This space is already accounted for as being 595 * done, because it is being copied in txg+i. 596 * However, if i!=0, then it is being copied in 597 * a future txg. If we crash after this txg 598 * syncs but before txg+i syncs, then the space 599 * will be free. Therefore we must account 600 * for the space being done in *this* txg 601 * (when it is freed) rather than the future txg 602 * (when it will be copied). 603 */ 604 ASSERT3U(svr->svr_bytes_done[txgoff], >=, 605 inflight_size); 606 svr->svr_bytes_done[txgoff] -= inflight_size; 607 svr->svr_bytes_done[txg & TXG_MASK] += inflight_size; 608 } 609 } 610 ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]); 611 612 if (size > 0) { 613 /* 614 * The copy thread has not yet visited this offset. Ensure 615 * that it doesn't. 616 */ 617 618 DTRACE_PROBE3(remove__free__unvisited, 619 spa_t *, spa, 620 uint64_t, offset, 621 uint64_t, size); 622 623 if (svr->svr_allocd_segs != NULL) 624 range_tree_clear(svr->svr_allocd_segs, offset, size); 625 626 /* 627 * Since we now do not need to copy this data, for 628 * accounting purposes we have done our job and can count 629 * it as completed. 630 */ 631 svr->svr_bytes_done[txg & TXG_MASK] += size; 632 } 633 mutex_exit(&svr->svr_lock); 634 635 /* 636 * Now that we have dropped svr_lock, process the synced portion 637 * of this free. 638 */ 639 if (synced_size > 0) { 640 vdev_indirect_mark_obsolete(vd, synced_offset, synced_size); 641 642 /* 643 * Note: this can only be called from syncing context, 644 * and the vdev_indirect_mapping is only changed from the 645 * sync thread, so we don't need svr_lock while doing 646 * metaslab_free_impl_cb. 647 */ 648 boolean_t checkpoint = B_FALSE; 649 vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size, 650 metaslab_free_impl_cb, &checkpoint); 651 } 652 } 653 654 /* 655 * Stop an active removal and update the spa_removing phys. 656 */ 657 static void 658 spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx) 659 { 660 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 661 ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa)); 662 663 /* Ensure the removal thread has completed before we free the svr. */ 664 spa_vdev_remove_suspend(spa); 665 666 ASSERT(state == DSS_FINISHED || state == DSS_CANCELED); 667 668 if (state == DSS_FINISHED) { 669 spa_removing_phys_t *srp = &spa->spa_removing_phys; 670 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 671 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 672 673 if (srp->sr_prev_indirect_vdev != UINT64_MAX) { 674 vdev_t *pvd = vdev_lookup_top(spa, 675 srp->sr_prev_indirect_vdev); 676 ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops); 677 } 678 679 vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev; 680 srp->sr_prev_indirect_vdev = vd->vdev_id; 681 } 682 spa->spa_removing_phys.sr_state = state; 683 spa->spa_removing_phys.sr_end_time = gethrestime_sec(); 684 685 spa->spa_vdev_removal = NULL; 686 spa_vdev_removal_destroy(svr); 687 688 spa_sync_removing_state(spa, tx); 689 690 vdev_config_dirty(spa->spa_root_vdev); 691 } 692 693 static void 694 free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size) 695 { 696 vdev_t *vd = arg; 697 vdev_indirect_mark_obsolete(vd, offset, size); 698 boolean_t checkpoint = B_FALSE; 699 vdev_indirect_ops.vdev_op_remap(vd, offset, size, 700 metaslab_free_impl_cb, &checkpoint); 701 } 702 703 /* 704 * On behalf of the removal thread, syncs an incremental bit more of 705 * the indirect mapping to disk and updates the in-memory mapping. 706 * Called as a sync task in every txg that the removal thread makes progress. 707 */ 708 static void 709 vdev_mapping_sync(void *arg, dmu_tx_t *tx) 710 { 711 spa_vdev_removal_t *svr = arg; 712 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 713 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 714 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 715 uint64_t txg = dmu_tx_get_txg(tx); 716 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 717 718 ASSERT(vic->vic_mapping_object != 0); 719 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 720 721 vdev_indirect_mapping_add_entries(vim, 722 &svr->svr_new_segments[txg & TXG_MASK], tx); 723 vdev_indirect_births_add_entry(vd->vdev_indirect_births, 724 vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx); 725 726 /* 727 * Free the copied data for anything that was freed while the 728 * mapping entries were in flight. 729 */ 730 mutex_enter(&svr->svr_lock); 731 range_tree_vacate(svr->svr_frees[txg & TXG_MASK], 732 free_mapped_segment_cb, vd); 733 ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=, 734 vdev_indirect_mapping_max_offset(vim)); 735 svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0; 736 mutex_exit(&svr->svr_lock); 737 738 spa_sync_removing_state(spa, tx); 739 } 740 741 typedef struct vdev_copy_segment_arg { 742 spa_t *vcsa_spa; 743 dva_t *vcsa_dest_dva; 744 uint64_t vcsa_txg; 745 range_tree_t *vcsa_obsolete_segs; 746 } vdev_copy_segment_arg_t; 747 748 static void 749 unalloc_seg(void *arg, uint64_t start, uint64_t size) 750 { 751 vdev_copy_segment_arg_t *vcsa = arg; 752 spa_t *spa = vcsa->vcsa_spa; 753 blkptr_t bp = { 0 }; 754 755 BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL); 756 BP_SET_LSIZE(&bp, size); 757 BP_SET_PSIZE(&bp, size); 758 BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF); 759 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF); 760 BP_SET_TYPE(&bp, DMU_OT_NONE); 761 BP_SET_LEVEL(&bp, 0); 762 BP_SET_DEDUP(&bp, 0); 763 BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER); 764 765 DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva)); 766 DVA_SET_OFFSET(&bp.blk_dva[0], 767 DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start); 768 DVA_SET_ASIZE(&bp.blk_dva[0], size); 769 770 zio_free(spa, vcsa->vcsa_txg, &bp); 771 } 772 773 /* 774 * All reads and writes associated with a call to spa_vdev_copy_segment() 775 * are done. 776 */ 777 static void 778 spa_vdev_copy_segment_done(zio_t *zio) 779 { 780 vdev_copy_segment_arg_t *vcsa = zio->io_private; 781 782 range_tree_vacate(vcsa->vcsa_obsolete_segs, 783 unalloc_seg, vcsa); 784 range_tree_destroy(vcsa->vcsa_obsolete_segs); 785 kmem_free(vcsa, sizeof (*vcsa)); 786 787 spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa); 788 } 789 790 /* 791 * The write of the new location is done. 792 */ 793 static void 794 spa_vdev_copy_segment_write_done(zio_t *zio) 795 { 796 vdev_copy_arg_t *vca = zio->io_private; 797 798 abd_free(zio->io_abd); 799 800 mutex_enter(&vca->vca_lock); 801 vca->vca_outstanding_bytes -= zio->io_size; 802 cv_signal(&vca->vca_cv); 803 mutex_exit(&vca->vca_lock); 804 } 805 806 /* 807 * The read of the old location is done. The parent zio is the write to 808 * the new location. Allow it to start. 809 */ 810 static void 811 spa_vdev_copy_segment_read_done(zio_t *zio) 812 { 813 zio_nowait(zio_unique_parent(zio)); 814 } 815 816 /* 817 * If the old and new vdevs are mirrors, we will read both sides of the old 818 * mirror, and write each copy to the corresponding side of the new mirror. 819 * If the old and new vdevs have a different number of children, we will do 820 * this as best as possible. Since we aren't verifying checksums, this 821 * ensures that as long as there's a good copy of the data, we'll have a 822 * good copy after the removal, even if there's silent damage to one side 823 * of the mirror. If we're removing a mirror that has some silent damage, 824 * we'll have exactly the same damage in the new location (assuming that 825 * the new location is also a mirror). 826 * 827 * We accomplish this by creating a tree of zio_t's, with as many writes as 828 * there are "children" of the new vdev (a non-redundant vdev counts as one 829 * child, a 2-way mirror has 2 children, etc). Each write has an associated 830 * read from a child of the old vdev. Typically there will be the same 831 * number of children of the old and new vdevs. However, if there are more 832 * children of the new vdev, some child(ren) of the old vdev will be issued 833 * multiple reads. If there are more children of the old vdev, some copies 834 * will be dropped. 835 * 836 * For example, the tree of zio_t's for a 2-way mirror is: 837 * 838 * null 839 * / \ 840 * write(new vdev, child 0) write(new vdev, child 1) 841 * | | 842 * read(old vdev, child 0) read(old vdev, child 1) 843 * 844 * Child zio's complete before their parents complete. However, zio's 845 * created with zio_vdev_child_io() may be issued before their children 846 * complete. In this case we need to make sure that the children (reads) 847 * complete before the parents (writes) are *issued*. We do this by not 848 * calling zio_nowait() on each write until its corresponding read has 849 * completed. 850 * 851 * The spa_config_lock must be held while zio's created by 852 * zio_vdev_child_io() are in progress, to ensure that the vdev tree does 853 * not change (e.g. due to a concurrent "zpool attach/detach"). The "null" 854 * zio is needed to release the spa_config_lock after all the reads and 855 * writes complete. (Note that we can't grab the config lock for each read, 856 * because it is not reentrant - we could deadlock with a thread waiting 857 * for a write lock.) 858 */ 859 static void 860 spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio, 861 vdev_t *source_vd, uint64_t source_offset, 862 vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size) 863 { 864 ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0); 865 866 mutex_enter(&vca->vca_lock); 867 vca->vca_outstanding_bytes += size; 868 mutex_exit(&vca->vca_lock); 869 870 abd_t *abd = abd_alloc_for_io(size, B_FALSE); 871 872 vdev_t *source_child_vd; 873 if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) { 874 /* 875 * Source and dest are both mirrors. Copy from the same 876 * child id as we are copying to (wrapping around if there 877 * are more dest children than source children). 878 */ 879 source_child_vd = 880 source_vd->vdev_child[dest_id % source_vd->vdev_children]; 881 } else { 882 source_child_vd = source_vd; 883 } 884 885 zio_t *write_zio = zio_vdev_child_io(nzio, NULL, 886 dest_child_vd, dest_offset, abd, size, 887 ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL, 888 ZIO_FLAG_CANFAIL, 889 spa_vdev_copy_segment_write_done, vca); 890 891 zio_nowait(zio_vdev_child_io(write_zio, NULL, 892 source_child_vd, source_offset, abd, size, 893 ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL, 894 ZIO_FLAG_CANFAIL, 895 spa_vdev_copy_segment_read_done, vca)); 896 } 897 898 /* 899 * Allocate a new location for this segment, and create the zio_t's to 900 * read from the old location and write to the new location. 901 */ 902 static int 903 spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs, 904 uint64_t maxalloc, uint64_t txg, 905 vdev_copy_arg_t *vca, zio_alloc_list_t *zal) 906 { 907 metaslab_group_t *mg = vd->vdev_mg; 908 spa_t *spa = vd->vdev_spa; 909 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 910 vdev_indirect_mapping_entry_t *entry; 911 dva_t dst = { 0 }; 912 uint64_t start = range_tree_min(segs); 913 914 ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE); 915 916 uint64_t size = range_tree_span(segs); 917 if (range_tree_span(segs) > maxalloc) { 918 /* 919 * We can't allocate all the segments. Prefer to end 920 * the allocation at the end of a segment, thus avoiding 921 * additional split blocks. 922 */ 923 range_seg_t search; 924 avl_index_t where; 925 search.rs_start = start + maxalloc; 926 search.rs_end = search.rs_start; 927 range_seg_t *rs = avl_find(&segs->rt_root, &search, &where); 928 if (rs == NULL) { 929 rs = avl_nearest(&segs->rt_root, where, AVL_BEFORE); 930 } else { 931 rs = AVL_PREV(&segs->rt_root, rs); 932 } 933 if (rs != NULL) { 934 size = rs->rs_end - start; 935 } else { 936 /* 937 * There are no segments that end before maxalloc. 938 * I.e. the first segment is larger than maxalloc, 939 * so we must split it. 940 */ 941 size = maxalloc; 942 } 943 } 944 ASSERT3U(size, <=, maxalloc); 945 946 /* 947 * An allocation class might not have any remaining vdevs or space 948 */ 949 metaslab_class_t *mc = mg->mg_class; 950 if (mc != spa_normal_class(spa) && mc->mc_groups <= 1) 951 mc = spa_normal_class(spa); 952 int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0, 953 zal, 0); 954 if (error == ENOSPC && mc != spa_normal_class(spa)) { 955 error = metaslab_alloc_dva(spa, spa_normal_class(spa), size, 956 &dst, 0, NULL, txg, 0, zal, 0); 957 } 958 if (error != 0) 959 return (error); 960 961 /* 962 * Determine the ranges that are not actually needed. Offsets are 963 * relative to the start of the range to be copied (i.e. relative to the 964 * local variable "start"). 965 */ 966 range_tree_t *obsolete_segs = range_tree_create(NULL, NULL); 967 968 range_seg_t *rs = avl_first(&segs->rt_root); 969 ASSERT3U(rs->rs_start, ==, start); 970 uint64_t prev_seg_end = rs->rs_end; 971 while ((rs = AVL_NEXT(&segs->rt_root, rs)) != NULL) { 972 if (rs->rs_start >= start + size) { 973 break; 974 } else { 975 range_tree_add(obsolete_segs, 976 prev_seg_end - start, 977 rs->rs_start - prev_seg_end); 978 } 979 prev_seg_end = rs->rs_end; 980 } 981 /* We don't end in the middle of an obsolete range */ 982 ASSERT3U(start + size, <=, prev_seg_end); 983 984 range_tree_clear(segs, start, size); 985 986 /* 987 * We can't have any padding of the allocated size, otherwise we will 988 * misunderstand what's allocated, and the size of the mapping. 989 * The caller ensures this will be true by passing in a size that is 990 * aligned to the worst (highest) ashift in the pool. 991 */ 992 ASSERT3U(DVA_GET_ASIZE(&dst), ==, size); 993 994 entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP); 995 DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start); 996 entry->vime_mapping.vimep_dst = dst; 997 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) { 998 entry->vime_obsolete_count = range_tree_space(obsolete_segs); 999 } 1000 1001 vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP); 1002 vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst; 1003 vcsa->vcsa_obsolete_segs = obsolete_segs; 1004 vcsa->vcsa_spa = spa; 1005 vcsa->vcsa_txg = txg; 1006 1007 /* 1008 * See comment before spa_vdev_copy_one_child(). 1009 */ 1010 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 1011 zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL, 1012 spa_vdev_copy_segment_done, vcsa, 0); 1013 vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst)); 1014 if (dest_vd->vdev_ops == &vdev_mirror_ops) { 1015 for (int i = 0; i < dest_vd->vdev_children; i++) { 1016 vdev_t *child = dest_vd->vdev_child[i]; 1017 spa_vdev_copy_one_child(vca, nzio, vd, start, 1018 child, DVA_GET_OFFSET(&dst), i, size); 1019 } 1020 } else { 1021 spa_vdev_copy_one_child(vca, nzio, vd, start, 1022 dest_vd, DVA_GET_OFFSET(&dst), -1, size); 1023 } 1024 zio_nowait(nzio); 1025 1026 list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry); 1027 ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift); 1028 vdev_dirty(vd, 0, NULL, txg); 1029 1030 return (0); 1031 } 1032 1033 /* 1034 * Complete the removal of a toplevel vdev. This is called as a 1035 * synctask in the same txg that we will sync out the new config (to the 1036 * MOS object) which indicates that this vdev is indirect. 1037 */ 1038 static void 1039 vdev_remove_complete_sync(void *arg, dmu_tx_t *tx) 1040 { 1041 spa_vdev_removal_t *svr = arg; 1042 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1043 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1044 1045 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 1046 1047 for (int i = 0; i < TXG_SIZE; i++) { 1048 ASSERT0(svr->svr_bytes_done[i]); 1049 } 1050 1051 ASSERT3U(spa->spa_removing_phys.sr_copied, ==, 1052 spa->spa_removing_phys.sr_to_copy); 1053 1054 vdev_destroy_spacemaps(vd, tx); 1055 1056 /* destroy leaf zaps, if any */ 1057 ASSERT3P(svr->svr_zaplist, !=, NULL); 1058 for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL); 1059 pair != NULL; 1060 pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) { 1061 vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx); 1062 } 1063 fnvlist_free(svr->svr_zaplist); 1064 1065 spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx); 1066 /* vd->vdev_path is not available here */ 1067 spa_history_log_internal(spa, "vdev remove completed", tx, 1068 "%s vdev %llu", spa_name(spa), vd->vdev_id); 1069 } 1070 1071 static void 1072 vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist) 1073 { 1074 ASSERT3P(zlist, !=, NULL); 1075 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops); 1076 1077 if (vd->vdev_leaf_zap != 0) { 1078 char zkey[32]; 1079 (void) snprintf(zkey, sizeof (zkey), "%s-%"PRIu64, 1080 VDEV_REMOVAL_ZAP_OBJS, vd->vdev_leaf_zap); 1081 fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap); 1082 } 1083 1084 for (uint64_t id = 0; id < vd->vdev_children; id++) { 1085 vdev_remove_enlist_zaps(vd->vdev_child[id], zlist); 1086 } 1087 } 1088 1089 static void 1090 vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg) 1091 { 1092 vdev_t *ivd; 1093 dmu_tx_t *tx; 1094 spa_t *spa = vd->vdev_spa; 1095 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1096 1097 /* 1098 * First, build a list of leaf zaps to be destroyed. 1099 * This is passed to the sync context thread, 1100 * which does the actual unlinking. 1101 */ 1102 svr->svr_zaplist = fnvlist_alloc(); 1103 vdev_remove_enlist_zaps(vd, svr->svr_zaplist); 1104 1105 ivd = vdev_add_parent(vd, &vdev_indirect_ops); 1106 ivd->vdev_removing = 0; 1107 1108 vd->vdev_leaf_zap = 0; 1109 1110 vdev_remove_child(ivd, vd); 1111 vdev_compact_children(ivd); 1112 1113 ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); 1114 1115 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1116 dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr, 1117 0, ZFS_SPACE_CHECK_NONE, tx); 1118 dmu_tx_commit(tx); 1119 1120 /* 1121 * Indicate that this thread has exited. 1122 * After this, we can not use svr. 1123 */ 1124 mutex_enter(&svr->svr_lock); 1125 svr->svr_thread = NULL; 1126 cv_broadcast(&svr->svr_cv); 1127 mutex_exit(&svr->svr_lock); 1128 } 1129 1130 /* 1131 * Complete the removal of a toplevel vdev. This is called in open 1132 * context by the removal thread after we have copied all vdev's data. 1133 */ 1134 static void 1135 vdev_remove_complete(spa_t *spa) 1136 { 1137 uint64_t txg; 1138 1139 /* 1140 * Wait for any deferred frees to be synced before we call 1141 * vdev_metaslab_fini() 1142 */ 1143 txg_wait_synced(spa->spa_dsl_pool, 0); 1144 txg = spa_vdev_enter(spa); 1145 vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id); 1146 ASSERT3P(vd->vdev_initialize_thread, ==, NULL); 1147 ASSERT3P(vd->vdev_trim_thread, ==, NULL); 1148 ASSERT3P(vd->vdev_autotrim_thread, ==, NULL); 1149 1150 sysevent_t *ev = spa_event_create(spa, vd, NULL, 1151 ESC_ZFS_VDEV_REMOVE_DEV); 1152 1153 zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu", 1154 vd->vdev_id, txg); 1155 1156 /* 1157 * Discard allocation state. 1158 */ 1159 if (vd->vdev_mg != NULL) { 1160 vdev_metaslab_fini(vd); 1161 metaslab_group_destroy(vd->vdev_mg); 1162 vd->vdev_mg = NULL; 1163 } 1164 ASSERT0(vd->vdev_stat.vs_space); 1165 ASSERT0(vd->vdev_stat.vs_dspace); 1166 1167 vdev_remove_replace_with_indirect(vd, txg); 1168 1169 /* 1170 * We now release the locks, allowing spa_sync to run and finish the 1171 * removal via vdev_remove_complete_sync in syncing context. 1172 * 1173 * Note that we hold on to the vdev_t that has been replaced. Since 1174 * it isn't part of the vdev tree any longer, it can't be concurrently 1175 * manipulated, even while we don't have the config lock. 1176 */ 1177 (void) spa_vdev_exit(spa, NULL, txg, 0); 1178 1179 /* 1180 * Top ZAP should have been transferred to the indirect vdev in 1181 * vdev_remove_replace_with_indirect. 1182 */ 1183 ASSERT0(vd->vdev_top_zap); 1184 1185 /* 1186 * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect. 1187 */ 1188 ASSERT0(vd->vdev_leaf_zap); 1189 1190 txg = spa_vdev_enter(spa); 1191 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 1192 /* 1193 * Request to update the config and the config cachefile. 1194 */ 1195 vdev_config_dirty(spa->spa_root_vdev); 1196 (void) spa_vdev_exit(spa, vd, txg, 0); 1197 1198 spa_event_post(ev); 1199 } 1200 1201 /* 1202 * Evacuates a segment of size at most max_alloc from the vdev 1203 * via repeated calls to spa_vdev_copy_segment. If an allocation 1204 * fails, the pool is probably too fragmented to handle such a 1205 * large size, so decrease max_alloc so that the caller will not try 1206 * this size again this txg. 1207 */ 1208 static void 1209 spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca, 1210 uint64_t *max_alloc, dmu_tx_t *tx) 1211 { 1212 uint64_t txg = dmu_tx_get_txg(tx); 1213 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1214 1215 mutex_enter(&svr->svr_lock); 1216 1217 /* 1218 * Determine how big of a chunk to copy. We can allocate up 1219 * to max_alloc bytes, and we can span up to vdev_removal_max_span 1220 * bytes of unallocated space at a time. "segs" will track the 1221 * allocated segments that we are copying. We may also be copying 1222 * free segments (of up to vdev_removal_max_span bytes). 1223 */ 1224 range_tree_t *segs = range_tree_create(NULL, NULL); 1225 for (;;) { 1226 range_seg_t *rs = avl_first(&svr->svr_allocd_segs->rt_root); 1227 if (rs == NULL) 1228 break; 1229 1230 uint64_t seg_length; 1231 1232 if (range_tree_is_empty(segs)) { 1233 /* need to truncate the first seg based on max_alloc */ 1234 seg_length = 1235 MIN(rs->rs_end - rs->rs_start, *max_alloc); 1236 } else { 1237 if (rs->rs_start - range_tree_max(segs) > 1238 vdev_removal_max_span) { 1239 /* 1240 * Including this segment would cause us to 1241 * copy a larger unneeded chunk than is allowed. 1242 */ 1243 break; 1244 } else if (rs->rs_end - range_tree_min(segs) > 1245 *max_alloc) { 1246 /* 1247 * This additional segment would extend past 1248 * max_alloc. Rather than splitting this 1249 * segment, leave it for the next mapping. 1250 */ 1251 break; 1252 } else { 1253 seg_length = rs->rs_end - rs->rs_start; 1254 } 1255 } 1256 1257 range_tree_add(segs, rs->rs_start, seg_length); 1258 range_tree_remove(svr->svr_allocd_segs, 1259 rs->rs_start, seg_length); 1260 } 1261 1262 if (range_tree_is_empty(segs)) { 1263 mutex_exit(&svr->svr_lock); 1264 range_tree_destroy(segs); 1265 return; 1266 } 1267 1268 if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) { 1269 dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync, 1270 svr, 0, ZFS_SPACE_CHECK_NONE, tx); 1271 } 1272 1273 svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs); 1274 1275 /* 1276 * Note: this is the amount of *allocated* space 1277 * that we are taking care of each txg. 1278 */ 1279 svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs); 1280 1281 mutex_exit(&svr->svr_lock); 1282 1283 zio_alloc_list_t zal; 1284 metaslab_trace_init(&zal); 1285 uint64_t thismax = SPA_MAXBLOCKSIZE; 1286 while (!range_tree_is_empty(segs)) { 1287 int error = spa_vdev_copy_segment(vd, 1288 segs, thismax, txg, vca, &zal); 1289 1290 if (error == ENOSPC) { 1291 /* 1292 * Cut our segment in half, and don't try this 1293 * segment size again this txg. Note that the 1294 * allocation size must be aligned to the highest 1295 * ashift in the pool, so that the allocation will 1296 * not be padded out to a multiple of the ashift, 1297 * which could cause us to think that this mapping 1298 * is larger than we intended. 1299 */ 1300 ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT); 1301 ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift); 1302 uint64_t attempted = 1303 MIN(range_tree_span(segs), thismax); 1304 thismax = P2ROUNDUP(attempted / 2, 1305 1 << spa->spa_max_ashift); 1306 /* 1307 * The minimum-size allocation can not fail. 1308 */ 1309 ASSERT3U(attempted, >, 1 << spa->spa_max_ashift); 1310 *max_alloc = attempted - (1 << spa->spa_max_ashift); 1311 } else { 1312 ASSERT0(error); 1313 1314 /* 1315 * We've performed an allocation, so reset the 1316 * alloc trace list. 1317 */ 1318 metaslab_trace_fini(&zal); 1319 metaslab_trace_init(&zal); 1320 } 1321 } 1322 metaslab_trace_fini(&zal); 1323 range_tree_destroy(segs); 1324 } 1325 1326 /* 1327 * The removal thread operates in open context. It iterates over all 1328 * allocated space in the vdev, by loading each metaslab's spacemap. 1329 * For each contiguous segment of allocated space (capping the segment 1330 * size at SPA_MAXBLOCKSIZE), we: 1331 * - Allocate space for it on another vdev. 1332 * - Create a new mapping from the old location to the new location 1333 * (as a record in svr_new_segments). 1334 * - Initiate a logical read zio to get the data off the removing disk. 1335 * - In the read zio's done callback, initiate a logical write zio to 1336 * write it to the new vdev. 1337 * Note that all of this will take effect when a particular TXG syncs. 1338 * The sync thread ensures that all the phys reads and writes for the syncing 1339 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk 1340 * (see vdev_mapping_sync()). 1341 */ 1342 static void 1343 spa_vdev_remove_thread(void *arg) 1344 { 1345 spa_t *spa = arg; 1346 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1347 vdev_copy_arg_t vca; 1348 uint64_t max_alloc = zfs_remove_max_segment; 1349 uint64_t last_txg = 0; 1350 1351 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 1352 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1353 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1354 uint64_t start_offset = vdev_indirect_mapping_max_offset(vim); 1355 1356 ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops); 1357 ASSERT(vdev_is_concrete(vd)); 1358 ASSERT(vd->vdev_removing); 1359 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0); 1360 ASSERT(vim != NULL); 1361 1362 mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL); 1363 cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL); 1364 vca.vca_outstanding_bytes = 0; 1365 1366 mutex_enter(&svr->svr_lock); 1367 1368 /* 1369 * Start from vim_max_offset so we pick up where we left off 1370 * if we are restarting the removal after opening the pool. 1371 */ 1372 uint64_t msi; 1373 for (msi = start_offset >> vd->vdev_ms_shift; 1374 msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) { 1375 metaslab_t *msp = vd->vdev_ms[msi]; 1376 ASSERT3U(msi, <=, vd->vdev_ms_count); 1377 1378 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 1379 1380 mutex_enter(&msp->ms_sync_lock); 1381 mutex_enter(&msp->ms_lock); 1382 1383 /* 1384 * Assert nothing in flight -- ms_*tree is empty. 1385 */ 1386 for (int i = 0; i < TXG_SIZE; i++) { 1387 ASSERT0(range_tree_space(msp->ms_allocating[i])); 1388 } 1389 1390 /* 1391 * If the metaslab has ever been allocated from (ms_sm!=NULL), 1392 * read the allocated segments from the space map object 1393 * into svr_allocd_segs. Since we do this while holding 1394 * svr_lock and ms_sync_lock, concurrent frees (which 1395 * would have modified the space map) will wait for us 1396 * to finish loading the spacemap, and then take the 1397 * appropriate action (see free_from_removing_vdev()). 1398 */ 1399 if (msp->ms_sm != NULL) { 1400 VERIFY0(space_map_load(msp->ms_sm, 1401 svr->svr_allocd_segs, SM_ALLOC)); 1402 1403 range_tree_walk(msp->ms_freeing, 1404 range_tree_remove, svr->svr_allocd_segs); 1405 1406 /* 1407 * When we are resuming from a paused removal (i.e. 1408 * when importing a pool with a removal in progress), 1409 * discard any state that we have already processed. 1410 */ 1411 range_tree_clear(svr->svr_allocd_segs, 0, start_offset); 1412 } 1413 mutex_exit(&msp->ms_lock); 1414 mutex_exit(&msp->ms_sync_lock); 1415 1416 vca.vca_msp = msp; 1417 zfs_dbgmsg("copying %llu segments for metaslab %llu", 1418 avl_numnodes(&svr->svr_allocd_segs->rt_root), 1419 msp->ms_id); 1420 1421 while (!svr->svr_thread_exit && 1422 !range_tree_is_empty(svr->svr_allocd_segs)) { 1423 1424 mutex_exit(&svr->svr_lock); 1425 1426 /* 1427 * We need to periodically drop the config lock so that 1428 * writers can get in. Additionally, we can't wait 1429 * for a txg to sync while holding a config lock 1430 * (since a waiting writer could cause a 3-way deadlock 1431 * with the sync thread, which also gets a config 1432 * lock for reader). So we can't hold the config lock 1433 * while calling dmu_tx_assign(). 1434 */ 1435 spa_config_exit(spa, SCL_CONFIG, FTAG); 1436 1437 /* 1438 * This delay will pause the removal around the point 1439 * specified by zfs_removal_suspend_progress. We do this 1440 * solely from the test suite or during debugging. 1441 */ 1442 uint64_t bytes_copied = 1443 spa->spa_removing_phys.sr_copied; 1444 for (int i = 0; i < TXG_SIZE; i++) 1445 bytes_copied += svr->svr_bytes_done[i]; 1446 while (zfs_removal_suspend_progress && 1447 !svr->svr_thread_exit) 1448 delay(hz); 1449 1450 mutex_enter(&vca.vca_lock); 1451 while (vca.vca_outstanding_bytes > 1452 zfs_remove_max_copy_bytes) { 1453 cv_wait(&vca.vca_cv, &vca.vca_lock); 1454 } 1455 mutex_exit(&vca.vca_lock); 1456 1457 dmu_tx_t *tx = 1458 dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 1459 1460 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 1461 uint64_t txg = dmu_tx_get_txg(tx); 1462 1463 /* 1464 * Reacquire the vdev_config lock. The vdev_t 1465 * that we're removing may have changed, e.g. due 1466 * to a vdev_attach or vdev_detach. 1467 */ 1468 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 1469 vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1470 1471 if (txg != last_txg) 1472 max_alloc = zfs_remove_max_segment; 1473 last_txg = txg; 1474 1475 spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx); 1476 1477 dmu_tx_commit(tx); 1478 mutex_enter(&svr->svr_lock); 1479 } 1480 } 1481 1482 mutex_exit(&svr->svr_lock); 1483 1484 spa_config_exit(spa, SCL_CONFIG, FTAG); 1485 1486 /* 1487 * Wait for all copies to finish before cleaning up the vca. 1488 */ 1489 txg_wait_synced(spa->spa_dsl_pool, 0); 1490 ASSERT0(vca.vca_outstanding_bytes); 1491 1492 mutex_destroy(&vca.vca_lock); 1493 cv_destroy(&vca.vca_cv); 1494 1495 if (svr->svr_thread_exit) { 1496 mutex_enter(&svr->svr_lock); 1497 range_tree_vacate(svr->svr_allocd_segs, NULL, NULL); 1498 svr->svr_thread = NULL; 1499 cv_broadcast(&svr->svr_cv); 1500 mutex_exit(&svr->svr_lock); 1501 } else { 1502 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 1503 vdev_remove_complete(spa); 1504 } 1505 } 1506 1507 void 1508 spa_vdev_remove_suspend(spa_t *spa) 1509 { 1510 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1511 1512 if (svr == NULL) 1513 return; 1514 1515 mutex_enter(&svr->svr_lock); 1516 svr->svr_thread_exit = B_TRUE; 1517 while (svr->svr_thread != NULL) 1518 cv_wait(&svr->svr_cv, &svr->svr_lock); 1519 svr->svr_thread_exit = B_FALSE; 1520 mutex_exit(&svr->svr_lock); 1521 } 1522 1523 /* ARGSUSED */ 1524 static int 1525 spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx) 1526 { 1527 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1528 1529 if (spa->spa_vdev_removal == NULL) 1530 return (ENOTACTIVE); 1531 return (0); 1532 } 1533 1534 /* 1535 * Cancel a removal by freeing all entries from the partial mapping 1536 * and marking the vdev as no longer being removing. 1537 */ 1538 /* ARGSUSED */ 1539 static void 1540 spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx) 1541 { 1542 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1543 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1544 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1545 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 1546 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1547 objset_t *mos = spa->spa_meta_objset; 1548 1549 ASSERT3P(svr->svr_thread, ==, NULL); 1550 1551 spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx); 1552 if (vdev_obsolete_counts_are_precise(vd)) { 1553 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 1554 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 1555 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx)); 1556 } 1557 1558 if (vdev_obsolete_sm_object(vd) != 0) { 1559 ASSERT(vd->vdev_obsolete_sm != NULL); 1560 ASSERT3U(vdev_obsolete_sm_object(vd), ==, 1561 space_map_object(vd->vdev_obsolete_sm)); 1562 1563 space_map_free(vd->vdev_obsolete_sm, tx); 1564 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 1565 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx)); 1566 space_map_close(vd->vdev_obsolete_sm); 1567 vd->vdev_obsolete_sm = NULL; 1568 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 1569 } 1570 for (int i = 0; i < TXG_SIZE; i++) { 1571 ASSERT(list_is_empty(&svr->svr_new_segments[i])); 1572 ASSERT3U(svr->svr_max_offset_to_sync[i], <=, 1573 vdev_indirect_mapping_max_offset(vim)); 1574 } 1575 1576 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) { 1577 metaslab_t *msp = vd->vdev_ms[msi]; 1578 1579 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim)) 1580 break; 1581 1582 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 1583 1584 mutex_enter(&msp->ms_lock); 1585 1586 /* 1587 * Assert nothing in flight -- ms_*tree is empty. 1588 */ 1589 for (int i = 0; i < TXG_SIZE; i++) 1590 ASSERT0(range_tree_space(msp->ms_allocating[i])); 1591 for (int i = 0; i < TXG_DEFER_SIZE; i++) 1592 ASSERT0(range_tree_space(msp->ms_defer[i])); 1593 ASSERT0(range_tree_space(msp->ms_freed)); 1594 1595 if (msp->ms_sm != NULL) { 1596 mutex_enter(&svr->svr_lock); 1597 VERIFY0(space_map_load(msp->ms_sm, 1598 svr->svr_allocd_segs, SM_ALLOC)); 1599 range_tree_walk(msp->ms_freeing, 1600 range_tree_remove, svr->svr_allocd_segs); 1601 1602 /* 1603 * Clear everything past what has been synced, 1604 * because we have not allocated mappings for it yet. 1605 */ 1606 uint64_t syncd = vdev_indirect_mapping_max_offset(vim); 1607 uint64_t sm_end = msp->ms_sm->sm_start + 1608 msp->ms_sm->sm_size; 1609 if (sm_end > syncd) 1610 range_tree_clear(svr->svr_allocd_segs, 1611 syncd, sm_end - syncd); 1612 1613 mutex_exit(&svr->svr_lock); 1614 } 1615 mutex_exit(&msp->ms_lock); 1616 1617 mutex_enter(&svr->svr_lock); 1618 range_tree_vacate(svr->svr_allocd_segs, 1619 free_mapped_segment_cb, vd); 1620 mutex_exit(&svr->svr_lock); 1621 } 1622 1623 /* 1624 * Note: this must happen after we invoke free_mapped_segment_cb, 1625 * because it adds to the obsolete_segments. 1626 */ 1627 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL); 1628 1629 ASSERT3U(vic->vic_mapping_object, ==, 1630 vdev_indirect_mapping_object(vd->vdev_indirect_mapping)); 1631 vdev_indirect_mapping_close(vd->vdev_indirect_mapping); 1632 vd->vdev_indirect_mapping = NULL; 1633 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx); 1634 vic->vic_mapping_object = 0; 1635 1636 ASSERT3U(vic->vic_births_object, ==, 1637 vdev_indirect_births_object(vd->vdev_indirect_births)); 1638 vdev_indirect_births_close(vd->vdev_indirect_births); 1639 vd->vdev_indirect_births = NULL; 1640 vdev_indirect_births_free(mos, vic->vic_births_object, tx); 1641 vic->vic_births_object = 0; 1642 1643 /* 1644 * We may have processed some frees from the removing vdev in this 1645 * txg, thus increasing svr_bytes_done; discard that here to 1646 * satisfy the assertions in spa_vdev_removal_destroy(). 1647 * Note that future txg's can not have any bytes_done, because 1648 * future TXG's are only modified from open context, and we have 1649 * already shut down the copying thread. 1650 */ 1651 svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0; 1652 spa_finish_removal(spa, DSS_CANCELED, tx); 1653 1654 vd->vdev_removing = B_FALSE; 1655 vdev_config_dirty(vd); 1656 1657 zfs_dbgmsg("canceled device removal for vdev %llu in %llu", 1658 vd->vdev_id, dmu_tx_get_txg(tx)); 1659 spa_history_log_internal(spa, "vdev remove canceled", tx, 1660 "%s vdev %llu %s", spa_name(spa), 1661 vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 1662 } 1663 1664 int 1665 spa_vdev_remove_cancel(spa_t *spa) 1666 { 1667 spa_vdev_remove_suspend(spa); 1668 1669 if (spa->spa_vdev_removal == NULL) 1670 return (ENOTACTIVE); 1671 1672 uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id; 1673 1674 int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check, 1675 spa_vdev_remove_cancel_sync, NULL, 0, 1676 ZFS_SPACE_CHECK_EXTRA_RESERVED); 1677 1678 if (error == 0) { 1679 spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER); 1680 vdev_t *vd = vdev_lookup_top(spa, vdid); 1681 metaslab_group_activate(vd->vdev_mg); 1682 spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG); 1683 } 1684 1685 return (error); 1686 } 1687 1688 void 1689 svr_sync(spa_t *spa, dmu_tx_t *tx) 1690 { 1691 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1692 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; 1693 1694 if (svr == NULL) 1695 return; 1696 1697 /* 1698 * This check is necessary so that we do not dirty the 1699 * DIRECTORY_OBJECT via spa_sync_removing_state() when there 1700 * is nothing to do. Dirtying it every time would prevent us 1701 * from syncing-to-convergence. 1702 */ 1703 if (svr->svr_bytes_done[txgoff] == 0) 1704 return; 1705 1706 /* 1707 * Update progress accounting. 1708 */ 1709 spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff]; 1710 svr->svr_bytes_done[txgoff] = 0; 1711 1712 spa_sync_removing_state(spa, tx); 1713 } 1714 1715 static void 1716 vdev_remove_make_hole_and_free(vdev_t *vd) 1717 { 1718 uint64_t id = vd->vdev_id; 1719 spa_t *spa = vd->vdev_spa; 1720 vdev_t *rvd = spa->spa_root_vdev; 1721 boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 1722 1723 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1724 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1725 1726 vdev_free(vd); 1727 1728 if (last_vdev) { 1729 vdev_compact_children(rvd); 1730 } else { 1731 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 1732 vdev_add_child(rvd, vd); 1733 } 1734 vdev_config_dirty(rvd); 1735 1736 /* 1737 * Reassess the health of our root vdev. 1738 */ 1739 vdev_reopen(rvd); 1740 } 1741 1742 /* 1743 * Remove a log device. The config lock is held for the specified TXG. 1744 */ 1745 static int 1746 spa_vdev_remove_log(vdev_t *vd, uint64_t *txg) 1747 { 1748 metaslab_group_t *mg = vd->vdev_mg; 1749 spa_t *spa = vd->vdev_spa; 1750 int error = 0; 1751 1752 ASSERT(vd->vdev_islog); 1753 ASSERT(vd == vd->vdev_top); 1754 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1755 1756 /* 1757 * Stop allocating from this vdev. 1758 */ 1759 metaslab_group_passivate(mg); 1760 1761 /* 1762 * Wait for the youngest allocations and frees to sync, 1763 * and then wait for the deferral of those frees to finish. 1764 */ 1765 spa_vdev_config_exit(spa, NULL, 1766 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 1767 1768 /* 1769 * Evacuate the device. We don't hold the config lock as 1770 * writer since we need to do I/O but we do keep the 1771 * spa_namespace_lock held. Once this completes the device 1772 * should no longer have any blocks allocated on it. 1773 */ 1774 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1775 if (vd->vdev_stat.vs_alloc != 0) 1776 error = spa_reset_logs(spa); 1777 1778 *txg = spa_vdev_config_enter(spa); 1779 1780 if (error != 0) { 1781 metaslab_group_activate(mg); 1782 return (error); 1783 } 1784 ASSERT0(vd->vdev_stat.vs_alloc); 1785 1786 /* 1787 * The evacuation succeeded. Remove any remaining MOS metadata 1788 * associated with this vdev, and wait for these changes to sync. 1789 */ 1790 vd->vdev_removing = B_TRUE; 1791 1792 vdev_dirty_leaves(vd, VDD_DTL, *txg); 1793 vdev_config_dirty(vd); 1794 1795 vdev_metaslab_fini(vd); 1796 1797 spa_history_log_internal(spa, "vdev remove", NULL, 1798 "%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id, 1799 (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 1800 1801 /* Make sure these changes are sync'ed */ 1802 spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG); 1803 1804 /* Stop initializing and TRIM */ 1805 vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED); 1806 vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED); 1807 vdev_autotrim_stop_wait(vd); 1808 1809 *txg = spa_vdev_config_enter(spa); 1810 1811 sysevent_t *ev = spa_event_create(spa, vd, NULL, 1812 ESC_ZFS_VDEV_REMOVE_DEV); 1813 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1814 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1815 1816 /* The top ZAP should have been destroyed by vdev_remove_empty. */ 1817 ASSERT0(vd->vdev_top_zap); 1818 /* The leaf ZAP should have been destroyed by vdev_dtl_sync. */ 1819 ASSERT0(vd->vdev_leaf_zap); 1820 1821 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 1822 1823 if (list_link_active(&vd->vdev_state_dirty_node)) 1824 vdev_state_clean(vd); 1825 if (list_link_active(&vd->vdev_config_dirty_node)) 1826 vdev_config_clean(vd); 1827 1828 ASSERT0(vd->vdev_stat.vs_alloc); 1829 1830 /* 1831 * Clean up the vdev namespace. 1832 */ 1833 vdev_remove_make_hole_and_free(vd); 1834 1835 if (ev != NULL) 1836 spa_event_post(ev); 1837 1838 return (0); 1839 } 1840 1841 static int 1842 spa_vdev_remove_top_check(vdev_t *vd) 1843 { 1844 spa_t *spa = vd->vdev_spa; 1845 1846 if (vd != vd->vdev_top) 1847 return (SET_ERROR(ENOTSUP)); 1848 1849 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL)) 1850 return (SET_ERROR(ENOTSUP)); 1851 1852 /* available space in the pool's normal class */ 1853 uint64_t available = dsl_dir_space_available( 1854 spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE); 1855 1856 metaslab_class_t *mc = vd->vdev_mg->mg_class; 1857 1858 /* 1859 * When removing a vdev from an allocation class that has 1860 * remaining vdevs, include available space from the class. 1861 */ 1862 if (mc != spa_normal_class(spa) && mc->mc_groups > 1) { 1863 uint64_t class_avail = metaslab_class_get_space(mc) - 1864 metaslab_class_get_alloc(mc); 1865 1866 /* add class space, adjusted for overhead */ 1867 available += (class_avail * 94) / 100; 1868 } 1869 1870 /* 1871 * There has to be enough free space to remove the 1872 * device and leave double the "slop" space (i.e. we 1873 * must leave at least 3% of the pool free, in addition to 1874 * the normal slop space). 1875 */ 1876 if (available < vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) { 1877 return (SET_ERROR(ENOSPC)); 1878 } 1879 1880 /* 1881 * There can not be a removal in progress. 1882 */ 1883 if (spa->spa_removing_phys.sr_state == DSS_SCANNING) 1884 return (SET_ERROR(EBUSY)); 1885 1886 /* 1887 * The device must have all its data. 1888 */ 1889 if (!vdev_dtl_empty(vd, DTL_MISSING) || 1890 !vdev_dtl_empty(vd, DTL_OUTAGE)) 1891 return (SET_ERROR(EBUSY)); 1892 1893 /* 1894 * The device must be healthy. 1895 */ 1896 if (!vdev_readable(vd)) 1897 return (SET_ERROR(EIO)); 1898 1899 /* 1900 * All vdevs in normal class must have the same ashift. 1901 */ 1902 if (spa->spa_max_ashift != spa->spa_min_ashift) { 1903 return (SET_ERROR(EINVAL)); 1904 } 1905 1906 /* 1907 * All vdevs in normal class must have the same ashift 1908 * and not be raidz. 1909 */ 1910 vdev_t *rvd = spa->spa_root_vdev; 1911 int num_indirect = 0; 1912 for (uint64_t id = 0; id < rvd->vdev_children; id++) { 1913 vdev_t *cvd = rvd->vdev_child[id]; 1914 if (cvd->vdev_ashift != 0 && !cvd->vdev_islog) 1915 ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift); 1916 if (cvd->vdev_ops == &vdev_indirect_ops) 1917 num_indirect++; 1918 if (!vdev_is_concrete(cvd)) 1919 continue; 1920 if (cvd->vdev_ops == &vdev_raidz_ops) 1921 return (SET_ERROR(EINVAL)); 1922 /* 1923 * Need the mirror to be mirror of leaf vdevs only 1924 */ 1925 if (cvd->vdev_ops == &vdev_mirror_ops) { 1926 for (uint64_t cid = 0; 1927 cid < cvd->vdev_children; cid++) { 1928 vdev_t *tmp = cvd->vdev_child[cid]; 1929 if (!tmp->vdev_ops->vdev_op_leaf) 1930 return (SET_ERROR(EINVAL)); 1931 } 1932 } 1933 } 1934 1935 return (0); 1936 } 1937 1938 /* 1939 * Initiate removal of a top-level vdev, reducing the total space in the pool. 1940 * The config lock is held for the specified TXG. Once initiated, 1941 * evacuation of all allocated space (copying it to other vdevs) happens 1942 * in the background (see spa_vdev_remove_thread()), and can be canceled 1943 * (see spa_vdev_remove_cancel()). If successful, the vdev will 1944 * be transformed to an indirect vdev (see spa_vdev_remove_complete()). 1945 */ 1946 static int 1947 spa_vdev_remove_top(vdev_t *vd, uint64_t *txg) 1948 { 1949 spa_t *spa = vd->vdev_spa; 1950 int error; 1951 1952 /* 1953 * Check for errors up-front, so that we don't waste time 1954 * passivating the metaslab group and clearing the ZIL if there 1955 * are errors. 1956 */ 1957 error = spa_vdev_remove_top_check(vd); 1958 if (error != 0) 1959 return (error); 1960 1961 /* 1962 * Stop allocating from this vdev. Note that we must check 1963 * that this is not the only device in the pool before 1964 * passivating, otherwise we will not be able to make 1965 * progress because we can't allocate from any vdevs. 1966 * The above check for sufficient free space serves this 1967 * purpose. 1968 */ 1969 metaslab_group_t *mg = vd->vdev_mg; 1970 metaslab_group_passivate(mg); 1971 1972 /* 1973 * Wait for the youngest allocations and frees to sync, 1974 * and then wait for the deferral of those frees to finish. 1975 */ 1976 spa_vdev_config_exit(spa, NULL, 1977 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 1978 1979 /* 1980 * We must ensure that no "stubby" log blocks are allocated 1981 * on the device to be removed. These blocks could be 1982 * written at any time, including while we are in the middle 1983 * of copying them. 1984 */ 1985 error = spa_reset_logs(spa); 1986 1987 /* 1988 * We stop any initializing and TRIM that is currently in progress 1989 * but leave the state as "active". This will allow the process to 1990 * resume if the removal is canceled sometime later. 1991 */ 1992 vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE); 1993 vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE); 1994 vdev_autotrim_stop_wait(vd); 1995 1996 *txg = spa_vdev_config_enter(spa); 1997 1998 /* 1999 * Things might have changed while the config lock was dropped 2000 * (e.g. space usage). Check for errors again. 2001 */ 2002 if (error == 0) 2003 error = spa_vdev_remove_top_check(vd); 2004 2005 if (error != 0) { 2006 metaslab_group_activate(mg); 2007 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART); 2008 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART); 2009 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART); 2010 return (error); 2011 } 2012 2013 vd->vdev_removing = B_TRUE; 2014 2015 vdev_dirty_leaves(vd, VDD_DTL, *txg); 2016 vdev_config_dirty(vd); 2017 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg); 2018 dsl_sync_task_nowait(spa->spa_dsl_pool, 2019 vdev_remove_initiate_sync, 2020 (void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx); 2021 dmu_tx_commit(tx); 2022 2023 return (0); 2024 } 2025 2026 /* 2027 * Remove a device from the pool. 2028 * 2029 * Removing a device from the vdev namespace requires several steps 2030 * and can take a significant amount of time. As a result we use 2031 * the spa_vdev_config_[enter/exit] functions which allow us to 2032 * grab and release the spa_config_lock while still holding the namespace 2033 * lock. During each step the configuration is synced out. 2034 */ 2035 int 2036 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 2037 { 2038 vdev_t *vd; 2039 nvlist_t **spares, **l2cache, *nv; 2040 uint64_t txg = 0; 2041 uint_t nspares, nl2cache; 2042 int error = 0; 2043 boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 2044 sysevent_t *ev = NULL; 2045 2046 ASSERT(spa_writeable(spa)); 2047 2048 if (!locked) 2049 txg = spa_vdev_enter(spa); 2050 2051 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2052 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 2053 error = (spa_has_checkpoint(spa)) ? 2054 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 2055 2056 if (!locked) 2057 return (spa_vdev_exit(spa, NULL, txg, error)); 2058 2059 return (error); 2060 } 2061 2062 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 2063 2064 if (spa->spa_spares.sav_vdevs != NULL && 2065 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 2066 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 2067 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 2068 /* 2069 * Only remove the hot spare if it's not currently in use 2070 * in this pool. 2071 */ 2072 if (vd == NULL || unspare) { 2073 char *nvstr = fnvlist_lookup_string(nv, 2074 ZPOOL_CONFIG_PATH); 2075 spa_history_log_internal(spa, "vdev remove", NULL, 2076 "%s vdev (%s) %s", spa_name(spa), 2077 VDEV_TYPE_SPARE, nvstr); 2078 if (vd == NULL) 2079 vd = spa_lookup_by_guid(spa, guid, B_TRUE); 2080 ev = spa_event_create(spa, vd, NULL, 2081 ESC_ZFS_VDEV_REMOVE_AUX); 2082 spa_vdev_remove_aux(spa->spa_spares.sav_config, 2083 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 2084 spa_load_spares(spa); 2085 spa->spa_spares.sav_sync = B_TRUE; 2086 } else { 2087 error = SET_ERROR(EBUSY); 2088 } 2089 } else if (spa->spa_l2cache.sav_vdevs != NULL && 2090 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 2091 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 2092 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 2093 char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH); 2094 spa_history_log_internal(spa, "vdev remove", NULL, 2095 "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr); 2096 /* 2097 * Cache devices can always be removed. 2098 */ 2099 vd = spa_lookup_by_guid(spa, guid, B_TRUE); 2100 ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX); 2101 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 2102 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 2103 spa_load_l2cache(spa); 2104 spa->spa_l2cache.sav_sync = B_TRUE; 2105 } else if (vd != NULL && vd->vdev_islog) { 2106 ASSERT(!locked); 2107 error = spa_vdev_remove_log(vd, &txg); 2108 } else if (vd != NULL) { 2109 ASSERT(!locked); 2110 error = spa_vdev_remove_top(vd, &txg); 2111 } else { 2112 /* 2113 * There is no vdev of any kind with the specified guid. 2114 */ 2115 error = SET_ERROR(ENOENT); 2116 } 2117 2118 if (!locked) 2119 error = spa_vdev_exit(spa, NULL, txg, error); 2120 2121 if (ev != NULL) { 2122 if (error != 0) { 2123 spa_event_discard(ev); 2124 } else { 2125 spa_event_post(ev); 2126 } 2127 } 2128 2129 return (error); 2130 } 2131 2132 int 2133 spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs) 2134 { 2135 prs->prs_state = spa->spa_removing_phys.sr_state; 2136 2137 if (prs->prs_state == DSS_NONE) 2138 return (SET_ERROR(ENOENT)); 2139 2140 prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev; 2141 prs->prs_start_time = spa->spa_removing_phys.sr_start_time; 2142 prs->prs_end_time = spa->spa_removing_phys.sr_end_time; 2143 prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy; 2144 prs->prs_copied = spa->spa_removing_phys.sr_copied; 2145 2146 if (spa->spa_vdev_removal != NULL) { 2147 for (int i = 0; i < TXG_SIZE; i++) { 2148 prs->prs_copied += 2149 spa->spa_vdev_removal->svr_bytes_done[i]; 2150 } 2151 } 2152 2153 prs->prs_mapping_memory = 0; 2154 uint64_t indirect_vdev_id = 2155 spa->spa_removing_phys.sr_prev_indirect_vdev; 2156 while (indirect_vdev_id != -1) { 2157 vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id]; 2158 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 2159 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 2160 2161 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2162 prs->prs_mapping_memory += vdev_indirect_mapping_size(vim); 2163 indirect_vdev_id = vic->vic_prev_indirect_vdev; 2164 } 2165 2166 return (0); 2167 } 2168