/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>

/*
 * This file contains the necessary logic to remove vdevs from a
 * storage pool.  Currently, the only devices that can be removed
 * are log, cache, and spare devices; and top level vdevs from a pool
 * w/o raidz.  (Note that members of a mirror can also be removed
 * by the detach operation.)
 *
 * Log vdevs are removed by evacuating them and then turning the vdev
 * into a hole vdev while holding spa config locks.
 *
 * Top level vdevs are removed and converted into an indirect vdev via
 * a multi-step process:
 *
 *  - Disable allocations from this device (spa_vdev_remove_top).
 *
 *  - From a new thread (spa_vdev_remove_thread), copy data from
 *    the removing vdev to a different vdev.  The copy happens in open
 *    context (spa_vdev_copy_impl) and issues a sync task
 *    (vdev_mapping_sync) so the sync thread can update the partial
 *    indirect mappings in core and on disk.
 *
 *  - If a free happens during a removal, it is freed from the
 *    removing vdev, and if it has already been copied, from the new
 *    location as well (free_from_removing_vdev).
 *
 *  - After the removal is completed, the copy thread converts the vdev
 *    into an indirect vdev (vdev_remove_complete) before instructing
 *    the sync thread to destroy the space maps and finish the removal
 *    (spa_finish_removal).
 */
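
/*
 * Bookkeeping used to throttle the copy done by the removal thread:
 * vca_outstanding_bytes counts copy writes that have been issued but
 * have not yet completed.  spa_vdev_copy_segment_write_done() decrements
 * it and signals vca_cv, so the issuing thread can wait for in-flight
 * copy i/o to drain (bounded by zfs_remove_max_copy_bytes below).
 */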
typedef struct vdev_copy_arg {
	metaslab_t	*vca_msp;
	uint64_t	vca_outstanding_bytes;
	kcondvar_t	vca_cv;
	kmutex_t	vca_lock;
} vdev_copy_arg_t;

/*
 * The maximum amount of memory we can use for outstanding i/o while
 * doing a device removal.  This determines how much i/o we can have
 * in flight concurrently.
 */
int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;

/*
 * The largest contiguous segment that we will attempt to allocate when
 * removing a device.  This can be no larger than SPA_MAXBLOCKSIZE.  If
 * there is a performance problem with attempting to allocate large blocks,
 * consider decreasing this.
 *
 * Note: we will issue I/Os of up to this size.  The mpt driver does not
 * respond well to I/Os larger than 1MB, so we set this to 1MB.  (When
 * mpt processes an I/O larger than 1MB, it needs to do an allocation of
 * 2 physically contiguous pages; if this allocation fails, mpt will drop
 * the I/O and hang the device.)
 */
int zfs_remove_max_segment = 1024 * 1024;
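
/*
 * Like the other tunables in this file, this is an ordinary global and
 * can typically be adjusted without rebuilding, e.g. on illumos via an
 * /etc/system entry (illustrative value, halving the default):
 *
 *	set zfs:zfs_remove_max_segment = 0x80000
 */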

/*
 * Allow a remap segment to span free chunks of at most this size.  The main
 * impact of a larger span is that we will read and write larger, more
 * contiguous chunks, with more "unnecessary" data -- trading off bandwidth
 * for iops.  The value here was chosen to align with
 * zfs_vdev_read_gap_limit, which is a similar concept when doing regular
 * reads (but there's no reason it has to be the same).
 *
 * Additionally, a higher span will have the following relatively minor
 * effects:
 *  - the mapping will be smaller, since one entry can cover more allocated
 *    segments
 *  - more of the fragmentation in the removing device will be preserved
 *  - we'll do larger allocations, which may fail and fall back on smaller
 *    allocations
 */
int vdev_removal_max_span = 32 * 1024;

/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a removal.
 */
int zfs_removal_suspend_progress = 0;
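
/*
 * When nonzero, the removal thread pauses (see the check in
 * spa_vdev_remove_thread()) until this is cleared again, allowing a
 * test to take some action at a known point mid-removal.
 */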

#define	VDEV_REMOVAL_ZAP_OBJS	"lzap"

static void spa_vdev_remove_thread(void *arg);
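
/*
 * Persist the current removal state (spa_removing_phys) in the
 * DMU_POOL_REMOVING entry of the MOS pool directory.
 */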
static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
	VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys, tx));
}

static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
	for (int i = 0; i < count; i++) {
		uint64_t guid =
		    fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);

		if (guid == target_guid)
			return (nvpp[i]);
	}

	return (NULL);
}
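
/*
 * Rewrite the nvlist array "name" in "config" (e.g. the set of cache or
 * spare devices) to exclude dev_to_remove.
 */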
static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
    nvlist_t *dev_to_remove)
{
	nvlist_t **newdev = NULL;

	if (count > 1)
		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);

	for (int i = 0, j = 0; i < count; i++) {
		if (dev[i] == dev_to_remove)
			continue;
		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
	}

	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);

	for (int i = 0; i < count - 1; i++)
		nvlist_free(newdev[i]);

	if (count > 1)
		kmem_free(newdev, (count - 1) * sizeof (void *));
}
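
/*
 * Allocate and initialize the in-core state for a removal: the tree of
 * segments still to be copied (svr_allocd_segs), and the per-txg trees
 * of segments freed while their copy was in flight (svr_frees) and of
 * new mapping entries not yet synced (svr_new_segments).
 */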
static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
	spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
	mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
	svr->svr_allocd_segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
	svr->svr_vdev_id = vd->vdev_id;

	for (int i = 0; i < TXG_SIZE; i++) {
		svr->svr_frees[i] = range_tree_create(NULL, RANGE_SEG64, NULL,
		    0, 0);
		list_create(&svr->svr_new_segments[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	return (svr);
}
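
/*
 * Tear down the in-core state created by spa_vdev_removal_create().
 * All per-txg bookkeeping is expected to be quiesced by this point
 * (note the ASSERT0s below).
 */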
void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(svr->svr_bytes_done[i]);
		ASSERT0(svr->svr_max_offset_to_sync[i]);
		range_tree_destroy(svr->svr_frees[i]);
		list_destroy(&svr->svr_new_segments[i]);
	}

	range_tree_destroy(svr->svr_allocd_segs);
	mutex_destroy(&svr->svr_lock);
	cv_destroy(&svr->svr_cv);
	kmem_free(svr, sizeof (*svr));
}

/*
 * This is called as a synctask in the txg in which we will mark this vdev
 * as removing (in the config stored in the MOS).
 *
 * It begins the evacuation of a toplevel vdev by:
 * - initializing the spa_removing_phys which tracks this removal
 * - computing the amount of space to remove for accounting purposes
 * - dirtying all dbufs in the spa_config_object
 * - creating the spa_vdev_removal
 * - starting the spa_vdev_remove_thread
 */
static void
vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
	spa_vdev_removal_t *svr = NULL;
	uint64_t txg = dmu_tx_get_txg(tx);

	ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
	svr = spa_vdev_removal_create(vd);

	ASSERT(vd->vdev_removing);
	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

	spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		/*
		 * By activating the OBSOLETE_COUNTS feature, we prevent
		 * the pool from being downgraded and ensure that the
		 * refcounts are precise.
		 */
		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		uint64_t one = 1;
		VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
		    &one, tx));
		ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0);
	}

	vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
	vd->vdev_indirect_mapping =
	    vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
	vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
	vd->vdev_indirect_births =
	    vdev_indirect_births_open(mos, vic->vic_births_object);
	spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
	spa->spa_removing_phys.sr_start_time = gethrestime_sec();
	spa->spa_removing_phys.sr_end_time = 0;
	spa->spa_removing_phys.sr_state = DSS_SCANNING;
	spa->spa_removing_phys.sr_to_copy = 0;
	spa->spa_removing_phys.sr_copied = 0;

	/*
	 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
	 * there may be space in the defer tree, which is free, but still
	 * counted in vs_alloc.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *ms = vd->vdev_ms[i];
		if (ms->ms_sm == NULL)
			continue;

		spa->spa_removing_phys.sr_to_copy +=
		    metaslab_allocated_space(ms);

		/*
		 * Space which we are freeing this txg does not need to
		 * be copied.
		 */
		spa->spa_removing_phys.sr_to_copy -=
		    range_tree_space(ms->ms_freeing);

		ASSERT0(range_tree_space(ms->ms_freed));
		for (int t = 0; t < TXG_SIZE; t++)
			ASSERT0(range_tree_space(ms->ms_allocating[t]));
	}

	/*
	 * Sync tasks are called before metaslab_sync(), so there should
	 * be no already-synced metaslabs in the TXG_CLEAN list.
	 */
	ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);

	spa_sync_removing_state(spa, tx);

	/*
	 * All blocks that we need in order to read the most recent mapping
	 * must be stored on concrete vdevs.  Therefore, we must dirty
	 * anything that is read before spa_remove_init().  Specifically, the
	 * spa_config_object.  (Note that although we already modified the
	 * spa_config_object in spa_sync_removing_state, that may not have
	 * modified all blocks of the object.)
	 */
	dmu_object_info_t doi;
	VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
	for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
		dmu_buf_t *dbuf;
		VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
		    offset, FTAG, &dbuf, 0));
		dmu_buf_will_dirty(dbuf, tx);
		offset += dbuf->db_size;
		dmu_buf_rele(dbuf, FTAG);
	}

	/*
	 * Now that we've allocated the im_object, dirty the vdev to ensure
	 * that the object gets written to the config on disk.
	 */
	vdev_config_dirty(vd);

	zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
	    "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
	    vic->vic_mapping_object);

	spa_history_log_internal(spa, "vdev remove started", tx,
	    "%s vdev %llu %s", spa_name(spa), vd->vdev_id,
	    (vd->vdev_path != NULL) ? vd->vdev_path : "-");
	/*
	 * Setting spa_vdev_removal causes subsequent frees to call
	 * free_from_removing_vdev().  Note that we don't need any locking
	 * because we are the sync thread, and metaslab_free_impl() is only
	 * called from syncing context (potentially from a zio taskq thread,
	 * but in any case only when there are outstanding free i/os, which
	 * there are not).
	 */
	ASSERT3P(spa->spa_vdev_removal, ==, NULL);
	spa->spa_vdev_removal = svr;
	svr->svr_thread = thread_create(NULL, 0,
	    spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
}

/*
 * When we are opening a pool, we must read the mapping for each
 * indirect vdev in order from most recently removed to least
 * recently removed.  We do this because the blocks for the mapping
 * of older indirect vdevs may be stored on more recently removed vdevs.
 * In order to read each indirect mapping object, we must have
 * initialized all more recently removed vdevs.
 */
int
spa_remove_init(spa_t *spa)
{
	int error;

	error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys);

	if (error == ENOENT) {
		spa->spa_removing_phys.sr_state = DSS_NONE;
		spa->spa_removing_phys.sr_removing_vdev = -1;
		spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
		spa->spa_indirect_vdevs_loaded = B_TRUE;
		return (0);
	} else if (error != 0) {
		return (error);
	}

	if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
		/*
		 * We are currently removing a vdev.  Create and
		 * initialize a spa_vdev_removal_t from the bonus
		 * buffer of the removing vdev's vdev_im_object, and
		 * initialize its partial mapping.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
		vdev_t *vd = vdev_lookup_top(spa,
		    spa->spa_removing_phys.sr_removing_vdev);

		if (vd == NULL) {
			spa_config_exit(spa, SCL_STATE, FTAG);
			return (EINVAL);
		}

		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT(vdev_is_concrete(vd));
		spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
		ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
		ASSERT(vd->vdev_removing);

		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);
		spa_config_exit(spa, SCL_STATE, FTAG);

		spa->spa_vdev_removal = svr;
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != UINT64_MAX) {
		vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);

		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	/*
	 * Now that we've loaded all the indirect mappings, we can allow
	 * reads from other blocks (e.g. via predictive prefetch).
	 */
	spa->spa_indirect_vdevs_loaded = B_TRUE;
	return (0);
}

void
spa_restart_removal(spa_t *spa)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	if (svr == NULL)
		return;

	/*
	 * In general when this function is called there is no
	 * removal thread running.  The only scenario where this
	 * is not true is during spa_import(), where this function
	 * is called twice [once from spa_import_impl() and once
	 * from spa_async_resume()].  Thus, in the scenario where we
	 * import a pool that has an ongoing removal we don't
	 * want to spawn a second thread.
	 */
	if (svr->svr_thread != NULL)
		return;

	if (!spa_writeable(spa))
		return;

	zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id);
	svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
	    0, &p0, TS_RUN, minclsyspri);
}

/*
 * Process freeing from a device which is in the middle of being removed.
 * We must handle this carefully so that we attempt to copy freed data,
 * and we correctly free already-copied data.
 */
void
free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t txg = spa_syncing_txg(spa);
	uint64_t max_offset_yet = 0;

	ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
	    vdev_indirect_mapping_object(vim));
	ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);

	mutex_enter(&svr->svr_lock);

	/*
	 * Remove the segment from the removing vdev's spacemap.  This
	 * ensures that we will not attempt to copy this space (if the
	 * removal thread has not yet visited it), and also ensures
	 * that we know what is actually allocated on the new vdevs
	 * (needed if we cancel the removal).
	 *
	 * Note: we must do the metaslab_free_concrete() with the svr_lock
	 * held, so that the remove_thread can not load this metaslab and then
	 * visit this offset between the time that we metaslab_free_concrete()
	 * and when we check to see if it has been visited.
	 *
	 * Note: The checkpoint flag is set to false as having/taking
	 * a checkpoint and removing a device can't happen at the same
	 * time.
	 */
	ASSERT(!spa_has_checkpoint(spa));
	metaslab_free_concrete(vd, offset, size, B_FALSE);

	uint64_t synced_size = 0;
	uint64_t synced_offset = 0;
	uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
	if (offset < max_offset_synced) {
		/*
		 * The mapping for this offset is already on disk.
		 * Free from the new location.
		 *
		 * Note that we use svr_max_synced_offset because it is
		 * updated atomically with respect to the in-core mapping.
		 * By contrast, vim_max_offset is not.
		 *
		 * This block may be split between a synced entry and an
		 * in-flight or unvisited entry.  Only process the synced
		 * portion of it here.
		 */
		synced_size = MIN(size, max_offset_synced - offset);
		synced_offset = offset;

		ASSERT3U(max_offset_yet, <=, max_offset_synced);
		max_offset_yet = max_offset_synced;

		DTRACE_PROBE3(remove__free__synced,
		    spa_t *, spa,
		    uint64_t, offset,
		    uint64_t, synced_size);

		size -= synced_size;
		offset += synced_size;
	}

	/*
	 * Look at all in-flight txgs starting from the currently syncing one
	 * and see if a section of this free is being copied.  By starting from
	 * this txg and iterating forward, we might find that this region
	 * was copied in two different txgs and handle it appropriately.
	 */
	for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
		int txgoff = (txg + i) & TXG_MASK;
		if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
			/*
			 * The mapping for this offset is in flight, and
			 * will be synced in txg+i.
			 */
			uint64_t inflight_size = MIN(size,
			    svr->svr_max_offset_to_sync[txgoff] - offset);

			DTRACE_PROBE4(remove__free__inflight,
			    spa_t *, spa,
			    uint64_t, offset,
			    uint64_t, inflight_size,
			    uint64_t, txg + i);

			/*
			 * We copy data in order of increasing offset.
			 * Therefore the max_offset_to_sync[] must increase
			 * (or be zero, indicating that nothing is being
			 * copied in that txg).
			 */
			if (svr->svr_max_offset_to_sync[txgoff] != 0) {
				ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
				    >=, max_offset_yet);
				max_offset_yet =
				    svr->svr_max_offset_to_sync[txgoff];
			}

			/*
			 * We've already committed to copying this segment:
			 * we have allocated space elsewhere in the pool for
			 * it and have an IO outstanding to copy the data.  We
			 * cannot free the space before the copy has
			 * completed, or else the copy IO might overwrite any
			 * new data.  To free that space, we record the
			 * segment in the appropriate svr_frees tree and free
			 * the mapped space later, in the txg where we have
			 * completed the copy and synced the mapping (see
			 * vdev_mapping_sync).
			 */
			range_tree_add(svr->svr_frees[txgoff],
			    offset, inflight_size);
			size -= inflight_size;
			offset += inflight_size;

			/*
			 * This space is already accounted for as being
			 * done, because it is being copied in txg+i.
			 * However, if i!=0, then it is being copied in
			 * a future txg.  If we crash after this txg
			 * syncs but before txg+i syncs, then the space
			 * will be free.  Therefore we must account
			 * for the space being done in *this* txg
			 * (when it is freed) rather than the future txg
			 * (when it will be copied).
			 */
			ASSERT3U(svr->svr_bytes_done[txgoff], >=,
			    inflight_size);
			svr->svr_bytes_done[txgoff] -= inflight_size;
			svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
		}
	}
	ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);

	if (size > 0) {
		/*
		 * The copy thread has not yet visited this offset.  Ensure
		 * that it doesn't.
		 */

		DTRACE_PROBE3(remove__free__unvisited,
		    spa_t *, spa,
		    uint64_t, offset,
		    uint64_t, size);

		if (svr->svr_allocd_segs != NULL)
			range_tree_clear(svr->svr_allocd_segs, offset, size);

		/*
		 * Since we now do not need to copy this data, for
		 * accounting purposes we have done our job and can count
		 * it as completed.
		 */
		svr->svr_bytes_done[txg & TXG_MASK] += size;
	}
	mutex_exit(&svr->svr_lock);

	/*
	 * Now that we have dropped svr_lock, process the synced portion
	 * of this free.
	 */
	if (synced_size > 0) {
		vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);

		/*
		 * Note: this can only be called from syncing context,
		 * and the vdev_indirect_mapping is only changed from the
		 * sync thread, so we don't need svr_lock while doing
		 * metaslab_free_impl_cb.
		 */
		boolean_t checkpoint = B_FALSE;
		vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
		    metaslab_free_impl_cb, &checkpoint);
	}
}

/*
 * Stop an active removal and update the spa_removing_phys.
 */
static void
spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));

	/* Ensure the removal thread has completed before we free the svr. */
	spa_vdev_remove_suspend(spa);

	ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);

	if (state == DSS_FINISHED) {
		spa_removing_phys_t *srp = &spa->spa_removing_phys;
		vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		if (srp->sr_prev_indirect_vdev != UINT64_MAX) {
			vdev_t *pvd = vdev_lookup_top(spa,
			    srp->sr_prev_indirect_vdev);
			ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
		}

		vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
		srp->sr_prev_indirect_vdev = vd->vdev_id;
	}
	spa->spa_removing_phys.sr_state = state;
	spa->spa_removing_phys.sr_end_time = gethrestime_sec();

	spa->spa_vdev_removal = NULL;
	spa_vdev_removal_destroy(svr);

	spa_sync_removing_state(spa, tx);

	vdev_config_dirty(spa->spa_root_vdev);
}
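
/*
 * Callback for range_tree_vacate() in vdev_mapping_sync(): now that the
 * mapping for this segment is synced, mark the original location
 * obsolete and free the copy from its new location(s).
 */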
static void
free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
	vdev_t *vd = arg;
	vdev_indirect_mark_obsolete(vd, offset, size);
	boolean_t checkpoint = B_FALSE;
	vdev_indirect_ops.vdev_op_remap(vd, offset, size,
	    metaslab_free_impl_cb, &checkpoint);
}

/*
 * On behalf of the removal thread, syncs an incremental bit more of
 * the indirect mapping to disk and updates the in-memory mapping.
 * Called as a sync task in every txg that the removal thread makes progress.
 */
static void
vdev_mapping_sync(void *arg, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	uint64_t txg = dmu_tx_get_txg(tx);
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

	ASSERT(vic->vic_mapping_object != 0);
	ASSERT3U(txg, ==, spa_syncing_txg(spa));

	vdev_indirect_mapping_add_entries(vim,
	    &svr->svr_new_segments[txg & TXG_MASK], tx);
	vdev_indirect_births_add_entry(vd->vdev_indirect_births,
	    vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);

	/*
	 * Free the copied data for anything that was freed while the
	 * mapping entries were in flight.
	 */
	mutex_enter(&svr->svr_lock);
	range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
	    free_mapped_segment_cb, vd);
	ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
	    vdev_indirect_mapping_max_offset(vim));
	svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
	mutex_exit(&svr->svr_lock);

	spa_sync_removing_state(spa, tx);
}
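
/*
 * Argument to spa_vdev_copy_segment_done(): records where a segment was
 * copied to, along with the ranges within that allocation (free chunks
 * that the segment was allowed to span) which must be unallocated once
 * all of the copy i/os complete.
 */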
typedef struct vdev_copy_segment_arg {
	spa_t *vcsa_spa;
	dva_t *vcsa_dest_dva;
	uint64_t vcsa_txg;
	range_tree_t *vcsa_obsolete_segs;
} vdev_copy_segment_arg_t;
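
/*
 * Free a (start, size) range relative to vcsa_dest_dva, the new
 * location of a copied segment.  We do this by constructing a minimal
 * block pointer whose single DVA covers the range and passing it to
 * zio_free().
 */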
static void
unalloc_seg(void *arg, uint64_t start, uint64_t size)
{
	vdev_copy_segment_arg_t *vcsa = arg;
	spa_t *spa = vcsa->vcsa_spa;
	blkptr_t bp = { 0 };

	BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
	BP_SET_LSIZE(&bp, size);
	BP_SET_PSIZE(&bp, size);
	BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(&bp, DMU_OT_NONE);
	BP_SET_LEVEL(&bp, 0);
	BP_SET_DEDUP(&bp, 0);
	BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);

	DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
	DVA_SET_OFFSET(&bp.blk_dva[0],
	    DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
	DVA_SET_ASIZE(&bp.blk_dva[0], size);

	zio_free(spa, vcsa->vcsa_txg, &bp);
}

/*
 * All reads and writes associated with a call to spa_vdev_copy_segment()
 * are done.
 */
static void
spa_vdev_copy_segment_done(zio_t *zio)
{
	vdev_copy_segment_arg_t *vcsa = zio->io_private;

	range_tree_vacate(vcsa->vcsa_obsolete_segs,
	    unalloc_seg, vcsa);
	range_tree_destroy(vcsa->vcsa_obsolete_segs);
	kmem_free(vcsa, sizeof (*vcsa));

	spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
}

/*
 * The write of the new location is done.
 */
static void
spa_vdev_copy_segment_write_done(zio_t *zio)
{
	vdev_copy_arg_t *vca = zio->io_private;

	abd_free(zio->io_abd);

	mutex_enter(&vca->vca_lock);
	vca->vca_outstanding_bytes -= zio->io_size;
	cv_signal(&vca->vca_cv);
	mutex_exit(&vca->vca_lock);
}

/*
 * The read of the old location is done.  The parent zio is the write to
 * the new location.  Allow it to start.
 */
static void
spa_vdev_copy_segment_read_done(zio_t *zio)
{
	zio_nowait(zio_unique_parent(zio));
}

/*
 * If the old and new vdevs are mirrors, we will read both sides of the old
 * mirror, and write each copy to the corresponding side of the new mirror.
 * If the old and new vdevs have a different number of children, we will do
 * this as best as possible.  Since we aren't verifying checksums, this
 * ensures that as long as there's a good copy of the data, we'll have a
 * good copy after the removal, even if there's silent damage to one side
 * of the mirror.  If we're removing a mirror that has some silent damage,
 * we'll have exactly the same damage in the new location (assuming that
 * the new location is also a mirror).
 *
 * We accomplish this by creating a tree of zio_t's, with as many writes as
 * there are "children" of the new vdev (a non-redundant vdev counts as one
 * child, a 2-way mirror has 2 children, etc).  Each write has an associated
 * read from a child of the old vdev.  Typically there will be the same
 * number of children of the old and new vdevs.  However, if there are more
 * children of the new vdev, some child(ren) of the old vdev will be issued
 * multiple reads.  If there are more children of the old vdev, some copies
 * will be dropped.
 *
 * For example, the tree of zio_t's for a 2-way mirror is:
 *
 *                            null
 *                           /    \
 * write(new vdev, child 0)      write(new vdev, child 1)
 *   |                             |
 * read(old vdev, child 0)       read(old vdev, child 1)
 *
 * Child zio's complete before their parents complete.  However, zio's
 * created with zio_vdev_child_io() may be issued before their children
 * complete.  In this case we need to make sure that the children (reads)
 * complete before the parents (writes) are *issued*.  We do this by not
 * calling zio_nowait() on each write until its corresponding read has
 * completed.
 *
 * The spa_config_lock must be held while zio's created by
 * zio_vdev_child_io() are in progress, to ensure that the vdev tree does
 * not change (e.g. due to a concurrent "zpool attach/detach").  The "null"
 * zio is needed to release the spa_config_lock after all the reads and
 * writes complete.  (Note that we can't grab the config lock for each read,
 * because it is not reentrant - we could deadlock with a thread waiting
 * for a write lock.)
 */
static void
spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
    vdev_t *source_vd, uint64_t source_offset,
    vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size)
{
	ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0);

	mutex_enter(&vca->vca_lock);
	vca->vca_outstanding_bytes += size;
	mutex_exit(&vca->vca_lock);

	abd_t *abd = abd_alloc_for_io(size, B_FALSE);

	vdev_t *source_child_vd;
	if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) {
		/*
		 * Source and dest are both mirrors.  Copy from the same
		 * child id as we are copying to (wrapping around if there
		 * are more dest children than source children).
		 */
		source_child_vd =
		    source_vd->vdev_child[dest_id % source_vd->vdev_children];
	} else {
		source_child_vd = source_vd;
	}

	zio_t *write_zio = zio_vdev_child_io(nzio, NULL,
	    dest_child_vd, dest_offset, abd, size,
	    ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
	    ZIO_FLAG_CANFAIL,
	    spa_vdev_copy_segment_write_done, vca);

	zio_nowait(zio_vdev_child_io(write_zio, NULL,
	    source_child_vd, source_offset, abd, size,
	    ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
	    ZIO_FLAG_CANFAIL,
	    spa_vdev_copy_segment_read_done, vca));
}
8983a4b1be9SMatthew Ahrens
8993a4b1be9SMatthew Ahrens /*
9003a4b1be9SMatthew Ahrens * Allocate a new location for this segment, and create the zio_t's to
9013a4b1be9SMatthew Ahrens * read from the old location and write to the new location.
9023a4b1be9SMatthew Ahrens */
9035cabbc6bSPrashanth Sreenivasa static int
904cfd63e1bSMatthew Ahrens spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
905cfd63e1bSMatthew Ahrens uint64_t maxalloc, uint64_t txg,
9065cabbc6bSPrashanth Sreenivasa vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
9075cabbc6bSPrashanth Sreenivasa {
9085cabbc6bSPrashanth Sreenivasa metaslab_group_t *mg = vd->vdev_mg;
9095cabbc6bSPrashanth Sreenivasa spa_t *spa = vd->vdev_spa;
9105cabbc6bSPrashanth Sreenivasa spa_vdev_removal_t *svr = spa->spa_vdev_removal;
9115cabbc6bSPrashanth Sreenivasa vdev_indirect_mapping_entry_t *entry;
9125cabbc6bSPrashanth Sreenivasa dva_t dst = { 0 };
913cfd63e1bSMatthew Ahrens uint64_t start = range_tree_min(segs);
9145cabbc6bSPrashanth Sreenivasa
915cfd63e1bSMatthew Ahrens ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);
916cfd63e1bSMatthew Ahrens
917cfd63e1bSMatthew Ahrens uint64_t size = range_tree_span(segs);
918cfd63e1bSMatthew Ahrens if (range_tree_span(segs) > maxalloc) {
919cfd63e1bSMatthew Ahrens /*
920cfd63e1bSMatthew Ahrens * We can't allocate all the segments. Prefer to end
921cfd63e1bSMatthew Ahrens * the allocation at the end of a segment, thus avoiding
922cfd63e1bSMatthew Ahrens * additional split blocks.
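		 * For example (illustrative numbers): if "segs" contains
		 * [0M, 3M) and [4M, 6M) and maxalloc is 5M, we allocate
		 * only 3M, ending at the first segment's end, instead of
		 * splitting the second segment.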
923cfd63e1bSMatthew Ahrens */
924*4d7988d6SPaul Dagnelie range_seg_max_t search;
925*4d7988d6SPaul Dagnelie zfs_btree_index_t where;
926*4d7988d6SPaul Dagnelie rs_set_start(&search, segs, start + maxalloc);
927*4d7988d6SPaul Dagnelie rs_set_end(&search, segs, start + maxalloc);
928*4d7988d6SPaul Dagnelie (void) zfs_btree_find(&segs->rt_root, &search, &where);
929*4d7988d6SPaul Dagnelie range_seg_t *rs = zfs_btree_prev(&segs->rt_root, &where,
930*4d7988d6SPaul Dagnelie &where);
931cfd63e1bSMatthew Ahrens if (rs != NULL) {
932*4d7988d6SPaul Dagnelie size = rs_get_end(rs, segs) - start;
933cfd63e1bSMatthew Ahrens } else {
934cfd63e1bSMatthew Ahrens /*
935cfd63e1bSMatthew Ahrens * There are no segments that end before maxalloc.
936cfd63e1bSMatthew Ahrens * I.e. the first segment is larger than maxalloc,
937cfd63e1bSMatthew Ahrens * so we must split it.
938cfd63e1bSMatthew Ahrens */
939cfd63e1bSMatthew Ahrens size = maxalloc;
940cfd63e1bSMatthew Ahrens }
941cfd63e1bSMatthew Ahrens }
942cfd63e1bSMatthew Ahrens ASSERT3U(size, <=, maxalloc);
9435cabbc6bSPrashanth Sreenivasa
944f78cdc34SPaul Dagnelie /*
945663207adSDon Brady 	 * An allocation class might not have any remaining vdevs or space.
946f78cdc34SPaul Dagnelie */
947663207adSDon Brady metaslab_class_t *mc = mg->mg_class;
948663207adSDon Brady if (mc != spa_normal_class(spa) && mc->mc_groups <= 1)
949663207adSDon Brady mc = spa_normal_class(spa);
950663207adSDon Brady int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0,
951663207adSDon Brady zal, 0);
952663207adSDon Brady if (error == ENOSPC && mc != spa_normal_class(spa)) {
953663207adSDon Brady error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
954663207adSDon Brady &dst, 0, NULL, txg, 0, zal, 0);
955663207adSDon Brady }
9565cabbc6bSPrashanth Sreenivasa if (error != 0)
9575cabbc6bSPrashanth Sreenivasa return (error);
9585cabbc6bSPrashanth Sreenivasa
959cfd63e1bSMatthew Ahrens /*
960cfd63e1bSMatthew Ahrens * Determine the ranges that are not actually needed. Offsets are
961cfd63e1bSMatthew Ahrens * relative to the start of the range to be copied (i.e. relative to the
962cfd63e1bSMatthew Ahrens * local variable "start").
963cfd63e1bSMatthew Ahrens */
964*4d7988d6SPaul Dagnelie range_tree_t *obsolete_segs = range_tree_create(NULL, RANGE_SEG64, NULL,
965*4d7988d6SPaul Dagnelie 0, 0);
966*4d7988d6SPaul Dagnelie
967*4d7988d6SPaul Dagnelie zfs_btree_index_t where;
968*4d7988d6SPaul Dagnelie range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where);
969*4d7988d6SPaul Dagnelie ASSERT3U(rs_get_start(rs, segs), ==, start);
970*4d7988d6SPaul Dagnelie uint64_t prev_seg_end = rs_get_end(rs, segs);
971*4d7988d6SPaul Dagnelie while ((rs = zfs_btree_next(&segs->rt_root, &where, &where)) != NULL) {
972*4d7988d6SPaul Dagnelie if (rs_get_start(rs, segs) >= start + size) {
973cfd63e1bSMatthew Ahrens break;
974cfd63e1bSMatthew Ahrens } else {
975cfd63e1bSMatthew Ahrens range_tree_add(obsolete_segs,
976cfd63e1bSMatthew Ahrens prev_seg_end - start,
977*4d7988d6SPaul Dagnelie rs_get_start(rs, segs) - prev_seg_end);
978cfd63e1bSMatthew Ahrens }
979*4d7988d6SPaul Dagnelie prev_seg_end = rs_get_end(rs, segs);
980cfd63e1bSMatthew Ahrens }
981cfd63e1bSMatthew Ahrens /* We don't end in the middle of an obsolete range */
982cfd63e1bSMatthew Ahrens ASSERT3U(start + size, <=, prev_seg_end);
983cfd63e1bSMatthew Ahrens
984cfd63e1bSMatthew Ahrens range_tree_clear(segs, start, size);
985cfd63e1bSMatthew Ahrens
9865cabbc6bSPrashanth Sreenivasa /*
9875cabbc6bSPrashanth Sreenivasa * We can't have any padding of the allocated size, otherwise we will
9885cabbc6bSPrashanth Sreenivasa * misunderstand what's allocated, and the size of the mapping.
9895cabbc6bSPrashanth Sreenivasa * The caller ensures this will be true by passing in a size that is
9905cabbc6bSPrashanth Sreenivasa * aligned to the worst (highest) ashift in the pool.
9915cabbc6bSPrashanth Sreenivasa */
9925cabbc6bSPrashanth Sreenivasa ASSERT3U(DVA_GET_ASIZE(&dst), ==, size);
9935cabbc6bSPrashanth Sreenivasa
9945cabbc6bSPrashanth Sreenivasa entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
9955cabbc6bSPrashanth Sreenivasa DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
9965cabbc6bSPrashanth Sreenivasa entry->vime_mapping.vimep_dst = dst;
997cfd63e1bSMatthew Ahrens if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
998cfd63e1bSMatthew Ahrens entry->vime_obsolete_count = range_tree_space(obsolete_segs);
999cfd63e1bSMatthew Ahrens }
1000cfd63e1bSMatthew Ahrens
1001cfd63e1bSMatthew Ahrens vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
1002cfd63e1bSMatthew Ahrens vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
1003cfd63e1bSMatthew Ahrens vcsa->vcsa_obsolete_segs = obsolete_segs;
1004cfd63e1bSMatthew Ahrens vcsa->vcsa_spa = spa;
1005cfd63e1bSMatthew Ahrens vcsa->vcsa_txg = txg;
10065cabbc6bSPrashanth Sreenivasa
10075cabbc6bSPrashanth Sreenivasa /*
10083a4b1be9SMatthew Ahrens * See comment before spa_vdev_copy_one_child().
10095cabbc6bSPrashanth Sreenivasa */
10103a4b1be9SMatthew Ahrens spa_config_enter(spa, SCL_STATE, spa, RW_READER);
10113a4b1be9SMatthew Ahrens zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
1012cfd63e1bSMatthew Ahrens spa_vdev_copy_segment_done, vcsa, 0);
10133a4b1be9SMatthew Ahrens vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
10143a4b1be9SMatthew Ahrens if (dest_vd->vdev_ops == &vdev_mirror_ops) {
10153a4b1be9SMatthew Ahrens for (int i = 0; i < dest_vd->vdev_children; i++) {
10163a4b1be9SMatthew Ahrens vdev_t *child = dest_vd->vdev_child[i];
10173a4b1be9SMatthew Ahrens spa_vdev_copy_one_child(vca, nzio, vd, start,
10183a4b1be9SMatthew Ahrens child, DVA_GET_OFFSET(&dst), i, size);
10193a4b1be9SMatthew Ahrens }
10203a4b1be9SMatthew Ahrens } else {
10213a4b1be9SMatthew Ahrens spa_vdev_copy_one_child(vca, nzio, vd, start,
10223a4b1be9SMatthew Ahrens dest_vd, DVA_GET_OFFSET(&dst), -1, size);
10233a4b1be9SMatthew Ahrens }
10243a4b1be9SMatthew Ahrens zio_nowait(nzio);
10255cabbc6bSPrashanth Sreenivasa
10265cabbc6bSPrashanth Sreenivasa list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
10275cabbc6bSPrashanth Sreenivasa ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
10285cabbc6bSPrashanth Sreenivasa vdev_dirty(vd, 0, NULL, txg);
10295cabbc6bSPrashanth Sreenivasa
10305cabbc6bSPrashanth Sreenivasa return (0);
10315cabbc6bSPrashanth Sreenivasa }
10325cabbc6bSPrashanth Sreenivasa
10335cabbc6bSPrashanth Sreenivasa /*
10345cabbc6bSPrashanth Sreenivasa * Complete the removal of a toplevel vdev. This is called as a
10355cabbc6bSPrashanth Sreenivasa * synctask in the same txg that we will sync out the new config (to the
10365cabbc6bSPrashanth Sreenivasa * MOS object) which indicates that this vdev is indirect.
10375cabbc6bSPrashanth Sreenivasa */
10385cabbc6bSPrashanth Sreenivasa static void
10395cabbc6bSPrashanth Sreenivasa vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
10405cabbc6bSPrashanth Sreenivasa {
10415cabbc6bSPrashanth Sreenivasa spa_vdev_removal_t *svr = arg;
10423a4b1be9SMatthew Ahrens spa_t *spa = dmu_tx_pool(tx)->dp_spa;
10433a4b1be9SMatthew Ahrens vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
10445cabbc6bSPrashanth Sreenivasa
10455cabbc6bSPrashanth Sreenivasa ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
10465cabbc6bSPrashanth Sreenivasa
10475cabbc6bSPrashanth Sreenivasa for (int i = 0; i < TXG_SIZE; i++) {
10485cabbc6bSPrashanth Sreenivasa ASSERT0(svr->svr_bytes_done[i]);
10495cabbc6bSPrashanth Sreenivasa }
10505cabbc6bSPrashanth Sreenivasa
10515cabbc6bSPrashanth Sreenivasa ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
10525cabbc6bSPrashanth Sreenivasa spa->spa_removing_phys.sr_to_copy);
10535cabbc6bSPrashanth Sreenivasa
10545cabbc6bSPrashanth Sreenivasa vdev_destroy_spacemaps(vd, tx);
10555cabbc6bSPrashanth Sreenivasa
10565cabbc6bSPrashanth Sreenivasa /* destroy leaf zaps, if any */
10575cabbc6bSPrashanth Sreenivasa ASSERT3P(svr->svr_zaplist, !=, NULL);
10585cabbc6bSPrashanth Sreenivasa for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
10595cabbc6bSPrashanth Sreenivasa pair != NULL;
10605cabbc6bSPrashanth Sreenivasa pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
10615cabbc6bSPrashanth Sreenivasa vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
10625cabbc6bSPrashanth Sreenivasa }
10635cabbc6bSPrashanth Sreenivasa fnvlist_free(svr->svr_zaplist);
10645cabbc6bSPrashanth Sreenivasa
10655cabbc6bSPrashanth Sreenivasa spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
10665cabbc6bSPrashanth Sreenivasa /* vd->vdev_path is not available here */
10675cabbc6bSPrashanth Sreenivasa spa_history_log_internal(spa, "vdev remove completed", tx,
10685cabbc6bSPrashanth Sreenivasa "%s vdev %llu", spa_name(spa), vd->vdev_id);
10695cabbc6bSPrashanth Sreenivasa }
10705cabbc6bSPrashanth Sreenivasa
10715cabbc6bSPrashanth Sreenivasa static void
10725cabbc6bSPrashanth Sreenivasa vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
10735cabbc6bSPrashanth Sreenivasa {
10745cabbc6bSPrashanth Sreenivasa ASSERT3P(zlist, !=, NULL);
10755cabbc6bSPrashanth Sreenivasa ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
10765cabbc6bSPrashanth Sreenivasa
10775cabbc6bSPrashanth Sreenivasa if (vd->vdev_leaf_zap != 0) {
10785cabbc6bSPrashanth Sreenivasa char zkey[32];
10795cabbc6bSPrashanth Sreenivasa (void) snprintf(zkey, sizeof (zkey), "%s-%"PRIu64,
10805cabbc6bSPrashanth Sreenivasa VDEV_REMOVAL_ZAP_OBJS, vd->vdev_leaf_zap);
10815cabbc6bSPrashanth Sreenivasa fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
10825cabbc6bSPrashanth Sreenivasa }
10835cabbc6bSPrashanth Sreenivasa
10845cabbc6bSPrashanth Sreenivasa for (uint64_t id = 0; id < vd->vdev_children; id++) {
10855cabbc6bSPrashanth Sreenivasa vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
10865cabbc6bSPrashanth Sreenivasa }
10875cabbc6bSPrashanth Sreenivasa }
10885cabbc6bSPrashanth Sreenivasa
10895cabbc6bSPrashanth Sreenivasa static void
10905cabbc6bSPrashanth Sreenivasa vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
10915cabbc6bSPrashanth Sreenivasa {
10925cabbc6bSPrashanth Sreenivasa vdev_t *ivd;
10935cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx;
10945cabbc6bSPrashanth Sreenivasa spa_t *spa = vd->vdev_spa;
10955cabbc6bSPrashanth Sreenivasa spa_vdev_removal_t *svr = spa->spa_vdev_removal;
10965cabbc6bSPrashanth Sreenivasa
10975cabbc6bSPrashanth Sreenivasa /*
10985cabbc6bSPrashanth Sreenivasa * First, build a list of leaf zaps to be destroyed.
10995cabbc6bSPrashanth Sreenivasa * This is passed to the sync context thread,
11005cabbc6bSPrashanth Sreenivasa * which does the actual unlinking.
11015cabbc6bSPrashanth Sreenivasa */
11025cabbc6bSPrashanth Sreenivasa svr->svr_zaplist = fnvlist_alloc();
11035cabbc6bSPrashanth Sreenivasa vdev_remove_enlist_zaps(vd, svr->svr_zaplist);
11045cabbc6bSPrashanth Sreenivasa
11055cabbc6bSPrashanth Sreenivasa ivd = vdev_add_parent(vd, &vdev_indirect_ops);
11063a4b1be9SMatthew Ahrens ivd->vdev_removing = 0;
11075cabbc6bSPrashanth Sreenivasa
11085cabbc6bSPrashanth Sreenivasa vd->vdev_leaf_zap = 0;
11095cabbc6bSPrashanth Sreenivasa
11105cabbc6bSPrashanth Sreenivasa vdev_remove_child(ivd, vd);
11115cabbc6bSPrashanth Sreenivasa vdev_compact_children(ivd);
11125cabbc6bSPrashanth Sreenivasa
11135cabbc6bSPrashanth Sreenivasa ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
11145cabbc6bSPrashanth Sreenivasa
11155cabbc6bSPrashanth Sreenivasa tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
11165cabbc6bSPrashanth Sreenivasa dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
11175cabbc6bSPrashanth Sreenivasa 0, ZFS_SPACE_CHECK_NONE, tx);
11185cabbc6bSPrashanth Sreenivasa dmu_tx_commit(tx);
11195cabbc6bSPrashanth Sreenivasa
11205cabbc6bSPrashanth Sreenivasa /*
11215cabbc6bSPrashanth Sreenivasa * Indicate that this thread has exited.
11225cabbc6bSPrashanth Sreenivasa 	 * After this, we cannot use svr.
11235cabbc6bSPrashanth Sreenivasa */
11245cabbc6bSPrashanth Sreenivasa mutex_enter(&svr->svr_lock);
11255cabbc6bSPrashanth Sreenivasa svr->svr_thread = NULL;
11265cabbc6bSPrashanth Sreenivasa cv_broadcast(&svr->svr_cv);
11275cabbc6bSPrashanth Sreenivasa mutex_exit(&svr->svr_lock);
11285cabbc6bSPrashanth Sreenivasa }
11295cabbc6bSPrashanth Sreenivasa
11305cabbc6bSPrashanth Sreenivasa /*
11315cabbc6bSPrashanth Sreenivasa * Complete the removal of a toplevel vdev. This is called in open
11325cabbc6bSPrashanth Sreenivasa  * context by the removal thread after we have copied all of the vdev's data.
11335cabbc6bSPrashanth Sreenivasa */
11345cabbc6bSPrashanth Sreenivasa static void
11353a4b1be9SMatthew Ahrens vdev_remove_complete(spa_t *spa)
11365cabbc6bSPrashanth Sreenivasa {
11375cabbc6bSPrashanth Sreenivasa uint64_t txg;
11385cabbc6bSPrashanth Sreenivasa
11395cabbc6bSPrashanth Sreenivasa /*
11405cabbc6bSPrashanth Sreenivasa * Wait for any deferred frees to be synced before we call
11415cabbc6bSPrashanth Sreenivasa * vdev_metaslab_fini()
11425cabbc6bSPrashanth Sreenivasa */
11435cabbc6bSPrashanth Sreenivasa txg_wait_synced(spa->spa_dsl_pool, 0);
11445cabbc6bSPrashanth Sreenivasa txg = spa_vdev_enter(spa);
11453a4b1be9SMatthew Ahrens vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
1146094e47e9SGeorge Wilson ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
1147084fd14fSBrian Behlendorf ASSERT3P(vd->vdev_trim_thread, ==, NULL);
1148084fd14fSBrian Behlendorf ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
11493a4b1be9SMatthew Ahrens
11503a4b1be9SMatthew Ahrens sysevent_t *ev = spa_event_create(spa, vd, NULL,
11513a4b1be9SMatthew Ahrens ESC_ZFS_VDEV_REMOVE_DEV);
11523a4b1be9SMatthew Ahrens
11535cabbc6bSPrashanth Sreenivasa zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
11545cabbc6bSPrashanth Sreenivasa vd->vdev_id, txg);
11555cabbc6bSPrashanth Sreenivasa
11565cabbc6bSPrashanth Sreenivasa /*
11575cabbc6bSPrashanth Sreenivasa * Discard allocation state.
11585cabbc6bSPrashanth Sreenivasa */
11595cabbc6bSPrashanth Sreenivasa if (vd->vdev_mg != NULL) {
11605cabbc6bSPrashanth Sreenivasa vdev_metaslab_fini(vd);
11615cabbc6bSPrashanth Sreenivasa metaslab_group_destroy(vd->vdev_mg);
11625cabbc6bSPrashanth Sreenivasa vd->vdev_mg = NULL;
1163814dcd43SSerapheim Dimitropoulos spa_log_sm_set_blocklimit(spa);
11645cabbc6bSPrashanth Sreenivasa }
11655cabbc6bSPrashanth Sreenivasa ASSERT0(vd->vdev_stat.vs_space);
11665cabbc6bSPrashanth Sreenivasa ASSERT0(vd->vdev_stat.vs_dspace);
11675cabbc6bSPrashanth Sreenivasa
11685cabbc6bSPrashanth Sreenivasa vdev_remove_replace_with_indirect(vd, txg);
11695cabbc6bSPrashanth Sreenivasa
11705cabbc6bSPrashanth Sreenivasa /*
11715cabbc6bSPrashanth Sreenivasa * We now release the locks, allowing spa_sync to run and finish the
11725cabbc6bSPrashanth Sreenivasa * removal via vdev_remove_complete_sync in syncing context.
11733a4b1be9SMatthew Ahrens *
11743a4b1be9SMatthew Ahrens * Note that we hold on to the vdev_t that has been replaced. Since
11753a4b1be9SMatthew Ahrens * it isn't part of the vdev tree any longer, it can't be concurrently
11763a4b1be9SMatthew Ahrens * manipulated, even while we don't have the config lock.
11775cabbc6bSPrashanth Sreenivasa */
11785cabbc6bSPrashanth Sreenivasa (void) spa_vdev_exit(spa, NULL, txg, 0);
11795cabbc6bSPrashanth Sreenivasa
11805cabbc6bSPrashanth Sreenivasa /*
11815cabbc6bSPrashanth Sreenivasa * Top ZAP should have been transferred to the indirect vdev in
11825cabbc6bSPrashanth Sreenivasa * vdev_remove_replace_with_indirect.
11835cabbc6bSPrashanth Sreenivasa */
11845cabbc6bSPrashanth Sreenivasa ASSERT0(vd->vdev_top_zap);
11855cabbc6bSPrashanth Sreenivasa
11865cabbc6bSPrashanth Sreenivasa /*
11875cabbc6bSPrashanth Sreenivasa * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
11885cabbc6bSPrashanth Sreenivasa */
11895cabbc6bSPrashanth Sreenivasa ASSERT0(vd->vdev_leaf_zap);
11905cabbc6bSPrashanth Sreenivasa
11915cabbc6bSPrashanth Sreenivasa txg = spa_vdev_enter(spa);
11925cabbc6bSPrashanth Sreenivasa (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
11935cabbc6bSPrashanth Sreenivasa /*
11945cabbc6bSPrashanth Sreenivasa * Request to update the config and the config cachefile.
11955cabbc6bSPrashanth Sreenivasa */
11965cabbc6bSPrashanth Sreenivasa vdev_config_dirty(spa->spa_root_vdev);
11975cabbc6bSPrashanth Sreenivasa (void) spa_vdev_exit(spa, vd, txg, 0);
11983a4b1be9SMatthew Ahrens
11993a4b1be9SMatthew Ahrens spa_event_post(ev);
12005cabbc6bSPrashanth Sreenivasa }
12015cabbc6bSPrashanth Sreenivasa
12025cabbc6bSPrashanth Sreenivasa /*
12035cabbc6bSPrashanth Sreenivasa * Evacuates a segment of size at most max_alloc from the vdev
12045cabbc6bSPrashanth Sreenivasa * via repeated calls to spa_vdev_copy_segment. If an allocation
12055cabbc6bSPrashanth Sreenivasa * fails, the pool is probably too fragmented to handle such a
12065cabbc6bSPrashanth Sreenivasa  * large size, so we decrease max_alloc so that the caller will not try
12075cabbc6bSPrashanth Sreenivasa  * this size again during this txg.
12085cabbc6bSPrashanth Sreenivasa */
12095cabbc6bSPrashanth Sreenivasa static void
12103a4b1be9SMatthew Ahrens spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
12115cabbc6bSPrashanth Sreenivasa uint64_t *max_alloc, dmu_tx_t *tx)
12125cabbc6bSPrashanth Sreenivasa {
12135cabbc6bSPrashanth Sreenivasa uint64_t txg = dmu_tx_get_txg(tx);
12145cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_tx_pool(tx)->dp_spa;
12155cabbc6bSPrashanth Sreenivasa
12165cabbc6bSPrashanth Sreenivasa mutex_enter(&svr->svr_lock);
12175cabbc6bSPrashanth Sreenivasa
1218cfd63e1bSMatthew Ahrens /*
1219cfd63e1bSMatthew Ahrens * Determine how big of a chunk to copy. We can allocate up
1220cfd63e1bSMatthew Ahrens * to max_alloc bytes, and we can span up to vdev_removal_max_span
1221cfd63e1bSMatthew Ahrens * bytes of unallocated space at a time. "segs" will track the
1222cfd63e1bSMatthew Ahrens * allocated segments that we are copying. We may also be copying
1223cfd63e1bSMatthew Ahrens * free segments (of up to vdev_removal_max_span bytes).
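	 * For example (illustrative case): two allocated segments
	 * separated by a hole smaller than vdev_removal_max_span may be
	 * copied as a single chunk; the hole is later recorded as
	 * obsolete by spa_vdev_copy_segment() rather than remapped.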
1224cfd63e1bSMatthew Ahrens */
1225*4d7988d6SPaul Dagnelie range_tree_t *segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
1226cfd63e1bSMatthew Ahrens for (;;) {
1227*4d7988d6SPaul Dagnelie range_tree_t *rt = svr->svr_allocd_segs;
1228*4d7988d6SPaul Dagnelie range_seg_t *rs = range_tree_first(rt);
1229*4d7988d6SPaul Dagnelie
1230cfd63e1bSMatthew Ahrens if (rs == NULL)
1231cfd63e1bSMatthew Ahrens break;
1232cfd63e1bSMatthew Ahrens
1233cfd63e1bSMatthew Ahrens uint64_t seg_length;
1234cfd63e1bSMatthew Ahrens
1235cfd63e1bSMatthew Ahrens if (range_tree_is_empty(segs)) {
1236cfd63e1bSMatthew Ahrens /* need to truncate the first seg based on max_alloc */
1237*4d7988d6SPaul Dagnelie seg_length = MIN(rs_get_end(rs, rt) - rs_get_start(rs,
1238*4d7988d6SPaul Dagnelie rt), *max_alloc);
1239cfd63e1bSMatthew Ahrens } else {
1240*4d7988d6SPaul Dagnelie if (rs_get_start(rs, rt) - range_tree_max(segs) >
1241cfd63e1bSMatthew Ahrens vdev_removal_max_span) {
1242cfd63e1bSMatthew Ahrens /*
1243cfd63e1bSMatthew Ahrens * Including this segment would cause us to
1244cfd63e1bSMatthew Ahrens * copy a larger unneeded chunk than is allowed.
1245cfd63e1bSMatthew Ahrens */
1246cfd63e1bSMatthew Ahrens break;
1247*4d7988d6SPaul Dagnelie } else if (rs_get_end(rs, rt) - range_tree_min(segs) >
1248cfd63e1bSMatthew Ahrens *max_alloc) {
1249cfd63e1bSMatthew Ahrens /*
1250cfd63e1bSMatthew Ahrens * This additional segment would extend past
1251cfd63e1bSMatthew Ahrens * max_alloc. Rather than splitting this
1252cfd63e1bSMatthew Ahrens * segment, leave it for the next mapping.
1253cfd63e1bSMatthew Ahrens */
1254cfd63e1bSMatthew Ahrens break;
1255cfd63e1bSMatthew Ahrens } else {
1256*4d7988d6SPaul Dagnelie seg_length = rs_get_end(rs, rt) -
1257*4d7988d6SPaul Dagnelie rs_get_start(rs, rt);
1258cfd63e1bSMatthew Ahrens }
1259cfd63e1bSMatthew Ahrens }
1260cfd63e1bSMatthew Ahrens
1261*4d7988d6SPaul Dagnelie range_tree_add(segs, rs_get_start(rs, rt), seg_length);
1262cfd63e1bSMatthew Ahrens range_tree_remove(svr->svr_allocd_segs,
1263*4d7988d6SPaul Dagnelie rs_get_start(rs, rt), seg_length);
1264cfd63e1bSMatthew Ahrens }
1265cfd63e1bSMatthew Ahrens
1266cfd63e1bSMatthew Ahrens if (range_tree_is_empty(segs)) {
12675cabbc6bSPrashanth Sreenivasa mutex_exit(&svr->svr_lock);
1268cfd63e1bSMatthew Ahrens range_tree_destroy(segs);
12695cabbc6bSPrashanth Sreenivasa return;
12705cabbc6bSPrashanth Sreenivasa }
12715cabbc6bSPrashanth Sreenivasa
12725cabbc6bSPrashanth Sreenivasa if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
12735cabbc6bSPrashanth Sreenivasa dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
12745cabbc6bSPrashanth Sreenivasa svr, 0, ZFS_SPACE_CHECK_NONE, tx);
12755cabbc6bSPrashanth Sreenivasa }
12765cabbc6bSPrashanth Sreenivasa
1277cfd63e1bSMatthew Ahrens svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);
12785cabbc6bSPrashanth Sreenivasa
12795cabbc6bSPrashanth Sreenivasa /*
12805cabbc6bSPrashanth Sreenivasa * Note: this is the amount of *allocated* space
12815cabbc6bSPrashanth Sreenivasa * that we are taking care of each txg.
12825cabbc6bSPrashanth Sreenivasa */
1283cfd63e1bSMatthew Ahrens svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);
12845cabbc6bSPrashanth Sreenivasa
12855cabbc6bSPrashanth Sreenivasa mutex_exit(&svr->svr_lock);
12865cabbc6bSPrashanth Sreenivasa
12875cabbc6bSPrashanth Sreenivasa zio_alloc_list_t zal;
12885cabbc6bSPrashanth Sreenivasa metaslab_trace_init(&zal);
1289cfd63e1bSMatthew Ahrens uint64_t thismax = SPA_MAXBLOCKSIZE;
1290cfd63e1bSMatthew Ahrens while (!range_tree_is_empty(segs)) {
12913a4b1be9SMatthew Ahrens int error = spa_vdev_copy_segment(vd,
1292cfd63e1bSMatthew Ahrens segs, thismax, txg, vca, &zal);
12935cabbc6bSPrashanth Sreenivasa
12945cabbc6bSPrashanth Sreenivasa if (error == ENOSPC) {
12955cabbc6bSPrashanth Sreenivasa /*
12965cabbc6bSPrashanth Sreenivasa * Cut our segment in half, and don't try this
12975cabbc6bSPrashanth Sreenivasa * segment size again this txg. Note that the
12985cabbc6bSPrashanth Sreenivasa * allocation size must be aligned to the highest
12995cabbc6bSPrashanth Sreenivasa * ashift in the pool, so that the allocation will
13005cabbc6bSPrashanth Sreenivasa * not be padded out to a multiple of the ashift,
13015cabbc6bSPrashanth Sreenivasa * which could cause us to think that this mapping
13025cabbc6bSPrashanth Sreenivasa * is larger than we intended.
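			 * For example (illustrative numbers): if attempted
			 * was 1M with a 4K ashift, thismax becomes 512K and
			 * *max_alloc becomes 1M - 4K.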
13035cabbc6bSPrashanth Sreenivasa */
13045cabbc6bSPrashanth Sreenivasa ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
13055cabbc6bSPrashanth Sreenivasa ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
1306cfd63e1bSMatthew Ahrens uint64_t attempted =
1307cfd63e1bSMatthew Ahrens MIN(range_tree_span(segs), thismax);
1308cfd63e1bSMatthew Ahrens thismax = P2ROUNDUP(attempted / 2,
13095cabbc6bSPrashanth Sreenivasa 1 << spa->spa_max_ashift);
13105cabbc6bSPrashanth Sreenivasa /*
13115cabbc6bSPrashanth Sreenivasa 			 * The minimum-size allocation cannot fail.
13125cabbc6bSPrashanth Sreenivasa */
1313cfd63e1bSMatthew Ahrens ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
1314cfd63e1bSMatthew Ahrens *max_alloc = attempted - (1 << spa->spa_max_ashift);
13155cabbc6bSPrashanth Sreenivasa } else {
13165cabbc6bSPrashanth Sreenivasa ASSERT0(error);
13175cabbc6bSPrashanth Sreenivasa
13185cabbc6bSPrashanth Sreenivasa /*
13195cabbc6bSPrashanth Sreenivasa * We've performed an allocation, so reset the
13205cabbc6bSPrashanth Sreenivasa * alloc trace list.
13215cabbc6bSPrashanth Sreenivasa */
13225cabbc6bSPrashanth Sreenivasa metaslab_trace_fini(&zal);
13235cabbc6bSPrashanth Sreenivasa metaslab_trace_init(&zal);
13245cabbc6bSPrashanth Sreenivasa }
13255cabbc6bSPrashanth Sreenivasa }
13265cabbc6bSPrashanth Sreenivasa metaslab_trace_fini(&zal);
1327cfd63e1bSMatthew Ahrens range_tree_destroy(segs);
13285cabbc6bSPrashanth Sreenivasa }
13295cabbc6bSPrashanth Sreenivasa
13305cabbc6bSPrashanth Sreenivasa /*
13315cabbc6bSPrashanth Sreenivasa * The removal thread operates in open context. It iterates over all
13325cabbc6bSPrashanth Sreenivasa * allocated space in the vdev, by loading each metaslab's spacemap.
13335cabbc6bSPrashanth Sreenivasa * For each contiguous segment of allocated space (capping the segment
13345cabbc6bSPrashanth Sreenivasa * size at SPA_MAXBLOCKSIZE), we:
13355cabbc6bSPrashanth Sreenivasa * - Allocate space for it on another vdev.
13365cabbc6bSPrashanth Sreenivasa * - Create a new mapping from the old location to the new location
13375cabbc6bSPrashanth Sreenivasa * (as a record in svr_new_segments).
13385cabbc6bSPrashanth Sreenivasa * - Initiate a logical read zio to get the data off the removing disk.
13395cabbc6bSPrashanth Sreenivasa * - In the read zio's done callback, initiate a logical write zio to
13405cabbc6bSPrashanth Sreenivasa * write it to the new vdev.
13415cabbc6bSPrashanth Sreenivasa * Note that all of this will take effect when a particular TXG syncs.
13425cabbc6bSPrashanth Sreenivasa * The sync thread ensures that all the phys reads and writes for the syncing
13435cabbc6bSPrashanth Sreenivasa * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
13445cabbc6bSPrashanth Sreenivasa * (see vdev_mapping_sync()).
13455cabbc6bSPrashanth Sreenivasa */
13465cabbc6bSPrashanth Sreenivasa static void
13475cabbc6bSPrashanth Sreenivasa spa_vdev_remove_thread(void *arg)
13485cabbc6bSPrashanth Sreenivasa {
13493a4b1be9SMatthew Ahrens spa_t *spa = arg;
13505cabbc6bSPrashanth Sreenivasa spa_vdev_removal_t *svr = spa->spa_vdev_removal;
13515cabbc6bSPrashanth Sreenivasa vdev_copy_arg_t vca;
13525cabbc6bSPrashanth Sreenivasa uint64_t max_alloc = zfs_remove_max_segment;
13535cabbc6bSPrashanth Sreenivasa uint64_t last_txg = 0;
13543a4b1be9SMatthew Ahrens
13553a4b1be9SMatthew Ahrens spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
13563a4b1be9SMatthew Ahrens vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
13575cabbc6bSPrashanth Sreenivasa vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
13585cabbc6bSPrashanth Sreenivasa uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);
13595cabbc6bSPrashanth Sreenivasa
13605cabbc6bSPrashanth Sreenivasa ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
13615cabbc6bSPrashanth Sreenivasa ASSERT(vdev_is_concrete(vd));
13625cabbc6bSPrashanth Sreenivasa ASSERT(vd->vdev_removing);
13635cabbc6bSPrashanth Sreenivasa ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
13645cabbc6bSPrashanth Sreenivasa ASSERT(vim != NULL);
13655cabbc6bSPrashanth Sreenivasa
13665cabbc6bSPrashanth Sreenivasa mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
13675cabbc6bSPrashanth Sreenivasa cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
13685cabbc6bSPrashanth Sreenivasa vca.vca_outstanding_bytes = 0;
13695cabbc6bSPrashanth Sreenivasa
13705cabbc6bSPrashanth Sreenivasa mutex_enter(&svr->svr_lock);
13715cabbc6bSPrashanth Sreenivasa
13725cabbc6bSPrashanth Sreenivasa /*
13735cabbc6bSPrashanth Sreenivasa * Start from vim_max_offset so we pick up where we left off
13745cabbc6bSPrashanth Sreenivasa * if we are restarting the removal after opening the pool.
13755cabbc6bSPrashanth Sreenivasa */
13765cabbc6bSPrashanth Sreenivasa uint64_t msi;
13775cabbc6bSPrashanth Sreenivasa for (msi = start_offset >> vd->vdev_ms_shift;
13785cabbc6bSPrashanth Sreenivasa msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
13795cabbc6bSPrashanth Sreenivasa metaslab_t *msp = vd->vdev_ms[msi];
13805cabbc6bSPrashanth Sreenivasa ASSERT3U(msi, <=, vd->vdev_ms_count);
13815cabbc6bSPrashanth Sreenivasa
13825cabbc6bSPrashanth Sreenivasa ASSERT0(range_tree_space(svr->svr_allocd_segs));
13835cabbc6bSPrashanth Sreenivasa
13845cabbc6bSPrashanth Sreenivasa mutex_enter(&msp->ms_sync_lock);
13855cabbc6bSPrashanth Sreenivasa mutex_enter(&msp->ms_lock);
13865cabbc6bSPrashanth Sreenivasa
13875cabbc6bSPrashanth Sreenivasa /*
13885cabbc6bSPrashanth Sreenivasa * Assert nothing in flight -- ms_*tree is empty.
13895cabbc6bSPrashanth Sreenivasa */
13905cabbc6bSPrashanth Sreenivasa for (int i = 0; i < TXG_SIZE; i++) {
139186714001SSerapheim Dimitropoulos ASSERT0(range_tree_space(msp->ms_allocating[i]));
13925cabbc6bSPrashanth Sreenivasa }
13935cabbc6bSPrashanth Sreenivasa
13945cabbc6bSPrashanth Sreenivasa /*
13955cabbc6bSPrashanth Sreenivasa * If the metaslab has ever been allocated from (ms_sm!=NULL),
13965cabbc6bSPrashanth Sreenivasa * read the allocated segments from the space map object
13975cabbc6bSPrashanth Sreenivasa * into svr_allocd_segs. Since we do this while holding
13985cabbc6bSPrashanth Sreenivasa * svr_lock and ms_sync_lock, concurrent frees (which
13995cabbc6bSPrashanth Sreenivasa * would have modified the space map) will wait for us
14005cabbc6bSPrashanth Sreenivasa * to finish loading the spacemap, and then take the
14015cabbc6bSPrashanth Sreenivasa * appropriate action (see free_from_removing_vdev()).
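		 * The net effect is: svr_allocd_segs = (flushed SM_ALLOC
		 * records + ms_unflushed_allocs) - ms_unflushed_frees -
		 * ms_freeing.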
14025cabbc6bSPrashanth Sreenivasa */
14035cabbc6bSPrashanth Sreenivasa if (msp->ms_sm != NULL) {
1404555d674dSSerapheim Dimitropoulos VERIFY0(space_map_load(msp->ms_sm,
1405555d674dSSerapheim Dimitropoulos svr->svr_allocd_segs, SM_ALLOC));
14065cabbc6bSPrashanth Sreenivasa
1407814dcd43SSerapheim Dimitropoulos range_tree_walk(msp->ms_unflushed_allocs,
1408814dcd43SSerapheim Dimitropoulos range_tree_add, svr->svr_allocd_segs);
1409814dcd43SSerapheim Dimitropoulos range_tree_walk(msp->ms_unflushed_frees,
1410814dcd43SSerapheim Dimitropoulos range_tree_remove, svr->svr_allocd_segs);
141186714001SSerapheim Dimitropoulos range_tree_walk(msp->ms_freeing,
14125cabbc6bSPrashanth Sreenivasa range_tree_remove, svr->svr_allocd_segs);
14135cabbc6bSPrashanth Sreenivasa
14145cabbc6bSPrashanth Sreenivasa /*
14155cabbc6bSPrashanth Sreenivasa * When we are resuming from a paused removal (i.e.
14165cabbc6bSPrashanth Sreenivasa * when importing a pool with a removal in progress),
14175cabbc6bSPrashanth Sreenivasa * discard any state that we have already processed.
14185cabbc6bSPrashanth Sreenivasa */
14195cabbc6bSPrashanth Sreenivasa range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
14205cabbc6bSPrashanth Sreenivasa }
14215cabbc6bSPrashanth Sreenivasa mutex_exit(&msp->ms_lock);
14225cabbc6bSPrashanth Sreenivasa mutex_exit(&msp->ms_sync_lock);
14235cabbc6bSPrashanth Sreenivasa
14245cabbc6bSPrashanth Sreenivasa vca.vca_msp = msp;
14255cabbc6bSPrashanth Sreenivasa zfs_dbgmsg("copying %llu segments for metaslab %llu",
1426*4d7988d6SPaul Dagnelie zfs_btree_numnodes(&svr->svr_allocd_segs->rt_root),
14275cabbc6bSPrashanth Sreenivasa msp->ms_id);
14285cabbc6bSPrashanth Sreenivasa
14295cabbc6bSPrashanth Sreenivasa while (!svr->svr_thread_exit &&
143086714001SSerapheim Dimitropoulos !range_tree_is_empty(svr->svr_allocd_segs)) {
14315cabbc6bSPrashanth Sreenivasa
14325cabbc6bSPrashanth Sreenivasa mutex_exit(&svr->svr_lock);
14335cabbc6bSPrashanth Sreenivasa
14343a4b1be9SMatthew Ahrens /*
14353a4b1be9SMatthew Ahrens * We need to periodically drop the config lock so that
14363a4b1be9SMatthew Ahrens * writers can get in. Additionally, we can't wait
14373a4b1be9SMatthew Ahrens * for a txg to sync while holding a config lock
14383a4b1be9SMatthew Ahrens * (since a waiting writer could cause a 3-way deadlock
14393a4b1be9SMatthew Ahrens * with the sync thread, which also gets a config
14403a4b1be9SMatthew Ahrens 			 * lock as reader). So we can't hold the config lock
14413a4b1be9SMatthew Ahrens * while calling dmu_tx_assign().
14423a4b1be9SMatthew Ahrens */
14433a4b1be9SMatthew Ahrens spa_config_exit(spa, SCL_CONFIG, FTAG);
14443a4b1be9SMatthew Ahrens
144586714001SSerapheim Dimitropoulos /*
144686714001SSerapheim Dimitropoulos * This delay will pause the removal around the point
1447e4c795beSTom Caputi * specified by zfs_removal_suspend_progress. We do this
144886714001SSerapheim Dimitropoulos * solely from the test suite or during debugging.
144986714001SSerapheim Dimitropoulos */
145086714001SSerapheim Dimitropoulos uint64_t bytes_copied =
145186714001SSerapheim Dimitropoulos spa->spa_removing_phys.sr_copied;
145286714001SSerapheim Dimitropoulos for (int i = 0; i < TXG_SIZE; i++)
145386714001SSerapheim Dimitropoulos bytes_copied += svr->svr_bytes_done[i];
1454e4c795beSTom Caputi while (zfs_removal_suspend_progress &&
145586714001SSerapheim Dimitropoulos !svr->svr_thread_exit)
145686714001SSerapheim Dimitropoulos delay(hz);
145786714001SSerapheim Dimitropoulos
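			/*
			 * Throttle: wait for outstanding copy I/O to drop
			 * below zfs_remove_max_copy_bytes before issuing
			 * more.
			 */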
14585cabbc6bSPrashanth Sreenivasa mutex_enter(&vca.vca_lock);
14595cabbc6bSPrashanth Sreenivasa while (vca.vca_outstanding_bytes >
14605cabbc6bSPrashanth Sreenivasa zfs_remove_max_copy_bytes) {
14615cabbc6bSPrashanth Sreenivasa cv_wait(&vca.vca_cv, &vca.vca_lock);
14625cabbc6bSPrashanth Sreenivasa }
14635cabbc6bSPrashanth Sreenivasa mutex_exit(&vca.vca_lock);
14645cabbc6bSPrashanth Sreenivasa
14655cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx =
14665cabbc6bSPrashanth Sreenivasa dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
14675cabbc6bSPrashanth Sreenivasa
14685cabbc6bSPrashanth Sreenivasa VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
14695cabbc6bSPrashanth Sreenivasa uint64_t txg = dmu_tx_get_txg(tx);
14705cabbc6bSPrashanth Sreenivasa
14713a4b1be9SMatthew Ahrens /*
14723a4b1be9SMatthew Ahrens * Reacquire the vdev_config lock. The vdev_t
14733a4b1be9SMatthew Ahrens * that we're removing may have changed, e.g. due
14743a4b1be9SMatthew Ahrens * to a vdev_attach or vdev_detach.
14753a4b1be9SMatthew Ahrens */
14763a4b1be9SMatthew Ahrens spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
14773a4b1be9SMatthew Ahrens vd = vdev_lookup_top(spa, svr->svr_vdev_id);
14783a4b1be9SMatthew Ahrens
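			/*
			 * A new txg invalidates any ENOSPC-driven backoff
			 * of max_alloc from the previous txg, so reset it
			 * to the default segment size.
			 */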
14795cabbc6bSPrashanth Sreenivasa if (txg != last_txg)
14805cabbc6bSPrashanth Sreenivasa max_alloc = zfs_remove_max_segment;
14815cabbc6bSPrashanth Sreenivasa last_txg = txg;
14825cabbc6bSPrashanth Sreenivasa
14833a4b1be9SMatthew Ahrens spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx);
14845cabbc6bSPrashanth Sreenivasa
14855cabbc6bSPrashanth Sreenivasa dmu_tx_commit(tx);
14865cabbc6bSPrashanth Sreenivasa mutex_enter(&svr->svr_lock);
14875cabbc6bSPrashanth Sreenivasa }
14885cabbc6bSPrashanth Sreenivasa }
14895cabbc6bSPrashanth Sreenivasa
14905cabbc6bSPrashanth Sreenivasa mutex_exit(&svr->svr_lock);
14913a4b1be9SMatthew Ahrens
14923a4b1be9SMatthew Ahrens spa_config_exit(spa, SCL_CONFIG, FTAG);
14933a4b1be9SMatthew Ahrens
14945cabbc6bSPrashanth Sreenivasa /*
14955cabbc6bSPrashanth Sreenivasa * Wait for all copies to finish before cleaning up the vca.
14965cabbc6bSPrashanth Sreenivasa */
14975cabbc6bSPrashanth Sreenivasa txg_wait_synced(spa->spa_dsl_pool, 0);
14985cabbc6bSPrashanth Sreenivasa ASSERT0(vca.vca_outstanding_bytes);
14995cabbc6bSPrashanth Sreenivasa
15005cabbc6bSPrashanth Sreenivasa mutex_destroy(&vca.vca_lock);
15015cabbc6bSPrashanth Sreenivasa cv_destroy(&vca.vca_cv);
15025cabbc6bSPrashanth Sreenivasa
15035cabbc6bSPrashanth Sreenivasa if (svr->svr_thread_exit) {
15045cabbc6bSPrashanth Sreenivasa mutex_enter(&svr->svr_lock);
15055cabbc6bSPrashanth Sreenivasa range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
15065cabbc6bSPrashanth Sreenivasa svr->svr_thread = NULL;
15075cabbc6bSPrashanth Sreenivasa cv_broadcast(&svr->svr_cv);
15085cabbc6bSPrashanth Sreenivasa mutex_exit(&svr->svr_lock);
15095cabbc6bSPrashanth Sreenivasa } else {
15105cabbc6bSPrashanth Sreenivasa ASSERT0(range_tree_space(svr->svr_allocd_segs));
15113a4b1be9SMatthew Ahrens vdev_remove_complete(spa);
15125cabbc6bSPrashanth Sreenivasa }
15135cabbc6bSPrashanth Sreenivasa }
15145cabbc6bSPrashanth Sreenivasa
15155cabbc6bSPrashanth Sreenivasa void
15165cabbc6bSPrashanth Sreenivasa spa_vdev_remove_suspend(spa_t *spa)
15175cabbc6bSPrashanth Sreenivasa {
15185cabbc6bSPrashanth Sreenivasa spa_vdev_removal_t *svr = spa->spa_vdev_removal;
15195cabbc6bSPrashanth Sreenivasa
15205cabbc6bSPrashanth Sreenivasa if (svr == NULL)
15215cabbc6bSPrashanth Sreenivasa return;
15225cabbc6bSPrashanth Sreenivasa
15235cabbc6bSPrashanth Sreenivasa mutex_enter(&svr->svr_lock);
15245cabbc6bSPrashanth Sreenivasa svr->svr_thread_exit = B_TRUE;
15255cabbc6bSPrashanth Sreenivasa while (svr->svr_thread != NULL)
15265cabbc6bSPrashanth Sreenivasa cv_wait(&svr->svr_cv, &svr->svr_lock);
15275cabbc6bSPrashanth Sreenivasa svr->svr_thread_exit = B_FALSE;
15285cabbc6bSPrashanth Sreenivasa mutex_exit(&svr->svr_lock);
15295cabbc6bSPrashanth Sreenivasa }
15305cabbc6bSPrashanth Sreenivasa
15315cabbc6bSPrashanth Sreenivasa /* ARGSUSED */
15325cabbc6bSPrashanth Sreenivasa static int
15335cabbc6bSPrashanth Sreenivasa spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
15345cabbc6bSPrashanth Sreenivasa {
15355cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_tx_pool(tx)->dp_spa;
15365cabbc6bSPrashanth Sreenivasa
15375cabbc6bSPrashanth Sreenivasa if (spa->spa_vdev_removal == NULL)
15385cabbc6bSPrashanth Sreenivasa return (ENOTACTIVE);
15395cabbc6bSPrashanth Sreenivasa return (0);
15405cabbc6bSPrashanth Sreenivasa }
15415cabbc6bSPrashanth Sreenivasa
15425cabbc6bSPrashanth Sreenivasa /*
15435cabbc6bSPrashanth Sreenivasa * Cancel a removal by freeing all entries from the partial mapping
15445cabbc6bSPrashanth Sreenivasa * and marking the vdev as no longer being removing.
15455cabbc6bSPrashanth Sreenivasa */
15465cabbc6bSPrashanth Sreenivasa /* ARGSUSED */
15475cabbc6bSPrashanth Sreenivasa static void
15485cabbc6bSPrashanth Sreenivasa spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
15495cabbc6bSPrashanth Sreenivasa {
15505cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_tx_pool(tx)->dp_spa;
15515cabbc6bSPrashanth Sreenivasa spa_vdev_removal_t *svr = spa->spa_vdev_removal;
15523a4b1be9SMatthew Ahrens vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
15535cabbc6bSPrashanth Sreenivasa vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
15545cabbc6bSPrashanth Sreenivasa vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
15555cabbc6bSPrashanth Sreenivasa objset_t *mos = spa->spa_meta_objset;
15565cabbc6bSPrashanth Sreenivasa
15575cabbc6bSPrashanth Sreenivasa ASSERT3P(svr->svr_thread, ==, NULL);
15585cabbc6bSPrashanth Sreenivasa
15595cabbc6bSPrashanth Sreenivasa spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
15605cabbc6bSPrashanth Sreenivasa if (vdev_obsolete_counts_are_precise(vd)) {
15615cabbc6bSPrashanth Sreenivasa spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
15625cabbc6bSPrashanth Sreenivasa VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
15635cabbc6bSPrashanth Sreenivasa VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
15645cabbc6bSPrashanth Sreenivasa }
15655cabbc6bSPrashanth Sreenivasa
15665cabbc6bSPrashanth Sreenivasa if (vdev_obsolete_sm_object(vd) != 0) {
15675cabbc6bSPrashanth Sreenivasa ASSERT(vd->vdev_obsolete_sm != NULL);
15685cabbc6bSPrashanth Sreenivasa ASSERT3U(vdev_obsolete_sm_object(vd), ==,
15695cabbc6bSPrashanth Sreenivasa space_map_object(vd->vdev_obsolete_sm));
15705cabbc6bSPrashanth Sreenivasa
15715cabbc6bSPrashanth Sreenivasa space_map_free(vd->vdev_obsolete_sm, tx);
15725cabbc6bSPrashanth Sreenivasa VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
15735cabbc6bSPrashanth Sreenivasa VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
15745cabbc6bSPrashanth Sreenivasa space_map_close(vd->vdev_obsolete_sm);
15755cabbc6bSPrashanth Sreenivasa vd->vdev_obsolete_sm = NULL;
15765cabbc6bSPrashanth Sreenivasa spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
15775cabbc6bSPrashanth Sreenivasa }
15785cabbc6bSPrashanth Sreenivasa for (int i = 0; i < TXG_SIZE; i++) {
15795cabbc6bSPrashanth Sreenivasa ASSERT(list_is_empty(&svr->svr_new_segments[i]));
15805cabbc6bSPrashanth Sreenivasa ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
15815cabbc6bSPrashanth Sreenivasa vdev_indirect_mapping_max_offset(vim));
15825cabbc6bSPrashanth Sreenivasa }
15835cabbc6bSPrashanth Sreenivasa
15845cabbc6bSPrashanth Sreenivasa for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
15855cabbc6bSPrashanth Sreenivasa metaslab_t *msp = vd->vdev_ms[msi];
15865cabbc6bSPrashanth Sreenivasa
15875cabbc6bSPrashanth Sreenivasa if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
15885cabbc6bSPrashanth Sreenivasa break;
15895cabbc6bSPrashanth Sreenivasa
15905cabbc6bSPrashanth Sreenivasa ASSERT0(range_tree_space(svr->svr_allocd_segs));
15915cabbc6bSPrashanth Sreenivasa
15925cabbc6bSPrashanth Sreenivasa mutex_enter(&msp->ms_lock);
15935cabbc6bSPrashanth Sreenivasa
15945cabbc6bSPrashanth Sreenivasa /*
15955cabbc6bSPrashanth Sreenivasa * Assert nothing in flight -- ms_*tree is empty.
15965cabbc6bSPrashanth Sreenivasa */
15975cabbc6bSPrashanth Sreenivasa for (int i = 0; i < TXG_SIZE; i++)
159886714001SSerapheim Dimitropoulos ASSERT0(range_tree_space(msp->ms_allocating[i]));
15995cabbc6bSPrashanth Sreenivasa for (int i = 0; i < TXG_DEFER_SIZE; i++)
160086714001SSerapheim Dimitropoulos ASSERT0(range_tree_space(msp->ms_defer[i]));
160186714001SSerapheim Dimitropoulos ASSERT0(range_tree_space(msp->ms_freed));
16025cabbc6bSPrashanth Sreenivasa
16035cabbc6bSPrashanth Sreenivasa if (msp->ms_sm != NULL) {
16045cabbc6bSPrashanth Sreenivasa mutex_enter(&svr->svr_lock);
16055cabbc6bSPrashanth Sreenivasa VERIFY0(space_map_load(msp->ms_sm,
16065cabbc6bSPrashanth Sreenivasa svr->svr_allocd_segs, SM_ALLOC));
1607814dcd43SSerapheim Dimitropoulos
1608814dcd43SSerapheim Dimitropoulos range_tree_walk(msp->ms_unflushed_allocs,
1609814dcd43SSerapheim Dimitropoulos range_tree_add, svr->svr_allocd_segs);
1610814dcd43SSerapheim Dimitropoulos range_tree_walk(msp->ms_unflushed_frees,
1611814dcd43SSerapheim Dimitropoulos range_tree_remove, svr->svr_allocd_segs);
161286714001SSerapheim Dimitropoulos range_tree_walk(msp->ms_freeing,
16135cabbc6bSPrashanth Sreenivasa range_tree_remove, svr->svr_allocd_segs);
16145cabbc6bSPrashanth Sreenivasa
16155cabbc6bSPrashanth Sreenivasa /*
16165cabbc6bSPrashanth Sreenivasa * Clear everything past what has been synced,
16175cabbc6bSPrashanth Sreenivasa * because we have not allocated mappings for it yet.
16185cabbc6bSPrashanth Sreenivasa */
16195cabbc6bSPrashanth Sreenivasa uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
16203a4b1be9SMatthew Ahrens uint64_t sm_end = msp->ms_sm->sm_start +
16213a4b1be9SMatthew Ahrens msp->ms_sm->sm_size;
16223a4b1be9SMatthew Ahrens if (sm_end > syncd)
16233a4b1be9SMatthew Ahrens range_tree_clear(svr->svr_allocd_segs,
16243a4b1be9SMatthew Ahrens syncd, sm_end - syncd);
16255cabbc6bSPrashanth Sreenivasa
16265cabbc6bSPrashanth Sreenivasa mutex_exit(&svr->svr_lock);
16275cabbc6bSPrashanth Sreenivasa }
16285cabbc6bSPrashanth Sreenivasa mutex_exit(&msp->ms_lock);
16295cabbc6bSPrashanth Sreenivasa
16305cabbc6bSPrashanth Sreenivasa mutex_enter(&svr->svr_lock);
16315cabbc6bSPrashanth Sreenivasa range_tree_vacate(svr->svr_allocd_segs,
16325cabbc6bSPrashanth Sreenivasa free_mapped_segment_cb, vd);
16335cabbc6bSPrashanth Sreenivasa mutex_exit(&svr->svr_lock);
16345cabbc6bSPrashanth Sreenivasa }
16355cabbc6bSPrashanth Sreenivasa
16365cabbc6bSPrashanth Sreenivasa /*
16375cabbc6bSPrashanth Sreenivasa * Note: this must happen after we invoke free_mapped_segment_cb,
16385cabbc6bSPrashanth Sreenivasa * because it adds to the obsolete_segments.
16395cabbc6bSPrashanth Sreenivasa */
16405cabbc6bSPrashanth Sreenivasa range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
16415cabbc6bSPrashanth Sreenivasa
16425cabbc6bSPrashanth Sreenivasa ASSERT3U(vic->vic_mapping_object, ==,
16435cabbc6bSPrashanth Sreenivasa vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
16445cabbc6bSPrashanth Sreenivasa vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
16455cabbc6bSPrashanth Sreenivasa vd->vdev_indirect_mapping = NULL;
16465cabbc6bSPrashanth Sreenivasa vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
16475cabbc6bSPrashanth Sreenivasa vic->vic_mapping_object = 0;
16485cabbc6bSPrashanth Sreenivasa
16495cabbc6bSPrashanth Sreenivasa ASSERT3U(vic->vic_births_object, ==,
16505cabbc6bSPrashanth Sreenivasa vdev_indirect_births_object(vd->vdev_indirect_births));
16515cabbc6bSPrashanth Sreenivasa vdev_indirect_births_close(vd->vdev_indirect_births);
16525cabbc6bSPrashanth Sreenivasa vd->vdev_indirect_births = NULL;
16535cabbc6bSPrashanth Sreenivasa vdev_indirect_births_free(mos, vic->vic_births_object, tx);
16545cabbc6bSPrashanth Sreenivasa vic->vic_births_object = 0;
16555cabbc6bSPrashanth Sreenivasa
16565cabbc6bSPrashanth Sreenivasa /*
16575cabbc6bSPrashanth Sreenivasa * We may have processed some frees from the removing vdev in this
16585cabbc6bSPrashanth Sreenivasa * txg, thus increasing svr_bytes_done; discard that here to
16595cabbc6bSPrashanth Sreenivasa * satisfy the assertions in spa_vdev_removal_destroy().
16605cabbc6bSPrashanth Sreenivasa 	 * Note that future txg's cannot have any bytes_done, because
16615cabbc6bSPrashanth Sreenivasa * future TXG's are only modified from open context, and we have
16625cabbc6bSPrashanth Sreenivasa * already shut down the copying thread.
16635cabbc6bSPrashanth Sreenivasa */
16645cabbc6bSPrashanth Sreenivasa svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
16655cabbc6bSPrashanth Sreenivasa spa_finish_removal(spa, DSS_CANCELED, tx);
16665cabbc6bSPrashanth Sreenivasa
16675cabbc6bSPrashanth Sreenivasa vd->vdev_removing = B_FALSE;
16685cabbc6bSPrashanth Sreenivasa vdev_config_dirty(vd);
16695cabbc6bSPrashanth Sreenivasa
16705cabbc6bSPrashanth Sreenivasa zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
16715cabbc6bSPrashanth Sreenivasa vd->vdev_id, dmu_tx_get_txg(tx));
16725cabbc6bSPrashanth Sreenivasa spa_history_log_internal(spa, "vdev remove canceled", tx,
16735cabbc6bSPrashanth Sreenivasa "%s vdev %llu %s", spa_name(spa),
16745cabbc6bSPrashanth Sreenivasa vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-");
16755cabbc6bSPrashanth Sreenivasa }
16765cabbc6bSPrashanth Sreenivasa
16775cabbc6bSPrashanth Sreenivasa int
16785cabbc6bSPrashanth Sreenivasa spa_vdev_remove_cancel(spa_t *spa)
16795cabbc6bSPrashanth Sreenivasa {
16805cabbc6bSPrashanth Sreenivasa spa_vdev_remove_suspend(spa);
16815cabbc6bSPrashanth Sreenivasa
16825cabbc6bSPrashanth Sreenivasa if (spa->spa_vdev_removal == NULL)
16835cabbc6bSPrashanth Sreenivasa return (ENOTACTIVE);
16845cabbc6bSPrashanth Sreenivasa
16853a4b1be9SMatthew Ahrens uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id;
16865cabbc6bSPrashanth Sreenivasa
16875cabbc6bSPrashanth Sreenivasa int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
168886714001SSerapheim Dimitropoulos spa_vdev_remove_cancel_sync, NULL, 0,
168986714001SSerapheim Dimitropoulos ZFS_SPACE_CHECK_EXTRA_RESERVED);
16905cabbc6bSPrashanth Sreenivasa
16915cabbc6bSPrashanth Sreenivasa if (error == 0) {
16925cabbc6bSPrashanth Sreenivasa spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
16935cabbc6bSPrashanth Sreenivasa vdev_t *vd = vdev_lookup_top(spa, vdid);
16945cabbc6bSPrashanth Sreenivasa metaslab_group_activate(vd->vdev_mg);
16955cabbc6bSPrashanth Sreenivasa spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
16965cabbc6bSPrashanth Sreenivasa }
16975cabbc6bSPrashanth Sreenivasa
16985cabbc6bSPrashanth Sreenivasa return (error);
16995cabbc6bSPrashanth Sreenivasa }
17005cabbc6bSPrashanth Sreenivasa
17015cabbc6bSPrashanth Sreenivasa void
17025cabbc6bSPrashanth Sreenivasa svr_sync(spa_t *spa, dmu_tx_t *tx)
17035cabbc6bSPrashanth Sreenivasa {
17045cabbc6bSPrashanth Sreenivasa spa_vdev_removal_t *svr = spa->spa_vdev_removal;
17055cabbc6bSPrashanth Sreenivasa int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
17065cabbc6bSPrashanth Sreenivasa
17079740f25fSSerapheim Dimitropoulos if (svr == NULL)
17089740f25fSSerapheim Dimitropoulos return;
17099740f25fSSerapheim Dimitropoulos
17105cabbc6bSPrashanth Sreenivasa /*
17115cabbc6bSPrashanth Sreenivasa * This check is necessary so that we do not dirty the
17125cabbc6bSPrashanth Sreenivasa * DIRECTORY_OBJECT via spa_sync_removing_state() when there
17135cabbc6bSPrashanth Sreenivasa * is nothing to do. Dirtying it every time would prevent us
17145cabbc6bSPrashanth Sreenivasa * from syncing-to-convergence.
17155cabbc6bSPrashanth Sreenivasa */
17165cabbc6bSPrashanth Sreenivasa if (svr->svr_bytes_done[txgoff] == 0)
17175cabbc6bSPrashanth Sreenivasa return;
17185cabbc6bSPrashanth Sreenivasa
17195cabbc6bSPrashanth Sreenivasa /*
17205cabbc6bSPrashanth Sreenivasa * Update progress accounting.
17215cabbc6bSPrashanth Sreenivasa */
17225cabbc6bSPrashanth Sreenivasa spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
17235cabbc6bSPrashanth Sreenivasa svr->svr_bytes_done[txgoff] = 0;
17245cabbc6bSPrashanth Sreenivasa
17255cabbc6bSPrashanth Sreenivasa spa_sync_removing_state(spa, tx);
17265cabbc6bSPrashanth Sreenivasa }
17275cabbc6bSPrashanth Sreenivasa
17285cabbc6bSPrashanth Sreenivasa static void
17295cabbc6bSPrashanth Sreenivasa vdev_remove_make_hole_and_free(vdev_t *vd)
17305cabbc6bSPrashanth Sreenivasa {
17315cabbc6bSPrashanth Sreenivasa uint64_t id = vd->vdev_id;
17325cabbc6bSPrashanth Sreenivasa spa_t *spa = vd->vdev_spa;
17335cabbc6bSPrashanth Sreenivasa vdev_t *rvd = spa->spa_root_vdev;
17345cabbc6bSPrashanth Sreenivasa
17355cabbc6bSPrashanth Sreenivasa ASSERT(MUTEX_HELD(&spa_namespace_lock));
17365cabbc6bSPrashanth Sreenivasa ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
17375cabbc6bSPrashanth Sreenivasa
17385cabbc6bSPrashanth Sreenivasa vdev_free(vd);
17395cabbc6bSPrashanth Sreenivasa
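	/*
	 * Instantiate a hole vdev at the same id so that the ids of the
	 * remaining top-level vdevs are preserved.
	 */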
1740814dcd43SSerapheim Dimitropoulos vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
1741814dcd43SSerapheim Dimitropoulos vdev_add_child(rvd, vd);
17425cabbc6bSPrashanth Sreenivasa vdev_config_dirty(rvd);
17435cabbc6bSPrashanth Sreenivasa
17445cabbc6bSPrashanth Sreenivasa /*
17455cabbc6bSPrashanth Sreenivasa * Reassess the health of our root vdev.
17465cabbc6bSPrashanth Sreenivasa */
17475cabbc6bSPrashanth Sreenivasa vdev_reopen(rvd);
17485cabbc6bSPrashanth Sreenivasa }
17495cabbc6bSPrashanth Sreenivasa
17505cabbc6bSPrashanth Sreenivasa /*
17515cabbc6bSPrashanth Sreenivasa * Remove a log device. The config lock is held for the specified TXG.
17525cabbc6bSPrashanth Sreenivasa */
17535cabbc6bSPrashanth Sreenivasa static int
17545cabbc6bSPrashanth Sreenivasa spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
17555cabbc6bSPrashanth Sreenivasa {
17565cabbc6bSPrashanth Sreenivasa metaslab_group_t *mg = vd->vdev_mg;
17575cabbc6bSPrashanth Sreenivasa spa_t *spa = vd->vdev_spa;
17585cabbc6bSPrashanth Sreenivasa int error = 0;
17595cabbc6bSPrashanth Sreenivasa
17605cabbc6bSPrashanth Sreenivasa ASSERT(vd->vdev_islog);
17615cabbc6bSPrashanth Sreenivasa ASSERT(vd == vd->vdev_top);
1762555d674dSSerapheim Dimitropoulos ASSERT(MUTEX_HELD(&spa_namespace_lock));
17635cabbc6bSPrashanth Sreenivasa
17645cabbc6bSPrashanth Sreenivasa /*
17655cabbc6bSPrashanth Sreenivasa * Stop allocating from this vdev.
17665cabbc6bSPrashanth Sreenivasa */
17675cabbc6bSPrashanth Sreenivasa metaslab_group_passivate(mg);
17685cabbc6bSPrashanth Sreenivasa
17695cabbc6bSPrashanth Sreenivasa /*
17705cabbc6bSPrashanth Sreenivasa * Wait for the youngest allocations and frees to sync,
17715cabbc6bSPrashanth Sreenivasa * and then wait for the deferral of those frees to finish.
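	 * (With TXG_CONCURRENT_STATES and TXG_DEFER_SIZE at their current
	 * values of 3 and 2, this waits for txgs through *txg + 5 to
	 * sync.)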
17725cabbc6bSPrashanth Sreenivasa */
17735cabbc6bSPrashanth Sreenivasa spa_vdev_config_exit(spa, NULL,
17745cabbc6bSPrashanth Sreenivasa *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
17755cabbc6bSPrashanth Sreenivasa
17765cabbc6bSPrashanth Sreenivasa /*
1777555d674dSSerapheim Dimitropoulos * Evacuate the device. We don't hold the config lock as
1778555d674dSSerapheim Dimitropoulos * writer since we need to do I/O but we do keep the
17795cabbc6bSPrashanth Sreenivasa 	 * spa_namespace_lock held. Once this completes, the device
17805cabbc6bSPrashanth Sreenivasa * should no longer have any blocks allocated on it.
17815cabbc6bSPrashanth Sreenivasa */
1782555d674dSSerapheim Dimitropoulos ASSERT(MUTEX_HELD(&spa_namespace_lock));
1783555d674dSSerapheim Dimitropoulos if (vd->vdev_stat.vs_alloc != 0)
1784555d674dSSerapheim Dimitropoulos error = spa_reset_logs(spa);
17855cabbc6bSPrashanth Sreenivasa
17865cabbc6bSPrashanth Sreenivasa *txg = spa_vdev_config_enter(spa);
17875cabbc6bSPrashanth Sreenivasa
17885cabbc6bSPrashanth Sreenivasa if (error != 0) {
17895cabbc6bSPrashanth Sreenivasa metaslab_group_activate(mg);
17905cabbc6bSPrashanth Sreenivasa return (error);
17915cabbc6bSPrashanth Sreenivasa }
17925cabbc6bSPrashanth Sreenivasa ASSERT0(vd->vdev_stat.vs_alloc);
17935cabbc6bSPrashanth Sreenivasa
17945cabbc6bSPrashanth Sreenivasa /*
17955cabbc6bSPrashanth Sreenivasa * The evacuation succeeded. Remove any remaining MOS metadata
17965cabbc6bSPrashanth Sreenivasa * associated with this vdev, and wait for these changes to sync.
17975cabbc6bSPrashanth Sreenivasa */
17985cabbc6bSPrashanth Sreenivasa vd->vdev_removing = B_TRUE;
17995cabbc6bSPrashanth Sreenivasa
18005cabbc6bSPrashanth Sreenivasa vdev_dirty_leaves(vd, VDD_DTL, *txg);
18015cabbc6bSPrashanth Sreenivasa vdev_config_dirty(vd);
18025cabbc6bSPrashanth Sreenivasa
1803814dcd43SSerapheim Dimitropoulos /*
1804814dcd43SSerapheim Dimitropoulos * When the log space map feature is enabled we look at
1805814dcd43SSerapheim Dimitropoulos * the vdev's top_zap to find the on-disk flush data of
1806814dcd43SSerapheim Dimitropoulos * the metaslab we just flushed. Thus, while removing a
1807814dcd43SSerapheim Dimitropoulos * log vdev we make sure to call vdev_metaslab_fini()
1808814dcd43SSerapheim Dimitropoulos * first, which removes all metaslabs of this vdev from
1809814dcd43SSerapheim Dimitropoulos * spa_metaslabs_by_flushed before vdev_remove_empty()
1810814dcd43SSerapheim Dimitropoulos * destroys the top_zap of this log vdev.
1811814dcd43SSerapheim Dimitropoulos *
1812814dcd43SSerapheim Dimitropoulos * This avoids the scenario where we flush a metaslab
1813814dcd43SSerapheim Dimitropoulos * from the log vdev being removed that doesn't have a
1814814dcd43SSerapheim Dimitropoulos * top_zap and end up failing to lookup its on-disk flush
1815814dcd43SSerapheim Dimitropoulos * data.
1816814dcd43SSerapheim Dimitropoulos *
1817814dcd43SSerapheim Dimitropoulos * We don't call metaslab_group_destroy() right away
1818814dcd43SSerapheim Dimitropoulos * though (it will be called in vdev_free() later) as
1819814dcd43SSerapheim Dimitropoulos * during metaslab_sync() of metaslabs from other vdevs
1820814dcd43SSerapheim Dimitropoulos * we may touch the metaslab group of this vdev through
1821814dcd43SSerapheim Dimitropoulos * metaslab_class_histogram_verify().
1822814dcd43SSerapheim Dimitropoulos */
1823555d674dSSerapheim Dimitropoulos vdev_metaslab_fini(vd);
1824814dcd43SSerapheim Dimitropoulos spa_log_sm_set_blocklimit(spa);
1825555d674dSSerapheim Dimitropoulos
18265cabbc6bSPrashanth Sreenivasa spa_history_log_internal(spa, "vdev remove", NULL,
18275cabbc6bSPrashanth Sreenivasa "%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id,
18285cabbc6bSPrashanth Sreenivasa (vd->vdev_path != NULL) ? vd->vdev_path : "-");
18295cabbc6bSPrashanth Sreenivasa
18305cabbc6bSPrashanth Sreenivasa /* Make sure these changes are synced */
18315cabbc6bSPrashanth Sreenivasa spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
18325cabbc6bSPrashanth Sreenivasa
1833084fd14fSBrian Behlendorf /* Stop initializing and TRIM */
1834084fd14fSBrian Behlendorf vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);
1835084fd14fSBrian Behlendorf vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED);
1836084fd14fSBrian Behlendorf vdev_autotrim_stop_wait(vd);
1837094e47e9SGeorge Wilson
18385cabbc6bSPrashanth Sreenivasa *txg = spa_vdev_config_enter(spa);
18395cabbc6bSPrashanth Sreenivasa
18405cabbc6bSPrashanth Sreenivasa sysevent_t *ev = spa_event_create(spa, vd, NULL,
18415cabbc6bSPrashanth Sreenivasa ESC_ZFS_VDEV_REMOVE_DEV);
18425cabbc6bSPrashanth Sreenivasa ASSERT(MUTEX_HELD(&spa_namespace_lock));
18435cabbc6bSPrashanth Sreenivasa ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
18445cabbc6bSPrashanth Sreenivasa
18455cabbc6bSPrashanth Sreenivasa /* The top ZAP should have been destroyed by vdev_remove_empty. */
18465cabbc6bSPrashanth Sreenivasa ASSERT0(vd->vdev_top_zap);
18475cabbc6bSPrashanth Sreenivasa /* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
18485cabbc6bSPrashanth Sreenivasa ASSERT0(vd->vdev_leaf_zap);
18495cabbc6bSPrashanth Sreenivasa
18505cabbc6bSPrashanth Sreenivasa (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
18515cabbc6bSPrashanth Sreenivasa
18525cabbc6bSPrashanth Sreenivasa if (list_link_active(&vd->vdev_state_dirty_node))
18535cabbc6bSPrashanth Sreenivasa vdev_state_clean(vd);
18545cabbc6bSPrashanth Sreenivasa if (list_link_active(&vd->vdev_config_dirty_node))
18555cabbc6bSPrashanth Sreenivasa vdev_config_clean(vd);
18565cabbc6bSPrashanth Sreenivasa
1857555d674dSSerapheim Dimitropoulos ASSERT0(vd->vdev_stat.vs_alloc);
1858555d674dSSerapheim Dimitropoulos
18595cabbc6bSPrashanth Sreenivasa /*
18605cabbc6bSPrashanth Sreenivasa * Clean up the vdev namespace.
18615cabbc6bSPrashanth Sreenivasa */
18625cabbc6bSPrashanth Sreenivasa vdev_remove_make_hole_and_free(vd);
18635cabbc6bSPrashanth Sreenivasa
18645cabbc6bSPrashanth Sreenivasa if (ev != NULL)
18655cabbc6bSPrashanth Sreenivasa spa_event_post(ev);
18665cabbc6bSPrashanth Sreenivasa
18675cabbc6bSPrashanth Sreenivasa return (0);
18685cabbc6bSPrashanth Sreenivasa }
18695cabbc6bSPrashanth Sreenivasa
18705cabbc6bSPrashanth Sreenivasa static int
18715cabbc6bSPrashanth Sreenivasa spa_vdev_remove_top_check(vdev_t *vd)
18725cabbc6bSPrashanth Sreenivasa {
18735cabbc6bSPrashanth Sreenivasa spa_t *spa = vd->vdev_spa;
18745cabbc6bSPrashanth Sreenivasa
18755cabbc6bSPrashanth Sreenivasa if (vd != vd->vdev_top)
18765cabbc6bSPrashanth Sreenivasa return (SET_ERROR(ENOTSUP));
18775cabbc6bSPrashanth Sreenivasa
18785cabbc6bSPrashanth Sreenivasa if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
18795cabbc6bSPrashanth Sreenivasa return (SET_ERROR(ENOTSUP));
18805cabbc6bSPrashanth Sreenivasa
1881663207adSDon Brady /* available space in the pool's normal class */
1882663207adSDon Brady uint64_t available = dsl_dir_space_available(
1883663207adSDon Brady spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);
1884663207adSDon Brady
1885663207adSDon Brady metaslab_class_t *mc = vd->vdev_mg->mg_class;
1886663207adSDon Brady
1887663207adSDon Brady /*
1888663207adSDon Brady * When removing a vdev from an allocation class that has
1889663207adSDon Brady * remaining vdevs, include available space from the class.
1890663207adSDon Brady */
1891663207adSDon Brady if (mc != spa_normal_class(spa) && mc->mc_groups > 1) {
1892663207adSDon Brady uint64_t class_avail = metaslab_class_get_space(mc) -
1893663207adSDon Brady metaslab_class_get_alloc(mc);
1894663207adSDon Brady
1895663207adSDon Brady /* add class space, adjusted for overhead */
1896663207adSDon Brady available += (class_avail * 94) / 100;
1897663207adSDon Brady }
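	/*
	 * Worked example with assumed numbers: removing one of two
	 * special-class vdevs whose class holds 100G of space with 40G
	 * allocated leaves class_avail == 60G, of which
	 * (60G * 94) / 100 ~= 56.4G is credited to "available"; the 6%
	 * haircut is a conservative allowance for metaslab overhead in
	 * the surviving class vdevs.
	 */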
1898663207adSDon Brady
18995cabbc6bSPrashanth Sreenivasa /*
19005cabbc6bSPrashanth Sreenivasa * There has to be enough free space to remove the
19015cabbc6bSPrashanth Sreenivasa * device and leave double the "slop" space (i.e. we
19025cabbc6bSPrashanth Sreenivasa * must leave at least 3% of the pool free, in addition to
19035cabbc6bSPrashanth Sreenivasa * the normal slop space).
19045cabbc6bSPrashanth Sreenivasa */
1905663207adSDon Brady if (available < vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
19065cabbc6bSPrashanth Sreenivasa return (SET_ERROR(ENOSPC));
19075cabbc6bSPrashanth Sreenivasa }
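	/*
	 * Continuing with assumed numbers: a vdev reporting
	 * vs_dspace == 1T against spa_get_slop_space() == 32G needs at
	 * least 1T + 32G of "available" space, i.e. a second helping of
	 * slop on top of the one dsl_dir_space_available() already
	 * withheld, hence the "double the slop" wording above.
	 */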
19085cabbc6bSPrashanth Sreenivasa
19095cabbc6bSPrashanth Sreenivasa /*
19105cabbc6bSPrashanth Sreenivasa * There cannot be a removal already in progress.
19115cabbc6bSPrashanth Sreenivasa */
19125cabbc6bSPrashanth Sreenivasa if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
19135cabbc6bSPrashanth Sreenivasa return (SET_ERROR(EBUSY));
19145cabbc6bSPrashanth Sreenivasa
19155cabbc6bSPrashanth Sreenivasa /*
19165cabbc6bSPrashanth Sreenivasa * The device must have all its data.
19175cabbc6bSPrashanth Sreenivasa */
19185cabbc6bSPrashanth Sreenivasa if (!vdev_dtl_empty(vd, DTL_MISSING) ||
19195cabbc6bSPrashanth Sreenivasa !vdev_dtl_empty(vd, DTL_OUTAGE))
19205cabbc6bSPrashanth Sreenivasa return (SET_ERROR(EBUSY));
19215cabbc6bSPrashanth Sreenivasa
19225cabbc6bSPrashanth Sreenivasa /*
19235cabbc6bSPrashanth Sreenivasa * The device must be healthy.
19245cabbc6bSPrashanth Sreenivasa */
19255cabbc6bSPrashanth Sreenivasa if (!vdev_readable(vd))
19265cabbc6bSPrashanth Sreenivasa return (SET_ERROR(EIO));
19275cabbc6bSPrashanth Sreenivasa
19285cabbc6bSPrashanth Sreenivasa /*
19295cabbc6bSPrashanth Sreenivasa * All vdevs in normal class must have the same ashift.
19305cabbc6bSPrashanth Sreenivasa */
19315cabbc6bSPrashanth Sreenivasa if (spa->spa_max_ashift != spa->spa_min_ashift) {
19325cabbc6bSPrashanth Sreenivasa return (SET_ERROR(EINVAL));
19335cabbc6bSPrashanth Sreenivasa }
19345cabbc6bSPrashanth Sreenivasa
19355cabbc6bSPrashanth Sreenivasa /*
19365cabbc6bSPrashanth Sreenivasa * In addition to the ashift check above, no concrete top-level
19375cabbc6bSPrashanth Sreenivasa * vdev may be raidz, and mirrors may contain only leaf vdevs.
19385cabbc6bSPrashanth Sreenivasa */
19395cabbc6bSPrashanth Sreenivasa vdev_t *rvd = spa->spa_root_vdev;
19405cabbc6bSPrashanth Sreenivasa int num_indirect = 0;
19415cabbc6bSPrashanth Sreenivasa for (uint64_t id = 0; id < rvd->vdev_children; id++) {
19425cabbc6bSPrashanth Sreenivasa vdev_t *cvd = rvd->vdev_child[id];
19435cabbc6bSPrashanth Sreenivasa if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
19445cabbc6bSPrashanth Sreenivasa ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
19455cabbc6bSPrashanth Sreenivasa if (cvd->vdev_ops == &vdev_indirect_ops)
19465cabbc6bSPrashanth Sreenivasa num_indirect++;
19475cabbc6bSPrashanth Sreenivasa if (!vdev_is_concrete(cvd))
19485cabbc6bSPrashanth Sreenivasa continue;
19495cabbc6bSPrashanth Sreenivasa if (cvd->vdev_ops == &vdev_raidz_ops)
19505cabbc6bSPrashanth Sreenivasa return (SET_ERROR(EINVAL));
19515cabbc6bSPrashanth Sreenivasa /*
19525cabbc6bSPrashanth Sreenivasa * The mirror must consist of leaf vdevs only.
19535cabbc6bSPrashanth Sreenivasa */
19545cabbc6bSPrashanth Sreenivasa if (cvd->vdev_ops == &vdev_mirror_ops) {
19555cabbc6bSPrashanth Sreenivasa for (uint64_t cid = 0;
19565cabbc6bSPrashanth Sreenivasa cid < cvd->vdev_children; cid++) {
19575cabbc6bSPrashanth Sreenivasa vdev_t *tmp = cvd->vdev_child[cid];
19585cabbc6bSPrashanth Sreenivasa if (!tmp->vdev_ops->vdev_op_leaf)
19595cabbc6bSPrashanth Sreenivasa return (SET_ERROR(EINVAL));
19605cabbc6bSPrashanth Sreenivasa }
19615cabbc6bSPrashanth Sreenivasa }
19625cabbc6bSPrashanth Sreenivasa }
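	/*
	 * Hypothetical configurations against the loop above:
	 *
	 *	mirror(A,B) + mirror(C,D)	passes
	 *	any concrete raidz child	fails with EINVAL
	 *	mirror(mirror(A,B), C)		fails with EINVAL, since a
	 *					mirror child must be a leaf
	 */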
19635cabbc6bSPrashanth Sreenivasa
19645cabbc6bSPrashanth Sreenivasa return (0);
19655cabbc6bSPrashanth Sreenivasa }
19665cabbc6bSPrashanth Sreenivasa
19675cabbc6bSPrashanth Sreenivasa /*
19685cabbc6bSPrashanth Sreenivasa * Initiate removal of a top-level vdev, reducing the total space in the pool.
19695cabbc6bSPrashanth Sreenivasa * The config lock is held for the specified TXG. Once initiated,
19705cabbc6bSPrashanth Sreenivasa * evacuation of all allocated space (copying it to other vdevs) happens
19715cabbc6bSPrashanth Sreenivasa * in the background (see spa_vdev_remove_thread()), and can be canceled
19725cabbc6bSPrashanth Sreenivasa * (see spa_vdev_remove_cancel()). If successful, the vdev will
19735cabbc6bSPrashanth Sreenivasa * be transformed to an indirect vdev (see spa_vdev_remove_complete()).
19745cabbc6bSPrashanth Sreenivasa */
19755cabbc6bSPrashanth Sreenivasa static int
19765cabbc6bSPrashanth Sreenivasa spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
19775cabbc6bSPrashanth Sreenivasa {
19785cabbc6bSPrashanth Sreenivasa spa_t *spa = vd->vdev_spa;
19795cabbc6bSPrashanth Sreenivasa int error;
19805cabbc6bSPrashanth Sreenivasa
19815cabbc6bSPrashanth Sreenivasa /*
19825cabbc6bSPrashanth Sreenivasa * Check for errors up-front, so that we don't waste time
19835cabbc6bSPrashanth Sreenivasa * passivating the metaslab group and clearing the ZIL if there
19845cabbc6bSPrashanth Sreenivasa * are errors.
19855cabbc6bSPrashanth Sreenivasa */
19865cabbc6bSPrashanth Sreenivasa error = spa_vdev_remove_top_check(vd);
19875cabbc6bSPrashanth Sreenivasa if (error != 0)
19885cabbc6bSPrashanth Sreenivasa return (error);
19895cabbc6bSPrashanth Sreenivasa
19905cabbc6bSPrashanth Sreenivasa /*
19915cabbc6bSPrashanth Sreenivasa * Stop allocating from this vdev. Note that we must check
19925cabbc6bSPrashanth Sreenivasa * that this is not the only device in the pool before
19935cabbc6bSPrashanth Sreenivasa * passivating, otherwise we will not be able to make
19945cabbc6bSPrashanth Sreenivasa * progress because we can't allocate from any vdevs.
19955cabbc6bSPrashanth Sreenivasa * The above check for sufficient free space serves this
19965cabbc6bSPrashanth Sreenivasa * purpose.
19975cabbc6bSPrashanth Sreenivasa */
19985cabbc6bSPrashanth Sreenivasa metaslab_group_t *mg = vd->vdev_mg;
19995cabbc6bSPrashanth Sreenivasa metaslab_group_passivate(mg);
20005cabbc6bSPrashanth Sreenivasa
20015cabbc6bSPrashanth Sreenivasa /*
20025cabbc6bSPrashanth Sreenivasa * Wait for the youngest allocations and frees to sync,
20035cabbc6bSPrashanth Sreenivasa * and then wait for the deferral of those frees to finish.
20045cabbc6bSPrashanth Sreenivasa */
20055cabbc6bSPrashanth Sreenivasa spa_vdev_config_exit(spa, NULL,
20065cabbc6bSPrashanth Sreenivasa *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
20075cabbc6bSPrashanth Sreenivasa
20085cabbc6bSPrashanth Sreenivasa /*
20095cabbc6bSPrashanth Sreenivasa * We must ensure that no "stubby" log blocks are allocated
20105cabbc6bSPrashanth Sreenivasa * on the device to be removed. These blocks could be
20115cabbc6bSPrashanth Sreenivasa * written at any time, including while we are in the middle
20125cabbc6bSPrashanth Sreenivasa * of copying them.
20135cabbc6bSPrashanth Sreenivasa */
20145cabbc6bSPrashanth Sreenivasa error = spa_reset_logs(spa);
20155cabbc6bSPrashanth Sreenivasa
2016094e47e9SGeorge Wilson /*
2017084fd14fSBrian Behlendorf * We stop any initializing and TRIM that is currently in progress
2018084fd14fSBrian Behlendorf * but leave the state as "active". This will allow the process to
2019084fd14fSBrian Behlendorf * resume if the removal is canceled sometime later.
2020094e47e9SGeorge Wilson */
2021094e47e9SGeorge Wilson vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);
2022084fd14fSBrian Behlendorf vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE);
2023084fd14fSBrian Behlendorf vdev_autotrim_stop_wait(vd);
2024094e47e9SGeorge Wilson
20255cabbc6bSPrashanth Sreenivasa *txg = spa_vdev_config_enter(spa);
20265cabbc6bSPrashanth Sreenivasa
20275cabbc6bSPrashanth Sreenivasa /*
20285cabbc6bSPrashanth Sreenivasa * Things might have changed while the config lock was dropped
20295cabbc6bSPrashanth Sreenivasa * (e.g. space usage). Check for errors again.
20305cabbc6bSPrashanth Sreenivasa */
20315cabbc6bSPrashanth Sreenivasa if (error == 0)
20325cabbc6bSPrashanth Sreenivasa error = spa_vdev_remove_top_check(vd);
20335cabbc6bSPrashanth Sreenivasa
20345cabbc6bSPrashanth Sreenivasa if (error != 0) {
20355cabbc6bSPrashanth Sreenivasa metaslab_group_activate(mg);
2036094e47e9SGeorge Wilson spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
2037084fd14fSBrian Behlendorf spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
2038084fd14fSBrian Behlendorf spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
20395cabbc6bSPrashanth Sreenivasa return (error);
20405cabbc6bSPrashanth Sreenivasa }
20415cabbc6bSPrashanth Sreenivasa
20425cabbc6bSPrashanth Sreenivasa vd->vdev_removing = B_TRUE;
20435cabbc6bSPrashanth Sreenivasa
20445cabbc6bSPrashanth Sreenivasa vdev_dirty_leaves(vd, VDD_DTL, *txg);
20455cabbc6bSPrashanth Sreenivasa vdev_config_dirty(vd);
20465cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
20475cabbc6bSPrashanth Sreenivasa dsl_sync_task_nowait(spa->spa_dsl_pool,
20485cabbc6bSPrashanth Sreenivasa vdev_remove_initiate_sync,
20493a4b1be9SMatthew Ahrens (void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx);
20505cabbc6bSPrashanth Sreenivasa dmu_tx_commit(tx);
20515cabbc6bSPrashanth Sreenivasa
20525cabbc6bSPrashanth Sreenivasa return (0);
20535cabbc6bSPrashanth Sreenivasa }
20545cabbc6bSPrashanth Sreenivasa
20555cabbc6bSPrashanth Sreenivasa /*
20565cabbc6bSPrashanth Sreenivasa * Remove a device from the pool.
20575cabbc6bSPrashanth Sreenivasa *
20585cabbc6bSPrashanth Sreenivasa * Removing a device from the vdev namespace requires several steps
20595cabbc6bSPrashanth Sreenivasa * and can take a significant amount of time. As a result we use
20605cabbc6bSPrashanth Sreenivasa * the spa_vdev_config_[enter/exit] functions which allow us to
20615cabbc6bSPrashanth Sreenivasa * grab and release the spa_config_lock while still holding the namespace
20625cabbc6bSPrashanth Sreenivasa * lock. During each step the configuration is synced out.
20635cabbc6bSPrashanth Sreenivasa */
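/*
 * A minimal sketch of that locking pattern (hypothetical caller, not
 * code from this file):
 *
 *	uint64_t txg = spa_vdev_enter(spa);	    namespace + SCL_ALL
 *	...mutate vdev state for one step...
 *	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);	sync it out
 *	txg = spa_vdev_config_enter(spa);	    re-grab for next step
 *	...
 *	(void) spa_vdev_exit(spa, NULL, txg, 0);    final sync, drop both
 */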
20645cabbc6bSPrashanth Sreenivasa int
20655cabbc6bSPrashanth Sreenivasa spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
20665cabbc6bSPrashanth Sreenivasa {
20675cabbc6bSPrashanth Sreenivasa vdev_t *vd;
20685cabbc6bSPrashanth Sreenivasa nvlist_t **spares, **l2cache, *nv;
20695cabbc6bSPrashanth Sreenivasa uint64_t txg = 0;
20705cabbc6bSPrashanth Sreenivasa uint_t nspares, nl2cache;
20715cabbc6bSPrashanth Sreenivasa int error = 0;
20725cabbc6bSPrashanth Sreenivasa boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
20735cabbc6bSPrashanth Sreenivasa sysevent_t *ev = NULL;
20745cabbc6bSPrashanth Sreenivasa
20755cabbc6bSPrashanth Sreenivasa ASSERT(spa_writeable(spa));
20765cabbc6bSPrashanth Sreenivasa
20775cabbc6bSPrashanth Sreenivasa if (!locked)
20785cabbc6bSPrashanth Sreenivasa txg = spa_vdev_enter(spa);
20795cabbc6bSPrashanth Sreenivasa
208086714001SSerapheim Dimitropoulos ASSERT(MUTEX_HELD(&spa_namespace_lock));
208186714001SSerapheim Dimitropoulos if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
208286714001SSerapheim Dimitropoulos error = (spa_has_checkpoint(spa)) ?
208386714001SSerapheim Dimitropoulos ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
208486714001SSerapheim Dimitropoulos
208586714001SSerapheim Dimitropoulos if (!locked)
208686714001SSerapheim Dimitropoulos return (spa_vdev_exit(spa, NULL, txg, error));
208786714001SSerapheim Dimitropoulos
208886714001SSerapheim Dimitropoulos return (error);
208986714001SSerapheim Dimitropoulos }
209086714001SSerapheim Dimitropoulos
20915cabbc6bSPrashanth Sreenivasa vd = spa_lookup_by_guid(spa, guid, B_FALSE);
20925cabbc6bSPrashanth Sreenivasa
20935cabbc6bSPrashanth Sreenivasa if (spa->spa_spares.sav_vdevs != NULL &&
20945cabbc6bSPrashanth Sreenivasa nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
20955cabbc6bSPrashanth Sreenivasa ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
20965cabbc6bSPrashanth Sreenivasa (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
20975cabbc6bSPrashanth Sreenivasa /*
20985cabbc6bSPrashanth Sreenivasa * Only remove the hot spare if it's not currently in use
20995cabbc6bSPrashanth Sreenivasa * in this pool.
21005cabbc6bSPrashanth Sreenivasa */
21015cabbc6bSPrashanth Sreenivasa if (vd == NULL || unspare) {
21025cabbc6bSPrashanth Sreenivasa char *nvstr = fnvlist_lookup_string(nv,
21035cabbc6bSPrashanth Sreenivasa ZPOOL_CONFIG_PATH);
21045cabbc6bSPrashanth Sreenivasa spa_history_log_internal(spa, "vdev remove", NULL,
21055cabbc6bSPrashanth Sreenivasa "%s vdev (%s) %s", spa_name(spa),
21065cabbc6bSPrashanth Sreenivasa VDEV_TYPE_SPARE, nvstr);
21075cabbc6bSPrashanth Sreenivasa if (vd == NULL)
21085cabbc6bSPrashanth Sreenivasa vd = spa_lookup_by_guid(spa, guid, B_TRUE);
21095cabbc6bSPrashanth Sreenivasa ev = spa_event_create(spa, vd, NULL,
21105cabbc6bSPrashanth Sreenivasa ESC_ZFS_VDEV_REMOVE_AUX);
21115cabbc6bSPrashanth Sreenivasa spa_vdev_remove_aux(spa->spa_spares.sav_config,
21125cabbc6bSPrashanth Sreenivasa ZPOOL_CONFIG_SPARES, spares, nspares, nv);
21135cabbc6bSPrashanth Sreenivasa spa_load_spares(spa);
21145cabbc6bSPrashanth Sreenivasa spa->spa_spares.sav_sync = B_TRUE;
21155cabbc6bSPrashanth Sreenivasa } else {
21165cabbc6bSPrashanth Sreenivasa error = SET_ERROR(EBUSY);
21175cabbc6bSPrashanth Sreenivasa }
21185cabbc6bSPrashanth Sreenivasa } else if (spa->spa_l2cache.sav_vdevs != NULL &&
21195cabbc6bSPrashanth Sreenivasa nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
21205cabbc6bSPrashanth Sreenivasa ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
21215cabbc6bSPrashanth Sreenivasa (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
21225cabbc6bSPrashanth Sreenivasa char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
21235cabbc6bSPrashanth Sreenivasa spa_history_log_internal(spa, "vdev remove", NULL,
21245cabbc6bSPrashanth Sreenivasa "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr);
21255cabbc6bSPrashanth Sreenivasa /*
21265cabbc6bSPrashanth Sreenivasa * Cache devices can always be removed.
21275cabbc6bSPrashanth Sreenivasa */
21285cabbc6bSPrashanth Sreenivasa vd = spa_lookup_by_guid(spa, guid, B_TRUE);
21295cabbc6bSPrashanth Sreenivasa ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
21305cabbc6bSPrashanth Sreenivasa spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
21315cabbc6bSPrashanth Sreenivasa ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
21325cabbc6bSPrashanth Sreenivasa spa_load_l2cache(spa);
21335cabbc6bSPrashanth Sreenivasa spa->spa_l2cache.sav_sync = B_TRUE;
21345cabbc6bSPrashanth Sreenivasa } else if (vd != NULL && vd->vdev_islog) {
21355cabbc6bSPrashanth Sreenivasa ASSERT(!locked);
21365cabbc6bSPrashanth Sreenivasa error = spa_vdev_remove_log(vd, &txg);
21375cabbc6bSPrashanth Sreenivasa } else if (vd != NULL) {
21385cabbc6bSPrashanth Sreenivasa ASSERT(!locked);
21395cabbc6bSPrashanth Sreenivasa error = spa_vdev_remove_top(vd, &txg);
21405cabbc6bSPrashanth Sreenivasa } else {
21415cabbc6bSPrashanth Sreenivasa /*
21425cabbc6bSPrashanth Sreenivasa * There is no vdev of any kind with the specified guid.
21435cabbc6bSPrashanth Sreenivasa */
21445cabbc6bSPrashanth Sreenivasa error = SET_ERROR(ENOENT);
21455cabbc6bSPrashanth Sreenivasa }
21465cabbc6bSPrashanth Sreenivasa
21475cabbc6bSPrashanth Sreenivasa if (!locked)
21485cabbc6bSPrashanth Sreenivasa error = spa_vdev_exit(spa, NULL, txg, error);
21495cabbc6bSPrashanth Sreenivasa
21505cabbc6bSPrashanth Sreenivasa if (ev != NULL) {
21515cabbc6bSPrashanth Sreenivasa if (error != 0) {
21525cabbc6bSPrashanth Sreenivasa spa_event_discard(ev);
21535cabbc6bSPrashanth Sreenivasa } else {
21545cabbc6bSPrashanth Sreenivasa spa_event_post(ev);
21555cabbc6bSPrashanth Sreenivasa }
21565cabbc6bSPrashanth Sreenivasa }
21575cabbc6bSPrashanth Sreenivasa
21585cabbc6bSPrashanth Sreenivasa return (error);
21595cabbc6bSPrashanth Sreenivasa }
21605cabbc6bSPrashanth Sreenivasa
21615cabbc6bSPrashanth Sreenivasa int
21625cabbc6bSPrashanth Sreenivasa spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
21635cabbc6bSPrashanth Sreenivasa {
21645cabbc6bSPrashanth Sreenivasa prs->prs_state = spa->spa_removing_phys.sr_state;
21655cabbc6bSPrashanth Sreenivasa
21665cabbc6bSPrashanth Sreenivasa if (prs->prs_state == DSS_NONE)
21675cabbc6bSPrashanth Sreenivasa return (SET_ERROR(ENOENT));
21685cabbc6bSPrashanth Sreenivasa
21695cabbc6bSPrashanth Sreenivasa prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
21705cabbc6bSPrashanth Sreenivasa prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
21715cabbc6bSPrashanth Sreenivasa prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
21725cabbc6bSPrashanth Sreenivasa prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
21735cabbc6bSPrashanth Sreenivasa prs->prs_copied = spa->spa_removing_phys.sr_copied;
21745cabbc6bSPrashanth Sreenivasa
21755cabbc6bSPrashanth Sreenivasa if (spa->spa_vdev_removal != NULL) {
21765cabbc6bSPrashanth Sreenivasa for (int i = 0; i < TXG_SIZE; i++) {
21775cabbc6bSPrashanth Sreenivasa prs->prs_copied +=
21785cabbc6bSPrashanth Sreenivasa spa->spa_vdev_removal->svr_bytes_done[i];
21795cabbc6bSPrashanth Sreenivasa }
21805cabbc6bSPrashanth Sreenivasa }
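	/*
	 * Sketch (assuming TXG_SIZE == 4): sr_copied is only advanced
	 * when a txg syncs, so bytes copied during the still-open,
	 * quiescing, and syncing txgs live in svr_bytes_done[0..3];
	 * adding all four slots folds that in-flight progress into the
	 * reported total.
	 */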
21815cabbc6bSPrashanth Sreenivasa
21825cabbc6bSPrashanth Sreenivasa prs->prs_mapping_memory = 0;
21835cabbc6bSPrashanth Sreenivasa uint64_t indirect_vdev_id =
21845cabbc6bSPrashanth Sreenivasa spa->spa_removing_phys.sr_prev_indirect_vdev;
21855cabbc6bSPrashanth Sreenivasa while (indirect_vdev_id != -1) {
21865cabbc6bSPrashanth Sreenivasa vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
21875cabbc6bSPrashanth Sreenivasa vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
21885cabbc6bSPrashanth Sreenivasa vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
21895cabbc6bSPrashanth Sreenivasa
21905cabbc6bSPrashanth Sreenivasa ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
21915cabbc6bSPrashanth Sreenivasa prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
21925cabbc6bSPrashanth Sreenivasa indirect_vdev_id = vic->vic_prev_indirect_vdev;
21935cabbc6bSPrashanth Sreenivasa }
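	/*
	 * Chain-walk example (hypothetical vdev ids): with
	 * sr_prev_indirect_vdev == 5, vdev 5's vic_prev_indirect_vdev
	 * == 2, and vdev 2's == -1, the loop above visits vdevs 5 and
	 * 2, newest removal first, accumulating each indirect mapping's
	 * in-core size into prs_mapping_memory until the sentinel -1
	 * ends the walk.
	 */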
21945cabbc6bSPrashanth Sreenivasa
21955cabbc6bSPrashanth Sreenivasa return (0);
21965cabbc6bSPrashanth Sreenivasa }