/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright (c) 2019 by Lawrence Livermore National Security, LLC.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/refcount.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>

/*
 * TRIM is a feature which is used to notify an SSD that some previously
 * written space is no longer allocated by the pool. This is useful because
 * writes to an SSD must be performed to blocks which have first been erased.
 * Ensuring the SSD always has a supply of erased blocks for new writes
 * helps prevent its performance from deteriorating.
 *
 * There are two supported TRIM methods: manual and automatic.
 *
 * Manual TRIM:
 *
 * A manual TRIM is initiated by running the 'zpool trim' command. A single
 * 'vdev_trim' thread is created for each leaf vdev, and it is responsible for
 * managing that vdev's TRIM process. This involves iterating over all the
 * metaslabs, calculating the unallocated space ranges, and then issuing the
 * required TRIM I/Os.
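 *
 * The ranges to be trimmed are taken from each metaslab's ms_allocatable
 * tree, i.e. every region which is currently free, regardless of when it
 * was freed (contrast this with automatic TRIM below, which only operates
 * on the recently freed blocks tracked in ms_trim).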
 *
 * While a metaslab is being actively trimmed it is not eligible to perform
 * new allocations. After traversing all of the metaslabs the thread is
 * terminated. Finally, both the requested options and current progress of
 * the TRIM are regularly written to the pool. This allows the TRIM to be
 * suspended and resumed as needed.
 *
 * Automatic TRIM:
 *
 * An automatic TRIM is enabled by setting the 'autotrim' pool property
 * to 'on'. When enabled, a 'vdev_autotrim' thread is created for each
 * top-level (not leaf) vdev in the pool. These threads perform the same
 * core TRIM process as a manual TRIM, but with a few key differences.
 *
 * 1) Automatic TRIM happens continuously in the background and operates
 *    solely on recently freed blocks (ms_trim not ms_allocatable).
 *
 * 2) Each thread is associated with a top-level (not leaf) vdev. This has
 *    the benefit of simplifying the threading model, it makes it easier
 *    to coordinate administrative commands, and it ensures only a single
 *    metaslab is disabled at a time. Unlike manual TRIM, this means each
 *    'vdev_autotrim' thread is responsible for issuing TRIM I/Os for its
 *    children.
 *
 * 3) There is no automatic TRIM progress information stored on disk, nor
 *    is it reported by 'zpool status'.
 *
 * While the automatic TRIM process is highly effective it is more likely
 * than a manual TRIM to encounter tiny ranges. Ranges less than or equal to
 * 'zfs_trim_extent_bytes_min' (32k) are considered too small to efficiently
 * TRIM and are skipped. This means small amounts of freed space may not
 * be automatically trimmed.
 *
 * Furthermore, devices with attached hot spares and devices being actively
 * replaced are skipped. This is done to avoid adding additional stress to
 * a potentially unhealthy device and to minimize the required rebuild time.
 *
 * For this reason it may be beneficial to occasionally manually TRIM a pool
 * even when automatic TRIM is enabled.
 */

/*
 * Maximum size of TRIM I/O, ranges will be chunked into 128MiB lengths.
 */
unsigned int zfs_trim_extent_bytes_max = 128 * 1024 * 1024;

/*
 * Minimum size of TRIM I/O, extents smaller than 32KiB will be skipped.
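 * Note that a secure TRIM overrides this value and lowers the minimum to
 * SPA_MINBLOCKSIZE so that no previously written range is skipped (see
 * vdev_trim_thread() below).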
 */
unsigned int zfs_trim_extent_bytes_min = 32 * 1024;

/*
 * Skip uninitialized metaslabs during the TRIM process. This option is
 * useful for pools constructed from large thinly-provisioned devices where
 * TRIM operations are slow. As a pool ages an increasing fraction of
 * the pool's metaslabs will be initialized, progressively degrading the
 * usefulness of this option. This setting is stored when starting a
 * manual TRIM and will persist for the duration of the requested TRIM.
 */
unsigned int zfs_trim_metaslab_skip = 0;

/*
 * Maximum number of queued TRIM I/Os per leaf vdev. The number of
 * concurrent TRIM I/Os issued to the device is controlled by the
 * zfs_vdev_trim_min_active and zfs_vdev_trim_max_active module options.
 */
unsigned int zfs_trim_queue_limit = 10;

/*
 * The minimum number of transaction groups between automatic trims of a
 * metaslab. This setting represents a trade-off between issuing more
 * efficient TRIM operations, by allowing them to be aggregated longer,
 * and issuing them promptly so the trimmed space is available. Note
 * that this value is a minimum; metaslabs can be trimmed less frequently
 * when there are a large number of ranges which need to be trimmed.
 *
 * Increasing this value will allow frees to be aggregated for a longer
 * time. This can result in larger TRIM operations, and increased memory
 * usage in order to track the ranges to be trimmed. Decreasing this value
 * has the opposite effect. The default value of 32 was determined through
 * testing to be a reasonable compromise.
 */
unsigned int zfs_trim_txg_batch = 32;

/*
 * trim_args is a control structure which describes how a leaf vdev
 * should be trimmed. The core elements are the vdev, the metaslab being
 * trimmed and a range tree containing the extents to TRIM. All provided
 * ranges must be within the metaslab.
 */
typedef struct trim_args {
        /*
         * These fields are set by the caller of vdev_trim_ranges().
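         * Both the manual 'vdev_trim' thread and the 'vdev_autotrim' thread
         * build a trim_args_t on their stack and populate these fields
         * before calling vdev_trim_ranges().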
         */
        vdev_t          *trim_vdev;             /* Leaf vdev to TRIM */
        metaslab_t      *trim_msp;              /* Disabled metaslab */
        range_tree_t    *trim_tree;             /* TRIM ranges (in metaslab) */
        trim_type_t     trim_type;              /* Manual or auto TRIM */
        uint64_t        trim_extent_bytes_max;  /* Maximum TRIM I/O size */
        uint64_t        trim_extent_bytes_min;  /* Minimum TRIM I/O size */
        enum trim_flag  trim_flags;             /* TRIM flags (secure) */

        /*
         * These fields are updated by vdev_trim_ranges().
         */
        hrtime_t        trim_start_time;        /* Start time */
        uint64_t        trim_bytes_done;        /* Bytes trimmed */
} trim_args_t;

/*
 * Determines whether a vdev_trim_thread() should be stopped.
 */
static boolean_t
vdev_trim_should_stop(vdev_t *vd)
{
        return (vd->vdev_trim_exit_wanted || !vdev_writeable(vd) ||
            vd->vdev_detached || vd->vdev_top->vdev_removing);
}

/*
 * Determines whether a vdev_autotrim_thread() should be stopped.
 */
static boolean_t
vdev_autotrim_should_stop(vdev_t *tvd)
{
        return (tvd->vdev_autotrim_exit_wanted ||
            !vdev_writeable(tvd) || tvd->vdev_removing ||
            spa_get_autotrim(tvd->vdev_spa) == SPA_AUTOTRIM_OFF);
}

/*
 * The sync task for updating the on-disk state of a manual TRIM. This
 * is scheduled by vdev_trim_change_state().
 */
static void
vdev_trim_zap_update_sync(void *arg, dmu_tx_t *tx)
{
        /*
         * We pass in the guid instead of the vdev_t since the vdev may
         * have been freed prior to the sync task being processed. This
         * happens when a vdev is detached as we call spa_config_vdev_exit(),
         * stop the trimming thread, schedule the sync task, and free
         * the vdev. Later when the scheduled sync task is invoked, it would
         * find that the vdev has been freed.
         */
        uint64_t guid = *(uint64_t *)arg;
        uint64_t txg = dmu_tx_get_txg(tx);
        kmem_free(arg, sizeof (uint64_t));

        vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
        if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
                return;

        uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];
        vd->vdev_trim_offset[txg & TXG_MASK] = 0;

        VERIFY3U(vd->vdev_leaf_zap, !=, 0);

        objset_t *mos = vd->vdev_spa->spa_meta_objset;

        if (last_offset > 0 || vd->vdev_trim_last_offset == UINT64_MAX) {

                if (vd->vdev_trim_last_offset == UINT64_MAX)
                        last_offset = 0;

                vd->vdev_trim_last_offset = last_offset;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
                    sizeof (last_offset), 1, &last_offset, tx));
        }

        if (vd->vdev_trim_action_time > 0) {
                uint64_t val = (uint64_t)vd->vdev_trim_action_time;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_TRIM_ACTION_TIME, sizeof (val),
                    1, &val, tx));
        }

        if (vd->vdev_trim_rate > 0) {
                uint64_t rate = (uint64_t)vd->vdev_trim_rate;

                if (rate == UINT64_MAX)
                        rate = 0;

                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_TRIM_RATE, sizeof (rate), 1, &rate, tx));
        }

        uint64_t partial = vd->vdev_trim_partial;
        if (partial == UINT64_MAX)
                partial = 0;

        VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
            sizeof (partial), 1, &partial, tx));

        uint64_t secure = vd->vdev_trim_secure;
        if (secure == UINT64_MAX)
                secure = 0;

        VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
            sizeof (secure), 1, &secure, tx));

        uint64_t trim_state = vd->vdev_trim_state;
        VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
            sizeof (trim_state), 1, &trim_state, tx));
}

/*
 * Update the on-disk state of a manual TRIM. This is called to request
 * that a TRIM be started/suspended/canceled, or to change one of the
 * TRIM options (partial, secure, rate).
 */
static void
vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
    uint64_t rate, boolean_t partial, boolean_t secure)
{
        ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
        spa_t *spa = vd->vdev_spa;

        if (new_state == vd->vdev_trim_state)
                return;

        /*
         * Copy the vd's guid, this will be freed by the sync task.
         */
        uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
        *guid = vd->vdev_guid;

        /*
         * If we're suspending, then preserve the original start time.
         */
        if (vd->vdev_trim_state != VDEV_TRIM_SUSPENDED) {
                vd->vdev_trim_action_time = gethrestime_sec();
        }

        /*
         * If we're activating, then preserve the requested rate and trim
         * method. Setting the last offset and rate to UINT64_MAX is used
         * as a sentinel to indicate they should be reset to default values.
         */
        if (new_state == VDEV_TRIM_ACTIVE) {
                if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE ||
                    vd->vdev_trim_state == VDEV_TRIM_CANCELED) {
                        vd->vdev_trim_last_offset = UINT64_MAX;
                        vd->vdev_trim_rate = UINT64_MAX;
                        vd->vdev_trim_partial = UINT64_MAX;
                        vd->vdev_trim_secure = UINT64_MAX;
                }

                if (rate != 0)
                        vd->vdev_trim_rate = rate;

                if (partial != 0)
                        vd->vdev_trim_partial = partial;

                if (secure != 0)
                        vd->vdev_trim_secure = secure;
        }

        boolean_t resumed = !!(vd->vdev_trim_state == VDEV_TRIM_SUSPENDED);
        vd->vdev_trim_state = new_state;

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
        dsl_sync_task_nowait(spa_get_dsl(spa), vdev_trim_zap_update_sync,
            guid, 2, ZFS_SPACE_CHECK_NONE, tx);

        switch (new_state) {
        case VDEV_TRIM_ACTIVE:
                spa_event_notify(spa, vd, NULL,
                    resumed ? ESC_ZFS_TRIM_RESUME : ESC_ZFS_TRIM_START);
                spa_history_log_internal(spa, "trim", tx,
                    "vdev=%s activated", vd->vdev_path);
                break;
        case VDEV_TRIM_SUSPENDED:
                spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_SUSPEND);
                spa_history_log_internal(spa, "trim", tx,
                    "vdev=%s suspended", vd->vdev_path);
                break;
        case VDEV_TRIM_CANCELED:
                spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_CANCEL);
                spa_history_log_internal(spa, "trim", tx,
                    "vdev=%s canceled", vd->vdev_path);
                break;
        case VDEV_TRIM_COMPLETE:
                spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_FINISH);
                spa_history_log_internal(spa, "trim", tx,
                    "vdev=%s complete", vd->vdev_path);
                break;
        default:
                panic("invalid state %llu", (unsigned long long)new_state);
        }

        dmu_tx_commit(tx);
}

/*
 * The zio_done_func_t done callback for each manual TRIM issued. It is
 * responsible for updating the TRIM stats, reissuing failed TRIM I/Os,
 * and limiting the number of in-flight TRIM I/Os.
 */
static void
vdev_trim_cb(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;

        mutex_enter(&vd->vdev_trim_io_lock);
        if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
                /*
                 * The I/O failed because the vdev was unavailable; roll the
                 * last offset back. (This works because spa_sync waits on
                 * spa_txg_zio before it runs sync tasks.)
                 */
                uint64_t *offset =
                    &vd->vdev_trim_offset[zio->io_txg & TXG_MASK];
                *offset = MIN(*offset, zio->io_offset);
        } else {
                if (zio->io_error != 0) {
                        vd->vdev_stat.vs_trim_errors++;
                        /*
                         * spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
                         * 0, 0, 0, 0, 1, zio->io_orig_size);
                         */
                } else {
                        /*
                         * spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
                         * 1, zio->io_orig_size, 0, 0, 0, 0);
                         */
                }

                vd->vdev_trim_bytes_done += zio->io_orig_size;
        }

        ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_MANUAL], >, 0);
        vd->vdev_trim_inflight[TRIM_TYPE_MANUAL]--;
        cv_broadcast(&vd->vdev_trim_io_cv);
        mutex_exit(&vd->vdev_trim_io_lock);

        spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * The zio_done_func_t done callback for each automatic TRIM issued. It
 * is responsible for updating the TRIM stats and limiting the number of
 * in-flight TRIM I/Os. Automatic TRIM I/Os are best effort and are
 * never reissued on failure.
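 * Unlike vdev_trim_cb() there is also no TXG offset to roll back on error,
 * since automatic TRIM progress is never recorded on disk.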
 */
static void
vdev_autotrim_cb(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;

        mutex_enter(&vd->vdev_trim_io_lock);

        if (zio->io_error != 0) {
                vd->vdev_stat.vs_trim_errors++;
                /*
                 * spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
                 * 0, 0, 0, 0, 1, zio->io_orig_size);
                 */
        } else {
                /*
                 * spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
                 * 1, zio->io_orig_size, 0, 0, 0, 0);
                 */

                vd->vdev_autotrim_bytes_done += zio->io_orig_size;
        }

        ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_AUTO], >, 0);
        vd->vdev_trim_inflight[TRIM_TYPE_AUTO]--;
        cv_broadcast(&vd->vdev_trim_io_cv);
        mutex_exit(&vd->vdev_trim_io_lock);

        spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * Returns the average trim rate in bytes/sec for the ta->trim_vdev.
 */
static uint64_t
vdev_trim_calculate_rate(trim_args_t *ta)
{
        return (ta->trim_bytes_done * 1000 /
            (NSEC2MSEC(gethrtime() - ta->trim_start_time) + 1));
}

/*
 * Issues a physical TRIM and takes care of rate limiting (bytes/sec)
 * and number of concurrent TRIM I/Os.
 */
static int
vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
{
        vdev_t *vd = ta->trim_vdev;
        spa_t *spa = vd->vdev_spa;

        mutex_enter(&vd->vdev_trim_io_lock);

        /*
         * Limit manual TRIM I/Os to the requested rate. This does not
         * apply to automatic TRIM since no per vdev rate can be specified.
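         * While the average rate, as reported by vdev_trim_calculate_rate(),
         * exceeds the requested vd->vdev_trim_rate, the loop below simply
         * sleeps in short (10ms) intervals and re-checks.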
         */
        if (ta->trim_type == TRIM_TYPE_MANUAL) {
                while (vd->vdev_trim_rate != 0 && !vdev_trim_should_stop(vd) &&
                    vdev_trim_calculate_rate(ta) > vd->vdev_trim_rate) {
                        cv_timedwait_sig(&vd->vdev_trim_io_cv,
                            &vd->vdev_trim_io_lock, ddi_get_lbolt() +
                            MSEC_TO_TICK(10));
                }
        }
        ta->trim_bytes_done += size;

        /* Limit in-flight trimming I/Os */
        while (vd->vdev_trim_inflight[0] + vd->vdev_trim_inflight[1] >=
            zfs_trim_queue_limit) {
                cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
        }
        vd->vdev_trim_inflight[ta->trim_type]++;
        mutex_exit(&vd->vdev_trim_io_lock);

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
        uint64_t txg = dmu_tx_get_txg(tx);

        spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
        mutex_enter(&vd->vdev_trim_lock);

        if (ta->trim_type == TRIM_TYPE_MANUAL &&
            vd->vdev_trim_offset[txg & TXG_MASK] == 0) {
                uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
                *guid = vd->vdev_guid;

                /* This is the first write of this txg. */
                dsl_sync_task_nowait(spa_get_dsl(spa),
                    vdev_trim_zap_update_sync, guid, 2,
                    ZFS_SPACE_CHECK_RESERVED, tx);
        }

        /*
         * We know the vdev_t will still be around since all consumers of
         * vdev_free must stop the trimming first.
         */
        if ((ta->trim_type == TRIM_TYPE_MANUAL &&
            vdev_trim_should_stop(vd)) ||
            (ta->trim_type == TRIM_TYPE_AUTO &&
            vdev_autotrim_should_stop(vd->vdev_top))) {
                mutex_enter(&vd->vdev_trim_io_lock);
                vd->vdev_trim_inflight[ta->trim_type]--;
                mutex_exit(&vd->vdev_trim_io_lock);
                spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
                mutex_exit(&vd->vdev_trim_lock);
                dmu_tx_commit(tx);
                return (SET_ERROR(EINTR));
        }
        mutex_exit(&vd->vdev_trim_lock);

        if (ta->trim_type == TRIM_TYPE_MANUAL)
                vd->vdev_trim_offset[txg & TXG_MASK] = start + size;

        zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd,
            start, size, ta->trim_type == TRIM_TYPE_MANUAL ?
            vdev_trim_cb : vdev_autotrim_cb, NULL,
            ZIO_PRIORITY_TRIM, ZIO_FLAG_CANFAIL, ta->trim_flags));
        /* vdev_trim_cb and vdev_autotrim_cb release SCL_STATE_ALL */

        dmu_tx_commit(tx);

        return (0);
}

/*
 * Issues TRIM I/Os for all ranges in the provided ta->trim_tree range tree.
 * Additional parameters describing how the TRIM should be performed must
 * be set in the trim_args structure. See the trim_args definition for
 * additional information.
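 *
 * Ranges smaller than ta->trim_extent_bytes_min are skipped outright, and
 * ranges larger than ta->trim_extent_bytes_max are split into chunks of at
 * most that size before being passed to vdev_trim_range().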
 */
static int
vdev_trim_ranges(trim_args_t *ta)
{
        vdev_t *vd = ta->trim_vdev;
        avl_tree_t *rt = &ta->trim_tree->rt_root;
        uint64_t extent_bytes_max = ta->trim_extent_bytes_max;
        uint64_t extent_bytes_min = ta->trim_extent_bytes_min;
        spa_t *spa = vd->vdev_spa;

        ta->trim_start_time = gethrtime();
        ta->trim_bytes_done = 0;

        for (range_seg_t *rs = avl_first(rt); rs != NULL;
            rs = AVL_NEXT(rt, rs)) {
                uint64_t size = rs->rs_end - rs->rs_start;

                if (extent_bytes_min && size < extent_bytes_min) {
                        /*
                         * spa_iostats_trim_add(spa, ta->trim_type,
                         * 0, 0, 1, size, 0, 0);
                         */
                        continue;
                }

                /* Split range into legally-sized physical chunks */
                uint64_t writes_required = ((size - 1) / extent_bytes_max) + 1;

                for (uint64_t w = 0; w < writes_required; w++) {
                        int error;

                        error = vdev_trim_range(ta, VDEV_LABEL_START_SIZE +
                            rs->rs_start + (w * extent_bytes_max),
                            MIN(size - (w * extent_bytes_max),
                            extent_bytes_max));
                        if (error != 0) {
                                return (error);
                        }
                }
        }

        return (0);
}

/*
 * Calculates the completion percentage of a manual TRIM.
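 *
 * The estimate is derived from the free space of each metaslab: metaslabs
 * which lie entirely before vdev_trim_last_offset are counted as done,
 * metaslabs entirely after it only contribute to the estimated total, and
 * the metaslab straddling the offset is loaded and walked for an exact
 * accounting.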
 */
static void
vdev_trim_calculate_progress(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        vd->vdev_trim_bytes_est = 0;
        vd->vdev_trim_bytes_done = 0;

        for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];
                mutex_enter(&msp->ms_lock);

                uint64_t ms_free = msp->ms_size -
                    metaslab_allocated_space(msp);

                if (vd->vdev_top->vdev_ops == &vdev_raidz_ops)
                        ms_free /= vd->vdev_top->vdev_children;

                /*
                 * Convert the metaslab range to a physical range
                 * on our vdev. We use this to determine if we are
                 * in the middle of this metaslab range.
                 */
                range_seg_t logical_rs, physical_rs;
                logical_rs.rs_start = msp->ms_start;
                logical_rs.rs_end = msp->ms_start + msp->ms_size;
                vdev_xlate(vd, &logical_rs, &physical_rs);

                if (vd->vdev_trim_last_offset <= physical_rs.rs_start) {
                        vd->vdev_trim_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                } else if (vd->vdev_trim_last_offset > physical_rs.rs_end) {
                        vd->vdev_trim_bytes_done += ms_free;
                        vd->vdev_trim_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                /*
                 * If we get here, we're in the middle of trimming this
                 * metaslab. Load it and walk the free tree for more
                 * accurate progress estimation.
                 */
                VERIFY0(metaslab_load(msp));

                for (range_seg_t *rs = avl_first(&msp->ms_allocatable->rt_root);
                    rs; rs = AVL_NEXT(&msp->ms_allocatable->rt_root, rs)) {
                        logical_rs.rs_start = rs->rs_start;
                        logical_rs.rs_end = rs->rs_end;
                        vdev_xlate(vd, &logical_rs, &physical_rs);

                        uint64_t size = physical_rs.rs_end -
                            physical_rs.rs_start;
                        vd->vdev_trim_bytes_est += size;
                        if (vd->vdev_trim_last_offset >= physical_rs.rs_end) {
                                vd->vdev_trim_bytes_done += size;
                        } else if (vd->vdev_trim_last_offset >
                            physical_rs.rs_start &&
                            vd->vdev_trim_last_offset <=
                            physical_rs.rs_end) {
                                vd->vdev_trim_bytes_done +=
                                    vd->vdev_trim_last_offset -
                                    physical_rs.rs_start;
                        }
                }
                mutex_exit(&msp->ms_lock);
        }
}

/*
 * Load from disk the vdev's manual TRIM information. This includes the
 * state, progress, and options provided when initiating the manual TRIM.
 */
static int
vdev_trim_load(vdev_t *vd)
{
        int err = 0;
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE ||
            vd->vdev_trim_state == VDEV_TRIM_SUSPENDED) {
                err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
                    sizeof (vd->vdev_trim_last_offset), 1,
                    &vd->vdev_trim_last_offset);
                if (err == ENOENT) {
                        vd->vdev_trim_last_offset = 0;
                        err = 0;
                }

                if (err == 0) {
                        err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                            vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_RATE,
                            sizeof (vd->vdev_trim_rate), 1,
                            &vd->vdev_trim_rate);
                        if (err == ENOENT) {
                                vd->vdev_trim_rate = 0;
                                err = 0;
                        }
                }

                if (err == 0) {
                        err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                            vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
                            sizeof (vd->vdev_trim_partial), 1,
                            &vd->vdev_trim_partial);
                        if (err == ENOENT) {
                                vd->vdev_trim_partial = 0;
                                err = 0;
                        }
                }

                if (err == 0) {
                        err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                            vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
                            sizeof (vd->vdev_trim_secure), 1,
                            &vd->vdev_trim_secure);
                        if (err == ENOENT) {
                                vd->vdev_trim_secure = 0;
                                err = 0;
                        }
                }
        }

        vdev_trim_calculate_progress(vd);

        return (err);
}

/*
 * Convert the logical range into a physical range and add it to the
 * range tree passed in the trim_args_t.
 */
static void
vdev_trim_range_add(void *arg, uint64_t start, uint64_t size)
{
        trim_args_t *ta = arg;
        vdev_t *vd = ta->trim_vdev;
        range_seg_t logical_rs, physical_rs;
        logical_rs.rs_start = start;
        logical_rs.rs_end = start + size;

        /*
         * Every range to be trimmed must be part of ms_allocatable.
         * When ZFS_DEBUG_TRIM is set load the metaslab to verify this
         * is always the case.
         */
        if (zfs_flags & ZFS_DEBUG_TRIM) {
                metaslab_t *msp = ta->trim_msp;
                VERIFY0(metaslab_load(msp));
                VERIFY3B(msp->ms_loaded, ==, B_TRUE);
                VERIFY(range_tree_find(msp->ms_allocatable, start, size));
        }

        ASSERT(vd->vdev_ops->vdev_op_leaf);
        vdev_xlate(vd, &logical_rs, &physical_rs);

        IMPLY(vd->vdev_top == vd,
            logical_rs.rs_start == physical_rs.rs_start);
        IMPLY(vd->vdev_top == vd,
            logical_rs.rs_end == physical_rs.rs_end);

        /*
         * Only a manual trim will be traversing the vdev sequentially.
         * For an auto trim all valid ranges should be added.
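         * For a manual trim, ranges which end at or before
         * vd->vdev_trim_last_offset have already been visited and are
         * dropped, and a range straddling that offset is clipped so the
         * TRIM resumes exactly where it left off.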
         */
        if (ta->trim_type == TRIM_TYPE_MANUAL) {

                /* Only add segments that we have not visited yet */
                if (physical_rs.rs_end <= vd->vdev_trim_last_offset)
                        return;

                /* Pick up where we left off mid-range. */
                if (vd->vdev_trim_last_offset > physical_rs.rs_start) {
                        ASSERT3U(physical_rs.rs_end, >,
                            vd->vdev_trim_last_offset);
                        physical_rs.rs_start = vd->vdev_trim_last_offset;
                }
        }

        ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);

        /*
         * With raidz, it's possible that the logical range does not live on
         * this leaf vdev. We only add the physical range to this vdev's
         * range tree if it has a length greater than 0.
         */
        if (physical_rs.rs_end > physical_rs.rs_start) {
                range_tree_add(ta->trim_tree, physical_rs.rs_start,
                    physical_rs.rs_end - physical_rs.rs_start);
        } else {
                ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
        }
}

/*
 * Each manual TRIM thread is responsible for trimming the unallocated
 * space for each leaf vdev. This is accomplished by sequentially iterating
 * over its top-level metaslabs and issuing TRIM I/O for the space described
 * by its ms_allocatable. While a metaslab is undergoing trimming it is
 * not eligible for new allocations.
 */
static void
vdev_trim_thread(void *arg)
{
        vdev_t *vd = arg;
        spa_t *spa = vd->vdev_spa;
        trim_args_t ta;
        int error = 0;

        /*
         * The VDEV_LEAF_ZAP_TRIM_* entries may have been updated by
         * vdev_trim(). Wait for the updated values to be reflected
         * in the zap in order to start with the requested settings.
         */
        txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);

        ASSERT(vdev_is_concrete(vd));
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        vd->vdev_trim_last_offset = 0;
        vd->vdev_trim_rate = 0;
        vd->vdev_trim_partial = 0;
        vd->vdev_trim_secure = 0;

        VERIFY0(vdev_trim_load(vd));

        ta.trim_vdev = vd;
        ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
        ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
        ta.trim_tree = range_tree_create(NULL, NULL);
        ta.trim_type = TRIM_TYPE_MANUAL;
        ta.trim_flags = 0;

        /*
         * When a secure TRIM has been requested infer that the intent
         * is that everything must be trimmed. Override the default
         * minimum TRIM size to prevent ranges from being skipped.
         */
        if (vd->vdev_trim_secure) {
                ta.trim_flags |= ZIO_TRIM_SECURE;
                ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
        }

        uint64_t ms_count = 0;
        for (uint64_t i = 0; !vd->vdev_detached &&
            i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];

                /*
                 * If we've expanded the top-level vdev or it's our
                 * first pass, calculate our progress.
                 */
                if (vd->vdev_top->vdev_ms_count != ms_count) {
                        vdev_trim_calculate_progress(vd);
                        ms_count = vd->vdev_top->vdev_ms_count;
                }

                spa_config_exit(spa, SCL_CONFIG, FTAG);
                metaslab_disable(msp);
                mutex_enter(&msp->ms_lock);
                VERIFY0(metaslab_load(msp));

                /*
                 * If a partial TRIM was requested skip metaslabs which have
                 * never been initialized and thus have never been written.
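                 * Such metaslabs have no space map (ms_sm == NULL) and
                 * therefore cannot contain previously written data which
                 * might need to be trimmed.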
                 */
                if (msp->ms_sm == NULL && vd->vdev_trim_partial) {
                        mutex_exit(&msp->ms_lock);
                        metaslab_enable(msp, B_FALSE);
                        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
                        vdev_trim_calculate_progress(vd);
                        continue;
                }

                ta.trim_msp = msp;
                range_tree_walk(msp->ms_allocatable, vdev_trim_range_add, &ta);
                range_tree_vacate(msp->ms_trim, NULL, NULL);
                mutex_exit(&msp->ms_lock);

                error = vdev_trim_ranges(&ta);
                metaslab_enable(msp, B_TRUE);
                spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

                range_tree_vacate(ta.trim_tree, NULL, NULL);
                if (error != 0)
                        break;
        }

        spa_config_exit(spa, SCL_CONFIG, FTAG);
        mutex_enter(&vd->vdev_trim_io_lock);
        while (vd->vdev_trim_inflight[0] > 0) {
                cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
        }
        mutex_exit(&vd->vdev_trim_io_lock);

        range_tree_destroy(ta.trim_tree);

        mutex_enter(&vd->vdev_trim_lock);
        if (!vd->vdev_trim_exit_wanted && vdev_writeable(vd)) {
                vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
                    vd->vdev_trim_rate, vd->vdev_trim_partial,
                    vd->vdev_trim_secure);
        }
        ASSERT(vd->vdev_trim_thread != NULL || vd->vdev_trim_inflight[0] == 0);

        /*
         * Drop the vdev_trim_lock while we sync out the txg since it's
         * possible that a device might be trying to come online and must
         * check to see if it needs to restart a trim. That thread will be
         * holding the spa_config_lock which would prevent the txg_wait_synced
         * from completing.
         */
        mutex_exit(&vd->vdev_trim_lock);
        txg_wait_synced(spa_get_dsl(spa), 0);
        mutex_enter(&vd->vdev_trim_lock);

        vd->vdev_trim_thread = NULL;
        cv_broadcast(&vd->vdev_trim_cv);
        mutex_exit(&vd->vdev_trim_lock);
}

/*
 * Initiates a manual TRIM for the vdev_t. Callers must hold vdev_trim_lock,
 * the vdev_t must be a leaf and cannot already be manually trimming.
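 *
 * In addition to the 'zpool trim' path, vdev_trim_restart() calls this
 * function to resume a manual TRIM which is still marked ACTIVE on disk,
 * e.g. when a device comes back online.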
 */
void
vdev_trim(vdev_t *vd, uint64_t rate, boolean_t partial, boolean_t secure)
{
        ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));
        ASSERT3P(vd->vdev_trim_thread, ==, NULL);
        ASSERT(!vd->vdev_detached);
        ASSERT(!vd->vdev_trim_exit_wanted);
        ASSERT(!vd->vdev_top->vdev_removing);

        vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, rate, partial, secure);
        vd->vdev_trim_thread = thread_create(NULL, 0,
            vdev_trim_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}

/*
 * Wait for the trimming thread to be terminated (canceled or stopped).
 */
static void
vdev_trim_stop_wait_impl(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));

        while (vd->vdev_trim_thread != NULL)
                cv_wait(&vd->vdev_trim_cv, &vd->vdev_trim_lock);

        ASSERT3P(vd->vdev_trim_thread, ==, NULL);
        vd->vdev_trim_exit_wanted = B_FALSE;
}

/*
 * Wait for vdev trim threads which were listed to cleanly exit.
 */
void
vdev_trim_stop_wait(spa_t *spa, list_t *vd_list)
{
        vdev_t *vd;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        while ((vd = list_remove_head(vd_list)) != NULL) {
                mutex_enter(&vd->vdev_trim_lock);
                vdev_trim_stop_wait_impl(vd);
                mutex_exit(&vd->vdev_trim_lock);
        }
}

/*
 * Stop trimming a device, with the resultant trimming state being tgt_state.
 * For blocking behavior pass NULL for vd_list. Otherwise, when a list_t is
 * provided the stopping vdev is inserted into the list. Callers are then
 * required to call vdev_trim_stop_wait() to block for all the trim threads
 * to exit. The caller must hold vdev_trim_lock and must not be writing to
 * the spa config, as the trimming thread may try to enter the config as a
 * reader before exiting.

/*
 * Stop trimming a device, with the resultant trimming state being tgt_state.
 * For blocking behavior pass NULL for vd_list. Otherwise, when a list_t is
 * provided, the stopping vdev is inserted into the list. Callers are then
 * required to call vdev_trim_stop_wait() to block for all the trim threads
 * to exit. The caller must hold vdev_trim_lock and must not be writing to
 * the spa config, as the trimming thread may try to enter the config as a
 * reader before exiting.
 */
void
vdev_trim_stop(vdev_t *vd, vdev_trim_state_t tgt_state, list_t *vd_list)
{
        ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
        ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));

        /*
         * Allow cancel requests to proceed even if the trim thread has
         * stopped.
         */
        if (vd->vdev_trim_thread == NULL && tgt_state != VDEV_TRIM_CANCELED)
                return;

        vdev_trim_change_state(vd, tgt_state, 0, 0, 0);
        vd->vdev_trim_exit_wanted = B_TRUE;

        if (vd_list == NULL) {
                vdev_trim_stop_wait_impl(vd);
        } else {
                ASSERT(MUTEX_HELD(&spa_namespace_lock));
                list_insert_tail(vd_list, vd);
        }
}

/*
 * Requests that each leaf vdev below the given vdev stop trimming, adding
 * any stopping vdevs to the provided vd_list.
 */
static void
vdev_trim_stop_all_impl(vdev_t *vd, vdev_trim_state_t tgt_state,
    list_t *vd_list)
{
        if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
                mutex_enter(&vd->vdev_trim_lock);
                vdev_trim_stop(vd, tgt_state, vd_list);
                mutex_exit(&vd->vdev_trim_lock);
                return;
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_trim_stop_all_impl(vd->vdev_child[i], tgt_state,
                    vd_list);
        }
}

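/*
 * A minimal sketch of the non-blocking stop pattern described above
 * (vdev_trim_stop_all() below is the in-tree user; 'rvd' stands in for
 * whichever vdev roots the tree being stopped):
 *
 *        list_t vd_list;
 *
 *        list_create(&vd_list, sizeof (vdev_t),
 *            offsetof(vdev_t, vdev_trim_node));
 *        vdev_trim_stop_all_impl(rvd, VDEV_TRIM_CANCELED, &vd_list);
 *        vdev_trim_stop_wait(spa, &vd_list);
 *        list_destroy(&vd_list);
 */
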

/*
 * Convenience function to stop trimming of a vdev tree and set all trim
 * thread pointers to NULL.
 */
void
vdev_trim_stop_all(vdev_t *vd, vdev_trim_state_t tgt_state)
{
        spa_t *spa = vd->vdev_spa;
        list_t vd_list;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        list_create(&vd_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_trim_node));

        vdev_trim_stop_all_impl(vd, tgt_state, &vd_list);
        vdev_trim_stop_wait(spa, &vd_list);

        if (vd->vdev_spa->spa_sync_on) {
                /* Make sure that our state has been synced to disk */
                txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
        }

        list_destroy(&vd_list);
}

/*
 * Conditionally restarts a manual TRIM given its on-disk state.
 */
void
vdev_trim_restart(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

        if (vd->vdev_leaf_zap != 0) {
                mutex_enter(&vd->vdev_trim_lock);
                uint64_t trim_state = VDEV_TRIM_NONE;
                int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
                    sizeof (trim_state), 1, &trim_state);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_trim_state = trim_state;

                uint64_t timestamp = 0;
                err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_ACTION_TIME,
                    sizeof (timestamp), 1, &timestamp);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_trim_action_time = (time_t)timestamp;

                if (vd->vdev_trim_state == VDEV_TRIM_SUSPENDED ||
                    vd->vdev_offline) {
                        /* load progress for reporting, but don't resume */
                        VERIFY0(vdev_trim_load(vd));
                } else if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE &&
                    vdev_writeable(vd) && !vd->vdev_top->vdev_removing &&
                    vd->vdev_trim_thread == NULL) {
                        VERIFY0(vdev_trim_load(vd));
                        vdev_trim(vd, vd->vdev_trim_rate,
                            vd->vdev_trim_partial, vd->vdev_trim_secure);
                }

                mutex_exit(&vd->vdev_trim_lock);
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_trim_restart(vd->vdev_child[i]);
        }
}

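/*
 * Vdevs without a leaf ZAP (vdev_leaf_zap == 0), such as interior vdevs,
 * are simply recursed through above. For leaves, a missing ZAP entry
 * (ENOENT) is tolerated and leaves vdev_trim_state at VDEV_TRIM_NONE, so
 * a device which has never been trimmed imports cleanly with nothing to
 * restart.
 */
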

/*
 * Used by the automatic TRIM when ZFS_DEBUG_TRIM is set to verify that
 * every TRIM range is contained within ms_allocatable.
 */
static void
vdev_trim_range_verify(void *arg, uint64_t start, uint64_t size)
{
        trim_args_t *ta = arg;
        metaslab_t *msp = ta->trim_msp;

        VERIFY3B(msp->ms_loaded, ==, B_TRUE);
        VERIFY3U(msp->ms_disabled, >, 0);
        VERIFY(range_tree_find(msp->ms_allocatable, start, size) != NULL);
}

/*
 * Each automatic TRIM thread is responsible for managing the trimming of a
 * top-level vdev in the pool. No automatic TRIM state is maintained on-disk.
 *
 * N.B. This behavior is different from a manual TRIM where a thread
 * is created for each leaf vdev, instead of each top-level vdev.
 */
static void
vdev_autotrim_thread(void *arg)
{
        vdev_t *vd = arg;
        spa_t *spa = vd->vdev_spa;
        int shift = 0;

        mutex_enter(&vd->vdev_autotrim_lock);
        ASSERT3P(vd->vdev_top, ==, vd);
        ASSERT3P(vd->vdev_autotrim_thread, !=, NULL);
        mutex_exit(&vd->vdev_autotrim_lock);
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        uint64_t extent_bytes_max = zfs_trim_extent_bytes_max;
        uint64_t extent_bytes_min = zfs_trim_extent_bytes_min;

        while (!vdev_autotrim_should_stop(vd)) {
                int txgs_per_trim = MAX(zfs_trim_txg_batch, 1);
                boolean_t issued_trim = B_FALSE;

                /*
                 * All of the metaslabs are divided into groups of size
                 * num_metaslabs / zfs_trim_txg_batch. Each of these groups
                 * is composed of metaslabs which are spread evenly over the
                 * device.
                 *
                 * For example, when zfs_trim_txg_batch = 32 (default) then
                 * group 0 will contain metaslabs 0, 32, 64, ...;
                 * group 1 will contain metaslabs 1, 33, 65, ...;
                 * group 2 will contain metaslabs 2, 34, 66, ...; and so on.
                 *
                 * On each pass through the while() loop one of these groups
                 * is selected. This is accomplished by using a shift value
                 * to select the starting metaslab, then striding over the
                 * metaslabs using the zfs_trim_txg_batch size. This is
                 * done to accomplish two things.
                 *
                 * 1) By dividing the metaslabs into groups and making sure
                 *    that each group takes a minimum of one txg to process,
                 *    zfs_trim_txg_batch controls the minimum number of txgs
                 *    which must occur before a metaslab is revisited.
                 *
                 * 2) Selecting non-consecutive metaslabs distributes the
                 *    TRIM commands for a group evenly over the entire device.
                 *    This can be advantageous for certain types of devices.
                 *
                 * A short worked example follows this comment block.
                 */
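                /*
                 * Worked example (illustrative numbers): with
                 * vdev_ms_count = 100 and zfs_trim_txg_batch = 32, pass 0
                 * (shift == 0) visits metaslabs 0, 32, 64, and 96; pass 1
                 * visits 1, 33, 65, and 97; and so on. After 32 passes the
                 * shift wraps (32 % 32 == 0) and metaslab 0 becomes eligible
                 * again. Since every pass ends with a txg_wait_open() call,
                 * at least 32 txgs separate successive visits to the same
                 * metaslab.
                 */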
                for (uint64_t i = shift % txgs_per_trim; i < vd->vdev_ms_count;
                    i += txgs_per_trim) {
                        metaslab_t *msp = vd->vdev_ms[i];
                        range_tree_t *trim_tree;

                        spa_config_exit(spa, SCL_CONFIG, FTAG);
                        metaslab_disable(msp);
                        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

                        mutex_enter(&msp->ms_lock);

                        /*
                         * Skip the metaslab when it has never been allocated
                         * or when there are no recent frees to trim.
                         */
                        if (msp->ms_sm == NULL ||
                            range_tree_is_empty(msp->ms_trim)) {
                                mutex_exit(&msp->ms_lock);
                                metaslab_enable(msp, B_FALSE);
                                continue;
                        }

                        /*
                         * Skip the metaslab when it has already been disabled.
                         * This may happen when a manual TRIM or initialize
                         * operation is running concurrently. In the case
                         * of a manual TRIM, the ms_trim tree will have been
                         * vacated. Only ranges added after the manual TRIM
                         * disabled the metaslab will be included in the tree.
                         * These will be processed when the automatic TRIM
                         * next revisits this metaslab.
                         */
                        if (msp->ms_disabled > 1) {
                                mutex_exit(&msp->ms_lock);
                                metaslab_enable(msp, B_FALSE);
                                continue;
                        }

                        /*
                         * Allocate an empty range tree which is swapped in
                         * for the existing ms_trim tree while it is processed.
                         */
                        trim_tree = range_tree_create(NULL, NULL);
                        range_tree_swap(&msp->ms_trim, &trim_tree);
                        ASSERT(range_tree_is_empty(msp->ms_trim));

                        /*
                         * There are two cases when constructing the per-vdev
                         * trim trees for a metaslab. If the top-level vdev
                         * has no children then it is also a leaf and should
                         * be trimmed. Otherwise our children are the leaves
                         * and a trim tree should be constructed for each.
                         */
                        trim_args_t *tap;
                        uint64_t children = vd->vdev_children;
                        if (children == 0) {
                                children = 1;
                                tap = kmem_zalloc(sizeof (trim_args_t) *
                                    children, KM_SLEEP);
                                tap[0].trim_vdev = vd;
                        } else {
                                tap = kmem_zalloc(sizeof (trim_args_t) *
                                    children, KM_SLEEP);

                                for (uint64_t c = 0; c < children; c++) {
                                        tap[c].trim_vdev = vd->vdev_child[c];
                                }
                        }

                        for (uint64_t c = 0; c < children; c++) {
                                trim_args_t *ta = &tap[c];
                                vdev_t *cvd = ta->trim_vdev;

                                ta->trim_msp = msp;
                                ta->trim_extent_bytes_max = extent_bytes_max;
                                ta->trim_extent_bytes_min = extent_bytes_min;
                                ta->trim_type = TRIM_TYPE_AUTO;
                                ta->trim_flags = 0;

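                                /*
                                 * Skip children which cannot be trimmed right
                                 * now: detached or unwriteable devices,
                                 * devices without TRIM support, and leaves
                                 * where a manual TRIM is already running (the
                                 * manual thread takes precedence, see the
                                 * yield check below).
                                 */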
                                if (cvd->vdev_detached ||
                                    !vdev_writeable(cvd) ||
                                    !cvd->vdev_has_trim ||
                                    cvd->vdev_trim_thread != NULL) {
                                        continue;
                                }

                                /*
                                 * When a device has an attached hot spare, or
                                 * is being replaced, it will not be trimmed.
                                 * This is done to avoid adding additional
                                 * stress to a potentially unhealthy device,
                                 * and to minimize the required rebuild time.
                                 */
                                if (!cvd->vdev_ops->vdev_op_leaf)
                                        continue;

                                ta->trim_tree = range_tree_create(NULL, NULL);
                                range_tree_walk(trim_tree,
                                    vdev_trim_range_add, ta);
                        }

                        mutex_exit(&msp->ms_lock);
                        spa_config_exit(spa, SCL_CONFIG, FTAG);

                        /*
                         * Issue the TRIM I/Os for all ranges covered by the
                         * TRIM trees. These ranges are safe to TRIM because
                         * no new allocations will be performed until the call
                         * to metaslab_enable() below.
                         */
                        for (uint64_t c = 0; c < children; c++) {
                                trim_args_t *ta = &tap[c];

                                /*
                                 * Always yield to a manual TRIM if one has
                                 * been started for the child vdev.
                                 */
                                if (ta->trim_tree == NULL ||
                                    ta->trim_vdev->vdev_trim_thread != NULL) {
                                        continue;
                                }

                                /*
                                 * After this point metaslab_enable() must be
                                 * called with the sync flag set. This is done
                                 * here because vdev_trim_ranges() is allowed
                                 * to be interrupted (EINTR) before issuing all
                                 * of the required TRIM I/Os.
                                 */
                                issued_trim = B_TRUE;

                                int error = vdev_trim_ranges(ta);
                                if (error)
                                        break;
                        }

                        /*
                         * Verify every range which was trimmed is still
                         * contained within the ms_allocatable tree.
                         */
                        if (zfs_flags & ZFS_DEBUG_TRIM) {
                                mutex_enter(&msp->ms_lock);
                                VERIFY0(metaslab_load(msp));
                                VERIFY3P(tap[0].trim_msp, ==, msp);
                                range_tree_walk(trim_tree,
                                    vdev_trim_range_verify, &tap[0]);
                                mutex_exit(&msp->ms_lock);
                        }

                        range_tree_vacate(trim_tree, NULL, NULL);
                        range_tree_destroy(trim_tree);

                        metaslab_enable(msp, issued_trim);
                        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

                        for (uint64_t c = 0; c < children; c++) {
                                trim_args_t *ta = &tap[c];

                                if (ta->trim_tree == NULL)
                                        continue;

                                range_tree_vacate(ta->trim_tree, NULL, NULL);
                                range_tree_destroy(ta->trim_tree);
                        }

                        kmem_free(tap, sizeof (trim_args_t) * children);
                }

                spa_config_exit(spa, SCL_CONFIG, FTAG);

                /*
                 * After completing the group of metaslabs, wait for the next
                 * open txg. This is done to make sure that a minimum of
                 * zfs_trim_txg_batch txgs will occur before these metaslabs
                 * are trimmed again.
                 */
                txg_wait_open(spa_get_dsl(spa), 0, issued_trim);

                shift++;
                spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
        }

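        /*
         * Wait for any TRIM I/Os issued by this thread and still in flight
         * on the children (tracked in vdev_trim_inflight[1]) to complete
         * before tearing down the thread state.
         */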
        for (uint64_t c = 0; c < vd->vdev_children; c++) {
                vdev_t *cvd = vd->vdev_child[c];
                mutex_enter(&cvd->vdev_trim_io_lock);

                while (cvd->vdev_trim_inflight[1] > 0) {
                        cv_wait(&cvd->vdev_trim_io_cv,
                            &cvd->vdev_trim_io_lock);
                }
                mutex_exit(&cvd->vdev_trim_io_lock);
        }

        spa_config_exit(spa, SCL_CONFIG, FTAG);

        /*
         * When exiting because the autotrim property was set to off,
         * abandon any unprocessed ms_trim ranges to reclaim the memory.
         */
        if (spa_get_autotrim(spa) == SPA_AUTOTRIM_OFF) {
                for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
                        metaslab_t *msp = vd->vdev_ms[i];

                        mutex_enter(&msp->ms_lock);
                        range_tree_vacate(msp->ms_trim, NULL, NULL);
                        mutex_exit(&msp->ms_lock);
                }
        }

        mutex_enter(&vd->vdev_autotrim_lock);
        ASSERT(vd->vdev_autotrim_thread != NULL);
        vd->vdev_autotrim_thread = NULL;
        cv_broadcast(&vd->vdev_autotrim_cv);
        mutex_exit(&vd->vdev_autotrim_lock);
}

/*
 * Starts an autotrim thread, if needed, for each top-level vdev which can be
 * trimmed. A top-level vdev which has been evacuated will never be trimmed.
 */
void
vdev_autotrim(spa_t *spa)
{
        vdev_t *root_vd = spa->spa_root_vdev;

        for (uint64_t i = 0; i < root_vd->vdev_children; i++) {
                vdev_t *tvd = root_vd->vdev_child[i];

                mutex_enter(&tvd->vdev_autotrim_lock);
                if (vdev_writeable(tvd) && !tvd->vdev_removing &&
                    tvd->vdev_autotrim_thread == NULL) {
                        ASSERT3P(tvd->vdev_top, ==, tvd);

                        tvd->vdev_autotrim_thread = thread_create(NULL, 0,
                            vdev_autotrim_thread, tvd, 0, &p0, TS_RUN,
                            maxclsyspri);
                        ASSERT(tvd->vdev_autotrim_thread != NULL);
                }
                mutex_exit(&tvd->vdev_autotrim_lock);
        }
}

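/*
 * Because the check above only creates a thread when vdev_autotrim_thread
 * is still NULL, vdev_autotrim() may safely be called repeatedly;
 * vdev_autotrim_restart() below relies on this by invoking it whenever
 * the pool's autotrim property is set.
 */
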

/*
 * Wait for the vdev_autotrim_thread associated with the passed top-level
 * vdev to be terminated (canceled or stopped).
 */
void
vdev_autotrim_stop_wait(vdev_t *tvd)
{
        mutex_enter(&tvd->vdev_autotrim_lock);
        if (tvd->vdev_autotrim_thread != NULL) {
                tvd->vdev_autotrim_exit_wanted = B_TRUE;

                while (tvd->vdev_autotrim_thread != NULL) {
                        cv_wait(&tvd->vdev_autotrim_cv,
                            &tvd->vdev_autotrim_lock);
                }

                ASSERT3P(tvd->vdev_autotrim_thread, ==, NULL);
                tvd->vdev_autotrim_exit_wanted = B_FALSE;
        }
        mutex_exit(&tvd->vdev_autotrim_lock);
}

/*
 * Wait for all of the vdev_autotrim_threads associated with the pool to
 * be terminated (canceled or stopped).
 */
void
vdev_autotrim_stop_all(spa_t *spa)
{
        vdev_t *root_vd = spa->spa_root_vdev;

        for (uint64_t i = 0; i < root_vd->vdev_children; i++)
                vdev_autotrim_stop_wait(root_vd->vdev_child[i]);
}

/*
 * Conditionally restart all of the vdev_autotrim_threads for the pool.
 */
void
vdev_autotrim_restart(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        if (spa->spa_autotrim)
                vdev_autotrim(spa);
}