xref: /illumos-gate/usr/src/uts/common/fs/zfs/metaslab.c (revision 094e47e9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
24  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  */
27 
28 #include <sys/zfs_context.h>
29 #include <sys/dmu.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/space_map.h>
32 #include <sys/metaslab_impl.h>
33 #include <sys/vdev_impl.h>
34 #include <sys/zio.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zfeature.h>
37 #include <sys/vdev_indirect_mapping.h>
38 #include <sys/zap.h>
39 
40 #define	GANG_ALLOCATION(flags) \
41 	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
42 
43 uint64_t metaslab_aliquot = 512ULL << 10;
44 uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
45 
46 /*
47  * Since we can touch multiple metaslabs (and their respective space maps)
48  * with each transaction group, we benefit from having a smaller space map
49  * block size since it allows us to issue more I/O operations scattered
50  * around the disk.
51  */
52 int zfs_metaslab_sm_blksz = (1 << 12);
53 
54 /*
55  * The in-core space map representation is more compact than its on-disk form.
56  * The zfs_condense_pct determines how much more compact the in-core
57  * space map representation must be before we compact it on-disk.
58  * Values should be greater than or equal to 100.
59  */
60 int zfs_condense_pct = 200;
61 
62 /*
63  * Condensing a metaslab is not guaranteed to actually reduce the amount of
64  * space used on disk. In particular, a space map uses data in increments of
65  * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
66  * same number of blocks after condensing. Since the goal of condensing is to
67  * reduce the number of IOPs required to read the space map, we only want to
68  * condense when we can be sure we will reduce the number of blocks used by the
69  * space map. Unfortunately, we cannot precisely compute whether or not this is
70  * the case in metaslab_should_condense since we are holding ms_lock. Instead,
71  * we apply the following heuristic: do not condense a spacemap unless the
72  * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
73  * blocks.
74  */
75 int zfs_metaslab_condense_block_threshold = 4;
76 
77 /*
78  * The zfs_mg_noalloc_threshold defines which metaslab groups should
79  * be eligible for allocation. The value is defined as a percentage of
80  * free space. Metaslab groups that have more free space than
81  * zfs_mg_noalloc_threshold are always eligible for allocations. Once
82  * a metaslab group's free space is less than or equal to the
83  * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
84  * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
85  * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
86  * groups are allowed to accept allocations. Gang blocks are always
87  * eligible to allocate on any metaslab group. The default value of 0 means
88  * no metaslab group will be excluded based on this criterion.
89  */
90 int zfs_mg_noalloc_threshold = 0;
91 
92 /*
93  * Metaslab groups are considered eligible for allocations if their
94  * fragmenation metric (measured as a percentage) is less than or equal to
95  * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
96  * then it will be skipped unless all metaslab groups within the metaslab
97  * class have also crossed this threshold.
98  */
99 int zfs_mg_fragmentation_threshold = 85;
100 
101 /*
102  * Allow metaslabs to keep their active state as long as their fragmentation
103  * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
104  * active metaslab that exceeds this threshold will no longer keep its active
105  * status allowing better metaslabs to be selected.
106  */
107 int zfs_metaslab_fragmentation_threshold = 70;
108 
109 /*
110  * When set will load all metaslabs when pool is first opened.
111  */
112 int metaslab_debug_load = 0;
113 
114 /*
115  * When set will prevent metaslabs from being unloaded.
116  */
117 int metaslab_debug_unload = 0;
118 
119 /*
120  * Minimum size which forces the dynamic allocator to change
121  * it's allocation strategy.  Once the space map cannot satisfy
122  * an allocation of this size then it switches to using more
123  * aggressive strategy (i.e search by size rather than offset).
124  */
125 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
126 
127 /*
128  * The minimum free space, in percent, which must be available
129  * in a space map to continue allocations in a first-fit fashion.
130  * Once the space map's free space drops below this level we dynamically
131  * switch to using best-fit allocations.
132  */
133 int metaslab_df_free_pct = 4;
134 
135 /*
136  * A metaslab is considered "free" if it contains a contiguous
137  * segment which is greater than metaslab_min_alloc_size.
138  */
139 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
140 
141 /*
142  * Percentage of all cpus that can be used by the metaslab taskq.
143  */
144 int metaslab_load_pct = 50;
145 
146 /*
147  * Determines how many txgs a metaslab may remain loaded without having any
148  * allocations from it. As long as a metaslab continues to be used we will
149  * keep it loaded.
150  */
151 int metaslab_unload_delay = TXG_SIZE * 2;
152 
153 /*
154  * Max number of metaslabs per group to preload.
155  */
156 int metaslab_preload_limit = SPA_DVAS_PER_BP;
157 
158 /*
159  * Enable/disable preloading of metaslab.
160  */
161 boolean_t metaslab_preload_enabled = B_TRUE;
162 
163 /*
164  * Enable/disable fragmentation weighting on metaslabs.
165  */
166 boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
167 
168 /*
169  * Enable/disable lba weighting (i.e. outer tracks are given preference).
170  */
171 boolean_t metaslab_lba_weighting_enabled = B_TRUE;
172 
173 /*
174  * Enable/disable metaslab group biasing.
175  */
176 boolean_t metaslab_bias_enabled = B_TRUE;
177 
178 /*
179  * Enable/disable remapping of indirect DVAs to their concrete vdevs.
180  */
181 boolean_t zfs_remap_blkptr_enable = B_TRUE;
182 
183 /*
184  * Enable/disable segment-based metaslab selection.
185  */
186 boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;
187 
188 /*
189  * When using segment-based metaslab selection, we will continue
190  * allocating from the active metaslab until we have exhausted
191  * zfs_metaslab_switch_threshold of its buckets.
192  */
193 int zfs_metaslab_switch_threshold = 2;
194 
195 /*
196  * Internal switch to enable/disable the metaslab allocation tracing
197  * facility.
198  */
199 boolean_t metaslab_trace_enabled = B_TRUE;
200 
201 /*
202  * Maximum entries that the metaslab allocation tracing facility will keep
203  * in a given list when running in non-debug mode. We limit the number
204  * of entries in non-debug mode to prevent us from using up too much memory.
205  * The limit should be sufficiently large that we don't expect any allocation
206  * to every exceed this value. In debug mode, the system will panic if this
207  * limit is ever reached allowing for further investigation.
208  */
209 uint64_t metaslab_trace_max_entries = 5000;
210 
211 static uint64_t metaslab_weight(metaslab_t *);
212 static void metaslab_set_fragmentation(metaslab_t *);
213 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
214 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
215 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
216 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
217 
218 kmem_cache_t *metaslab_alloc_trace_cache;
219 
220 /*
221  * ==========================================================================
222  * Metaslab classes
223  * ==========================================================================
224  */
225 metaslab_class_t *
226 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
227 {
228 	metaslab_class_t *mc;
229 
230 	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
231 
232 	mc->mc_spa = spa;
233 	mc->mc_rotor = NULL;
234 	mc->mc_ops = ops;
235 	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
236 	mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
237 	    sizeof (refcount_t), KM_SLEEP);
238 	mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
239 	    sizeof (uint64_t), KM_SLEEP);
240 	for (int i = 0; i < spa->spa_alloc_count; i++)
241 		refcount_create_tracked(&mc->mc_alloc_slots[i]);
242 
243 	return (mc);
244 }
245 
246 void
247 metaslab_class_destroy(metaslab_class_t *mc)
248 {
249 	ASSERT(mc->mc_rotor == NULL);
250 	ASSERT(mc->mc_alloc == 0);
251 	ASSERT(mc->mc_deferred == 0);
252 	ASSERT(mc->mc_space == 0);
253 	ASSERT(mc->mc_dspace == 0);
254 
255 	for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
256 		refcount_destroy(&mc->mc_alloc_slots[i]);
257 	kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
258 	    sizeof (refcount_t));
259 	kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
260 	    sizeof (uint64_t));
261 	mutex_destroy(&mc->mc_lock);
262 	kmem_free(mc, sizeof (metaslab_class_t));
263 }
264 
265 int
266 metaslab_class_validate(metaslab_class_t *mc)
267 {
268 	metaslab_group_t *mg;
269 	vdev_t *vd;
270 
271 	/*
272 	 * Must hold one of the spa_config locks.
273 	 */
274 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
275 	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
276 
277 	if ((mg = mc->mc_rotor) == NULL)
278 		return (0);
279 
280 	do {
281 		vd = mg->mg_vd;
282 		ASSERT(vd->vdev_mg != NULL);
283 		ASSERT3P(vd->vdev_top, ==, vd);
284 		ASSERT3P(mg->mg_class, ==, mc);
285 		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
286 	} while ((mg = mg->mg_next) != mc->mc_rotor);
287 
288 	return (0);
289 }
290 
291 void
292 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
293     int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
294 {
295 	atomic_add_64(&mc->mc_alloc, alloc_delta);
296 	atomic_add_64(&mc->mc_deferred, defer_delta);
297 	atomic_add_64(&mc->mc_space, space_delta);
298 	atomic_add_64(&mc->mc_dspace, dspace_delta);
299 }
300 
301 uint64_t
302 metaslab_class_get_alloc(metaslab_class_t *mc)
303 {
304 	return (mc->mc_alloc);
305 }
306 
307 uint64_t
308 metaslab_class_get_deferred(metaslab_class_t *mc)
309 {
310 	return (mc->mc_deferred);
311 }
312 
313 uint64_t
314 metaslab_class_get_space(metaslab_class_t *mc)
315 {
316 	return (mc->mc_space);
317 }
318 
319 uint64_t
320 metaslab_class_get_dspace(metaslab_class_t *mc)
321 {
322 	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
323 }
324 
325 void
326 metaslab_class_histogram_verify(metaslab_class_t *mc)
327 {
328 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
329 	uint64_t *mc_hist;
330 	int i;
331 
332 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
333 		return;
334 
335 	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
336 	    KM_SLEEP);
337 
338 	for (int c = 0; c < rvd->vdev_children; c++) {
339 		vdev_t *tvd = rvd->vdev_child[c];
340 		metaslab_group_t *mg = tvd->vdev_mg;
341 
342 		/*
343 		 * Skip any holes, uninitialized top-levels, or
344 		 * vdevs that are not in this metalab class.
345 		 */
346 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
347 		    mg->mg_class != mc) {
348 			continue;
349 		}
350 
351 		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
352 			mc_hist[i] += mg->mg_histogram[i];
353 	}
354 
355 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
356 		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
357 
358 	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
359 }
360 
361 /*
362  * Calculate the metaslab class's fragmentation metric. The metric
363  * is weighted based on the space contribution of each metaslab group.
364  * The return value will be a number between 0 and 100 (inclusive), or
365  * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
366  * zfs_frag_table for more information about the metric.
367  */
368 uint64_t
369 metaslab_class_fragmentation(metaslab_class_t *mc)
370 {
371 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
372 	uint64_t fragmentation = 0;
373 
374 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
375 
376 	for (int c = 0; c < rvd->vdev_children; c++) {
377 		vdev_t *tvd = rvd->vdev_child[c];
378 		metaslab_group_t *mg = tvd->vdev_mg;
379 
380 		/*
381 		 * Skip any holes, uninitialized top-levels,
382 		 * or vdevs that are not in this metalab class.
383 		 */
384 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
385 		    mg->mg_class != mc) {
386 			continue;
387 		}
388 
389 		/*
390 		 * If a metaslab group does not contain a fragmentation
391 		 * metric then just bail out.
392 		 */
393 		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
394 			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
395 			return (ZFS_FRAG_INVALID);
396 		}
397 
398 		/*
399 		 * Determine how much this metaslab_group is contributing
400 		 * to the overall pool fragmentation metric.
401 		 */
402 		fragmentation += mg->mg_fragmentation *
403 		    metaslab_group_get_space(mg);
404 	}
405 	fragmentation /= metaslab_class_get_space(mc);
406 
407 	ASSERT3U(fragmentation, <=, 100);
408 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
409 	return (fragmentation);
410 }
411 
412 /*
413  * Calculate the amount of expandable space that is available in
414  * this metaslab class. If a device is expanded then its expandable
415  * space will be the amount of allocatable space that is currently not
416  * part of this metaslab class.
417  */
418 uint64_t
419 metaslab_class_expandable_space(metaslab_class_t *mc)
420 {
421 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
422 	uint64_t space = 0;
423 
424 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
425 	for (int c = 0; c < rvd->vdev_children; c++) {
426 		uint64_t tspace;
427 		vdev_t *tvd = rvd->vdev_child[c];
428 		metaslab_group_t *mg = tvd->vdev_mg;
429 
430 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
431 		    mg->mg_class != mc) {
432 			continue;
433 		}
434 
435 		/*
436 		 * Calculate if we have enough space to add additional
437 		 * metaslabs. We report the expandable space in terms
438 		 * of the metaslab size since that's the unit of expansion.
439 		 * Adjust by efi system partition size.
440 		 */
441 		tspace = tvd->vdev_max_asize - tvd->vdev_asize;
442 		if (tspace > mc->mc_spa->spa_bootsize) {
443 			tspace -= mc->mc_spa->spa_bootsize;
444 		}
445 		space += P2ALIGN(tspace, 1ULL << tvd->vdev_ms_shift);
446 	}
447 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
448 	return (space);
449 }
450 
451 static int
452 metaslab_compare(const void *x1, const void *x2)
453 {
454 	const metaslab_t *m1 = x1;
455 	const metaslab_t *m2 = x2;
456 
457 	int sort1 = 0;
458 	int sort2 = 0;
459 	if (m1->ms_allocator != -1 && m1->ms_primary)
460 		sort1 = 1;
461 	else if (m1->ms_allocator != -1 && !m1->ms_primary)
462 		sort1 = 2;
463 	if (m2->ms_allocator != -1 && m2->ms_primary)
464 		sort2 = 1;
465 	else if (m2->ms_allocator != -1 && !m2->ms_primary)
466 		sort2 = 2;
467 
468 	/*
469 	 * Sort inactive metaslabs first, then primaries, then secondaries. When
470 	 * selecting a metaslab to allocate from, an allocator first tries its
471 	 * primary, then secondary active metaslab. If it doesn't have active
472 	 * metaslabs, or can't allocate from them, it searches for an inactive
473 	 * metaslab to activate. If it can't find a suitable one, it will steal
474 	 * a primary or secondary metaslab from another allocator.
475 	 */
476 	if (sort1 < sort2)
477 		return (-1);
478 	if (sort1 > sort2)
479 		return (1);
480 
481 	if (m1->ms_weight < m2->ms_weight)
482 		return (1);
483 	if (m1->ms_weight > m2->ms_weight)
484 		return (-1);
485 
486 	/*
487 	 * If the weights are identical, use the offset to force uniqueness.
488 	 */
489 	if (m1->ms_start < m2->ms_start)
490 		return (-1);
491 	if (m1->ms_start > m2->ms_start)
492 		return (1);
493 
494 	ASSERT3P(m1, ==, m2);
495 
496 	return (0);
497 }
498 
499 /*
500  * Verify that the space accounting on disk matches the in-core range_trees.
501  */
502 void
503 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
504 {
505 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
506 	uint64_t allocated = 0;
507 	uint64_t sm_free_space, msp_free_space;
508 
509 	ASSERT(MUTEX_HELD(&msp->ms_lock));
510 
511 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
512 		return;
513 
514 	/*
515 	 * We can only verify the metaslab space when we're called
516 	 * from syncing context with a loaded metaslab that has an allocated
517 	 * space map. Calling this in non-syncing context does not
518 	 * provide a consistent view of the metaslab since we're performing
519 	 * allocations in the future.
520 	 */
521 	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
522 	    !msp->ms_loaded)
523 		return;
524 
525 	sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
526 	    space_map_alloc_delta(msp->ms_sm);
527 
528 	/*
529 	 * Account for future allocations since we would have already
530 	 * deducted that space from the ms_freetree.
531 	 */
532 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
533 		allocated +=
534 		    range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
535 	}
536 
537 	msp_free_space = range_tree_space(msp->ms_allocatable) + allocated +
538 	    msp->ms_deferspace + range_tree_space(msp->ms_freed);
539 
540 	VERIFY3U(sm_free_space, ==, msp_free_space);
541 }
542 
543 /*
544  * ==========================================================================
545  * Metaslab groups
546  * ==========================================================================
547  */
548 /*
549  * Update the allocatable flag and the metaslab group's capacity.
550  * The allocatable flag is set to true if the capacity is below
551  * the zfs_mg_noalloc_threshold or has a fragmentation value that is
552  * greater than zfs_mg_fragmentation_threshold. If a metaslab group
553  * transitions from allocatable to non-allocatable or vice versa then the
554  * metaslab group's class is updated to reflect the transition.
555  */
556 static void
557 metaslab_group_alloc_update(metaslab_group_t *mg)
558 {
559 	vdev_t *vd = mg->mg_vd;
560 	metaslab_class_t *mc = mg->mg_class;
561 	vdev_stat_t *vs = &vd->vdev_stat;
562 	boolean_t was_allocatable;
563 	boolean_t was_initialized;
564 
565 	ASSERT(vd == vd->vdev_top);
566 	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
567 	    SCL_ALLOC);
568 
569 	mutex_enter(&mg->mg_lock);
570 	was_allocatable = mg->mg_allocatable;
571 	was_initialized = mg->mg_initialized;
572 
573 	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
574 	    (vs->vs_space + 1);
575 
576 	mutex_enter(&mc->mc_lock);
577 
578 	/*
579 	 * If the metaslab group was just added then it won't
580 	 * have any space until we finish syncing out this txg.
581 	 * At that point we will consider it initialized and available
582 	 * for allocations.  We also don't consider non-activated
583 	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
584 	 * to be initialized, because they can't be used for allocation.
585 	 */
586 	mg->mg_initialized = metaslab_group_initialized(mg);
587 	if (!was_initialized && mg->mg_initialized) {
588 		mc->mc_groups++;
589 	} else if (was_initialized && !mg->mg_initialized) {
590 		ASSERT3U(mc->mc_groups, >, 0);
591 		mc->mc_groups--;
592 	}
593 	if (mg->mg_initialized)
594 		mg->mg_no_free_space = B_FALSE;
595 
596 	/*
597 	 * A metaslab group is considered allocatable if it has plenty
598 	 * of free space or is not heavily fragmented. We only take
599 	 * fragmentation into account if the metaslab group has a valid
600 	 * fragmentation metric (i.e. a value between 0 and 100).
601 	 */
602 	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
603 	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
604 	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
605 	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
606 
607 	/*
608 	 * The mc_alloc_groups maintains a count of the number of
609 	 * groups in this metaslab class that are still above the
610 	 * zfs_mg_noalloc_threshold. This is used by the allocating
611 	 * threads to determine if they should avoid allocations to
612 	 * a given group. The allocator will avoid allocations to a group
613 	 * if that group has reached or is below the zfs_mg_noalloc_threshold
614 	 * and there are still other groups that are above the threshold.
615 	 * When a group transitions from allocatable to non-allocatable or
616 	 * vice versa we update the metaslab class to reflect that change.
617 	 * When the mc_alloc_groups value drops to 0 that means that all
618 	 * groups have reached the zfs_mg_noalloc_threshold making all groups
619 	 * eligible for allocations. This effectively means that all devices
620 	 * are balanced again.
621 	 */
622 	if (was_allocatable && !mg->mg_allocatable)
623 		mc->mc_alloc_groups--;
624 	else if (!was_allocatable && mg->mg_allocatable)
625 		mc->mc_alloc_groups++;
626 	mutex_exit(&mc->mc_lock);
627 
628 	mutex_exit(&mg->mg_lock);
629 }
630 
631 metaslab_group_t *
632 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
633 {
634 	metaslab_group_t *mg;
635 
636 	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
637 	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
638 	mutex_init(&mg->mg_ms_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
639 	cv_init(&mg->mg_ms_initialize_cv, NULL, CV_DEFAULT, NULL);
640 	mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
641 	    KM_SLEEP);
642 	mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
643 	    KM_SLEEP);
644 	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
645 	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
646 	mg->mg_vd = vd;
647 	mg->mg_class = mc;
648 	mg->mg_activation_count = 0;
649 	mg->mg_initialized = B_FALSE;
650 	mg->mg_no_free_space = B_TRUE;
651 	mg->mg_allocators = allocators;
652 
653 	mg->mg_alloc_queue_depth = kmem_zalloc(allocators * sizeof (refcount_t),
654 	    KM_SLEEP);
655 	mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
656 	    sizeof (uint64_t), KM_SLEEP);
657 	for (int i = 0; i < allocators; i++) {
658 		refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
659 		mg->mg_cur_max_alloc_queue_depth[i] = 0;
660 	}
661 
662 	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
663 	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
664 
665 	return (mg);
666 }
667 
668 void
669 metaslab_group_destroy(metaslab_group_t *mg)
670 {
671 	ASSERT(mg->mg_prev == NULL);
672 	ASSERT(mg->mg_next == NULL);
673 	/*
674 	 * We may have gone below zero with the activation count
675 	 * either because we never activated in the first place or
676 	 * because we're done, and possibly removing the vdev.
677 	 */
678 	ASSERT(mg->mg_activation_count <= 0);
679 
680 	taskq_destroy(mg->mg_taskq);
681 	avl_destroy(&mg->mg_metaslab_tree);
682 	kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
683 	kmem_free(mg->mg_secondaries, mg->mg_allocators *
684 	    sizeof (metaslab_t *));
685 	mutex_destroy(&mg->mg_lock);
686 	mutex_destroy(&mg->mg_ms_initialize_lock);
687 	cv_destroy(&mg->mg_ms_initialize_cv);
688 
689 	for (int i = 0; i < mg->mg_allocators; i++) {
690 		refcount_destroy(&mg->mg_alloc_queue_depth[i]);
691 		mg->mg_cur_max_alloc_queue_depth[i] = 0;
692 	}
693 	kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
694 	    sizeof (refcount_t));
695 	kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
696 	    sizeof (uint64_t));
697 
698 	kmem_free(mg, sizeof (metaslab_group_t));
699 }
700 
701 void
702 metaslab_group_activate(metaslab_group_t *mg)
703 {
704 	metaslab_class_t *mc = mg->mg_class;
705 	metaslab_group_t *mgprev, *mgnext;
706 
707 	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);
708 
709 	ASSERT(mc->mc_rotor != mg);
710 	ASSERT(mg->mg_prev == NULL);
711 	ASSERT(mg->mg_next == NULL);
712 	ASSERT(mg->mg_activation_count <= 0);
713 
714 	if (++mg->mg_activation_count <= 0)
715 		return;
716 
717 	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
718 	metaslab_group_alloc_update(mg);
719 
720 	if ((mgprev = mc->mc_rotor) == NULL) {
721 		mg->mg_prev = mg;
722 		mg->mg_next = mg;
723 	} else {
724 		mgnext = mgprev->mg_next;
725 		mg->mg_prev = mgprev;
726 		mg->mg_next = mgnext;
727 		mgprev->mg_next = mg;
728 		mgnext->mg_prev = mg;
729 	}
730 	mc->mc_rotor = mg;
731 }
732 
733 /*
734  * Passivate a metaslab group and remove it from the allocation rotor.
735  * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
736  * a metaslab group. This function will momentarily drop spa_config_locks
737  * that are lower than the SCL_ALLOC lock (see comment below).
738  */
739 void
740 metaslab_group_passivate(metaslab_group_t *mg)
741 {
742 	metaslab_class_t *mc = mg->mg_class;
743 	spa_t *spa = mc->mc_spa;
744 	metaslab_group_t *mgprev, *mgnext;
745 	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
746 
747 	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
748 	    (SCL_ALLOC | SCL_ZIO));
749 
750 	if (--mg->mg_activation_count != 0) {
751 		ASSERT(mc->mc_rotor != mg);
752 		ASSERT(mg->mg_prev == NULL);
753 		ASSERT(mg->mg_next == NULL);
754 		ASSERT(mg->mg_activation_count < 0);
755 		return;
756 	}
757 
758 	/*
759 	 * The spa_config_lock is an array of rwlocks, ordered as
760 	 * follows (from highest to lowest):
761 	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
762 	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
763 	 * (For more information about the spa_config_lock see spa_misc.c)
764 	 * The higher the lock, the broader its coverage. When we passivate
765 	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
766 	 * config locks. However, the metaslab group's taskq might be trying
767 	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
768 	 * lower locks to allow the I/O to complete. At a minimum,
769 	 * we continue to hold the SCL_ALLOC lock, which prevents any future
770 	 * allocations from taking place and any changes to the vdev tree.
771 	 */
772 	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
773 	taskq_wait(mg->mg_taskq);
774 	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
775 	metaslab_group_alloc_update(mg);
776 	for (int i = 0; i < mg->mg_allocators; i++) {
777 		metaslab_t *msp = mg->mg_primaries[i];
778 		if (msp != NULL) {
779 			mutex_enter(&msp->ms_lock);
780 			metaslab_passivate(msp,
781 			    metaslab_weight_from_range_tree(msp));
782 			mutex_exit(&msp->ms_lock);
783 		}
784 		msp = mg->mg_secondaries[i];
785 		if (msp != NULL) {
786 			mutex_enter(&msp->ms_lock);
787 			metaslab_passivate(msp,
788 			    metaslab_weight_from_range_tree(msp));
789 			mutex_exit(&msp->ms_lock);
790 		}
791 	}
792 
793 	mgprev = mg->mg_prev;
794 	mgnext = mg->mg_next;
795 
796 	if (mg == mgnext) {
797 		mc->mc_rotor = NULL;
798 	} else {
799 		mc->mc_rotor = mgnext;
800 		mgprev->mg_next = mgnext;
801 		mgnext->mg_prev = mgprev;
802 	}
803 
804 	mg->mg_prev = NULL;
805 	mg->mg_next = NULL;
806 }
807 
808 boolean_t
809 metaslab_group_initialized(metaslab_group_t *mg)
810 {
811 	vdev_t *vd = mg->mg_vd;
812 	vdev_stat_t *vs = &vd->vdev_stat;
813 
814 	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
815 }
816 
817 uint64_t
818 metaslab_group_get_space(metaslab_group_t *mg)
819 {
820 	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
821 }
822 
823 void
824 metaslab_group_histogram_verify(metaslab_group_t *mg)
825 {
826 	uint64_t *mg_hist;
827 	vdev_t *vd = mg->mg_vd;
828 	uint64_t ashift = vd->vdev_ashift;
829 	int i;
830 
831 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
832 		return;
833 
834 	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
835 	    KM_SLEEP);
836 
837 	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
838 	    SPACE_MAP_HISTOGRAM_SIZE + ashift);
839 
840 	for (int m = 0; m < vd->vdev_ms_count; m++) {
841 		metaslab_t *msp = vd->vdev_ms[m];
842 
843 		if (msp->ms_sm == NULL)
844 			continue;
845 
846 		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
847 			mg_hist[i + ashift] +=
848 			    msp->ms_sm->sm_phys->smp_histogram[i];
849 	}
850 
851 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
852 		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
853 
854 	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
855 }
856 
857 static void
858 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
859 {
860 	metaslab_class_t *mc = mg->mg_class;
861 	uint64_t ashift = mg->mg_vd->vdev_ashift;
862 
863 	ASSERT(MUTEX_HELD(&msp->ms_lock));
864 	if (msp->ms_sm == NULL)
865 		return;
866 
867 	mutex_enter(&mg->mg_lock);
868 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
869 		mg->mg_histogram[i + ashift] +=
870 		    msp->ms_sm->sm_phys->smp_histogram[i];
871 		mc->mc_histogram[i + ashift] +=
872 		    msp->ms_sm->sm_phys->smp_histogram[i];
873 	}
874 	mutex_exit(&mg->mg_lock);
875 }
876 
877 void
878 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
879 {
880 	metaslab_class_t *mc = mg->mg_class;
881 	uint64_t ashift = mg->mg_vd->vdev_ashift;
882 
883 	ASSERT(MUTEX_HELD(&msp->ms_lock));
884 	if (msp->ms_sm == NULL)
885 		return;
886 
887 	mutex_enter(&mg->mg_lock);
888 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
889 		ASSERT3U(mg->mg_histogram[i + ashift], >=,
890 		    msp->ms_sm->sm_phys->smp_histogram[i]);
891 		ASSERT3U(mc->mc_histogram[i + ashift], >=,
892 		    msp->ms_sm->sm_phys->smp_histogram[i]);
893 
894 		mg->mg_histogram[i + ashift] -=
895 		    msp->ms_sm->sm_phys->smp_histogram[i];
896 		mc->mc_histogram[i + ashift] -=
897 		    msp->ms_sm->sm_phys->smp_histogram[i];
898 	}
899 	mutex_exit(&mg->mg_lock);
900 }
901 
902 static void
903 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
904 {
905 	ASSERT(msp->ms_group == NULL);
906 	mutex_enter(&mg->mg_lock);
907 	msp->ms_group = mg;
908 	msp->ms_weight = 0;
909 	avl_add(&mg->mg_metaslab_tree, msp);
910 	mutex_exit(&mg->mg_lock);
911 
912 	mutex_enter(&msp->ms_lock);
913 	metaslab_group_histogram_add(mg, msp);
914 	mutex_exit(&msp->ms_lock);
915 }
916 
917 static void
918 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
919 {
920 	mutex_enter(&msp->ms_lock);
921 	metaslab_group_histogram_remove(mg, msp);
922 	mutex_exit(&msp->ms_lock);
923 
924 	mutex_enter(&mg->mg_lock);
925 	ASSERT(msp->ms_group == mg);
926 	avl_remove(&mg->mg_metaslab_tree, msp);
927 	msp->ms_group = NULL;
928 	mutex_exit(&mg->mg_lock);
929 }
930 
931 static void
932 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
933 {
934 	ASSERT(MUTEX_HELD(&mg->mg_lock));
935 	ASSERT(msp->ms_group == mg);
936 	avl_remove(&mg->mg_metaslab_tree, msp);
937 	msp->ms_weight = weight;
938 	avl_add(&mg->mg_metaslab_tree, msp);
939 
940 }
941 
942 static void
943 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
944 {
945 	/*
946 	 * Although in principle the weight can be any value, in
947 	 * practice we do not use values in the range [1, 511].
948 	 */
949 	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
950 	ASSERT(MUTEX_HELD(&msp->ms_lock));
951 
952 	mutex_enter(&mg->mg_lock);
953 	metaslab_group_sort_impl(mg, msp, weight);
954 	mutex_exit(&mg->mg_lock);
955 }
956 
957 /*
958  * Calculate the fragmentation for a given metaslab group. We can use
959  * a simple average here since all metaslabs within the group must have
960  * the same size. The return value will be a value between 0 and 100
961  * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslab in this
962  * group have a fragmentation metric.
963  */
964 uint64_t
965 metaslab_group_fragmentation(metaslab_group_t *mg)
966 {
967 	vdev_t *vd = mg->mg_vd;
968 	uint64_t fragmentation = 0;
969 	uint64_t valid_ms = 0;
970 
971 	for (int m = 0; m < vd->vdev_ms_count; m++) {
972 		metaslab_t *msp = vd->vdev_ms[m];
973 
974 		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
975 			continue;
976 
977 		valid_ms++;
978 		fragmentation += msp->ms_fragmentation;
979 	}
980 
981 	if (valid_ms <= vd->vdev_ms_count / 2)
982 		return (ZFS_FRAG_INVALID);
983 
984 	fragmentation /= valid_ms;
985 	ASSERT3U(fragmentation, <=, 100);
986 	return (fragmentation);
987 }
988 
989 /*
990  * Determine if a given metaslab group should skip allocations. A metaslab
991  * group should avoid allocations if its free capacity is less than the
992  * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
993  * zfs_mg_fragmentation_threshold and there is at least one metaslab group
994  * that can still handle allocations. If the allocation throttle is enabled
995  * then we skip allocations to devices that have reached their maximum
996  * allocation queue depth unless the selected metaslab group is the only
997  * eligible group remaining.
998  */
999 static boolean_t
1000 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1001     uint64_t psize, int allocator)
1002 {
1003 	spa_t *spa = mg->mg_vd->vdev_spa;
1004 	metaslab_class_t *mc = mg->mg_class;
1005 
1006 	/*
1007 	 * We can only consider skipping this metaslab group if it's
1008 	 * in the normal metaslab class and there are other metaslab
1009 	 * groups to select from. Otherwise, we always consider it eligible
1010 	 * for allocations.
1011 	 */
1012 	if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
1013 		return (B_TRUE);
1014 
1015 	/*
1016 	 * If the metaslab group's mg_allocatable flag is set (see comments
1017 	 * in metaslab_group_alloc_update() for more information) and
1018 	 * the allocation throttle is disabled then allow allocations to this
1019 	 * device. However, if the allocation throttle is enabled then
1020 	 * check if we have reached our allocation limit (mg_alloc_queue_depth)
1021 	 * to determine if we should allow allocations to this metaslab group.
1022 	 * If all metaslab groups are no longer considered allocatable
1023 	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1024 	 * gang block size then we allow allocations on this metaslab group
1025 	 * regardless of the mg_allocatable or throttle settings.
1026 	 */
1027 	if (mg->mg_allocatable) {
1028 		metaslab_group_t *mgp;
1029 		int64_t qdepth;
1030 		uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];
1031 
1032 		if (!mc->mc_alloc_throttle_enabled)
1033 			return (B_TRUE);
1034 
1035 		/*
1036 		 * If this metaslab group does not have any free space, then
1037 		 * there is no point in looking further.
1038 		 */
1039 		if (mg->mg_no_free_space)
1040 			return (B_FALSE);
1041 
1042 		qdepth = refcount_count(&mg->mg_alloc_queue_depth[allocator]);
1043 
1044 		/*
1045 		 * If this metaslab group is below its qmax or it's
1046 		 * the only allocatable metasable group, then attempt
1047 		 * to allocate from it.
1048 		 */
1049 		if (qdepth < qmax || mc->mc_alloc_groups == 1)
1050 			return (B_TRUE);
1051 		ASSERT3U(mc->mc_alloc_groups, >, 1);
1052 
1053 		/*
1054 		 * Since this metaslab group is at or over its qmax, we
1055 		 * need to determine if there are metaslab groups after this
1056 		 * one that might be able to handle this allocation. This is
1057 		 * racy since we can't hold the locks for all metaslab
1058 		 * groups at the same time when we make this check.
1059 		 */
1060 		for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
1061 			qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
1062 
1063 			qdepth = refcount_count(
1064 			    &mgp->mg_alloc_queue_depth[allocator]);
1065 
1066 			/*
1067 			 * If there is another metaslab group that
1068 			 * might be able to handle the allocation, then
1069 			 * we return false so that we skip this group.
1070 			 */
1071 			if (qdepth < qmax && !mgp->mg_no_free_space)
1072 				return (B_FALSE);
1073 		}
1074 
1075 		/*
1076 		 * We didn't find another group to handle the allocation
1077 		 * so we can't skip this metaslab group even though
1078 		 * we are at or over our qmax.
1079 		 */
1080 		return (B_TRUE);
1081 
1082 	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1083 		return (B_TRUE);
1084 	}
1085 	return (B_FALSE);
1086 }
1087 
1088 /*
1089  * ==========================================================================
1090  * Range tree callbacks
1091  * ==========================================================================
1092  */
1093 
1094 /*
1095  * Comparison function for the private size-ordered tree. Tree is sorted
1096  * by size, larger sizes at the end of the tree.
1097  */
1098 static int
1099 metaslab_rangesize_compare(const void *x1, const void *x2)
1100 {
1101 	const range_seg_t *r1 = x1;
1102 	const range_seg_t *r2 = x2;
1103 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1104 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1105 
1106 	if (rs_size1 < rs_size2)
1107 		return (-1);
1108 	if (rs_size1 > rs_size2)
1109 		return (1);
1110 
1111 	if (r1->rs_start < r2->rs_start)
1112 		return (-1);
1113 
1114 	if (r1->rs_start > r2->rs_start)
1115 		return (1);
1116 
1117 	return (0);
1118 }
1119 
1120 /*
1121  * Create any block allocator specific components. The current allocators
1122  * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1123  */
1124 static void
1125 metaslab_rt_create(range_tree_t *rt, void *arg)
1126 {
1127 	metaslab_t *msp = arg;
1128 
1129 	ASSERT3P(rt->rt_arg, ==, msp);
1130 	ASSERT(msp->ms_allocatable == NULL);
1131 
1132 	avl_create(&msp->ms_allocatable_by_size, metaslab_rangesize_compare,
1133 	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1134 }
1135 
1136 /*
1137  * Destroy the block allocator specific components.
1138  */
1139 static void
1140 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1141 {
1142 	metaslab_t *msp = arg;
1143 
1144 	ASSERT3P(rt->rt_arg, ==, msp);
1145 	ASSERT3P(msp->ms_allocatable, ==, rt);
1146 	ASSERT0(avl_numnodes(&msp->ms_allocatable_by_size));
1147 
1148 	avl_destroy(&msp->ms_allocatable_by_size);
1149 }
1150 
1151 static void
1152 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1153 {
1154 	metaslab_t *msp = arg;
1155 
1156 	ASSERT3P(rt->rt_arg, ==, msp);
1157 	ASSERT3P(msp->ms_allocatable, ==, rt);
1158 	VERIFY(!msp->ms_condensing);
1159 	avl_add(&msp->ms_allocatable_by_size, rs);
1160 }
1161 
1162 static void
1163 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1164 {
1165 	metaslab_t *msp = arg;
1166 
1167 	ASSERT3P(rt->rt_arg, ==, msp);
1168 	ASSERT3P(msp->ms_allocatable, ==, rt);
1169 	VERIFY(!msp->ms_condensing);
1170 	avl_remove(&msp->ms_allocatable_by_size, rs);
1171 }
1172 
1173 static void
1174 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1175 {
1176 	metaslab_t *msp = arg;
1177 
1178 	ASSERT3P(rt->rt_arg, ==, msp);
1179 	ASSERT3P(msp->ms_allocatable, ==, rt);
1180 
1181 	/*
1182 	 * Normally one would walk the tree freeing nodes along the way.
1183 	 * Since the nodes are shared with the range trees we can avoid
1184 	 * walking all nodes and just reinitialize the avl tree. The nodes
1185 	 * will be freed by the range tree, so we don't want to free them here.
1186 	 */
1187 	avl_create(&msp->ms_allocatable_by_size, metaslab_rangesize_compare,
1188 	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1189 }
1190 
1191 static range_tree_ops_t metaslab_rt_ops = {
1192 	metaslab_rt_create,
1193 	metaslab_rt_destroy,
1194 	metaslab_rt_add,
1195 	metaslab_rt_remove,
1196 	metaslab_rt_vacate
1197 };
1198 
1199 /*
1200  * ==========================================================================
1201  * Common allocator routines
1202  * ==========================================================================
1203  */
1204 
1205 /*
1206  * Return the maximum contiguous segment within the metaslab.
1207  */
1208 uint64_t
1209 metaslab_block_maxsize(metaslab_t *msp)
1210 {
1211 	avl_tree_t *t = &msp->ms_allocatable_by_size;
1212 	range_seg_t *rs;
1213 
1214 	if (t == NULL || (rs = avl_last(t)) == NULL)
1215 		return (0ULL);
1216 
1217 	return (rs->rs_end - rs->rs_start);
1218 }
1219 
1220 static range_seg_t *
1221 metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
1222 {
1223 	range_seg_t *rs, rsearch;
1224 	avl_index_t where;
1225 
1226 	rsearch.rs_start = start;
1227 	rsearch.rs_end = start + size;
1228 
1229 	rs = avl_find(t, &rsearch, &where);
1230 	if (rs == NULL) {
1231 		rs = avl_nearest(t, where, AVL_AFTER);
1232 	}
1233 
1234 	return (rs);
1235 }
1236 
1237 /*
1238  * This is a helper function that can be used by the allocator to find
1239  * a suitable block to allocate. This will search the specified AVL
1240  * tree looking for a block that matches the specified criteria.
1241  */
1242 static uint64_t
1243 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1244     uint64_t align)
1245 {
1246 	range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1247 
1248 	while (rs != NULL) {
1249 		uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1250 
1251 		if (offset + size <= rs->rs_end) {
1252 			*cursor = offset + size;
1253 			return (offset);
1254 		}
1255 		rs = AVL_NEXT(t, rs);
1256 	}
1257 
1258 	/*
1259 	 * If we know we've searched the whole map (*cursor == 0), give up.
1260 	 * Otherwise, reset the cursor to the beginning and try again.
1261 	 */
1262 	if (*cursor == 0)
1263 		return (-1ULL);
1264 
1265 	*cursor = 0;
1266 	return (metaslab_block_picker(t, cursor, size, align));
1267 }
1268 
1269 /*
1270  * ==========================================================================
1271  * The first-fit block allocator
1272  * ==========================================================================
1273  */
1274 static uint64_t
1275 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
1276 {
1277 	/*
1278 	 * Find the largest power of 2 block size that evenly divides the
1279 	 * requested size. This is used to try to allocate blocks with similar
1280 	 * alignment from the same area of the metaslab (i.e. same cursor
1281 	 * bucket) but it does not guarantee that other allocations sizes
1282 	 * may exist in the same region.
1283 	 */
1284 	uint64_t align = size & -size;
1285 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1286 	avl_tree_t *t = &msp->ms_allocatable->rt_root;
1287 
1288 	return (metaslab_block_picker(t, cursor, size, align));
1289 }
1290 
1291 static metaslab_ops_t metaslab_ff_ops = {
1292 	metaslab_ff_alloc
1293 };
1294 
1295 /*
1296  * ==========================================================================
1297  * Dynamic block allocator -
1298  * Uses the first fit allocation scheme until space get low and then
1299  * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1300  * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1301  * ==========================================================================
1302  */
1303 static uint64_t
1304 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1305 {
1306 	/*
1307 	 * Find the largest power of 2 block size that evenly divides the
1308 	 * requested size. This is used to try to allocate blocks with similar
1309 	 * alignment from the same area of the metaslab (i.e. same cursor
1310 	 * bucket) but it does not guarantee that other allocations sizes
1311 	 * may exist in the same region.
1312 	 */
1313 	uint64_t align = size & -size;
1314 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1315 	range_tree_t *rt = msp->ms_allocatable;
1316 	avl_tree_t *t = &rt->rt_root;
1317 	uint64_t max_size = metaslab_block_maxsize(msp);
1318 	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1319 
1320 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1321 	ASSERT3U(avl_numnodes(t), ==,
1322 	    avl_numnodes(&msp->ms_allocatable_by_size));
1323 
1324 	if (max_size < size)
1325 		return (-1ULL);
1326 
1327 	/*
1328 	 * If we're running low on space switch to using the size
1329 	 * sorted AVL tree (best-fit).
1330 	 */
1331 	if (max_size < metaslab_df_alloc_threshold ||
1332 	    free_pct < metaslab_df_free_pct) {
1333 		t = &msp->ms_allocatable_by_size;
1334 		*cursor = 0;
1335 	}
1336 
1337 	return (metaslab_block_picker(t, cursor, size, 1ULL));
1338 }
1339 
1340 static metaslab_ops_t metaslab_df_ops = {
1341 	metaslab_df_alloc
1342 };
1343 
1344 /*
1345  * ==========================================================================
1346  * Cursor fit block allocator -
1347  * Select the largest region in the metaslab, set the cursor to the beginning
1348  * of the range and the cursor_end to the end of the range. As allocations
1349  * are made advance the cursor. Continue allocating from the cursor until
1350  * the range is exhausted and then find a new range.
1351  * ==========================================================================
1352  */
1353 static uint64_t
1354 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1355 {
1356 	range_tree_t *rt = msp->ms_allocatable;
1357 	avl_tree_t *t = &msp->ms_allocatable_by_size;
1358 	uint64_t *cursor = &msp->ms_lbas[0];
1359 	uint64_t *cursor_end = &msp->ms_lbas[1];
1360 	uint64_t offset = 0;
1361 
1362 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1363 	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1364 
1365 	ASSERT3U(*cursor_end, >=, *cursor);
1366 
1367 	if ((*cursor + size) > *cursor_end) {
1368 		range_seg_t *rs;
1369 
1370 		rs = avl_last(&msp->ms_allocatable_by_size);
1371 		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1372 			return (-1ULL);
1373 
1374 		*cursor = rs->rs_start;
1375 		*cursor_end = rs->rs_end;
1376 	}
1377 
1378 	offset = *cursor;
1379 	*cursor += size;
1380 
1381 	return (offset);
1382 }
1383 
1384 static metaslab_ops_t metaslab_cf_ops = {
1385 	metaslab_cf_alloc
1386 };
1387 
1388 /*
1389  * ==========================================================================
1390  * New dynamic fit allocator -
1391  * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1392  * contiguous blocks. If no region is found then just use the largest segment
1393  * that remains.
1394  * ==========================================================================
1395  */
1396 
1397 /*
1398  * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1399  * to request from the allocator.
1400  */
1401 uint64_t metaslab_ndf_clump_shift = 4;
1402 
1403 static uint64_t
1404 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1405 {
1406 	avl_tree_t *t = &msp->ms_allocatable->rt_root;
1407 	avl_index_t where;
1408 	range_seg_t *rs, rsearch;
1409 	uint64_t hbit = highbit64(size);
1410 	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1411 	uint64_t max_size = metaslab_block_maxsize(msp);
1412 
1413 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1414 	ASSERT3U(avl_numnodes(t), ==,
1415 	    avl_numnodes(&msp->ms_allocatable_by_size));
1416 
1417 	if (max_size < size)
1418 		return (-1ULL);
1419 
1420 	rsearch.rs_start = *cursor;
1421 	rsearch.rs_end = *cursor + size;
1422 
1423 	rs = avl_find(t, &rsearch, &where);
1424 	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1425 		t = &msp->ms_allocatable_by_size;
1426 
1427 		rsearch.rs_start = 0;
1428 		rsearch.rs_end = MIN(max_size,
1429 		    1ULL << (hbit + metaslab_ndf_clump_shift));
1430 		rs = avl_find(t, &rsearch, &where);
1431 		if (rs == NULL)
1432 			rs = avl_nearest(t, where, AVL_AFTER);
1433 		ASSERT(rs != NULL);
1434 	}
1435 
1436 	if ((rs->rs_end - rs->rs_start) >= size) {
1437 		*cursor = rs->rs_start + size;
1438 		return (rs->rs_start);
1439 	}
1440 	return (-1ULL);
1441 }
1442 
1443 static metaslab_ops_t metaslab_ndf_ops = {
1444 	metaslab_ndf_alloc
1445 };
1446 
1447 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1448 
1449 /*
1450  * ==========================================================================
1451  * Metaslabs
1452  * ==========================================================================
1453  */
1454 
1455 /*
1456  * Wait for any in-progress metaslab loads to complete.
1457  */
1458 void
1459 metaslab_load_wait(metaslab_t *msp)
1460 {
1461 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1462 
1463 	while (msp->ms_loading) {
1464 		ASSERT(!msp->ms_loaded);
1465 		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1466 	}
1467 }
1468 
1469 int
1470 metaslab_load(metaslab_t *msp)
1471 {
1472 	int error = 0;
1473 	boolean_t success = B_FALSE;
1474 
1475 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1476 	ASSERT(!msp->ms_loaded);
1477 	ASSERT(!msp->ms_loading);
1478 
1479 	msp->ms_loading = B_TRUE;
1480 	/*
1481 	 * Nobody else can manipulate a loading metaslab, so it's now safe
1482 	 * to drop the lock.  This way we don't have to hold the lock while
1483 	 * reading the spacemap from disk.
1484 	 */
1485 	mutex_exit(&msp->ms_lock);
1486 
1487 	/*
1488 	 * If the space map has not been allocated yet, then treat
1489 	 * all the space in the metaslab as free and add it to ms_allocatable.
1490 	 */
1491 	if (msp->ms_sm != NULL) {
1492 		error = space_map_load(msp->ms_sm, msp->ms_allocatable,
1493 		    SM_FREE);
1494 	} else {
1495 		range_tree_add(msp->ms_allocatable,
1496 		    msp->ms_start, msp->ms_size);
1497 	}
1498 
1499 	success = (error == 0);
1500 
1501 	mutex_enter(&msp->ms_lock);
1502 	msp->ms_loading = B_FALSE;
1503 
1504 	if (success) {
1505 		ASSERT3P(msp->ms_group, !=, NULL);
1506 		msp->ms_loaded = B_TRUE;
1507 
1508 		/*
1509 		 * If the metaslab already has a spacemap, then we need to
1510 		 * remove all segments from the defer tree; otherwise, the
1511 		 * metaslab is completely empty and we can skip this.
1512 		 */
1513 		if (msp->ms_sm != NULL) {
1514 			for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1515 				range_tree_walk(msp->ms_defer[t],
1516 				    range_tree_remove, msp->ms_allocatable);
1517 			}
1518 		}
1519 		msp->ms_max_size = metaslab_block_maxsize(msp);
1520 	}
1521 	cv_broadcast(&msp->ms_load_cv);
1522 	return (error);
1523 }
1524 
1525 void
1526 metaslab_unload(metaslab_t *msp)
1527 {
1528 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1529 	range_tree_vacate(msp->ms_allocatable, NULL, NULL);
1530 	msp->ms_loaded = B_FALSE;
1531 	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1532 	msp->ms_max_size = 0;
1533 }
1534 
1535 int
1536 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1537     metaslab_t **msp)
1538 {
1539 	vdev_t *vd = mg->mg_vd;
1540 	objset_t *mos = vd->vdev_spa->spa_meta_objset;
1541 	metaslab_t *ms;
1542 	int error;
1543 
1544 	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1545 	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1546 	mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
1547 	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1548 
1549 	ms->ms_id = id;
1550 	ms->ms_start = id << vd->vdev_ms_shift;
1551 	ms->ms_size = 1ULL << vd->vdev_ms_shift;
1552 	ms->ms_allocator = -1;
1553 	ms->ms_new = B_TRUE;
1554 
1555 	/*
1556 	 * We only open space map objects that already exist. All others
1557 	 * will be opened when we finally allocate an object for it.
1558 	 */
1559 	if (object != 0) {
1560 		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1561 		    ms->ms_size, vd->vdev_ashift);
1562 
1563 		if (error != 0) {
1564 			kmem_free(ms, sizeof (metaslab_t));
1565 			return (error);
1566 		}
1567 
1568 		ASSERT(ms->ms_sm != NULL);
1569 	}
1570 
1571 	/*
1572 	 * We create the main range tree here, but we don't create the
1573 	 * other range trees until metaslab_sync_done().  This serves
1574 	 * two purposes: it allows metaslab_sync_done() to detect the
1575 	 * addition of new space; and for debugging, it ensures that we'd
1576 	 * data fault on any attempt to use this metaslab before it's ready.
1577 	 */
1578 	ms->ms_allocatable = range_tree_create(&metaslab_rt_ops, ms);
1579 	metaslab_group_add(mg, ms);
1580 
1581 	metaslab_set_fragmentation(ms);
1582 
1583 	/*
1584 	 * If we're opening an existing pool (txg == 0) or creating
1585 	 * a new one (txg == TXG_INITIAL), all space is available now.
1586 	 * If we're adding space to an existing pool, the new space
1587 	 * does not become available until after this txg has synced.
1588 	 * The metaslab's weight will also be initialized when we sync
1589 	 * out this txg. This ensures that we don't attempt to allocate
1590 	 * from it before we have initialized it completely.
1591 	 */
1592 	if (txg <= TXG_INITIAL)
1593 		metaslab_sync_done(ms, 0);
1594 
1595 	/*
1596 	 * If metaslab_debug_load is set and we're initializing a metaslab
1597 	 * that has an allocated space map object then load the its space
1598 	 * map so that can verify frees.
1599 	 */
1600 	if (metaslab_debug_load && ms->ms_sm != NULL) {
1601 		mutex_enter(&ms->ms_lock);
1602 		VERIFY0(metaslab_load(ms));
1603 		mutex_exit(&ms->ms_lock);
1604 	}
1605 
1606 	if (txg != 0) {
1607 		vdev_dirty(vd, 0, NULL, txg);
1608 		vdev_dirty(vd, VDD_METASLAB, ms, txg);
1609 	}
1610 
1611 	*msp = ms;
1612 
1613 	return (0);
1614 }
1615 
1616 void
1617 metaslab_fini(metaslab_t *msp)
1618 {
1619 	metaslab_group_t *mg = msp->ms_group;
1620 
1621 	metaslab_group_remove(mg, msp);
1622 
1623 	mutex_enter(&msp->ms_lock);
1624 	VERIFY(msp->ms_group == NULL);
1625 	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1626 	    0, -msp->ms_size);
1627 	space_map_close(msp->ms_sm);
1628 
1629 	metaslab_unload(msp);
1630 	range_tree_destroy(msp->ms_allocatable);
1631 	range_tree_destroy(msp->ms_freeing);
1632 	range_tree_destroy(msp->ms_freed);
1633 
1634 	for (int t = 0; t < TXG_SIZE; t++) {
1635 		range_tree_destroy(msp->ms_allocating[t]);
1636 	}
1637 
1638 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1639 		range_tree_destroy(msp->ms_defer[t]);
1640 	}
1641 	ASSERT0(msp->ms_deferspace);
1642 
1643 	range_tree_destroy(msp->ms_checkpointing);
1644 
1645 	mutex_exit(&msp->ms_lock);
1646 	cv_destroy(&msp->ms_load_cv);
1647 	mutex_destroy(&msp->ms_lock);
1648 	mutex_destroy(&msp->ms_sync_lock);
1649 	ASSERT3U(msp->ms_allocator, ==, -1);
1650 
1651 	kmem_free(msp, sizeof (metaslab_t));
1652 }
1653 
1654 #define	FRAGMENTATION_TABLE_SIZE	17
1655 
1656 /*
1657  * This table defines a segment size based fragmentation metric that will
1658  * allow each metaslab to derive its own fragmentation value. This is done
1659  * by calculating the space in each bucket of the spacemap histogram and
1660  * multiplying that by the fragmetation metric in this table. Doing
1661  * this for all buckets and dividing it by the total amount of free
1662  * space in this metaslab (i.e. the total free space in all buckets) gives
1663  * us the fragmentation metric. This means that a high fragmentation metric
1664  * equates to most of the free space being comprised of small segments.
1665  * Conversely, if the metric is low, then most of the free space is in
1666  * large segments. A 10% change in fragmentation equates to approximately
1667  * double the number of segments.
1668  *
1669  * This table defines 0% fragmented space using 16MB segments. Testing has
1670  * shown that segments that are greater than or equal to 16MB do not suffer
1671  * from drastic performance problems. Using this value, we derive the rest
1672  * of the table. Since the fragmentation value is never stored on disk, it
1673  * is possible to change these calculations in the future.
1674  */
1675 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1676 	100,	/* 512B	*/
1677 	100,	/* 1K	*/
1678 	98,	/* 2K	*/
1679 	95,	/* 4K	*/
1680 	90,	/* 8K	*/
1681 	80,	/* 16K	*/
1682 	70,	/* 32K	*/
1683 	60,	/* 64K	*/
1684 	50,	/* 128K	*/
1685 	40,	/* 256K	*/
1686 	30,	/* 512K	*/
1687 	20,	/* 1M	*/
1688 	15,	/* 2M	*/
1689 	10,	/* 4M	*/
1690 	5,	/* 8M	*/
1691 	0	/* 16M	*/
1692 };
1693 
1694 /*
1695  * Calclate the metaslab's fragmentation metric. A return value
1696  * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1697  * not support this metric. Otherwise, the return value should be in the
1698  * range [0, 100].
1699  */
1700 static void
1701 metaslab_set_fragmentation(metaslab_t *msp)
1702 {
1703 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1704 	uint64_t fragmentation = 0;
1705 	uint64_t total = 0;
1706 	boolean_t feature_enabled = spa_feature_is_enabled(spa,
1707 	    SPA_FEATURE_SPACEMAP_HISTOGRAM);
1708 
1709 	if (!feature_enabled) {
1710 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
1711 		return;
1712 	}
1713 
1714 	/*
1715 	 * A null space map means that the entire metaslab is free
1716 	 * and thus is not fragmented.
1717 	 */
1718 	if (msp->ms_sm == NULL) {
1719 		msp->ms_fragmentation = 0;
1720 		return;
1721 	}
1722 
1723 	/*
1724 	 * If this metaslab's space map has not been upgraded, flag it
1725 	 * so that we upgrade next time we encounter it.
1726 	 */
1727 	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1728 		uint64_t txg = spa_syncing_txg(spa);
1729 		vdev_t *vd = msp->ms_group->mg_vd;
1730 
1731 		/*
1732 		 * If we've reached the final dirty txg, then we must
1733 		 * be shutting down the pool. We don't want to dirty
1734 		 * any data past this point so skip setting the condense
1735 		 * flag. We can retry this action the next time the pool
1736 		 * is imported.
1737 		 */
1738 		if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
1739 			msp->ms_condense_wanted = B_TRUE;
1740 			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1741 			zfs_dbgmsg("txg %llu, requesting force condense: "
1742 			    "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
1743 			    vd->vdev_id);
1744 		}
1745 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
1746 		return;
1747 	}
1748 
1749 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1750 		uint64_t space = 0;
1751 		uint8_t shift = msp->ms_sm->sm_shift;
1752 
1753 		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1754 		    FRAGMENTATION_TABLE_SIZE - 1);
1755 
1756 		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1757 			continue;
1758 
1759 		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1760 		total += space;
1761 
1762 		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1763 		fragmentation += space * zfs_frag_table[idx];
1764 	}
1765 
1766 	if (total > 0)
1767 		fragmentation /= total;
1768 	ASSERT3U(fragmentation, <=, 100);
1769 
1770 	msp->ms_fragmentation = fragmentation;
1771 }
1772 
1773 /*
1774  * Compute a weight -- a selection preference value -- for the given metaslab.
1775  * This is based on the amount of free space, the level of fragmentation,
1776  * the LBA range, and whether the metaslab is loaded.
1777  */
1778 static uint64_t
1779 metaslab_space_weight(metaslab_t *msp)
1780 {
1781 	metaslab_group_t *mg = msp->ms_group;
1782 	vdev_t *vd = mg->mg_vd;
1783 	uint64_t weight, space;
1784 
1785 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1786 	ASSERT(!vd->vdev_removing);
1787 
1788 	/*
1789 	 * The baseline weight is the metaslab's free space.
1790 	 */
1791 	space = msp->ms_size - space_map_allocated(msp->ms_sm);
1792 
1793 	if (metaslab_fragmentation_factor_enabled &&
1794 	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1795 		/*
1796 		 * Use the fragmentation information to inversely scale
1797 		 * down the baseline weight. We need to ensure that we
1798 		 * don't exclude this metaslab completely when it's 100%
1799 		 * fragmented. To avoid this we reduce the fragmented value
1800 		 * by 1.
1801 		 */
1802 		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1803 
1804 		/*
1805 		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1806 		 * this metaslab again. The fragmentation metric may have
1807 		 * decreased the space to something smaller than
1808 		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1809 		 * so that we can consume any remaining space.
1810 		 */
1811 		if (space > 0 && space < SPA_MINBLOCKSIZE)
1812 			space = SPA_MINBLOCKSIZE;
1813 	}
1814 	weight = space;
1815 
1816 	/*
1817 	 * Modern disks have uniform bit density and constant angular velocity.
1818 	 * Therefore, the outer recording zones are faster (higher bandwidth)
1819 	 * than the inner zones by the ratio of outer to inner track diameter,
1820 	 * which is typically around 2:1.  We account for this by assigning
1821 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1822 	 * In effect, this means that we'll select the metaslab with the most
1823 	 * free bandwidth rather than simply the one with the most free space.
1824 	 */
1825 	if (metaslab_lba_weighting_enabled) {
1826 		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1827 		ASSERT(weight >= space && weight <= 2 * space);
1828 	}
1829 
1830 	/*
1831 	 * If this metaslab is one we're actively using, adjust its
1832 	 * weight to make it preferable to any inactive metaslab so
1833 	 * we'll polish it off. If the fragmentation on this metaslab
1834 	 * has exceed our threshold, then don't mark it active.
1835 	 */
1836 	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1837 	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1838 		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1839 	}
1840 
1841 	WEIGHT_SET_SPACEBASED(weight);
1842 	return (weight);
1843 }
1844 
1845 /*
1846  * Return the weight of the specified metaslab, according to the segment-based
1847  * weighting algorithm. The metaslab must be loaded. This function can
1848  * be called within a sync pass since it relies only on the metaslab's
1849  * range tree which is always accurate when the metaslab is loaded.
1850  */
1851 static uint64_t
1852 metaslab_weight_from_range_tree(metaslab_t *msp)
1853 {
1854 	uint64_t weight = 0;
1855 	uint32_t segments = 0;
1856 
1857 	ASSERT(msp->ms_loaded);
1858 
1859 	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
1860 	    i--) {
1861 		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
1862 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1863 
1864 		segments <<= 1;
1865 		segments += msp->ms_allocatable->rt_histogram[i];
1866 
1867 		/*
1868 		 * The range tree provides more precision than the space map
1869 		 * and must be downgraded so that all values fit within the
1870 		 * space map's histogram. This allows us to compare loaded
1871 		 * vs. unloaded metaslabs to determine which metaslab is
1872 		 * considered "best".
1873 		 */
1874 		if (i > max_idx)
1875 			continue;
1876 
1877 		if (segments != 0) {
1878 			WEIGHT_SET_COUNT(weight, segments);
1879 			WEIGHT_SET_INDEX(weight, i);
1880 			WEIGHT_SET_ACTIVE(weight, 0);
1881 			break;
1882 		}
1883 	}
1884 	return (weight);
1885 }
1886 
1887 /*
1888  * Calculate the weight based on the on-disk histogram. This should only
1889  * be called after a sync pass has completely finished since the on-disk
1890  * information is updated in metaslab_sync().
1891  */
1892 static uint64_t
1893 metaslab_weight_from_spacemap(metaslab_t *msp)
1894 {
1895 	uint64_t weight = 0;
1896 
1897 	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
1898 		if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
1899 			WEIGHT_SET_COUNT(weight,
1900 			    msp->ms_sm->sm_phys->smp_histogram[i]);
1901 			WEIGHT_SET_INDEX(weight, i +
1902 			    msp->ms_sm->sm_shift);
1903 			WEIGHT_SET_ACTIVE(weight, 0);
1904 			break;
1905 		}
1906 	}
1907 	return (weight);
1908 }
1909 
1910 /*
1911  * Compute a segment-based weight for the specified metaslab. The weight
1912  * is determined by highest bucket in the histogram. The information
1913  * for the highest bucket is encoded into the weight value.
1914  */
1915 static uint64_t
1916 metaslab_segment_weight(metaslab_t *msp)
1917 {
1918 	metaslab_group_t *mg = msp->ms_group;
1919 	uint64_t weight = 0;
1920 	uint8_t shift = mg->mg_vd->vdev_ashift;
1921 
1922 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1923 
1924 	/*
1925 	 * The metaslab is completely free.
1926 	 */
1927 	if (space_map_allocated(msp->ms_sm) == 0) {
1928 		int idx = highbit64(msp->ms_size) - 1;
1929 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1930 
1931 		if (idx < max_idx) {
1932 			WEIGHT_SET_COUNT(weight, 1ULL);
1933 			WEIGHT_SET_INDEX(weight, idx);
1934 		} else {
1935 			WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1936 			WEIGHT_SET_INDEX(weight, max_idx);
1937 		}
1938 		WEIGHT_SET_ACTIVE(weight, 0);
1939 		ASSERT(!WEIGHT_IS_SPACEBASED(weight));
1940 
1941 		return (weight);
1942 	}
1943 
1944 	ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1945 
1946 	/*
1947 	 * If the metaslab is fully allocated then just make the weight 0.
1948 	 */
1949 	if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1950 		return (0);
1951 	/*
1952 	 * If the metaslab is already loaded, then use the range tree to
1953 	 * determine the weight. Otherwise, we rely on the space map information
1954 	 * to generate the weight.
1955 	 */
1956 	if (msp->ms_loaded) {
1957 		weight = metaslab_weight_from_range_tree(msp);
1958 	} else {
1959 		weight = metaslab_weight_from_spacemap(msp);
1960 	}
1961 
1962 	/*
1963 	 * If the metaslab was active the last time we calculated its weight
1964 	 * then keep it active. We want to consume the entire region that
1965 	 * is associated with this weight.
1966 	 */
1967 	if (msp->ms_activation_weight != 0 && weight != 0)
1968 		WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
1969 	return (weight);
1970 }
1971 
1972 /*
1973  * Determine if we should attempt to allocate from this metaslab. If the
1974  * metaslab has a maximum size then we can quickly determine if the desired
1975  * allocation size can be satisfied. Otherwise, if we're using segment-based
1976  * weighting then we can determine the maximum allocation that this metaslab
1977  * can accommodate based on the index encoded in the weight. If we're using
1978  * space-based weights then rely on the entire weight (excluding the weight
1979  * type bit).
1980  */
1981 boolean_t
1982 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
1983 {
1984 	boolean_t should_allocate;
1985 
1986 	if (msp->ms_max_size != 0)
1987 		return (msp->ms_max_size >= asize);
1988 
1989 	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
1990 		/*
1991 		 * The metaslab segment weight indicates segments in the
1992 		 * range [2^i, 2^(i+1)), where i is the index in the weight.
1993 		 * Since the asize might be in the middle of the range, we
1994 		 * should attempt the allocation if asize < 2^(i+1).
1995 		 */
1996 		should_allocate = (asize <
1997 		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
1998 	} else {
1999 		should_allocate = (asize <=
2000 		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
2001 	}
2002 	return (should_allocate);
2003 }
2004 
2005 static uint64_t
2006 metaslab_weight(metaslab_t *msp)
2007 {
2008 	vdev_t *vd = msp->ms_group->mg_vd;
2009 	spa_t *spa = vd->vdev_spa;
2010 	uint64_t weight;
2011 
2012 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2013 
2014 	/*
2015 	 * If this vdev is in the process of being removed, there is nothing
2016 	 * for us to do here.
2017 	 */
2018 	if (vd->vdev_removing)
2019 		return (0);
2020 
2021 	metaslab_set_fragmentation(msp);
2022 
2023 	/*
2024 	 * Update the maximum size if the metaslab is loaded. This will
2025 	 * ensure that we get an accurate maximum size if newly freed space
2026 	 * has been added back into the free tree.
2027 	 */
2028 	if (msp->ms_loaded)
2029 		msp->ms_max_size = metaslab_block_maxsize(msp);
2030 
2031 	/*
2032 	 * Segment-based weighting requires space map histogram support.
2033 	 */
2034 	if (zfs_metaslab_segment_weight_enabled &&
2035 	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
2036 	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
2037 	    sizeof (space_map_phys_t))) {
2038 		weight = metaslab_segment_weight(msp);
2039 	} else {
2040 		weight = metaslab_space_weight(msp);
2041 	}
2042 	return (weight);
2043 }
2044 
2045 static int
2046 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2047     int allocator, uint64_t activation_weight)
2048 {
2049 	/*
2050 	 * If we're activating for the claim code, we don't want to actually
2051 	 * set the metaslab up for a specific allocator.
2052 	 */
2053 	if (activation_weight == METASLAB_WEIGHT_CLAIM)
2054 		return (0);
2055 	metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
2056 	    mg->mg_primaries : mg->mg_secondaries);
2057 
2058 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2059 	mutex_enter(&mg->mg_lock);
2060 	if (arr[allocator] != NULL) {
2061 		mutex_exit(&mg->mg_lock);
2062 		return (EEXIST);
2063 	}
2064 
2065 	arr[allocator] = msp;
2066 	ASSERT3S(msp->ms_allocator, ==, -1);
2067 	msp->ms_allocator = allocator;
2068 	msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
2069 	mutex_exit(&mg->mg_lock);
2070 
2071 	return (0);
2072 }
2073 
2074 static int
2075 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
2076 {
2077 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2078 
2079 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
2080 		int error = 0;
2081 		metaslab_load_wait(msp);
2082 		if (!msp->ms_loaded) {
2083 			if ((error = metaslab_load(msp)) != 0) {
2084 				metaslab_group_sort(msp->ms_group, msp, 0);
2085 				return (error);
2086 			}
2087 		}
2088 		if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
2089 			/*
2090 			 * The metaslab was activated for another allocator
2091 			 * while we were waiting, we should reselect.
2092 			 */
2093 			return (EBUSY);
2094 		}
2095 		if ((error = metaslab_activate_allocator(msp->ms_group, msp,
2096 		    allocator, activation_weight)) != 0) {
2097 			return (error);
2098 		}
2099 
2100 		msp->ms_activation_weight = msp->ms_weight;
2101 		metaslab_group_sort(msp->ms_group, msp,
2102 		    msp->ms_weight | activation_weight);
2103 	}
2104 	ASSERT(msp->ms_loaded);
2105 	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
2106 
2107 	return (0);
2108 }
2109 
2110 static void
2111 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2112     uint64_t weight)
2113 {
2114 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2115 	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
2116 		metaslab_group_sort(mg, msp, weight);
2117 		return;
2118 	}
2119 
2120 	mutex_enter(&mg->mg_lock);
2121 	ASSERT3P(msp->ms_group, ==, mg);
2122 	if (msp->ms_primary) {
2123 		ASSERT3U(0, <=, msp->ms_allocator);
2124 		ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
2125 		ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp);
2126 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
2127 		mg->mg_primaries[msp->ms_allocator] = NULL;
2128 	} else {
2129 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
2130 		ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp);
2131 		mg->mg_secondaries[msp->ms_allocator] = NULL;
2132 	}
2133 	msp->ms_allocator = -1;
2134 	metaslab_group_sort_impl(mg, msp, weight);
2135 	mutex_exit(&mg->mg_lock);
2136 }
2137 
2138 static void
2139 metaslab_passivate(metaslab_t *msp, uint64_t weight)
2140 {
2141 	uint64_t size = weight & ~METASLAB_WEIGHT_TYPE;
2142 
2143 	/*
2144 	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
2145 	 * this metaslab again.  In that case, it had better be empty,
2146 	 * or we would be leaving space on the table.
2147 	 */
2148 	ASSERT(size >= SPA_MINBLOCKSIZE ||
2149 	    range_tree_is_empty(msp->ms_allocatable));
2150 	ASSERT0(weight & METASLAB_ACTIVE_MASK);
2151 
2152 	msp->ms_activation_weight = 0;
2153 	metaslab_passivate_allocator(msp->ms_group, msp, weight);
2154 	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
2155 }
2156 
2157 /*
2158  * Segment-based metaslabs are activated once and remain active until
2159  * we either fail an allocation attempt (similar to space-based metaslabs)
2160  * or have exhausted the free space in zfs_metaslab_switch_threshold
2161  * buckets since the metaslab was activated. This function checks to see
2162  * if we've exhaused the zfs_metaslab_switch_threshold buckets in the
2163  * metaslab and passivates it proactively. This will allow us to select a
2164  * metaslabs with larger contiguous region if any remaining within this
2165  * metaslab group. If we're in sync pass > 1, then we continue using this
2166  * metaslab so that we don't dirty more block and cause more sync passes.
2167  */
2168 void
2169 metaslab_segment_may_passivate(metaslab_t *msp)
2170 {
2171 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2172 
2173 	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2174 		return;
2175 
2176 	/*
2177 	 * Since we are in the middle of a sync pass, the most accurate
2178 	 * information that is accessible to us is the in-core range tree
2179 	 * histogram; calculate the new weight based on that information.
2180 	 */
2181 	uint64_t weight = metaslab_weight_from_range_tree(msp);
2182 	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2183 	int current_idx = WEIGHT_GET_INDEX(weight);
2184 
2185 	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2186 		metaslab_passivate(msp, weight);
2187 }
2188 
2189 static void
2190 metaslab_preload(void *arg)
2191 {
2192 	metaslab_t *msp = arg;
2193 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2194 
2195 	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2196 
2197 	mutex_enter(&msp->ms_lock);
2198 	metaslab_load_wait(msp);
2199 	if (!msp->ms_loaded)
2200 		(void) metaslab_load(msp);
2201 	msp->ms_selected_txg = spa_syncing_txg(spa);
2202 	mutex_exit(&msp->ms_lock);
2203 }
2204 
2205 static void
2206 metaslab_group_preload(metaslab_group_t *mg)
2207 {
2208 	spa_t *spa = mg->mg_vd->vdev_spa;
2209 	metaslab_t *msp;
2210 	avl_tree_t *t = &mg->mg_metaslab_tree;
2211 	int m = 0;
2212 
2213 	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2214 		taskq_wait(mg->mg_taskq);
2215 		return;
2216 	}
2217 
2218 	mutex_enter(&mg->mg_lock);
2219 
2220 	/*
2221 	 * Load the next potential metaslabs
2222 	 */
2223 	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2224 		ASSERT3P(msp->ms_group, ==, mg);
2225 
2226 		/*
2227 		 * We preload only the maximum number of metaslabs specified
2228 		 * by metaslab_preload_limit. If a metaslab is being forced
2229 		 * to condense then we preload it too. This will ensure
2230 		 * that force condensing happens in the next txg.
2231 		 */
2232 		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2233 			continue;
2234 		}
2235 
2236 		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2237 		    msp, TQ_SLEEP) != NULL);
2238 	}
2239 	mutex_exit(&mg->mg_lock);
2240 }
2241 
2242 /*
2243  * Determine if the space map's on-disk footprint is past our tolerance
2244  * for inefficiency. We would like to use the following criteria to make
2245  * our decision:
2246  *
2247  * 1. The size of the space map object should not dramatically increase as a
2248  * result of writing out the free space range tree.
2249  *
2250  * 2. The minimal on-disk space map representation is zfs_condense_pct/100
2251  * times the size than the free space range tree representation
2252  * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
2253  *
2254  * 3. The on-disk size of the space map should actually decrease.
2255  *
2256  * Unfortunately, we cannot compute the on-disk size of the space map in this
2257  * context because we cannot accurately compute the effects of compression, etc.
2258  * Instead, we apply the heuristic described in the block comment for
2259  * zfs_metaslab_condense_block_threshold - we only condense if the space used
2260  * is greater than a threshold number of blocks.
2261  */
2262 static boolean_t
2263 metaslab_should_condense(metaslab_t *msp)
2264 {
2265 	space_map_t *sm = msp->ms_sm;
2266 	vdev_t *vd = msp->ms_group->mg_vd;
2267 	uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
2268 	uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);
2269 
2270 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2271 	ASSERT(msp->ms_loaded);
2272 
2273 	/*
2274 	 * Allocations and frees in early passes are generally more space
2275 	 * efficient (in terms of blocks described in space map entries)
2276 	 * than the ones in later passes (e.g. we don't compress after
2277 	 * sync pass 5) and condensing a metaslab multiple times in a txg
2278 	 * could degrade performance.
2279 	 *
2280 	 * Thus we prefer condensing each metaslab at most once every txg at
2281 	 * the earliest sync pass possible. If a metaslab is eligible for
2282 	 * condensing again after being considered for condensing within the
2283 	 * same txg, it will hopefully be dirty in the next txg where it will
2284 	 * be condensed at an earlier pass.
2285 	 */
2286 	if (msp->ms_condense_checked_txg == current_txg)
2287 		return (B_FALSE);
2288 	msp->ms_condense_checked_txg = current_txg;
2289 
2290 	/*
2291 	 * We always condense metaslabs that are empty and metaslabs for
2292 	 * which a condense request has been made.
2293 	 */
2294 	if (avl_is_empty(&msp->ms_allocatable_by_size) ||
2295 	    msp->ms_condense_wanted)
2296 		return (B_TRUE);
2297 
2298 	uint64_t object_size = space_map_length(msp->ms_sm);
2299 	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
2300 	    msp->ms_allocatable, SM_NO_VDEVID);
2301 
2302 	dmu_object_info_t doi;
2303 	dmu_object_info_from_db(sm->sm_dbuf, &doi);
2304 	uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2305 
2306 	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
2307 	    object_size > zfs_metaslab_condense_block_threshold * record_size);
2308 }
2309 
2310 /*
2311  * Condense the on-disk space map representation to its minimized form.
2312  * The minimized form consists of a small number of allocations followed by
2313  * the entries of the free range tree.
2314  */
2315 static void
2316 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2317 {
2318 	range_tree_t *condense_tree;
2319 	space_map_t *sm = msp->ms_sm;
2320 
2321 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2322 	ASSERT(msp->ms_loaded);
2323 
2324 	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2325 	    "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2326 	    msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2327 	    msp->ms_group->mg_vd->vdev_spa->spa_name,
2328 	    space_map_length(msp->ms_sm),
2329 	    avl_numnodes(&msp->ms_allocatable->rt_root),
2330 	    msp->ms_condense_wanted ? "TRUE" : "FALSE");
2331 
2332 	msp->ms_condense_wanted = B_FALSE;
2333 
2334 	/*
2335 	 * Create an range tree that is 100% allocated. We remove segments
2336 	 * that have been freed in this txg, any deferred frees that exist,
2337 	 * and any allocation in the future. Removing segments should be
2338 	 * a relatively inexpensive operation since we expect these trees to
2339 	 * have a small number of nodes.
2340 	 */
2341 	condense_tree = range_tree_create(NULL, NULL);
2342 	range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2343 
2344 	range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
2345 	range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);
2346 
2347 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2348 		range_tree_walk(msp->ms_defer[t],
2349 		    range_tree_remove, condense_tree);
2350 	}
2351 
2352 	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2353 		range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
2354 		    range_tree_remove, condense_tree);
2355 	}
2356 
2357 	/*
2358 	 * We're about to drop the metaslab's lock thus allowing
2359 	 * other consumers to change it's content. Set the
2360 	 * metaslab's ms_condensing flag to ensure that
2361 	 * allocations on this metaslab do not occur while we're
2362 	 * in the middle of committing it to disk. This is only critical
2363 	 * for ms_allocatable as all other range trees use per txg
2364 	 * views of their content.
2365 	 */
2366 	msp->ms_condensing = B_TRUE;
2367 
2368 	mutex_exit(&msp->ms_lock);
2369 	space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);
2370 
2371 	/*
2372 	 * While we would ideally like to create a space map representation
2373 	 * that consists only of allocation records, doing so can be
2374 	 * prohibitively expensive because the in-core free tree can be
2375 	 * large, and therefore computationally expensive to subtract
2376 	 * from the condense_tree. Instead we sync out two trees, a cheap
2377 	 * allocation only tree followed by the in-core free tree. While not
2378 	 * optimal, this is typically close to optimal, and much cheaper to
2379 	 * compute.
2380 	 */
2381 	space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
2382 	range_tree_vacate(condense_tree, NULL, NULL);
2383 	range_tree_destroy(condense_tree);
2384 
2385 	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
2386 	mutex_enter(&msp->ms_lock);
2387 	msp->ms_condensing = B_FALSE;
2388 }
2389 
2390 /*
2391  * Write a metaslab to disk in the context of the specified transaction group.
2392  */
2393 void
2394 metaslab_sync(metaslab_t *msp, uint64_t txg)
2395 {
2396 	metaslab_group_t *mg = msp->ms_group;
2397 	vdev_t *vd = mg->mg_vd;
2398 	spa_t *spa = vd->vdev_spa;
2399 	objset_t *mos = spa_meta_objset(spa);
2400 	range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
2401 	dmu_tx_t *tx;
2402 	uint64_t object = space_map_object(msp->ms_sm);
2403 
2404 	ASSERT(!vd->vdev_ishole);
2405 
2406 	/*
2407 	 * This metaslab has just been added so there's no work to do now.
2408 	 */
2409 	if (msp->ms_freeing == NULL) {
2410 		ASSERT3P(alloctree, ==, NULL);
2411 		return;
2412 	}
2413 
2414 	ASSERT3P(alloctree, !=, NULL);
2415 	ASSERT3P(msp->ms_freeing, !=, NULL);
2416 	ASSERT3P(msp->ms_freed, !=, NULL);
2417 	ASSERT3P(msp->ms_checkpointing, !=, NULL);
2418 
2419 	/*
2420 	 * Normally, we don't want to process a metaslab if there are no
2421 	 * allocations or frees to perform. However, if the metaslab is being
2422 	 * forced to condense and it's loaded, we need to let it through.
2423 	 */
2424 	if (range_tree_is_empty(alloctree) &&
2425 	    range_tree_is_empty(msp->ms_freeing) &&
2426 	    range_tree_is_empty(msp->ms_checkpointing) &&
2427 	    !(msp->ms_loaded && msp->ms_condense_wanted))
2428 		return;
2429 
2430 
2431 	VERIFY(txg <= spa_final_dirty_txg(spa));
2432 
2433 	/*
2434 	 * The only state that can actually be changing concurrently with
2435 	 * metaslab_sync() is the metaslab's ms_allocatable.  No other
2436 	 * thread can be modifying this txg's alloc, freeing,
2437 	 * freed, or space_map_phys_t.  We drop ms_lock whenever we
2438 	 * could call into the DMU, because the DMU can call down to us
2439 	 * (e.g. via zio_free()) at any time.
2440 	 *
2441 	 * The spa_vdev_remove_thread() can be reading metaslab state
2442 	 * concurrently, and it is locked out by the ms_sync_lock.  Note
2443 	 * that the ms_lock is insufficient for this, because it is dropped
2444 	 * by space_map_write().
2445 	 */
2446 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2447 
2448 	if (msp->ms_sm == NULL) {
2449 		uint64_t new_object;
2450 
2451 		new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx);
2452 		VERIFY3U(new_object, !=, 0);
2453 
2454 		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2455 		    msp->ms_start, msp->ms_size, vd->vdev_ashift));
2456 		ASSERT(msp->ms_sm != NULL);
2457 	}
2458 
2459 	if (!range_tree_is_empty(msp->ms_checkpointing) &&
2460 	    vd->vdev_checkpoint_sm == NULL) {
2461 		ASSERT(spa_has_checkpoint(spa));
2462 
2463 		uint64_t new_object = space_map_alloc(mos,
2464 		    vdev_standard_sm_blksz, tx);
2465 		VERIFY3U(new_object, !=, 0);
2466 
2467 		VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
2468 		    mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
2469 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2470 
2471 		/*
2472 		 * We save the space map object as an entry in vdev_top_zap
2473 		 * so it can be retrieved when the pool is reopened after an
2474 		 * export or through zdb.
2475 		 */
2476 		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
2477 		    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
2478 		    sizeof (new_object), 1, &new_object, tx));
2479 	}
2480 
2481 	mutex_enter(&msp->ms_sync_lock);
2482 	mutex_enter(&msp->ms_lock);
2483 
2484 	/*
2485 	 * Note: metaslab_condense() clears the space map's histogram.
2486 	 * Therefore we must verify and remove this histogram before
2487 	 * condensing.
2488 	 */
2489 	metaslab_group_histogram_verify(mg);
2490 	metaslab_class_histogram_verify(mg->mg_class);
2491 	metaslab_group_histogram_remove(mg, msp);
2492 
2493 	if (msp->ms_loaded && metaslab_should_condense(msp)) {
2494 		metaslab_condense(msp, txg, tx);
2495 	} else {
2496 		mutex_exit(&msp->ms_lock);
2497 		space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
2498 		    SM_NO_VDEVID, tx);
2499 		space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
2500 		    SM_NO_VDEVID, tx);
2501 		mutex_enter(&msp->ms_lock);
2502 	}
2503 
2504 	if (!range_tree_is_empty(msp->ms_checkpointing)) {
2505 		ASSERT(spa_has_checkpoint(spa));
2506 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2507 
2508 		/*
2509 		 * Since we are doing writes to disk and the ms_checkpointing
2510 		 * tree won't be changing during that time, we drop the
2511 		 * ms_lock while writing to the checkpoint space map.
2512 		 */
2513 		mutex_exit(&msp->ms_lock);
2514 		space_map_write(vd->vdev_checkpoint_sm,
2515 		    msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
2516 		mutex_enter(&msp->ms_lock);
2517 		space_map_update(vd->vdev_checkpoint_sm);
2518 
2519 		spa->spa_checkpoint_info.sci_dspace +=
2520 		    range_tree_space(msp->ms_checkpointing);
2521 		vd->vdev_stat.vs_checkpoint_space +=
2522 		    range_tree_space(msp->ms_checkpointing);
2523 		ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
2524 		    -vd->vdev_checkpoint_sm->sm_alloc);
2525 
2526 		range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
2527 	}
2528 
2529 	if (msp->ms_loaded) {
2530 		/*
2531 		 * When the space map is loaded, we have an accurate
2532 		 * histogram in the range tree. This gives us an opportunity
2533 		 * to bring the space map's histogram up-to-date so we clear
2534 		 * it first before updating it.
2535 		 */
2536 		space_map_histogram_clear(msp->ms_sm);
2537 		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
2538 
2539 		/*
2540 		 * Since we've cleared the histogram we need to add back
2541 		 * any free space that has already been processed, plus
2542 		 * any deferred space. This allows the on-disk histogram
2543 		 * to accurately reflect all free space even if some space
2544 		 * is not yet available for allocation (i.e. deferred).
2545 		 */
2546 		space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
2547 
2548 		/*
2549 		 * Add back any deferred free space that has not been
2550 		 * added back into the in-core free tree yet. This will
2551 		 * ensure that we don't end up with a space map histogram
2552 		 * that is completely empty unless the metaslab is fully
2553 		 * allocated.
2554 		 */
2555 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2556 			space_map_histogram_add(msp->ms_sm,
2557 			    msp->ms_defer[t], tx);
2558 		}
2559 	}
2560 
2561 	/*
2562 	 * Always add the free space from this sync pass to the space
2563 	 * map histogram. We want to make sure that the on-disk histogram
2564 	 * accounts for all free space. If the space map is not loaded,
2565 	 * then we will lose some accuracy but will correct it the next
2566 	 * time we load the space map.
2567 	 */
2568 	space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
2569 
2570 	metaslab_group_histogram_add(mg, msp);
2571 	metaslab_group_histogram_verify(mg);
2572 	metaslab_class_histogram_verify(mg->mg_class);
2573 
2574 	/*
2575 	 * For sync pass 1, we avoid traversing this txg's free range tree
2576 	 * and instead will just swap the pointers for freeing and
2577 	 * freed. We can safely do this since the freed_tree is
2578 	 * guaranteed to be empty on the initial pass.
2579 	 */
2580 	if (spa_sync_pass(spa) == 1) {
2581 		range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
2582 	} else {
2583 		range_tree_vacate(msp->ms_freeing,
2584 		    range_tree_add, msp->ms_freed);
2585 	}
2586 	range_tree_vacate(alloctree, NULL, NULL);
2587 
2588 	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2589 	ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
2590 	    & TXG_MASK]));
2591 	ASSERT0(range_tree_space(msp->ms_freeing));
2592 	ASSERT0(range_tree_space(msp->ms_checkpointing));
2593 
2594 	mutex_exit(&msp->ms_lock);
2595 
2596 	if (object != space_map_object(msp->ms_sm)) {
2597 		object = space_map_object(msp->ms_sm);
2598 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2599 		    msp->ms_id, sizeof (uint64_t), &object, tx);
2600 	}
2601 	mutex_exit(&msp->ms_sync_lock);
2602 	dmu_tx_commit(tx);
2603 }
2604 
2605 /*
2606  * Called after a transaction group has completely synced to mark
2607  * all of the metaslab's free space as usable.
2608  */
2609 void
2610 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2611 {
2612 	metaslab_group_t *mg = msp->ms_group;
2613 	vdev_t *vd = mg->mg_vd;
2614 	spa_t *spa = vd->vdev_spa;
2615 	range_tree_t **defer_tree;
2616 	int64_t alloc_delta, defer_delta;
2617 	boolean_t defer_allowed = B_TRUE;
2618 
2619 	ASSERT(!vd->vdev_ishole);
2620 
2621 	mutex_enter(&msp->ms_lock);
2622 
2623 	/*
2624 	 * If this metaslab is just becoming available, initialize its
2625 	 * range trees and add its capacity to the vdev.
2626 	 */
2627 	if (msp->ms_freed == NULL) {
2628 		for (int t = 0; t < TXG_SIZE; t++) {
2629 			ASSERT(msp->ms_allocating[t] == NULL);
2630 
2631 			msp->ms_allocating[t] = range_tree_create(NULL, NULL);
2632 		}
2633 
2634 		ASSERT3P(msp->ms_freeing, ==, NULL);
2635 		msp->ms_freeing = range_tree_create(NULL, NULL);
2636 
2637 		ASSERT3P(msp->ms_freed, ==, NULL);
2638 		msp->ms_freed = range_tree_create(NULL, NULL);
2639 
2640 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2641 			ASSERT(msp->ms_defer[t] == NULL);
2642 
2643 			msp->ms_defer[t] = range_tree_create(NULL, NULL);
2644 		}
2645 
2646 		ASSERT3P(msp->ms_checkpointing, ==, NULL);
2647 		msp->ms_checkpointing = range_tree_create(NULL, NULL);
2648 
2649 		vdev_space_update(vd, 0, 0, msp->ms_size);
2650 	}
2651 	ASSERT0(range_tree_space(msp->ms_freeing));
2652 	ASSERT0(range_tree_space(msp->ms_checkpointing));
2653 
2654 	defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
2655 
2656 	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
2657 	    metaslab_class_get_alloc(spa_normal_class(spa));
2658 	if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
2659 		defer_allowed = B_FALSE;
2660 	}
2661 
2662 	defer_delta = 0;
2663 	alloc_delta = space_map_alloc_delta(msp->ms_sm);
2664 	if (defer_allowed) {
2665 		defer_delta = range_tree_space(msp->ms_freed) -
2666 		    range_tree_space(*defer_tree);
2667 	} else {
2668 		defer_delta -= range_tree_space(*defer_tree);
2669 	}
2670 
2671 	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
2672 
2673 	/*
2674 	 * If there's a metaslab_load() in progress, wait for it to complete
2675 	 * so that we have a consistent view of the in-core space map.
2676 	 */
2677 	metaslab_load_wait(msp);
2678 
2679 	/*
2680 	 * Move the frees from the defer_tree back to the free
2681 	 * range tree (if it's loaded). Swap the freed_tree and
2682 	 * the defer_tree -- this is safe to do because we've
2683 	 * just emptied out the defer_tree.
2684 	 */
2685 	range_tree_vacate(*defer_tree,
2686 	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
2687 	if (defer_allowed) {
2688 		range_tree_swap(&msp->ms_freed, defer_tree);
2689 	} else {
2690 		range_tree_vacate(msp->ms_freed,
2691 		    msp->ms_loaded ? range_tree_add : NULL,
2692 		    msp->ms_allocatable);
2693 	}
2694 	space_map_update(msp->ms_sm);
2695 
2696 	msp->ms_deferspace += defer_delta;
2697 	ASSERT3S(msp->ms_deferspace, >=, 0);
2698 	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2699 	if (msp->ms_deferspace != 0) {
2700 		/*
2701 		 * Keep syncing this metaslab until all deferred frees
2702 		 * are back in circulation.
2703 		 */
2704 		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2705 	}
2706 
2707 	if (msp->ms_new) {
2708 		msp->ms_new = B_FALSE;
2709 		mutex_enter(&mg->mg_lock);
2710 		mg->mg_ms_ready++;
2711 		mutex_exit(&mg->mg_lock);
2712 	}
2713 	/*
2714 	 * Calculate the new weights before unloading any metaslabs.
2715 	 * This will give us the most accurate weighting.
2716 	 */
2717 	metaslab_group_sort(mg, msp, metaslab_weight(msp) |
2718 	    (msp->ms_weight & METASLAB_ACTIVE_MASK));
2719 
2720 	/*
2721 	 * If the metaslab is loaded and we've not tried to load or allocate
2722 	 * from it in 'metaslab_unload_delay' txgs, then unload it.
2723 	 */
2724 	if (msp->ms_loaded &&
2725 	    msp->ms_initializing == 0 &&
2726 	    msp->ms_selected_txg + metaslab_unload_delay < txg) {
2727 		for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2728 			VERIFY0(range_tree_space(
2729 			    msp->ms_allocating[(txg + t) & TXG_MASK]));
2730 		}
2731 		if (msp->ms_allocator != -1) {
2732 			metaslab_passivate(msp, msp->ms_weight &
2733 			    ~METASLAB_ACTIVE_MASK);
2734 		}
2735 
2736 		if (!metaslab_debug_unload)
2737 			metaslab_unload(msp);
2738 	}
2739 
2740 	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2741 	ASSERT0(range_tree_space(msp->ms_freeing));
2742 	ASSERT0(range_tree_space(msp->ms_freed));
2743 	ASSERT0(range_tree_space(msp->ms_checkpointing));
2744 
2745 	mutex_exit(&msp->ms_lock);
2746 }
2747 
2748 void
2749 metaslab_sync_reassess(metaslab_group_t *mg)
2750 {
2751 	spa_t *spa = mg->mg_class->mc_spa;
2752 
2753 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2754 	metaslab_group_alloc_update(mg);
2755 	mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2756 
2757 	/*
2758 	 * Preload the next potential metaslabs but only on active
2759 	 * metaslab groups. We can get into a state where the metaslab
2760 	 * is no longer active since we dirty metaslabs as we remove a
2761 	 * a device, thus potentially making the metaslab group eligible
2762 	 * for preloading.
2763 	 */
2764 	if (mg->mg_activation_count > 0) {
2765 		metaslab_group_preload(mg);
2766 	}
2767 	spa_config_exit(spa, SCL_ALLOC, FTAG);
2768 }
2769 
2770 static uint64_t
2771 metaslab_distance(metaslab_t *msp, dva_t *dva)
2772 {
2773 	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2774 	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2775 	uint64_t start = msp->ms_id;
2776 
2777 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2778 		return (1ULL << 63);
2779 
2780 	if (offset < start)
2781 		return ((start - offset) << ms_shift);
2782 	if (offset > start)
2783 		return ((offset - start) << ms_shift);
2784 	return (0);
2785 }
2786 
2787 /*
2788  * ==========================================================================
2789  * Metaslab allocation tracing facility
2790  * ==========================================================================
2791  */
2792 kstat_t *metaslab_trace_ksp;
2793 kstat_named_t metaslab_trace_over_limit;
2794 
2795 void
2796 metaslab_alloc_trace_init(void)
2797 {
2798 	ASSERT(metaslab_alloc_trace_cache == NULL);
2799 	metaslab_alloc_trace_cache = kmem_cache_create(
2800 	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2801 	    0, NULL, NULL, NULL, NULL, NULL, 0);
2802 	metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2803 	    "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2804 	if (metaslab_trace_ksp != NULL) {
2805 		metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2806 		kstat_named_init(&metaslab_trace_over_limit,
2807 		    "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2808 		kstat_install(metaslab_trace_ksp);
2809 	}
2810 }
2811 
2812 void
2813 metaslab_alloc_trace_fini(void)
2814 {
2815 	if (metaslab_trace_ksp != NULL) {
2816 		kstat_delete(metaslab_trace_ksp);
2817 		metaslab_trace_ksp = NULL;
2818 	}
2819 	kmem_cache_destroy(metaslab_alloc_trace_cache);
2820 	metaslab_alloc_trace_cache = NULL;
2821 }
2822 
2823 /*
2824  * Add an allocation trace element to the allocation tracing list.
2825  */
2826 static void
2827 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
2828     metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
2829     int allocator)
2830 {
2831 	if (!metaslab_trace_enabled)
2832 		return;
2833 
2834 	/*
2835 	 * When the tracing list reaches its maximum we remove
2836 	 * the second element in the list before adding a new one.
2837 	 * By removing the second element we preserve the original
2838 	 * entry as a clue to what allocations steps have already been
2839 	 * performed.
2840 	 */
2841 	if (zal->zal_size == metaslab_trace_max_entries) {
2842 		metaslab_alloc_trace_t *mat_next;
2843 #ifdef DEBUG
2844 		panic("too many entries in allocation list");
2845 #endif
2846 		atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2847 		zal->zal_size--;
2848 		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2849 		list_remove(&zal->zal_list, mat_next);
2850 		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2851 	}
2852 
2853 	metaslab_alloc_trace_t *mat =
2854 	    kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2855 	list_link_init(&mat->mat_list_node);
2856 	mat->mat_mg = mg;
2857 	mat->mat_msp = msp;
2858 	mat->mat_size = psize;
2859 	mat->mat_dva_id = dva_id;
2860 	mat->mat_offset = offset;
2861 	mat->mat_weight = 0;
2862 	mat->mat_allocator = allocator;
2863 
2864 	if (msp != NULL)
2865 		mat->mat_weight = msp->ms_weight;
2866 
2867 	/*
2868 	 * The list is part of the zio so locking is not required. Only
2869 	 * a single thread will perform allocations for a given zio.
2870 	 */
2871 	list_insert_tail(&zal->zal_list, mat);
2872 	zal->zal_size++;
2873 
2874 	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2875 }
2876 
2877 void
2878 metaslab_trace_init(zio_alloc_list_t *zal)
2879 {
2880 	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2881 	    offsetof(metaslab_alloc_trace_t, mat_list_node));
2882 	zal->zal_size = 0;
2883 }
2884 
2885 void
2886 metaslab_trace_fini(zio_alloc_list_t *zal)
2887 {
2888 	metaslab_alloc_trace_t *mat;
2889 
2890 	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2891 		kmem_cache_free(metaslab_alloc_trace_cache, mat);
2892 	list_destroy(&zal->zal_list);
2893 	zal->zal_size = 0;
2894 }
2895 
2896 /*
2897  * ==========================================================================
2898  * Metaslab block operations
2899  * ==========================================================================
2900  */
2901 
2902 static void
2903 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
2904     int allocator)
2905 {
2906 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
2907 	    (flags & METASLAB_DONT_THROTTLE))
2908 		return;
2909 
2910 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2911 	if (!mg->mg_class->mc_alloc_throttle_enabled)
2912 		return;
2913 
2914 	(void) refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
2915 }
2916 
2917 static void
2918 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
2919 {
2920 	uint64_t max = mg->mg_max_alloc_queue_depth;
2921 	uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
2922 	while (cur < max) {
2923 		if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
2924 		    cur, cur + 1) == cur) {
2925 			atomic_inc_64(
2926 			    &mg->mg_class->mc_alloc_max_slots[allocator]);
2927 			return;
2928 		}
2929 		cur = mg->mg_cur_max_alloc_queue_depth[allocator];
2930 	}
2931 }
2932 
2933 void
2934 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
2935     int allocator, boolean_t io_complete)
2936 {
2937 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
2938 	    (flags & METASLAB_DONT_THROTTLE))
2939 		return;
2940 
2941 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2942 	if (!mg->mg_class->mc_alloc_throttle_enabled)
2943 		return;
2944 
2945 	(void) refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
2946 	if (io_complete)
2947 		metaslab_group_increment_qdepth(mg, allocator);
2948 }
2949 
2950 void
2951 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
2952     int allocator)
2953 {
2954 #ifdef ZFS_DEBUG
2955 	const dva_t *dva = bp->blk_dva;
2956 	int ndvas = BP_GET_NDVAS(bp);
2957 
2958 	for (int d = 0; d < ndvas; d++) {
2959 		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2960 		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2961 		VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth[allocator],
2962 		    tag));
2963 	}
2964 #endif
2965 }
2966 
2967 static uint64_t
2968 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
2969 {
2970 	uint64_t start;
2971 	range_tree_t *rt = msp->ms_allocatable;
2972 	metaslab_class_t *mc = msp->ms_group->mg_class;
2973 
2974 	VERIFY(!msp->ms_condensing);
2975 	VERIFY0(msp->ms_initializing);
2976 
2977 	start = mc->mc_ops->msop_alloc(msp, size);
2978 	if (start != -1ULL) {
2979 		metaslab_group_t *mg = msp->ms_group;
2980 		vdev_t *vd = mg->mg_vd;
2981 
2982 		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
2983 		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2984 		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
2985 		range_tree_remove(rt, start, size);
2986 
2987 		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
2988 			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2989 
2990 		range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
2991 
2992 		/* Track the last successful allocation */
2993 		msp->ms_alloc_txg = txg;
2994 		metaslab_verify_space(msp, txg);
2995 	}
2996 
2997 	/*
2998 	 * Now that we've attempted the allocation we need to update the
2999 	 * metaslab's maximum block size since it may have changed.
3000 	 */
3001 	msp->ms_max_size = metaslab_block_maxsize(msp);
3002 	return (start);
3003 }
3004 
3005 /*
3006  * Find the metaslab with the highest weight that is less than what we've
3007  * already tried.  In the common case, this means that we will examine each
3008  * metaslab at most once. Note that concurrent callers could reorder metaslabs
3009  * by activation/passivation once we have dropped the mg_lock. If a metaslab is
3010  * activated by another thread, and we fail to allocate from the metaslab we
3011  * have selected, we may not try the newly-activated metaslab, and instead
3012  * activate another metaslab.  This is not optimal, but generally does not cause
3013  * any problems (a possible exception being if every metaslab is completely full
3014  * except for the the newly-activated metaslab which we fail to examine).
3015  */
3016 static metaslab_t *
3017 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
3018     dva_t *dva, int d, uint64_t min_distance, uint64_t asize, int allocator,
3019     zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
3020 {
3021 	avl_index_t idx;
3022 	avl_tree_t *t = &mg->mg_metaslab_tree;
3023 	metaslab_t *msp = avl_find(t, search, &idx);
3024 	if (msp == NULL)
3025 		msp = avl_nearest(t, idx, AVL_AFTER);
3026 
3027 	for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
3028 		int i;
3029 		if (!metaslab_should_allocate(msp, asize)) {
3030 			metaslab_trace_add(zal, mg, msp, asize, d,
3031 			    TRACE_TOO_SMALL, allocator);
3032 			continue;
3033 		}
3034 
3035 		/*
3036 			 * If the selected metaslab is condensing or being
3037 			 * initialized, skip it.
3038 		 */
3039 			if (msp->ms_condensing || msp->ms_initializing > 0)
3040 			continue;
3041 
3042 		*was_active = msp->ms_allocator != -1;
3043 		/*
3044 		 * If we're activating as primary, this is our first allocation
3045 		 * from this disk, so we don't need to check how close we are.
3046 		 * If the metaslab under consideration was already active,
3047 		 * we're getting desperate enough to steal another allocator's
3048 		 * metaslab, so we still don't care about distances.
3049 		 */
3050 		if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
3051 			break;
3052 
3053 		uint64_t target_distance = min_distance
3054 		    + (space_map_allocated(msp->ms_sm) != 0 ? 0 :
3055 		    min_distance >> 1);
3056 
3057 		for (i = 0; i < d; i++) {
3058 			if (metaslab_distance(msp, &dva[i]) < target_distance)
3059 				break;
3060 		}
3061 		if (i == d)
3062 			break;
3063 	}
3064 
3065 	if (msp != NULL) {
3066 		search->ms_weight = msp->ms_weight;
3067 		search->ms_start = msp->ms_start + 1;
3068 		search->ms_allocator = msp->ms_allocator;
3069 		search->ms_primary = msp->ms_primary;
3070 	}
3071 	return (msp);
3072 }
3073 
3074 /* ARGSUSED */
3075 static uint64_t
3076 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
3077     uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d,
3078     int allocator)
3079 {
3080 	metaslab_t *msp = NULL;
3081 	uint64_t offset = -1ULL;
3082 	uint64_t activation_weight;
3083 	boolean_t tertiary = B_FALSE;
3084 
3085 	activation_weight = METASLAB_WEIGHT_PRIMARY;
3086 	for (int i = 0; i < d; i++) {
3087 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3088 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
3089 			activation_weight = METASLAB_WEIGHT_SECONDARY;
3090 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
3091 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
3092 			tertiary = B_TRUE;
3093 			break;
3094 		}
3095 	}
3096 
3097 	/*
3098 	 * If we don't have enough metaslabs active to fill the entire array, we
3099 	 * just use the 0th slot.
3100 	 */
3101 	if (mg->mg_ms_ready < mg->mg_allocators * 2) {
3102 		tertiary = B_FALSE;
3103 		allocator = 0;
3104 	}
3105 
3106 	ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
3107 
3108 	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
3109 	search->ms_weight = UINT64_MAX;
3110 	search->ms_start = 0;
3111 	/*
3112 	 * At the end of the metaslab tree are the already-active metaslabs,
3113 	 * first the primaries, then the secondaries. When we resume searching
3114 	 * through the tree, we need to consider ms_allocator and ms_primary so
3115 	 * we start in the location right after where we left off, and don't
3116 	 * accidentally loop forever considering the same metaslabs.
3117 	 */
3118 	search->ms_allocator = -1;
3119 	search->ms_primary = B_TRUE;
3120 	for (;;) {
3121 		boolean_t was_active = B_FALSE;
3122 
3123 		mutex_enter(&mg->mg_lock);
3124 
3125 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3126 		    mg->mg_primaries[allocator] != NULL) {
3127 			msp = mg->mg_primaries[allocator];
3128 			was_active = B_TRUE;
3129 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
3130 		    mg->mg_secondaries[allocator] != NULL && !tertiary) {
3131 			msp = mg->mg_secondaries[allocator];
3132 			was_active = B_TRUE;
3133 		} else {
3134 			msp = find_valid_metaslab(mg, activation_weight, dva, d,
3135 			    min_distance, asize, allocator, zal, search,
3136 			    &was_active);
3137 		}
3138 
3139 		mutex_exit(&mg->mg_lock);
3140 		if (msp == NULL) {
3141 			kmem_free(search, sizeof (*search));
3142 			return (-1ULL);
3143 		}
3144 
3145 		mutex_enter(&msp->ms_lock);
3146 		/*
3147 		 * Ensure that the metaslab we have selected is still
3148 		 * capable of handling our request. It's possible that
3149 		 * another thread may have changed the weight while we
3150 		 * were blocked on the metaslab lock. We check the
3151 		 * active status first to see if we need to reselect
3152 		 * a new metaslab.
3153 		 */
3154 		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
3155 			mutex_exit(&msp->ms_lock);
3156 			continue;
3157 		}
3158 
3159 		/*
3160 		 * If the metaslab is freshly activated for an allocator that
3161 		 * isn't the one we're allocating from, or if it's a primary and
3162 		 * we're seeking a secondary (or vice versa), we go back and
3163 		 * select a new metaslab.
3164 		 */
3165 		if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
3166 		    (msp->ms_allocator != -1) &&
3167 		    (msp->ms_allocator != allocator || ((activation_weight ==
3168 		    METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
3169 			mutex_exit(&msp->ms_lock);
3170 			continue;
3171 		}
3172 
3173 		if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
3174 			metaslab_passivate(msp, msp->ms_weight &
3175 			    ~METASLAB_WEIGHT_CLAIM);
3176 			mutex_exit(&msp->ms_lock);
3177 			continue;
3178 		}
3179 
3180 		if (metaslab_activate(msp, allocator, activation_weight) != 0) {
3181 			mutex_exit(&msp->ms_lock);
3182 			continue;
3183 		}
3184 
3185 		msp->ms_selected_txg = txg;
3186 
3187 		/*
3188 		 * Now that we have the lock, recheck to see if we should
3189 		 * continue to use this metaslab for this allocation. The
3190 		 * the metaslab is now loaded so metaslab_should_allocate() can
3191 		 * accurately determine if the allocation attempt should
3192 		 * proceed.
3193 		 */
3194 		if (!metaslab_should_allocate(msp, asize)) {
3195 			/* Passivate this metaslab and select a new one. */
3196 			metaslab_trace_add(zal, mg, msp, asize, d,
3197 			    TRACE_TOO_SMALL, allocator);
3198 			goto next;
3199 		}
3200 
3201 		/*
3202 		 * If this metaslab is currently condensing then pick again as
3203 		 * we can't manipulate this metaslab until it's committed
3204 		 * to disk. If this metaslab is being initialized, we shouldn't
3205 		 * allocate from it since the allocated region might be
3206 		 * overwritten after allocation.
3207 		 */
3208 		if (msp->ms_condensing) {
3209 			metaslab_trace_add(zal, mg, msp, asize, d,
3210 			    TRACE_CONDENSING, allocator);
3211 			metaslab_passivate(msp, msp->ms_weight &
3212 			    ~METASLAB_ACTIVE_MASK);
3213 			mutex_exit(&msp->ms_lock);
3214 			continue;
3215 		} else if (msp->ms_initializing > 0) {
3216 			metaslab_trace_add(zal, mg, msp, asize, d,
3217 			    TRACE_INITIALIZING, allocator);
3218 			metaslab_passivate(msp, msp->ms_weight &
3219 			    ~METASLAB_ACTIVE_MASK);
3220 			mutex_exit(&msp->ms_lock);
3221 			continue;
3222 		}
3223 
3224 		offset = metaslab_block_alloc(msp, asize, txg);
3225 		metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
3226 
3227 		if (offset != -1ULL) {
3228 			/* Proactively passivate the metaslab, if needed */
3229 			metaslab_segment_may_passivate(msp);
3230 			break;
3231 		}
3232 next:
3233 		ASSERT(msp->ms_loaded);
3234 
3235 		/*
3236 		 * We were unable to allocate from this metaslab so determine
3237 		 * a new weight for this metaslab. Now that we have loaded
3238 		 * the metaslab we can provide a better hint to the metaslab
3239 		 * selector.
3240 		 *
3241 		 * For space-based metaslabs, we use the maximum block size.
3242 		 * This information is only available when the metaslab
3243 		 * is loaded and is more accurate than the generic free
3244 		 * space weight that was calculated by metaslab_weight().
3245 		 * This information allows us to quickly compare the maximum
3246 		 * available allocation in the metaslab to the allocation
3247 		 * size being requested.
3248 		 *
3249 		 * For segment-based metaslabs, determine the new weight
3250 		 * based on the highest bucket in the range tree. We
3251 		 * explicitly use the loaded segment weight (i.e. the range
3252 		 * tree histogram) since it contains the space that is
3253 		 * currently available for allocation and is accurate
3254 		 * even within a sync pass.
3255 		 */
3256 		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3257 			uint64_t weight = metaslab_block_maxsize(msp);
3258 			WEIGHT_SET_SPACEBASED(weight);
3259 			metaslab_passivate(msp, weight);
3260 		} else {
3261 			metaslab_passivate(msp,
3262 			    metaslab_weight_from_range_tree(msp));
3263 		}
3264 
3265 		/*
3266 		 * We have just failed an allocation attempt, check
3267 		 * that metaslab_should_allocate() agrees. Otherwise,
3268 		 * we may end up in an infinite loop retrying the same
3269 		 * metaslab.
3270 		 */
3271 		ASSERT(!metaslab_should_allocate(msp, asize));
3272 		mutex_exit(&msp->ms_lock);
3273 	}
3274 	mutex_exit(&msp->ms_lock);
3275 	kmem_free(search, sizeof (*search));
3276 	return (offset);
3277 }
3278 
3279 static uint64_t
3280 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
3281     uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d,
3282     int allocator)
3283 {
3284 	uint64_t offset;
3285 	ASSERT(mg->mg_initialized);
3286 
3287 	offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
3288 	    min_distance, dva, d, allocator);
3289 
3290 	mutex_enter(&mg->mg_lock);
3291 	if (offset == -1ULL) {
3292 		mg->mg_failed_allocations++;
3293 		metaslab_trace_add(zal, mg, NULL, asize, d,
3294 		    TRACE_GROUP_FAILURE, allocator);
3295 		if (asize == SPA_GANGBLOCKSIZE) {
3296 			/*
3297 			 * This metaslab group was unable to allocate
3298 			 * the minimum gang block size so it must be out of
3299 			 * space. We must notify the allocation throttle
3300 			 * to start skipping allocation attempts to this
3301 			 * metaslab group until more space becomes available.
3302 			 * Note: this failure cannot be caused by the
3303 			 * allocation throttle since the allocation throttle
3304 			 * is only responsible for skipping devices and
3305 			 * not failing block allocations.
3306 			 */
3307 			mg->mg_no_free_space = B_TRUE;
3308 		}
3309 	}
3310 	mg->mg_allocations++;
3311 	mutex_exit(&mg->mg_lock);
3312 	return (offset);
3313 }
3314 
3315 /*
3316  * If we have to write a ditto block (i.e. more than one DVA for a given BP)
3317  * on the same vdev as an existing DVA of this BP, then try to allocate it
3318  * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
3319  * existing DVAs.
3320  */
3321 int ditto_same_vdev_distance_shift = 3;
3322 
3323 /*
3324  * Allocate a block for the specified i/o.
3325  */
3326 int
3327 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
3328     dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
3329     zio_alloc_list_t *zal, int allocator)
3330 {
3331 	metaslab_group_t *mg, *rotor;
3332 	vdev_t *vd;
3333 	boolean_t try_hard = B_FALSE;
3334 
3335 	ASSERT(!DVA_IS_VALID(&dva[d]));
3336 
3337 	/*
3338 	 * For testing, make some blocks above a certain size be gang blocks.
3339 	 */
3340 	if (psize >= metaslab_force_ganging && (ddi_get_lbolt() & 3) == 0) {
3341 		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
3342 		    allocator);
3343 		return (SET_ERROR(ENOSPC));
3344 	}
3345 
3346 	/*
3347 	 * Start at the rotor and loop through all mgs until we find something.
3348 	 * Note that there's no locking on mc_rotor or mc_aliquot because
3349 	 * nothing actually breaks if we miss a few updates -- we just won't
3350 	 * allocate quite as evenly.  It all balances out over time.
3351 	 *
3352 	 * If we are doing ditto or log blocks, try to spread them across
3353 	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
3354 	 * allocated all of our ditto blocks, then try and spread them out on
3355 	 * that vdev as much as possible.  If it turns out to not be possible,
3356 	 * gradually lower our standards until anything becomes acceptable.
3357 	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
3358 	 * gives us hope of containing our fault domains to something we're
3359 	 * able to reason about.  Otherwise, any two top-level vdev failures
3360 	 * will guarantee the loss of data.  With consecutive allocation,
3361 	 * only two adjacent top-level vdev failures will result in data loss.
3362 	 *
3363 	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3364 	 * ourselves on the same vdev as our gang block header.  That
3365 	 * way, we can hope for locality in vdev_cache, plus it makes our
3366 	 * fault domains something tractable.
3367 	 */
3368 	if (hintdva) {
3369 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
3370 
3371 		/*
3372 		 * It's possible the vdev we're using as the hint no
3373 		 * longer exists or its mg has been closed (e.g. by
3374 		 * device removal).  Consult the rotor when
3375 		 * all else fails.
3376 		 */
3377 		if (vd != NULL && vd->vdev_mg != NULL) {
3378 			mg = vd->vdev_mg;
3379 
3380 			if (flags & METASLAB_HINTBP_AVOID &&
3381 			    mg->mg_next != NULL)
3382 				mg = mg->mg_next;
3383 		} else {
3384 			mg = mc->mc_rotor;
3385 		}
3386 	} else if (d != 0) {
3387 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3388 		mg = vd->vdev_mg->mg_next;
3389 	} else {
3390 		mg = mc->mc_rotor;
3391 	}
3392 
3393 	/*
3394 	 * If the hint put us into the wrong metaslab class, or into a
3395 	 * metaslab group that has been passivated, just follow the rotor.
3396 	 */
3397 	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
3398 		mg = mc->mc_rotor;
3399 
3400 	rotor = mg;
3401 top:
3402 	do {
3403 		boolean_t allocatable;
3404 
3405 		ASSERT(mg->mg_activation_count == 1);
3406 		vd = mg->mg_vd;
3407 
3408 		/*
3409 		 * Don't allocate from faulted devices.
3410 		 */
3411 		if (try_hard) {
3412 			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3413 			allocatable = vdev_allocatable(vd);
3414 			spa_config_exit(spa, SCL_ZIO, FTAG);
3415 		} else {
3416 			allocatable = vdev_allocatable(vd);
3417 		}
3418 
3419 		/*
3420 		 * Determine if the selected metaslab group is eligible
3421 		 * for allocations. If we're ganging then don't allow
3422 		 * this metaslab group to skip allocations since that would
3423 		 * inadvertently return ENOSPC and suspend the pool
3424 		 * even though space is still available.
3425 		 */
3426 		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3427 			allocatable = metaslab_group_allocatable(mg, rotor,
3428 			    psize, allocator);
3429 		}
3430 
3431 		if (!allocatable) {
3432 			metaslab_trace_add(zal, mg, NULL, psize, d,
3433 			    TRACE_NOT_ALLOCATABLE, allocator);
3434 			goto next;
3435 		}
3436 
3437 		ASSERT(mg->mg_initialized);
3438 
3439 		/*
3440 		 * Avoid writing single-copy data to a failing,
3441 		 * non-redundant vdev, unless we've already tried all
3442 		 * other vdevs.
3443 		 */
3444 		if ((vd->vdev_stat.vs_write_errors > 0 ||
3445 		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
3446 		    d == 0 && !try_hard && vd->vdev_children == 0) {
3447 			metaslab_trace_add(zal, mg, NULL, psize, d,
3448 			    TRACE_VDEV_ERROR, allocator);
3449 			goto next;
3450 		}
3451 
3452 		ASSERT(mg->mg_class == mc);
3453 
3454 		/*
3455 		 * If we don't need to try hard, then require that the
3456 		 * block be 1/8th of the device away from any other DVAs
3457 		 * in this BP.  If we are trying hard, allow any offset
3458 		 * to be used (distance=0).
3459 		 */
3460 		uint64_t distance = 0;
3461 		if (!try_hard) {
3462 			distance = vd->vdev_asize >>
3463 			    ditto_same_vdev_distance_shift;
3464 			if (distance <= (1ULL << vd->vdev_ms_shift))
3465 				distance = 0;
3466 		}
3467 
3468 		uint64_t asize = vdev_psize_to_asize(vd, psize);
3469 		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3470 
3471 		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
3472 		    distance, dva, d, allocator);
3473 
3474 		if (offset != -1ULL) {
3475 			/*
3476 			 * If we've just selected this metaslab group,
3477 			 * figure out whether the corresponding vdev is
3478 			 * over- or under-used relative to the pool,
3479 			 * and set an allocation bias to even it out.
3480 			 */
3481 			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
3482 				vdev_stat_t *vs = &vd->vdev_stat;
3483 				int64_t vu, cu;
3484 
3485 				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
3486 				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
3487 
3488 				/*
3489 				 * Calculate how much more or less we should
3490 				 * try to allocate from this device during
3491 				 * this iteration around the rotor.
3492 				 * For example, if a device is 80% full
3493 				 * and the pool is 20% full then we should
3494 				 * reduce allocations by 60% on this device.
3495 				 *
3496 				 * mg_bias = (20 - 80) * 512K / 100 = -307K
3497 				 *
3498 				 * This reduces allocations by 307K for this
3499 				 * iteration.
3500 				 */
3501 				mg->mg_bias = ((cu - vu) *
3502 				    (int64_t)mg->mg_aliquot) / 100;
3503 			} else if (!metaslab_bias_enabled) {
3504 				mg->mg_bias = 0;
3505 			}
3506 
3507 			if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
3508 			    mg->mg_aliquot + mg->mg_bias) {
3509 				mc->mc_rotor = mg->mg_next;
3510 				mc->mc_aliquot = 0;
3511 			}
3512 
3513 			DVA_SET_VDEV(&dva[d], vd->vdev_id);
3514 			DVA_SET_OFFSET(&dva[d], offset);
3515 			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
3516 			DVA_SET_ASIZE(&dva[d], asize);
3517 
3518 			return (0);
3519 		}
3520 next:
3521 		mc->mc_rotor = mg->mg_next;
3522 		mc->mc_aliquot = 0;
3523 	} while ((mg = mg->mg_next) != rotor);
3524 
3525 	/*
3526 	 * If we haven't tried hard, do so now.
3527 	 */
3528 	if (!try_hard) {
3529 		try_hard = B_TRUE;
3530 		goto top;
3531 	}
3532 
3533 	bzero(&dva[d], sizeof (dva_t));
3534 
3535 	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
3536 	return (SET_ERROR(ENOSPC));
3537 }
3538 
3539 void
3540 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
3541     boolean_t checkpoint)
3542 {
3543 	metaslab_t *msp;
3544 	spa_t *spa = vd->vdev_spa;
3545 
3546 	ASSERT(vdev_is_concrete(vd));
3547 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3548 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
3549 
3550 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3551 
3552 	VERIFY(!msp->ms_condensing);
3553 	VERIFY3U(offset, >=, msp->ms_start);
3554 	VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
3555 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3556 	VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
3557 
3558 	metaslab_check_free_impl(vd, offset, asize);
3559 
3560 	mutex_enter(&msp->ms_lock);
3561 	if (range_tree_is_empty(msp->ms_freeing) &&
3562 	    range_tree_is_empty(msp->ms_checkpointing)) {
3563 		vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
3564 	}
3565 
3566 	if (checkpoint) {
3567 		ASSERT(spa_has_checkpoint(spa));
3568 		range_tree_add(msp->ms_checkpointing, offset, asize);
3569 	} else {
3570 		range_tree_add(msp->ms_freeing, offset, asize);
3571 	}
3572 	mutex_exit(&msp->ms_lock);
3573 }
3574 
3575 /* ARGSUSED */
3576 void
3577 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3578     uint64_t size, void *arg)
3579 {
3580 	boolean_t *checkpoint = arg;
3581 
3582 	ASSERT3P(checkpoint, !=, NULL);
3583 
3584 	if (vd->vdev_ops->vdev_op_remap != NULL)
3585 		vdev_indirect_mark_obsolete(vd, offset, size);
3586 	else
3587 		metaslab_free_impl(vd, offset, size, *checkpoint);
3588 }
3589 
3590 static void
3591 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
3592     boolean_t checkpoint)
3593 {
3594 	spa_t *spa = vd->vdev_spa;
3595 
3596 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3597 
3598 	if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
3599 		return;
3600 
3601 	if (spa->spa_vdev_removal != NULL &&
3602 	    spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
3603 	    vdev_is_concrete(vd)) {
3604 		/*
3605 		 * Note: we check if the vdev is concrete because when
3606 		 * we complete the removal, we first change the vdev to be
3607 		 * an indirect vdev (in open context), and then (in syncing
3608 		 * context) clear spa_vdev_removal.
3609 		 */
3610 		free_from_removing_vdev(vd, offset, size);
3611 	} else if (vd->vdev_ops->vdev_op_remap != NULL) {
3612 		vdev_indirect_mark_obsolete(vd, offset, size);
3613 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
3614 		    metaslab_free_impl_cb, &checkpoint);
3615 	} else {
3616 		metaslab_free_concrete(vd, offset, size, checkpoint);
3617 	}
3618 }
3619 
3620 typedef struct remap_blkptr_cb_arg {
3621 	blkptr_t *rbca_bp;
3622 	spa_remap_cb_t rbca_cb;
3623 	vdev_t *rbca_remap_vd;
3624 	uint64_t rbca_remap_offset;
3625 	void *rbca_cb_arg;
3626 } remap_blkptr_cb_arg_t;
3627 
3628 void
3629 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3630     uint64_t size, void *arg)
3631 {
3632 	remap_blkptr_cb_arg_t *rbca = arg;
3633 	blkptr_t *bp = rbca->rbca_bp;
3634 
3635 	/* We can not remap split blocks. */
3636 	if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
3637 		return;
3638 	ASSERT0(inner_offset);
3639 
3640 	if (rbca->rbca_cb != NULL) {
3641 		/*
3642 		 * At this point we know that we are not handling split
3643 		 * blocks and we invoke the callback on the previous
3644 		 * vdev which must be indirect.
3645 		 */
3646 		ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
3647 
3648 		rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
3649 		    rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
3650 
3651 		/* set up remap_blkptr_cb_arg for the next call */
3652 		rbca->rbca_remap_vd = vd;
3653 		rbca->rbca_remap_offset = offset;
3654 	}
3655 
3656 	/*
3657 	 * The phys birth time is that of dva[0].  This ensures that we know
3658 	 * when each dva was written, so that resilver can determine which
3659 	 * blocks need to be scrubbed (i.e. those written during the time
3660 	 * the vdev was offline).  It also ensures that the key used in
3661 	 * the ARC hash table is unique (i.e. dva[0] + phys_birth).  If
3662 	 * we didn't change the phys_birth, a lookup in the ARC for a
3663 	 * remapped BP could find the data that was previously stored at
3664 	 * this vdev + offset.
3665 	 */
3666 	vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
3667 	    DVA_GET_VDEV(&bp->blk_dva[0]));
3668 	vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
3669 	bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
3670 	    DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
3671 
3672 	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
3673 	DVA_SET_OFFSET(&bp->blk_dva[0], offset);
3674 }
3675 
3676 /*
3677  * If the block pointer contains any indirect DVAs, modify them to refer to
3678  * concrete DVAs.  Note that this will sometimes not be possible, leaving
3679  * the indirect DVA in place.  This happens if the indirect DVA spans multiple
3680  * segments in the mapping (i.e. it is a "split block").
3681  *
3682  * If the BP was remapped, calls the callback on the original dva (note the
3683  * callback can be called multiple times if the original indirect DVA refers
3684  * to another indirect DVA, etc).
3685  *
3686  * Returns TRUE if the BP was remapped.
3687  */
3688 boolean_t
3689 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
3690 {
3691 	remap_blkptr_cb_arg_t rbca;
3692 
3693 	if (!zfs_remap_blkptr_enable)
3694 		return (B_FALSE);
3695 
3696 	if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
3697 		return (B_FALSE);
3698 
3699 	/*
3700 	 * Dedup BP's can not be remapped, because ddt_phys_select() depends
3701 	 * on DVA[0] being the same in the BP as in the DDT (dedup table).
3702 	 */
3703 	if (BP_GET_DEDUP(bp))
3704 		return (B_FALSE);
3705 
3706 	/*
3707 	 * Gang blocks can not be remapped, because
3708 	 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
3709 	 * the BP used to read the gang block header (GBH) being the same
3710 	 * as the DVA[0] that we allocated for the GBH.
3711 	 */
3712 	if (BP_IS_GANG(bp))
3713 		return (B_FALSE);
3714 
3715 	/*
3716 	 * Embedded BP's have no DVA to remap.
3717 	 */
3718 	if (BP_GET_NDVAS(bp) < 1)
3719 		return (B_FALSE);
3720 
3721 	/*
3722 	 * Note: we only remap dva[0].  If we remapped other dvas, we
3723 	 * would no longer know what their phys birth txg is.
3724 	 */
3725 	dva_t *dva = &bp->blk_dva[0];
3726 
3727 	uint64_t offset = DVA_GET_OFFSET(dva);
3728 	uint64_t size = DVA_GET_ASIZE(dva);
3729 	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
3730 
3731 	if (vd->vdev_ops->vdev_op_remap == NULL)
3732 		return (B_FALSE);
3733 
3734 	rbca.rbca_bp = bp;
3735 	rbca.rbca_cb = callback;
3736 	rbca.rbca_remap_vd = vd;
3737 	rbca.rbca_remap_offset = offset;
3738 	rbca.rbca_cb_arg = arg;
3739 
3740 	/*
3741 	 * remap_blkptr_cb() will be called in order for each level of
3742 	 * indirection, until a concrete vdev is reached or a split block is
3743 	 * encountered. old_vd and old_offset are updated within the callback
3744 	 * as we go from the one indirect vdev to the next one (either concrete
3745 	 * or indirect again) in that order.
3746 	 */
3747 	vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
3748 
3749 	/* Check if the DVA wasn't remapped because it is a split block */
3750 	if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
3751 		return (B_FALSE);
3752 
3753 	return (B_TRUE);
3754 }
3755 
3756 /*
3757  * Undo the allocation of a DVA which happened in the given transaction group.
3758  */
3759 void
3760 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3761 {
3762 	metaslab_t *msp;
3763 	vdev_t *vd;
3764 	uint64_t vdev = DVA_GET_VDEV(dva);
3765 	uint64_t offset = DVA_GET_OFFSET(dva);
3766 	uint64_t size = DVA_GET_ASIZE(dva);
3767 
3768 	ASSERT(DVA_IS_VALID(dva));
3769 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3770 
3771 	if (txg > spa_freeze_txg(spa))
3772 		return;
3773 
3774 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3775 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
3776 		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
3777 		    (u_longlong_t)vdev, (u_longlong_t)offset);
3778 		ASSERT(0);
3779 		return;
3780 	}
3781 
3782 	ASSERT(!vd->vdev_removing);
3783 	ASSERT(vdev_is_concrete(vd));
3784 	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
3785 	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
3786 
3787 	if (DVA_GET_GANG(dva))
3788 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3789 
3790 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3791 
3792 	mutex_enter(&msp->ms_lock);
3793 	range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
3794 	    offset, size);
3795 
3796 	VERIFY(!msp->ms_condensing);
3797 	VERIFY3U(offset, >=, msp->ms_start);
3798 	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
3799 	VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
3800 	    msp->ms_size);
3801 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3802 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3803 	range_tree_add(msp->ms_allocatable, offset, size);
3804 	mutex_exit(&msp->ms_lock);
3805 }
3806 
3807 /*
3808  * Free the block represented by the given DVA.
3809  */
3810 void
3811 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
3812 {
3813 	uint64_t vdev = DVA_GET_VDEV(dva);
3814 	uint64_t offset = DVA_GET_OFFSET(dva);
3815 	uint64_t size = DVA_GET_ASIZE(dva);
3816 	vdev_t *vd = vdev_lookup_top(spa, vdev);
3817 
3818 	ASSERT(DVA_IS_VALID(dva));
3819 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3820 
3821 	if (DVA_GET_GANG(dva)) {
3822 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3823 	}
3824 
3825 	metaslab_free_impl(vd, offset, size, checkpoint);
3826 }
3827 
3828 /*
3829  * Reserve some allocation slots. The reservation system must be called
3830  * before we call into the allocator. If there aren't any available slots
3831  * then the I/O will be throttled until an I/O completes and its slots are
3832  * freed up. The function returns true if it was successful in placing
3833  * the reservation.
3834  */
3835 boolean_t
3836 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
3837     zio_t *zio, int flags)
3838 {
3839 	uint64_t available_slots = 0;
3840 	boolean_t slot_reserved = B_FALSE;
3841 	uint64_t max = mc->mc_alloc_max_slots[allocator];
3842 
3843 	ASSERT(mc->mc_alloc_throttle_enabled);
3844 	mutex_enter(&mc->mc_lock);
3845 
3846 	uint64_t reserved_slots =
3847 	    refcount_count(&mc->mc_alloc_slots[allocator]);
3848 	if (reserved_slots < max)
3849 		available_slots = max - reserved_slots;
3850 
3851 	if (slots <= available_slots || GANG_ALLOCATION(flags)) {
3852 		/*
3853 		 * We reserve the slots individually so that we can unreserve
3854 		 * them individually when an I/O completes.
3855 		 */
3856 		for (int d = 0; d < slots; d++) {
3857 			reserved_slots =
3858 			    refcount_add(&mc->mc_alloc_slots[allocator],
3859 			    zio);
3860 		}
3861 		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
3862 		slot_reserved = B_TRUE;
3863 	}
3864 
3865 	mutex_exit(&mc->mc_lock);
3866 	return (slot_reserved);
3867 }
3868 
3869 void
3870 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
3871     int allocator, zio_t *zio)
3872 {
3873 	ASSERT(mc->mc_alloc_throttle_enabled);
3874 	mutex_enter(&mc->mc_lock);
3875 	for (int d = 0; d < slots; d++) {
3876 		(void) refcount_remove(&mc->mc_alloc_slots[allocator],
3877 		    zio);
3878 	}
3879 	mutex_exit(&mc->mc_lock);
3880 }
3881 
3882 static int
3883 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
3884     uint64_t txg)
3885 {
3886 	metaslab_t *msp;
3887 	spa_t *spa = vd->vdev_spa;
3888 	int error = 0;
3889 
3890 	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
3891 		return (ENXIO);
3892 
3893 	ASSERT3P(vd->vdev_ms, !=, NULL);
3894 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3895 
3896 	mutex_enter(&msp->ms_lock);
3897 
3898 	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
3899 		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
3900 	/*
3901 	 * No need to fail in that case; someone else has activated the
3902 	 * metaslab, but that doesn't preclude us from using it.
3903 	 */
3904 	if (error == EBUSY)
3905 		error = 0;
3906 
3907 	if (error == 0 &&
3908 	    !range_tree_contains(msp->ms_allocatable, offset, size))
3909 		error = SET_ERROR(ENOENT);
3910 
3911 	if (error || txg == 0) {	/* txg == 0 indicates dry run */
3912 		mutex_exit(&msp->ms_lock);
3913 		return (error);
3914 	}
3915 
3916 	VERIFY(!msp->ms_condensing);
3917 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3918 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3919 	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
3920 	    msp->ms_size);
3921 	range_tree_remove(msp->ms_allocatable, offset, size);
3922 
3923 	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
3924 		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
3925 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
3926 		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
3927 		    offset, size);
3928 	}
3929 
3930 	mutex_exit(&msp->ms_lock);
3931 
3932 	return (0);
3933 }
3934 
3935 typedef struct metaslab_claim_cb_arg_t {
3936 	uint64_t	mcca_txg;
3937 	int		mcca_error;
3938 } metaslab_claim_cb_arg_t;
3939 
3940 /* ARGSUSED */
3941 static void
3942 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3943     uint64_t size, void *arg)
3944 {
3945 	metaslab_claim_cb_arg_t *mcca_arg = arg;
3946 
3947 	if (mcca_arg->mcca_error == 0) {
3948 		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
3949 		    size, mcca_arg->mcca_txg);
3950 	}
3951 }
3952 
3953 int
3954 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
3955 {
3956 	if (vd->vdev_ops->vdev_op_remap != NULL) {
3957 		metaslab_claim_cb_arg_t arg;
3958 
3959 		/*
3960 		 * Only zdb(1M) can claim on indirect vdevs.  This is used
3961 		 * to detect leaks of mapped space (that are not accounted
3962 		 * for in the obsolete counts, spacemap, or bpobj).
3963 		 */
3964 		ASSERT(!spa_writeable(vd->vdev_spa));
3965 		arg.mcca_error = 0;
3966 		arg.mcca_txg = txg;
3967 
3968 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
3969 		    metaslab_claim_impl_cb, &arg);
3970 
3971 		if (arg.mcca_error == 0) {
3972 			arg.mcca_error = metaslab_claim_concrete(vd,
3973 			    offset, size, txg);
3974 		}
3975 		return (arg.mcca_error);
3976 	} else {
3977 		return (metaslab_claim_concrete(vd, offset, size, txg));
3978 	}
3979 }
3980 
3981 /*
3982  * Intent log support: upon opening the pool after a crash, notify the SPA
3983  * of blocks that the intent log has allocated for immediate write, but
3984  * which are still considered free by the SPA because the last transaction
3985  * group didn't commit yet.
3986  */
3987 static int
3988 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3989 {
3990 	uint64_t vdev = DVA_GET_VDEV(dva);
3991 	uint64_t offset = DVA_GET_OFFSET(dva);
3992 	uint64_t size = DVA_GET_ASIZE(dva);
3993 	vdev_t *vd;
3994 
3995 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
3996 		return (SET_ERROR(ENXIO));
3997 	}
3998 
3999 	ASSERT(DVA_IS_VALID(dva));
4000 
4001 	if (DVA_GET_GANG(dva))
4002 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4003 
4004 	return (metaslab_claim_impl(vd, offset, size, txg));
4005 }
4006 
4007 int
4008 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
4009     int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
4010     zio_alloc_list_t *zal, zio_t *zio, int allocator)
4011 {
4012 	dva_t *dva = bp->blk_dva;
4013 	dva_t *hintdva = hintbp->blk_dva;
4014 	int error = 0;
4015 
4016 	ASSERT(bp->blk_birth == 0);
4017 	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
4018 
4019 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4020 
4021 	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
4022 		spa_config_exit(spa, SCL_ALLOC, FTAG);
4023 		return (SET_ERROR(ENOSPC));
4024 	}
4025 
4026 	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
4027 	ASSERT(BP_GET_NDVAS(bp) == 0);
4028 	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
4029 	ASSERT3P(zal, !=, NULL);
4030 
4031 	for (int d = 0; d < ndvas; d++) {
4032 		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
4033 		    txg, flags, zal, allocator);
4034 		if (error != 0) {
4035 			for (d--; d >= 0; d--) {
4036 				metaslab_unalloc_dva(spa, &dva[d], txg);
4037 				metaslab_group_alloc_decrement(spa,
4038 				    DVA_GET_VDEV(&dva[d]), zio, flags,
4039 				    allocator, B_FALSE);
4040 				bzero(&dva[d], sizeof (dva_t));
4041 			}
4042 			spa_config_exit(spa, SCL_ALLOC, FTAG);
4043 			return (error);
4044 		} else {
4045 			/*
4046 			 * Update the metaslab group's queue depth
4047 			 * based on the newly allocated dva.
4048 			 */
4049 			metaslab_group_alloc_increment(spa,
4050 			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
4051 		}
4052 
4053 	}
4054 	ASSERT(error == 0);
4055 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
4056 
4057 	spa_config_exit(spa, SCL_ALLOC, FTAG);
4058 
4059 	BP_SET_BIRTH(bp, txg, txg);
4060 
4061 	return (0);
4062 }
4063 
4064 void
4065 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
4066 {
4067 	const dva_t *dva = bp->blk_dva;
4068 	int ndvas = BP_GET_NDVAS(bp);
4069 
4070 	ASSERT(!BP_IS_HOLE(bp));
4071 	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
4072 
4073 	/*
4074 	 * If we have a checkpoint for the pool we need to make sure that
4075 	 * the blocks that we free that are part of the checkpoint won't be
4076 	 * reused until the checkpoint is discarded or we revert to it.
4077 	 *
4078 	 * The checkpoint flag is passed down the metaslab_free code path
4079 	 * and is set whenever we want to add a block to the checkpoint's
4080 	 * accounting. That is, we "checkpoint" blocks that existed at the
4081 	 * time the checkpoint was created and are therefore referenced by
4082 	 * the checkpointed uberblock.
4083 	 *
4084 	 * Note that, we don't checkpoint any blocks if the current
4085 	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
4086 	 * normally as they will be referenced by the checkpointed uberblock.
4087 	 */
4088 	boolean_t checkpoint = B_FALSE;
4089 	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
4090 	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
4091 		/*
4092 		 * At this point, if the block is part of the checkpoint
4093 		 * there is no way it was created in the current txg.
4094 		 */
4095 		ASSERT(!now);
4096 		ASSERT3U(spa_syncing_txg(spa), ==, txg);
4097 		checkpoint = B_TRUE;
4098 	}
4099 
4100 	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
4101 
4102 	for (int d = 0; d < ndvas; d++) {
4103 		if (now) {
4104 			metaslab_unalloc_dva(spa, &dva[d], txg);
4105 		} else {
4106 			ASSERT3U(txg, ==, spa_syncing_txg(spa));
4107 			metaslab_free_dva(spa, &dva[d], checkpoint);
4108 		}
4109 	}
4110 
4111 	spa_config_exit(spa, SCL_FREE, FTAG);
4112 }
4113 
4114 int
4115 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
4116 {
4117 	const dva_t *dva = bp->blk_dva;
4118 	int ndvas = BP_GET_NDVAS(bp);
4119 	int error = 0;
4120 
4121 	ASSERT(!BP_IS_HOLE(bp));
4122 
4123 	if (txg != 0) {
4124 		/*
4125 		 * First do a dry run to make sure all DVAs are claimable,
4126 		 * so we don't have to unwind from partial failures below.
4127 		 */
4128 		if ((error = metaslab_claim(spa, bp, 0)) != 0)
4129 			return (error);
4130 	}
4131 
4132 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4133 
4134 	for (int d = 0; d < ndvas; d++)
4135 		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
4136 			break;
4137 
4138 	spa_config_exit(spa, SCL_ALLOC, FTAG);
4139 
4140 	ASSERT(error == 0 || txg == 0);
4141 
4142 	return (error);
4143 }
4144 
4145 /* ARGSUSED */
4146 static void
4147 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
4148     uint64_t size, void *arg)
4149 {
4150 	if (vd->vdev_ops == &vdev_indirect_ops)
4151 		return;
4152 
4153 	metaslab_check_free_impl(vd, offset, size);
4154 }
4155 
4156 static void
4157 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
4158 {
4159 	metaslab_t *msp;
4160 	spa_t *spa = vd->vdev_spa;
4161 
4162 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
4163 		return;
4164 
4165 	if (vd->vdev_ops->vdev_op_remap != NULL) {
4166 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
4167 		    metaslab_check_free_impl_cb, NULL);
4168 		return;
4169 	}
4170 
4171 	ASSERT(vdev_is_concrete(vd));
4172 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
4173 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4174 
4175 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4176 
4177 	mutex_enter(&msp->ms_lock);
4178 	if (msp->ms_loaded)
4179 		range_tree_verify(msp->ms_allocatable, offset, size);
4180 
4181 	range_tree_verify(msp->ms_freeing, offset, size);
4182 	range_tree_verify(msp->ms_checkpointing, offset, size);
4183 	range_tree_verify(msp->ms_freed, offset, size);
4184 	for (int j = 0; j < TXG_DEFER_SIZE; j++)
4185 		range_tree_verify(msp->ms_defer[j], offset, size);
4186 	mutex_exit(&msp->ms_lock);
4187 }
4188 
4189 void
4190 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
4191 {
4192 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
4193 		return;
4194 
4195 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
4196 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
4197 		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
4198 		vdev_t *vd = vdev_lookup_top(spa, vdev);
4199 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
4200 		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
4201 
4202 		if (DVA_GET_GANG(&bp->blk_dva[i]))
4203 			size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4204 
4205 		ASSERT3P(vd, !=, NULL);
4206 
4207 		metaslab_check_free_impl(vd, offset, size);
4208 	}
4209 	spa_config_exit(spa, SCL_VDEV, FTAG);
4210 }
4211