xref: /illumos-gate/usr/src/uts/common/fs/zfs/metaslab.c (revision 44cd46cadd9aab751dae6a4023c1cb5bf316d274)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/zfs_context.h>
29 #include <sys/spa_impl.h>
30 #include <sys/dmu.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/space_map.h>
33 #include <sys/metaslab_impl.h>
34 #include <sys/vdev_impl.h>
35 #include <sys/zio.h>
36 
37 /*
38  * ==========================================================================
39  * Metaslab classes
40  * ==========================================================================
41  */
42 metaslab_class_t *
43 metaslab_class_create(void)
44 {
45 	metaslab_class_t *mc;
46 
47 	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
48 
49 	mc->mc_rotor = NULL;
50 
51 	return (mc);
52 }
53 
54 void
55 metaslab_class_destroy(metaslab_class_t *mc)
56 {
57 	metaslab_group_t *mg;
58 
59 	while ((mg = mc->mc_rotor) != NULL) {
60 		metaslab_class_remove(mc, mg);
61 		metaslab_group_destroy(mg);
62 	}
63 
64 	kmem_free(mc, sizeof (metaslab_class_t));
65 }
66 
67 void
68 metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
69 {
70 	metaslab_group_t *mgprev, *mgnext;
71 
72 	ASSERT(mg->mg_class == NULL);
73 
74 	if ((mgprev = mc->mc_rotor) == NULL) {
75 		mg->mg_prev = mg;
76 		mg->mg_next = mg;
77 	} else {
78 		mgnext = mgprev->mg_next;
79 		mg->mg_prev = mgprev;
80 		mg->mg_next = mgnext;
81 		mgprev->mg_next = mg;
82 		mgnext->mg_prev = mg;
83 	}
84 	mc->mc_rotor = mg;
85 	mg->mg_class = mc;
86 }
87 
88 void
89 metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
90 {
91 	metaslab_group_t *mgprev, *mgnext;
92 
93 	ASSERT(mg->mg_class == mc);
94 
95 	mgprev = mg->mg_prev;
96 	mgnext = mg->mg_next;
97 
98 	if (mg == mgnext) {
99 		mc->mc_rotor = NULL;
100 	} else {
101 		mc->mc_rotor = mgnext;
102 		mgprev->mg_next = mgnext;
103 		mgnext->mg_prev = mgprev;
104 	}
105 
106 	mg->mg_prev = NULL;
107 	mg->mg_next = NULL;
108 	mg->mg_class = NULL;
109 }
110 
111 /*
112  * ==========================================================================
113  * Metaslab groups
114  * ==========================================================================
115  */
116 static int
117 metaslab_compare(const void *x1, const void *x2)
118 {
119 	const metaslab_t *m1 = x1;
120 	const metaslab_t *m2 = x2;
121 
122 	if (m1->ms_weight < m2->ms_weight)
123 		return (1);
124 	if (m1->ms_weight > m2->ms_weight)
125 		return (-1);
126 
127 	/*
128 	 * If the weights are identical, use the offset to force uniqueness.
129 	 */
130 	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
131 		return (-1);
132 	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
133 		return (1);
134 
135 	ASSERT3P(m1, ==, m2);
136 
137 	return (0);
138 }
139 
140 metaslab_group_t *
141 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
142 {
143 	metaslab_group_t *mg;
144 
145 	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
146 	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
147 	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
148 	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
149 	mg->mg_aliquot = 2ULL << 20;		/* XXX -- tweak me */
150 	mg->mg_vd = vd;
151 	metaslab_class_add(mc, mg);
152 
153 	return (mg);
154 }
155 
156 void
157 metaslab_group_destroy(metaslab_group_t *mg)
158 {
159 	avl_destroy(&mg->mg_metaslab_tree);
160 	mutex_destroy(&mg->mg_lock);
161 	kmem_free(mg, sizeof (metaslab_group_t));
162 }
163 
164 static void
165 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
166 {
167 	mutex_enter(&mg->mg_lock);
168 	ASSERT(msp->ms_group == NULL);
169 	msp->ms_group = mg;
170 	msp->ms_weight = 0;
171 	avl_add(&mg->mg_metaslab_tree, msp);
172 	mutex_exit(&mg->mg_lock);
173 }
174 
175 static void
176 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
177 {
178 	mutex_enter(&mg->mg_lock);
179 	ASSERT(msp->ms_group == mg);
180 	avl_remove(&mg->mg_metaslab_tree, msp);
181 	msp->ms_group = NULL;
182 	mutex_exit(&mg->mg_lock);
183 }
184 
185 static void
186 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
187 {
188 	ASSERT(MUTEX_HELD(&msp->ms_lock));
189 
190 	mutex_enter(&mg->mg_lock);
191 	ASSERT(msp->ms_group == mg);
192 	avl_remove(&mg->mg_metaslab_tree, msp);
193 	msp->ms_weight = weight;
194 	avl_add(&mg->mg_metaslab_tree, msp);
195 	mutex_exit(&mg->mg_lock);
196 }
197 
198 /*
199  * ==========================================================================
200  * The first-fit block allocator
201  * ==========================================================================
202  */
203 static void
204 metaslab_ff_load(space_map_t *sm)
205 {
206 	ASSERT(sm->sm_ppd == NULL);
207 	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
208 }
209 
210 static void
211 metaslab_ff_unload(space_map_t *sm)
212 {
213 	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
214 	sm->sm_ppd = NULL;
215 }
216 
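/*
 * First-fit keeps a separate cursor for each power-of-two alignment class
 * in sm_ppd (64 slots, one per bit position).  A request of 'size' bytes
 * uses the cursor for its natural alignment:
 *
 *	align = size & -size		(largest power of two dividing size)
 *	cursor = sm_ppd[highbit(align) - 1]
 *
 * e.g. a 128K (2^17) request uses slot 17.  Streams of equal-sized
 * allocations therefore advance their own cursor and tend not to
 * interleave with other size classes.
 */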
217 static uint64_t
218 metaslab_ff_alloc(space_map_t *sm, uint64_t size)
219 {
220 	avl_tree_t *t = &sm->sm_root;
221 	uint64_t align = size & -size;
222 	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
223 	space_seg_t *ss, ssearch;
224 	avl_index_t where;
225 
226 	ssearch.ss_start = *cursor;
227 	ssearch.ss_end = *cursor + size;
228 
229 	ss = avl_find(t, &ssearch, &where);
230 	if (ss == NULL)
231 		ss = avl_nearest(t, where, AVL_AFTER);
232 
233 	while (ss != NULL) {
234 		uint64_t offset = P2ROUNDUP(ss->ss_start, align);
235 
236 		if (offset + size <= ss->ss_end) {
237 			*cursor = offset + size;
238 			return (offset);
239 		}
240 		ss = AVL_NEXT(t, ss);
241 	}
242 
243 	/*
244 	 * If we know we've searched the whole map (*cursor == 0), give up.
245 	 * Otherwise, reset the cursor to the beginning and try again.
246 	 */
247 	if (*cursor == 0)
248 		return (-1ULL);
249 
250 	*cursor = 0;
251 	return (metaslab_ff_alloc(sm, size));
252 }
253 
254 /* ARGSUSED */
255 static void
256 metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
257 {
258 	/* No need to update cursor */
259 }
260 
261 /* ARGSUSED */
262 static void
263 metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
264 {
265 	/* No need to update cursor */
266 }
267 
268 static space_map_ops_t metaslab_ff_ops = {
269 	metaslab_ff_load,
270 	metaslab_ff_unload,
271 	metaslab_ff_alloc,
272 	metaslab_ff_claim,
273 	metaslab_ff_free
274 };
275 
276 /*
277  * ==========================================================================
278  * Metaslabs
279  * ==========================================================================
280  */
281 metaslab_t *
282 metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
283 	uint64_t start, uint64_t size, uint64_t txg)
284 {
285 	vdev_t *vd = mg->mg_vd;
286 	metaslab_t *msp;
287 
288 	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
289 
290 	msp->ms_smo_syncing = *smo;
291 
292 	/*
293 	 * We create the main space map here, but we don't create the
294 	 * allocmaps and freemaps until metaslab_sync_done().  This serves
295 	 * two purposes: it allows metaslab_sync_done() to detect the
296 	 * addition of new space; and for debugging, it ensures that we'd
297  * take a data fault on any attempt to use this metaslab before it's ready.
298 	 */
299 	space_map_create(&msp->ms_map, start, size,
300 	    vd->vdev_ashift, &msp->ms_lock);
301 
302 	metaslab_group_add(mg, msp);
303 
304 	/*
305 	 * If we're opening an existing pool (txg == 0) or creating
306 	 * a new one (txg == TXG_INITIAL), all space is available now.
307 	 * If we're adding space to an existing pool, the new space
308 	 * does not become available until after this txg has synced.
309 	 */
310 	if (txg <= TXG_INITIAL)
311 		metaslab_sync_done(msp, 0);
312 
313 	if (txg != 0) {
314 		/*
315 		 * The vdev is dirty, but the metaslab isn't -- it just needs
316 		 * to have metaslab_sync_done() invoked from vdev_sync_done().
317 		 * [We could just dirty the metaslab, but that would cause us
318 		 * to allocate a space map object for it, which is wasteful
319 		 * and would mess up the locality logic in metaslab_weight().]
320 		 */
321 		ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));
322 		vdev_dirty(vd, 0, NULL, txg);
323 		vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg));
324 	}
325 
326 	return (msp);
327 }
328 
329 void
330 metaslab_fini(metaslab_t *msp)
331 {
332 	metaslab_group_t *mg = msp->ms_group;
333 	int t;
334 
335 	vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
336 	    -msp->ms_smo.smo_alloc);
337 
338 	metaslab_group_remove(mg, msp);
339 
340 	mutex_enter(&msp->ms_lock);
341 
342 	space_map_unload(&msp->ms_map);
343 	space_map_destroy(&msp->ms_map);
344 
345 	for (t = 0; t < TXG_SIZE; t++) {
346 		space_map_destroy(&msp->ms_allocmap[t]);
347 		space_map_destroy(&msp->ms_freemap[t]);
348 	}
349 
350 	mutex_exit(&msp->ms_lock);
351 
352 	kmem_free(msp, sizeof (metaslab_t));
353 }
354 
355 #define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
356 #define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
357 #define	METASLAB_ACTIVE_MASK		\
358 	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
359 #define	METASLAB_SMO_BONUS_MULTIPLIER	2
360 
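/*
 * A metaslab's weight is a single 64-bit sort key.  The two high bits
 * (METASLAB_WEIGHT_PRIMARY/SECONDARY) mark a metaslab that is currently
 * activated for allocation; the remaining bits hold the space, bandwidth,
 * and locality weight computed by metaslab_weight() below.  Since the
 * activity bits dwarf any realistic space weight, activated metaslabs
 * always sort ahead of inactive ones in the group's AVL tree.
 */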
361 static uint64_t
362 metaslab_weight(metaslab_t *msp)
363 {
364 	metaslab_group_t *mg = msp->ms_group;
365 	space_map_t *sm = &msp->ms_map;
366 	space_map_obj_t *smo = &msp->ms_smo;
367 	vdev_t *vd = mg->mg_vd;
368 	uint64_t weight, space;
369 
370 	ASSERT(MUTEX_HELD(&msp->ms_lock));
371 
372 	/*
373 	 * The baseline weight is the metaslab's free space.
374 	 */
375 	space = sm->sm_size - smo->smo_alloc;
376 	weight = space;
377 
378 	/*
379 	 * Modern disks have uniform bit density and constant angular velocity.
380 	 * Therefore, the outer recording zones are faster (higher bandwidth)
381 	 * than the inner zones by the ratio of outer to inner track diameter,
382 	 * which is typically around 2:1.  We account for this by assigning
383 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
384 	 * In effect, this means that we'll select the metaslab with the most
385 	 * free bandwidth rather than simply the one with the most free space.
386 	 */
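	/*
	 * Concretely, with N = vdev_ms_count metaslabs, metaslab i gets
	 * weight = space * (2 - i/N): the first (outermost) metaslab is
	 * weighted at twice its free space, the last at just over 1x.
	 */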
387 	weight = 2 * weight -
388 	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
389 	ASSERT(weight >= space && weight <= 2 * space);
390 
391 	/*
392 	 * For locality, assign higher weight to metaslabs we've used before.
393 	 */
394 	if (smo->smo_object != 0)
395 		weight *= METASLAB_SMO_BONUS_MULTIPLIER;
396 	ASSERT(weight >= space &&
397 	    weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space);
398 
399 	/*
400 	 * If this metaslab is one we're actively using, adjust its weight to
401 	 * make it preferable to any inactive metaslab so we'll polish it off.
402 	 */
403 	weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
404 
405 	return (weight);
406 }
407 
408 static int
409 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
410 {
411 	space_map_t *sm = &msp->ms_map;
412 
413 	ASSERT(MUTEX_HELD(&msp->ms_lock));
414 
415 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
416 		int error = space_map_load(sm, &metaslab_ff_ops,
417 		    SM_FREE, &msp->ms_smo,
418 		    msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
419 		if (error) {
420 			metaslab_group_sort(msp->ms_group, msp, 0);
421 			return (error);
422 		}
423 		metaslab_group_sort(msp->ms_group, msp,
424 		    msp->ms_weight | activation_weight);
425 	}
426 	ASSERT(sm->sm_loaded);
427 	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
428 
429 	return (0);
430 }
431 
432 static void
433 metaslab_passivate(metaslab_t *msp, uint64_t size)
434 {
435 	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
436 	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
437 }
438 
439 /*
440  * Write a metaslab to disk in the context of the specified transaction group.
441  */
442 void
443 metaslab_sync(metaslab_t *msp, uint64_t txg)
444 {
445 	vdev_t *vd = msp->ms_group->mg_vd;
446 	spa_t *spa = vd->vdev_spa;
447 	objset_t *mos = spa->spa_meta_objset;
448 	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
449 	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
450 	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
451 	space_map_t *sm = &msp->ms_map;
452 	space_map_obj_t *smo = &msp->ms_smo_syncing;
453 	dmu_buf_t *db;
454 	dmu_tx_t *tx;
455 	int t;
456 
457 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
458 
459 	/*
460 	 * The only state that can actually be changing concurrently with
461 	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
462 	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
463 	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
464 	 * We drop it whenever we call into the DMU, because the DMU
465 	 * can call down to us (e.g. via zio_free()) at any time.
466 	 */
467 	mutex_enter(&msp->ms_lock);
468 
469 	if (smo->smo_object == 0) {
470 		ASSERT(smo->smo_objsize == 0);
471 		ASSERT(smo->smo_alloc == 0);
472 		mutex_exit(&msp->ms_lock);
473 		smo->smo_object = dmu_object_alloc(mos,
474 		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
475 		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
476 		ASSERT(smo->smo_object != 0);
477 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
478 		    (sm->sm_start >> vd->vdev_ms_shift),
479 		    sizeof (uint64_t), &smo->smo_object, tx);
480 		mutex_enter(&msp->ms_lock);
481 	}
482 
483 	space_map_walk(freemap, space_map_add, freed_map);
484 
485 	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
486 	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
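		/*
		 * Condensing is attempted only on the first sync pass, and
		 * only once the on-disk log has grown to at least twice the
		 * size a freshly written map would need (roughly one 8-byte
		 * entry per in-core segment), so the rewrite below should
		 * shrink the object substantially.
		 */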
487 		/*
488 		 * The in-core space map representation is twice as compact
489 		 * as the on-disk one, so it's time to condense the latter
490 		 * by generating a pure allocmap from first principles.
491 		 *
492 		 * This metaslab is 100% allocated,
493 		 * minus the content of the in-core map (sm),
494 		 * minus what's been freed this txg (freed_map),
495 		 * minus allocations from txgs in the future
496 		 * (because they haven't been committed yet).
497 		 */
498 		space_map_vacate(allocmap, NULL, NULL);
499 		space_map_vacate(freemap, NULL, NULL);
500 
501 		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);
502 
503 		space_map_walk(sm, space_map_remove, allocmap);
504 		space_map_walk(freed_map, space_map_remove, allocmap);
505 
506 		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
507 			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
508 			    space_map_remove, allocmap);
509 
510 		mutex_exit(&msp->ms_lock);
511 		space_map_truncate(smo, mos, tx);
512 		mutex_enter(&msp->ms_lock);
513 	}
514 
515 	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
516 	space_map_sync(freemap, SM_FREE, smo, mos, tx);
517 
518 	mutex_exit(&msp->ms_lock);
519 
520 	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
521 	dmu_buf_will_dirty(db, tx);
522 	ASSERT3U(db->db_size, ==, sizeof (*smo));
523 	bcopy(smo, db->db_data, db->db_size);
524 	dmu_buf_rele(db, FTAG);
525 
526 	dmu_tx_commit(tx);
527 }
528 
529 /*
530  * Called after a transaction group has completely synced to mark
531  * all of the metaslab's free space as usable.
532  */
533 void
534 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
535 {
536 	space_map_obj_t *smo = &msp->ms_smo;
537 	space_map_obj_t *smosync = &msp->ms_smo_syncing;
538 	space_map_t *sm = &msp->ms_map;
539 	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
540 	metaslab_group_t *mg = msp->ms_group;
541 	vdev_t *vd = mg->mg_vd;
542 	int t;
543 
544 	mutex_enter(&msp->ms_lock);
545 
546 	/*
547 	 * If this metaslab is just becoming available, initialize its
548 	 * allocmaps and freemaps and add its capacity to the vdev.
549 	 */
550 	if (freed_map->sm_size == 0) {
551 		for (t = 0; t < TXG_SIZE; t++) {
552 			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
553 			    sm->sm_size, sm->sm_shift, sm->sm_lock);
554 			space_map_create(&msp->ms_freemap[t], sm->sm_start,
555 			    sm->sm_size, sm->sm_shift, sm->sm_lock);
556 		}
557 		vdev_space_update(vd, sm->sm_size, 0);
558 	}
559 
560 	vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc);
561 
562 	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
563 	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);
564 
565 	/*
566 	 * If there's a space_map_load() in progress, wait for it to complete
567 	 * so that we have a consistent view of the in-core space map.
568 	 * Then, add everything we freed in this txg to the map.
569 	 */
570 	space_map_load_wait(sm);
571 	space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);
572 
573 	*smo = *smosync;
574 
575 	/*
576 	 * If the map is loaded but no longer active, evict it as soon as all
577 	 * future allocations have synced.  (If we unloaded it now and then
578 	 * loaded a moment later, the map wouldn't reflect those allocations.)
579 	 */
580 	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
581 		int evictable = 1;
582 
583 		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
584 			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
585 				evictable = 0;
586 
587 		if (evictable)
588 			space_map_unload(sm);
589 	}
590 
591 	metaslab_group_sort(mg, msp, metaslab_weight(msp));
592 
593 	mutex_exit(&msp->ms_lock);
594 }
595 
596 /*
597  * Intent log support: upon opening the pool after a crash, notify the SPA
598  * of blocks that the intent log has allocated for immediate write, but
599  * which are still considered free by the SPA because the last transaction
600  * group didn't commit yet.
601  */
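/*
 * A claim works by activating the metaslab (which loads its space map from
 * disk), carving the claimed range directly out of the in-core map, and
 * recording it in this txg's allocmap just as a fresh allocation would.
 */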
602 int
603 metaslab_claim(spa_t *spa, dva_t *dva, uint64_t txg)
604 {
605 	uint64_t vdev = DVA_GET_VDEV(dva);
606 	uint64_t offset = DVA_GET_OFFSET(dva);
607 	uint64_t size = DVA_GET_ASIZE(dva);
608 	vdev_t *vd;
609 	metaslab_t *msp;
610 	int error;
611 
612 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL)
613 		return (ENXIO);
614 
615 	if ((offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
616 		return (ENXIO);
617 
618 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
619 
620 	if (DVA_GET_GANG(dva))
621 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
622 
623 	mutex_enter(&msp->ms_lock);
624 
625 	error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
626 	if (error) {
627 		mutex_exit(&msp->ms_lock);
628 		return (error);
629 	}
630 
631 	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
632 		vdev_dirty(vd, VDD_METASLAB, msp, txg);
633 
634 	space_map_claim(&msp->ms_map, offset, size);
635 	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
636 
637 	mutex_exit(&msp->ms_lock);
638 
639 	return (0);
640 }
641 
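/*
 * Distance, in bytes, between a candidate metaslab and a DVA we've already
 * placed.  Copies on different top-level vdevs are treated as infinitely
 * far apart; metaslab_group_alloc() uses this to keep ditto copies that are
 * forced onto the same vdev at least min_distance apart.
 */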
642 static uint64_t
643 metaslab_distance(metaslab_t *msp, dva_t *dva)
644 {
645 	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
646 	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
647 	uint64_t start = msp->ms_map.sm_start >> ms_shift;
648 
649 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
650 		return (1ULL << 63);
651 
652 	if (offset < start)
653 		return ((start - offset) << ms_shift);
654 	if (offset > start)
655 		return ((offset - start) << ms_shift);
656 	return (0);
657 }
658 
659 static uint64_t
660 metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
661     uint64_t min_distance, dva_t *dva, int d)
662 {
663 	metaslab_t *msp = NULL;
664 	uint64_t offset = -1ULL;
665 	avl_tree_t *t = &mg->mg_metaslab_tree;
666 	uint64_t activation_weight;
667 	uint64_t target_distance;
668 	int i;
669 
670 	activation_weight = METASLAB_WEIGHT_PRIMARY;
671 	for (i = 0; i < d; i++)
672 		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id)
673 			activation_weight = METASLAB_WEIGHT_SECONDARY;
674 
675 	for (;;) {
676 		mutex_enter(&mg->mg_lock);
677 		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
678 			if (msp->ms_weight < size) {
679 				mutex_exit(&mg->mg_lock);
680 				return (-1ULL);
681 			}
682 
683 			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
684 				break;
685 
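			/*
			 * For a secondary (ditto) allocation, skip any
			 * metaslab that lies within min_distance of a copy
			 * we've already placed on this vdev; completely
			 * free metaslabs (smo_alloc == 0) are held to a
			 * 50% larger minimum distance.
			 */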
686 			target_distance = min_distance +
687 			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
688 
689 			for (i = 0; i < d; i++)
690 				if (metaslab_distance(msp, &dva[i]) <
691 				    target_distance)
692 					break;
693 			if (i == d)
694 				break;
695 		}
696 		mutex_exit(&mg->mg_lock);
697 		if (msp == NULL)
698 			return (-1ULL);
699 
700 		mutex_enter(&msp->ms_lock);
701 
702 		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
703 		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
704 			metaslab_passivate(msp,
705 			    (msp->ms_weight & ~METASLAB_ACTIVE_MASK) /
706 			    METASLAB_SMO_BONUS_MULTIPLIER);
707 			mutex_exit(&msp->ms_lock);
708 			continue;
709 		}
710 
711 		if (metaslab_activate(msp, activation_weight) != 0) {
712 			mutex_exit(&msp->ms_lock);
713 			continue;
714 		}
715 
716 		if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
717 			break;
718 
719 		metaslab_passivate(msp, size - 1);
720 
721 		mutex_exit(&msp->ms_lock);
722 	}
723 
724 	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
725 		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
726 
727 	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
728 
729 	mutex_exit(&msp->ms_lock);
730 
731 	return (offset);
732 }
733 
734 /*
735  * Allocate a block for the specified i/o.
736  */
737 static int
738 metaslab_alloc_one(spa_t *spa, uint64_t psize, dva_t *dva, int d,
739     dva_t *hintdva, uint64_t txg)
740 {
741 	metaslab_group_t *mg, *rotor;
742 	metaslab_class_t *mc;
743 	vdev_t *vd;
744 	int dshift = 3;
745 	int all_zero;
746 	uint64_t offset = -1ULL;
747 	uint64_t asize;
748 	uint64_t distance;
749 
750 	mc = spa_metaslab_class_select(spa);
751 
752 	/*
753 	 * Start at the rotor and loop through all mgs until we find something.
754 	 * Note that there's no locking on mc_rotor or mc_allocated because
755 	 * nothing actually breaks if we miss a few updates -- we just won't
756 	 * allocate quite as evenly.  It all balances out over time.
757 	 *
758 	 * If we are doing ditto blocks, try to spread them across consecutive
759 	 * vdevs.  If we're forced to reuse a vdev before we've allocated
760 	 * all of our ditto blocks, then try and spread them out on that
761 	 * vdev as much as possible.  If it turns out to not be possible,
762 	 * gradually lower our standards until anything becomes acceptable.
763 	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
764 	 * gives us hope of containing our fault domains to something we're
765 	 * able to reason about.  Otherwise, any two top-level vdev failures
766 	 * will guarantee the loss of data.  With consecutive allocation,
767 	 * only two adjacent top-level vdev failures will result in data loss.
768 	 *
769 	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
770 	 * ourselves on the same vdev as our gang block header.  That
771 	 * way, we can hope for locality in vdev_cache, plus it makes our
772 	 * fault domains something tractable.
773 	 */
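	/*
	 * "Gradually lower our standards" works as follows: the first pass
	 * requires copies sharing a vdev to be at least vdev_asize >> 3
	 * (one eighth of the vdev) apart.  Each time every group in the
	 * rotor fails, dshift is bumped, halving the required distance;
	 * once it falls to a single metaslab or less it's treated as no
	 * constraint at all, and only after an unconstrained pass over
	 * every vdev fails do we return ENOSPC.
	 */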
774 	if (hintdva) {
775 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
776 		mg = vd->vdev_mg;
777 	} else if (d != 0) {
778 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
779 		mg = vd->vdev_mg->mg_next;
780 	} else {
781 		mg = mc->mc_rotor;
782 	}
783 	rotor = mg;
784 
785 top:
786 	all_zero = B_TRUE;
787 	do {
788 		vd = mg->mg_vd;
789 
790 		distance = vd->vdev_asize >> dshift;
791 		if (distance <= (1ULL << vd->vdev_ms_shift))
792 			distance = 0;
793 		else
794 			all_zero = B_FALSE;
795 
796 		asize = vdev_psize_to_asize(vd, psize);
797 		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
798 
799 		offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
800 		if (offset != -1ULL) {
801 			/*
802 			 * If we've just selected this metaslab group,
803 			 * figure out whether the corresponding vdev is
804 			 * over- or under-used relative to the pool,
805 			 * and set an allocation bias to even it out.
806 			 */
807 			if (mc->mc_allocated == 0) {
808 				vdev_stat_t *vs = &vd->vdev_stat;
809 				uint64_t alloc, space;
810 				int64_t vu, su;
811 
812 				alloc = spa_get_alloc(spa);
813 				space = spa_get_space(spa);
814 
815 				/*
816 				 * Determine the fraction of space used, in units of 0..1024.
817 				 * (This is just to avoid floating point.)
818 				 */
819 				vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
820 				su = (alloc << 10) / (space + 1);
821 
822 				/*
823 				 * Bias by at most +/- 25% of the aliquot.
824 				 */
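				/*
				 * For example, if this vdev is 50% full
				 * (vu = 512) while the pool is 75% full
				 * (su = 768), the bias works out to
				 * +aliquot/16: this under-used vdev gets
				 * about 6% more than its normal share
				 * before the rotor moves on.
				 */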
825 				mg->mg_bias = ((su - vu) *
826 				    (int64_t)mg->mg_aliquot) / (1024 * 4);
827 			}
828 
829 			if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
830 			    mg->mg_aliquot + mg->mg_bias) {
831 				mc->mc_rotor = mg->mg_next;
832 				mc->mc_allocated = 0;
833 			}
834 
835 			DVA_SET_VDEV(&dva[d], vd->vdev_id);
836 			DVA_SET_OFFSET(&dva[d], offset);
837 			DVA_SET_GANG(&dva[d], 0);
838 			DVA_SET_ASIZE(&dva[d], asize);
839 
840 			return (0);
841 		}
842 		mc->mc_rotor = mg->mg_next;
843 		mc->mc_allocated = 0;
844 	} while ((mg = mg->mg_next) != rotor);
845 
846 	if (!all_zero) {
847 		dshift++;
848 		ASSERT(dshift < 64);
849 		goto top;
850 	}
851 
852 	bzero(&dva[d], sizeof (dva_t));
853 
854 	return (ENOSPC);
855 }
856 
857 int
858 metaslab_alloc(spa_t *spa, uint64_t psize, blkptr_t *bp, int ncopies,
859     uint64_t txg, blkptr_t *hintbp)
860 {
861 	int d, error;
862 	dva_t *dva = bp->blk_dva;
863 	dva_t *hintdva = hintbp->blk_dva;
864 
865 	ASSERT(ncopies > 0 && ncopies <= spa_max_replication(spa));
866 	ASSERT(BP_GET_NDVAS(bp) == 0);
867 	ASSERT(hintbp == NULL || ncopies <= BP_GET_NDVAS(hintbp));
868 
869 	for (d = 0; d < ncopies; d++) {
870 		error = metaslab_alloc_one(spa, psize, dva, d, hintdva, txg);
871 		if (error) {
872 			for (d--; d >= 0; d--) {
873 				ASSERT(DVA_IS_VALID(&dva[d]));
874 				metaslab_free(spa, &dva[d], txg, B_TRUE);
875 				bzero(&dva[d], sizeof (dva_t));
876 			}
877 			return (ENOSPC);
878 		}
879 	}
880 	ASSERT(error == 0);
881 	ASSERT(BP_GET_NDVAS(bp) == ncopies);
882 
883 	return (0);
884 }
885 
886 /*
887  * Free the block represented by DVA in the context of the specified
888  * transaction group.
889  */
890 void
891 metaslab_free(spa_t *spa, dva_t *dva, uint64_t txg, boolean_t now)
892 {
893 	uint64_t vdev = DVA_GET_VDEV(dva);
894 	uint64_t offset = DVA_GET_OFFSET(dva);
895 	uint64_t size = DVA_GET_ASIZE(dva);
896 	vdev_t *vd;
897 	metaslab_t *msp;
898 
899 	if (txg > spa_freeze_txg(spa))
900 		return;
901 
902 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
903 		cmn_err(CE_WARN, "metaslab_free(): bad vdev %llu",
904 		    (u_longlong_t)vdev);
905 		ASSERT(0);
906 		return;
907 	}
908 
909 	if ((offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
910 		cmn_err(CE_WARN, "metaslab_free(): bad offset %llu",
911 		    (u_longlong_t)offset);
912 		ASSERT(0);
913 		return;
914 	}
915 
916 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
917 
918 	if (DVA_GET_GANG(dva))
919 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
920 
921 	mutex_enter(&msp->ms_lock);
922 
923 	if (now) {
924 		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
925 		    offset, size);
926 		space_map_free(&msp->ms_map, offset, size);
927 	} else {
928 		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
929 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
930 		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
931 	}
932 
933 	mutex_exit(&msp->ms_lock);
934 }
935