/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy.  Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 30;
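
/*
 * Illustrative example of the two tunables above (numbers are hypothetical):
 * with SPA_MAXBLOCKSIZE at its historical 128K value, a metaslab whose
 * largest free segment has shrunk below 128K, or whose overall free space
 * has dropped under 30%, will cause metaslab_df_alloc() below to stop
 * walking the offset-ordered (first-fit) tree and allocate from the
 * size-ordered (best-fit) tree instead.
 */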

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	metaslab_group_t *mg;

	while ((mg = mc->mc_rotor) != NULL) {
		metaslab_class_remove(mc, mg);
		metaslab_group_destroy(mg);
	}

	kmem_free(mc, sizeof (metaslab_class_t));
}

void
metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == NULL);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
	mg->mg_class = mc;
}

void
metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == mc);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
	mg->mg_class = NULL;
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
		return (-1);
	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}
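
/*
 * Because the comparator above returns -1 for the larger weight, the AVL
 * tree keeps metaslabs in descending weight order, so avl_first() in
 * metaslab_group_alloc() visits the heaviest (most desirable) metaslab
 * first.
 */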

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_aliquot = metaslab_aliquot * MAX(1, vd->vdev_children);
	mg->mg_vd = vd;
	metaslab_class_add(mc, mg);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate.  This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	space_seg_t *ss, ssearch;
	avl_index_t where;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL)
		ss = avl_nearest(t, where, AVL_AFTER);

	while (ss != NULL) {
		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
			return (offset);
		}
		ss = AVL_NEXT(t, ss);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
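
/*
 * For example (illustrative values): with *cursor == 0x10000, size == 0x2000
 * and align == 0x2000, the picker resumes near the cursor, rounds each
 * candidate segment's ss_start up to a 0x2000 boundary, and returns the
 * first aligned offset whose [offset, offset + size) range still fits in
 * that segment, advancing the cursor past it.  If the walk runs off the end
 * of the tree it wraps around once (cursor reset to 0) before giving up
 * with -1ULL.
 */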

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static void
metaslab_ff_load(space_map_t *sm)
{
	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
	sm->sm_pp_root = NULL;
}

static void
metaslab_ff_unload(space_map_t *sm)
{
	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;
}

static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;

	return (metaslab_block_picker(t, cursor, size, align));
}
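
/*
 * For example: size & -size isolates the lowest set bit of the requested
 * size, so a 24K (0x6000) request yields align == 0x2000 (8K) and uses the
 * cursor at index highbit(0x2000) - 1 == 13 in the 64-entry sm_ppd array.
 * Each power-of-two alignment therefore gets its own independent cursor.
 */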

/* ARGSUSED */
static void
metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

static space_map_ops_t metaslab_ff_ops = {
	metaslab_ff_load,
	metaslab_ff_unload,
	metaslab_ff_alloc,
	metaslab_ff_claim,
	metaslab_ff_free,
	NULL	/* maxsize */
};

/*
 * Dynamic block allocator -
 * Uses the first-fit allocation scheme until space gets low and then
 * adjusts to a best-fit allocation method.  Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 */

uint64_t
metaslab_df_maxsize(space_map_t *sm)
{
	avl_tree_t *t = sm->sm_pp_root;
	space_seg_t *ss;

	if (t == NULL || (ss = avl_last(t)) == NULL)
		return (0ULL);

	return (ss->ss_end - ss->ss_start);
}

static int
metaslab_df_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;
	uint64_t ss_size1 = s1->ss_end - s1->ss_start;
	uint64_t ss_size2 = s2->ss_end - s2->ss_start;

	if (ss_size1 < ss_size2)
		return (-1);
	if (ss_size1 > ss_size2)
		return (1);

	if (s1->ss_start < s2->ss_start)
		return (-1);
	if (s1->ss_start > s2->ss_start)
		return (1);

	return (0);
}

static void
metaslab_df_load(space_map_t *sm)
{
	space_seg_t *ss;

	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);

	sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
	avl_create(sm->sm_pp_root, metaslab_df_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		avl_add(sm->sm_pp_root, ss);
}

static void
metaslab_df_unload(space_map_t *sm)
{
	void *cookie = NULL;

	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;

	while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
		/* tear down the tree */
	}

	avl_destroy(sm->sm_pp_root);
	kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
	sm->sm_pp_root = NULL;
}

static uint64_t
metaslab_df_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	uint64_t max_size = metaslab_df_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = sm->sm_pp_root;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

/* ARGSUSED */
static void
metaslab_df_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_df_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

static space_map_ops_t metaslab_df_ops = {
	metaslab_df_load,
	metaslab_df_unload,
	metaslab_df_alloc,
	metaslab_df_claim,
	metaslab_df_free,
	metaslab_df_maxsize
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
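
/*
 * Note that even in best-fit mode metaslab_df_alloc() reuses
 * metaslab_block_picker() with an alignment of 1; the only difference is
 * that it walks the size-sorted tree (sm_pp_root).  Immediately after the
 * switch (cursor reset to 0) the search therefore starts at the smallest
 * free segment large enough to hold the request.
 */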

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
	uint64_t start, uint64_t size, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	space_map_create(&msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, txg);
	}

	return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;

	vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
	    -msp->ms_smo.smo_alloc, 0, B_TRUE);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(&msp->ms_map);
	space_map_destroy(&msp->ms_map);

	for (int t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(&msp->ms_allocmap[t]);
		space_map_destroy(&msp->ms_freemap[t]);
	}

	for (int t = 0; t < TXG_DEFER_SIZE; t++)
		space_map_destroy(&msp->ms_defermap[t]);

	ASSERT3S(msp->ms_deferspace, ==, 0);

	mutex_exit(&msp->ms_lock);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
#define	METASLAB_SMO_BONUS_MULTIPLIER	2

static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1.  We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);

	/*
	 * For locality, assign higher weight to metaslabs we've used before.
	 */
	if (smo->smo_object != 0)
		weight *= METASLAB_SMO_BONUS_MULTIPLIER;
	ASSERT(weight >= space &&
	    weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space);

	/*
	 * If this metaslab is one we're actively using, adjust its weight to
	 * make it preferable to any inactive metaslab so we'll polish it off.
	 */
	weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (weight);
}
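
/*
 * Worked example (illustrative numbers): on a vdev with 100 metaslabs, a
 * metaslab at index 25 with 10GB free gets
 * weight = 2 * 10GB - (25 * 10GB) / 100 = 17.5GB, i.e. a 1.75x multiplier
 * that shrinks linearly toward 1x for the innermost metaslab.  If the
 * metaslab already has an on-disk space map object (smo_object != 0), the
 * SMO bonus doubles that again.
 */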

static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size)
{
	space_map_t *sm = &msp->ms_map;
	space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		space_map_load_wait(sm);
		if (!sm->sm_loaded) {
			int error = space_map_load(sm, sm_ops, SM_FREE,
			    &msp->ms_smo,
			    msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
			if (error) {
				metaslab_group_sort(msp->ms_group, msp, 0);
				return (error);
			}
			for (int t = 0; t < TXG_DEFER_SIZE; t++)
				space_map_walk(&msp->ms_defermap[t],
				    space_map_claim, sm);
		}

		/*
		 * If we were able to load the map then make sure
		 * that this map is still able to satisfy our request.
		 */
		if (msp->ms_weight < size)
			return (ENOSPC);

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again.  In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}
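
/*
 * For example: activating a metaslab with weight 0x4000000 as a primary
 * sets ms_weight to METASLAB_WEIGHT_PRIMARY | 0x4000000; passivating it
 * with MIN(ms_weight, size) then clears bits 62-63, since any legitimate
 * allocation size is far below 1ULL << 62, returning the metaslab to the
 * inactive portion of the group's tree.
 */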

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	dmu_buf_t *db;
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	if (allocmap->sm_space == 0 && freemap->sm_space == 0)
		return;

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
	}

	mutex_enter(&msp->ms_lock);

	space_map_walk(freemap, space_map_add, freed_map);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus deferred frees (ms_defermap[]),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_vacate(allocmap, NULL, NULL);
		space_map_vacate(freemap, NULL, NULL);

		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

		space_map_walk(sm, space_map_remove, allocmap);
		space_map_walk(freed_map, space_map_remove, allocmap);

		for (int t = 0; t < TXG_DEFER_SIZE; t++)
			space_map_walk(&msp->ms_defermap[t],
			    space_map_remove, allocmap);

		for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
			    space_map_remove, allocmap);

		mutex_exit(&msp->ms_lock);
		space_map_truncate(smo, mos, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);

	mutex_exit(&msp->ms_lock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}
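
/*
 * Example of the condensing trigger above (illustrative numbers): with
 * 10,000 segments in the in-core tree, the threshold is
 * 2 * sizeof (uint64_t) * 10,000 = 160,000 bytes; once the on-disk space
 * map object has grown to at least that size, it is truncated and rewritten
 * from the condensed in-core representation instead of being appended to.
 */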

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = &msp->ms_map;
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	int64_t alloc_delta, defer_delta;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
	if (freed_map->sm_size == 0) {
		for (int t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			space_map_create(&msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}

		for (int t = 0; t < TXG_DEFER_SIZE; t++)
			space_map_create(&msp->ms_defermap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);

		vdev_space_update(vd, sm->sm_size, 0, 0, B_TRUE);
	}

	alloc_delta = smosync->smo_alloc - smo->smo_alloc;
	defer_delta = freed_map->sm_space - defer_map->sm_space;

	vdev_space_update(vd, 0, alloc_delta + defer_delta,
	    defer_delta, B_TRUE);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add defer_map (oldest deferred frees) to this map and
	 * transfer freed_map (this txg's frees) to defer_map.
	 */
	space_map_load_wait(sm);
	space_map_vacate(defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
	space_map_vacate(freed_map, space_map_add, defer_map);

	*smo = *smosync;

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int evictable = 1;

		for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
				evictable = 0;

		if (evictable)
			space_map_unload(sm);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
}

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_map.sm_start >> ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (offset > start)
		return ((offset - start) << ms_shift);
	return (0);
}
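
/*
 * For example (illustrative numbers): with a 1GB metaslab size
 * (vdev_ms_shift == 30), a candidate metaslab at index 7 and an existing
 * DVA in metaslab 3 on the same vdev yields a distance of
 * (7 - 3) << 30 == 4GB; a DVA on a different vdev is treated as maximally
 * distant (1ULL << 63).
 */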

static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
    uint64_t min_distance, dva_t *dva, int d)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < size) {
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request.  It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < size || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight, size) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
			break;

		metaslab_passivate(msp, size - 1);

		mutex_exit(&msp->ms_lock);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);

	return (offset);
}

/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (lbolt & 3) == 0)
		return (ENOSPC);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_allocated because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed).  Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL && vd->vdev_mg != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong class, just follow the rotor.
	 */
	if (mg->mg_class != mc)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}
		if (!allocatable)
			goto next;

		/*
		 * Avoid writing single-copy data to a failing vdev
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_allocated == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				uint64_t alloc, space;
				int64_t vu, su;

				alloc = spa_get_alloc(spa);
				space = spa_get_space(spa);

				/*
				 * Determine percent used in units of 0..1024.
				 * (This is just to avoid floating point.)
				 */
				vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
				su = (alloc << 10) / (space + 1);

				/*
				 * Bias by at most +/- 25% of the aliquot.
				 */
				mg->mg_bias = ((su - vu) *
				    (int64_t)mg->mg_aliquot) / (1024 * 4);
			}

			if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_allocated = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_allocated = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	return (ENOSPC);
}
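
/*
 * Worked example of the rotor bias above (illustrative numbers): if the
 * pool as a whole is 50% full (su == 512 in 0..1024 units) but this vdev is
 * 75% full (vu == 768), then mg_bias = (512 - 768) * mg_aliquot / 4096,
 * i.e. roughly -6% of the aliquot, so the rotor advances off this vdev
 * sooner and the under-used vdevs catch up over time.
 */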

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
		    offset, size);
		space_map_free(&msp->ms_map, offset, size);
	} else {
		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (ENXIO);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY, 0);
	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	space_map_claim(&msp->ms_map, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int error = 0;

	ASSERT(bp->blk_birth == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (ENOSPC);
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	bp->blk_birth = txg;

	return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa->spa_syncing_txg);

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}