1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2012, 2018 by Delphix. All rights reserved. 25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 28 * Copyright (c) 2014 Integros [integros.com] 29 */ 30 31 #include <sys/zfs_context.h> 32 #include <sys/dmu.h> 33 #include <sys/dmu_send.h> 34 #include <sys/dmu_impl.h> 35 #include <sys/dbuf.h> 36 #include <sys/dmu_objset.h> 37 #include <sys/dsl_dataset.h> 38 #include <sys/dsl_dir.h> 39 #include <sys/dmu_tx.h> 40 #include <sys/spa.h> 41 #include <sys/zio.h> 42 #include <sys/dmu_zfetch.h> 43 #include <sys/sa.h> 44 #include <sys/sa_impl.h> 45 #include <sys/zfeature.h> 46 #include <sys/blkptr.h> 47 #include <sys/range_tree.h> 48 #include <sys/callb.h> 49 #include <sys/abd.h> 50 #include <sys/vdev.h> 51 #include <sys/cityhash.h> 52 #include <sys/spa_impl.h> 53 54 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); 55 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); 56 57 #ifndef __lint 58 extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu, 59 dmu_buf_evict_func_t *evict_func_sync, 60 dmu_buf_evict_func_t *evict_func_async, 61 dmu_buf_t **clear_on_evict_dbufp); 62 #endif /* ! __lint */ 63 64 /* 65 * Global data structures and functions for the dbuf cache. 66 */ 67 static kmem_cache_t *dbuf_kmem_cache; 68 static taskq_t *dbu_evict_taskq; 69 70 static kthread_t *dbuf_cache_evict_thread; 71 static kmutex_t dbuf_evict_lock; 72 static kcondvar_t dbuf_evict_cv; 73 static boolean_t dbuf_evict_thread_exit; 74 75 /* 76 * There are two dbuf caches; each dbuf can only be in one of them at a time. 77 * 78 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands 79 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs 80 * that represent the metadata that describes filesystems/snapshots/ 81 * bookmarks/properties/etc. We only evict from this cache when we export a 82 * pool, to short-circuit as much I/O as possible for all administrative 83 * commands that need the metadata. There is no eviction policy for this 84 * cache, because we try to only include types in it which would occupy a 85 * very small amount of space per object but create a large impact on the 86 * performance of these commands. 
Instead, after it reaches a maximum size 87 * (which should only happen on very small memory systems with a very large 88 * number of filesystem objects), we stop taking new dbufs into the 89 * metadata cache, instead putting them in the normal dbuf cache. 90 * 91 * 2. LRU cache of dbufs. The "dbuf cache" maintains a list of dbufs that 92 * are not currently held but have been recently released. These dbufs 93 * are not eligible for arc eviction until they are aged out of the cache. 94 * Dbufs that are aged out of the cache will be immediately destroyed and 95 * become eligible for arc eviction. 96 * 97 * Dbufs are added to these caches once the last hold is released. If a dbuf is 98 * later accessed and still exists in the dbuf cache, then it will be removed 99 * from the cache and later re-added to the head of the cache. 100 * 101 * If a given dbuf meets the requirements for the metadata cache, it will go 102 * there, otherwise it will be considered for the generic LRU dbuf cache. The 103 * caches and the refcounts tracking their sizes are stored in an array indexed 104 * by those caches' matching enum values (from dbuf_cached_state_t). 105 */ 106 typedef struct dbuf_cache { 107 multilist_t *cache; 108 refcount_t size; 109 } dbuf_cache_t; 110 dbuf_cache_t dbuf_caches[DB_CACHE_MAX]; 111 112 /* Size limits for the caches */ 113 uint64_t dbuf_cache_max_bytes = 0; 114 uint64_t dbuf_metadata_cache_max_bytes = 0; 115 /* Set the default sizes of the caches to log2 fraction of arc size */ 116 int dbuf_cache_shift = 5; 117 int dbuf_metadata_cache_shift = 6; 118 119 /* 120 * For diagnostic purposes, this is incremented whenever we can't add 121 * something to the metadata cache because it's full, and instead put 122 * the data in the regular dbuf cache. 123 */ 124 uint64_t dbuf_metadata_cache_overflow; 125 126 /* 127 * The LRU dbuf cache uses a three-stage eviction policy: 128 * - A low water marker designates when the dbuf eviction thread 129 * should stop evicting from the dbuf cache. 130 * - When we reach the maximum size (aka mid water mark), we 131 * signal the eviction thread to run. 132 * - The high water mark indicates when the eviction thread 133 * is unable to keep up with the incoming load and eviction must 134 * happen in the context of the calling thread. 135 * 136 * The dbuf cache: 137 * (max size) 138 * low water mid water hi water 139 * +----------------------------------------+----------+----------+ 140 * | | | | 141 * | | | | 142 * | | | | 143 * | | | | 144 * +----------------------------------------+----------+----------+ 145 * stop signal evict 146 * evicting eviction directly 147 * thread 148 * 149 * The high and low water marks indicate the operating range for the eviction 150 * thread. The low water mark is, by default, 90% of the total size of the 151 * cache and the high water mark is at 110% (both of these percentages can be 152 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct, 153 * respectively). The eviction thread will try to ensure that the cache remains 154 * within this range by waking up every second and checking if the cache is 155 * above the low water mark. The thread can also be woken up by callers adding 156 * elements into the cache if the cache is larger than the mid water (i.e max 157 * cache size). Once the eviction thread is woken up and eviction is required, 158 * it will continue evicting buffers until it's able to reduce the cache size 159 * to the low water mark. 
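 * For example, with dbuf_cache_max_bytes at 100 MB and the default dbuf_cache_lowater_pct/dbuf_cache_hiwater_pct of 10, the eviction thread works the cache back down to 90 MB, and callers begin evicting directly once the cache exceeds 110 MB (illustrative numbers only, derived from the percentages described above).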
If the cache size continues to grow and hits the high 160 * water mark, then callers adding elements to the cache will begin to evict 161 * directly from the cache until the cache is no longer above the high water 162 * mark. 163 */ 164 165 /* 166 * The percentage above and below the maximum cache size. 167 */ 168 uint_t dbuf_cache_hiwater_pct = 10; 169 uint_t dbuf_cache_lowater_pct = 10; 170 171 /* ARGSUSED */ 172 static int 173 dbuf_cons(void *vdb, void *unused, int kmflag) 174 { 175 dmu_buf_impl_t *db = vdb; 176 bzero(db, sizeof (dmu_buf_impl_t)); 177 178 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL); 179 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL); 180 multilist_link_init(&db->db_cache_link); 181 refcount_create(&db->db_holds); 182 183 return (0); 184 } 185 186 /* ARGSUSED */ 187 static void 188 dbuf_dest(void *vdb, void *unused) 189 { 190 dmu_buf_impl_t *db = vdb; 191 mutex_destroy(&db->db_mtx); 192 cv_destroy(&db->db_changed); 193 ASSERT(!multilist_link_active(&db->db_cache_link)); 194 refcount_destroy(&db->db_holds); 195 } 196 197 /* 198 * dbuf hash table routines 199 */ 200 static dbuf_hash_table_t dbuf_hash_table; 201 202 static uint64_t dbuf_hash_count; 203 204 /* 205 * We use Cityhash for this. It's fast, and has good hash properties without 206 * requiring any large static buffers. 207 */ 208 static uint64_t 209 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid) 210 { 211 return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid)); 212 } 213 214 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \ 215 ((dbuf)->db.db_object == (obj) && \ 216 (dbuf)->db_objset == (os) && \ 217 (dbuf)->db_level == (level) && \ 218 (dbuf)->db_blkid == (blkid)) 219 220 dmu_buf_impl_t * 221 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid) 222 { 223 dbuf_hash_table_t *h = &dbuf_hash_table; 224 uint64_t hv = dbuf_hash(os, obj, level, blkid); 225 uint64_t idx = hv & h->hash_table_mask; 226 dmu_buf_impl_t *db; 227 228 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 229 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) { 230 if (DBUF_EQUAL(db, os, obj, level, blkid)) { 231 mutex_enter(&db->db_mtx); 232 if (db->db_state != DB_EVICTING) { 233 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 234 return (db); 235 } 236 mutex_exit(&db->db_mtx); 237 } 238 } 239 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 240 return (NULL); 241 } 242 243 static dmu_buf_impl_t * 244 dbuf_find_bonus(objset_t *os, uint64_t object) 245 { 246 dnode_t *dn; 247 dmu_buf_impl_t *db = NULL; 248 249 if (dnode_hold(os, object, FTAG, &dn) == 0) { 250 rw_enter(&dn->dn_struct_rwlock, RW_READER); 251 if (dn->dn_bonus != NULL) { 252 db = dn->dn_bonus; 253 mutex_enter(&db->db_mtx); 254 } 255 rw_exit(&dn->dn_struct_rwlock); 256 dnode_rele(dn, FTAG); 257 } 258 return (db); 259 } 260 261 /* 262 * Insert an entry into the hash table. If there is already an element 263 * equal to elem in the hash table, then the already existing element 264 * will be returned and the new element will not be inserted. 265 * Otherwise returns NULL.
266 */ 267 static dmu_buf_impl_t * 268 dbuf_hash_insert(dmu_buf_impl_t *db) 269 { 270 dbuf_hash_table_t *h = &dbuf_hash_table; 271 objset_t *os = db->db_objset; 272 uint64_t obj = db->db.db_object; 273 int level = db->db_level; 274 uint64_t blkid = db->db_blkid; 275 uint64_t hv = dbuf_hash(os, obj, level, blkid); 276 uint64_t idx = hv & h->hash_table_mask; 277 dmu_buf_impl_t *dbf; 278 279 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 280 for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) { 281 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) { 282 mutex_enter(&dbf->db_mtx); 283 if (dbf->db_state != DB_EVICTING) { 284 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 285 return (dbf); 286 } 287 mutex_exit(&dbf->db_mtx); 288 } 289 } 290 291 mutex_enter(&db->db_mtx); 292 db->db_hash_next = h->hash_table[idx]; 293 h->hash_table[idx] = db; 294 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 295 atomic_inc_64(&dbuf_hash_count); 296 297 return (NULL); 298 } 299 300 /* 301 * Remove an entry from the hash table. It must be in the EVICTING state. 302 */ 303 static void 304 dbuf_hash_remove(dmu_buf_impl_t *db) 305 { 306 dbuf_hash_table_t *h = &dbuf_hash_table; 307 uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object, 308 db->db_level, db->db_blkid); 309 uint64_t idx = hv & h->hash_table_mask; 310 dmu_buf_impl_t *dbf, **dbp; 311 312 /* 313 * We mustn't hold db_mtx to maintain lock ordering: 314 * DBUF_HASH_MUTEX > db_mtx. 315 */ 316 ASSERT(refcount_is_zero(&db->db_holds)); 317 ASSERT(db->db_state == DB_EVICTING); 318 ASSERT(!MUTEX_HELD(&db->db_mtx)); 319 320 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 321 dbp = &h->hash_table[idx]; 322 while ((dbf = *dbp) != db) { 323 dbp = &dbf->db_hash_next; 324 ASSERT(dbf != NULL); 325 } 326 *dbp = db->db_hash_next; 327 db->db_hash_next = NULL; 328 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 329 atomic_dec_64(&dbuf_hash_count); 330 } 331 332 typedef enum { 333 DBVU_EVICTING, 334 DBVU_NOT_EVICTING 335 } dbvu_verify_type_t; 336 337 static void 338 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type) 339 { 340 #ifdef ZFS_DEBUG 341 int64_t holds; 342 343 if (db->db_user == NULL) 344 return; 345 346 /* Only data blocks support the attachment of user data. */ 347 ASSERT(db->db_level == 0); 348 349 /* Clients must resolve a dbuf before attaching user data. */ 350 ASSERT(db->db.db_data != NULL); 351 ASSERT3U(db->db_state, ==, DB_CACHED); 352 353 holds = refcount_count(&db->db_holds); 354 if (verify_type == DBVU_EVICTING) { 355 /* 356 * Immediate eviction occurs when holds == dirtycnt. 357 * For normal eviction buffers, holds is zero on 358 * eviction, except when dbuf_fix_old_data() calls 359 * dbuf_clear_data(). However, the hold count can grow 360 * during eviction even though db_mtx is held (see 361 * dmu_bonus_hold() for an example), so we can only 362 * test the generic invariant that holds >= dirtycnt.
363 */ 364 ASSERT3U(holds, >=, db->db_dirtycnt); 365 } else { 366 if (db->db_user_immediate_evict == TRUE) 367 ASSERT3U(holds, >=, db->db_dirtycnt); 368 else 369 ASSERT3U(holds, >, 0); 370 } 371 #endif 372 } 373 374 static void 375 dbuf_evict_user(dmu_buf_impl_t *db) 376 { 377 dmu_buf_user_t *dbu = db->db_user; 378 379 ASSERT(MUTEX_HELD(&db->db_mtx)); 380 381 if (dbu == NULL) 382 return; 383 384 dbuf_verify_user(db, DBVU_EVICTING); 385 db->db_user = NULL; 386 387 #ifdef ZFS_DEBUG 388 if (dbu->dbu_clear_on_evict_dbufp != NULL) 389 *dbu->dbu_clear_on_evict_dbufp = NULL; 390 #endif 391 392 /* 393 * There are two eviction callbacks - one that we call synchronously 394 * and one that we invoke via a taskq. The async one is useful for 395 * avoiding lock order reversals and limiting stack depth. 396 * 397 * Note that if we have a sync callback but no async callback, 398 * it's likely that the sync callback will free the structure 399 * containing the dbu. In that case we need to take care to not 400 * dereference dbu after calling the sync evict func. 401 */ 402 boolean_t has_async = (dbu->dbu_evict_func_async != NULL); 403 404 if (dbu->dbu_evict_func_sync != NULL) 405 dbu->dbu_evict_func_sync(dbu); 406 407 if (has_async) { 408 taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async, 409 dbu, 0, &dbu->dbu_tqent); 410 } 411 } 412 413 boolean_t 414 dbuf_is_metadata(dmu_buf_impl_t *db) 415 { 416 if (db->db_level > 0) { 417 return (B_TRUE); 418 } else { 419 boolean_t is_metadata; 420 421 DB_DNODE_ENTER(db); 422 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type); 423 DB_DNODE_EXIT(db); 424 425 return (is_metadata); 426 } 427 } 428 429 /* 430 * This returns whether this dbuf should be stored in the metadata cache, which 431 * is based on whether it's from one of the dnode types that store data related 432 * to traversing dataset hierarchies. 433 */ 434 static boolean_t 435 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db) 436 { 437 DB_DNODE_ENTER(db); 438 dmu_object_type_t type = DB_DNODE(db)->dn_type; 439 DB_DNODE_EXIT(db); 440 441 /* Check if this dbuf is one of the types we care about */ 442 if (DMU_OT_IS_METADATA_CACHED(type)) { 443 /* If we hit this, then we set something up wrong in dmu_ot */ 444 ASSERT(DMU_OT_IS_METADATA(type)); 445 446 /* 447 * Sanity check for small-memory systems: don't allocate too 448 * much memory for this purpose. 449 */ 450 if (refcount_count(&dbuf_caches[DB_DBUF_METADATA_CACHE].size) > 451 dbuf_metadata_cache_max_bytes) { 452 dbuf_metadata_cache_overflow++; 453 DTRACE_PROBE1(dbuf__metadata__cache__overflow, 454 dmu_buf_impl_t *, db); 455 return (B_FALSE); 456 } 457 458 return (B_TRUE); 459 } 460 461 return (B_FALSE); 462 } 463 464 /* 465 * This function *must* return indices evenly distributed between all 466 * sublists of the multilist. This is needed due to how the dbuf eviction 467 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly 468 * distributed between all sublists and uses this assumption when 469 * deciding which sublist to evict from and how much to evict from it. 470 */ 471 unsigned int 472 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj) 473 { 474 dmu_buf_impl_t *db = obj; 475 476 /* 477 * The assumption here is that the hash value for a given 478 * dmu_buf_impl_t will remain constant throughout its lifetime 479 * (i.e. its objset, object, level and blkid fields don't change). 480 * Thus, we don't need to store the dbuf's sublist index 481 * on insertion, as this index can be recalculated on removal.
482 * 483 * Also, the low order bits of the hash value are thought to be 484 * distributed evenly. Otherwise, in the case that the multilist 485 * has a power of two number of sublists, each sublist's usage 486 * would not be evenly distributed. 487 */ 488 return (dbuf_hash(db->db_objset, db->db.db_object, 489 db->db_level, db->db_blkid) % 490 multilist_get_num_sublists(ml)); 491 } 492 493 static inline boolean_t 494 dbuf_cache_above_hiwater(void) 495 { 496 uint64_t dbuf_cache_hiwater_bytes = 497 (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100; 498 499 return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) > 500 dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes); 501 } 502 503 static inline boolean_t 504 dbuf_cache_above_lowater(void) 505 { 506 uint64_t dbuf_cache_lowater_bytes = 507 (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100; 508 509 return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) > 510 dbuf_cache_max_bytes - dbuf_cache_lowater_bytes); 511 } 512 513 /* 514 * Evict the oldest eligible dbuf from the dbuf cache. 515 */ 516 static void 517 dbuf_evict_one(void) 518 { 519 int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache); 520 multilist_sublist_t *mls = multilist_sublist_lock( 521 dbuf_caches[DB_DBUF_CACHE].cache, idx); 522 523 ASSERT(!MUTEX_HELD(&dbuf_evict_lock)); 524 525 dmu_buf_impl_t *db = multilist_sublist_tail(mls); 526 while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) { 527 db = multilist_sublist_prev(mls, db); 528 } 529 530 DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db, 531 multilist_sublist_t *, mls); 532 533 if (db != NULL) { 534 multilist_sublist_remove(mls, db); 535 multilist_sublist_unlock(mls); 536 (void) refcount_remove_many(&dbuf_caches[DB_DBUF_CACHE].size, 537 db->db.db_size, db); 538 ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE); 539 db->db_caching_status = DB_NO_CACHE; 540 dbuf_destroy(db); 541 } else { 542 multilist_sublist_unlock(mls); 543 } 544 } 545 546 /* 547 * The dbuf evict thread is responsible for aging out dbufs from the 548 * cache. Once the cache has reached its maximum size, dbufs are removed 549 * and destroyed. The eviction thread will continue running until the size 550 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged 551 * out of the cache it is destroyed and becomes eligible for arc eviction. 552 */ 553 /* ARGSUSED */ 554 static void 555 dbuf_evict_thread(void *unused) 556 { 557 callb_cpr_t cpr; 558 559 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG); 560 561 mutex_enter(&dbuf_evict_lock); 562 while (!dbuf_evict_thread_exit) { 563 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) { 564 CALLB_CPR_SAFE_BEGIN(&cpr); 565 (void) cv_timedwait_hires(&dbuf_evict_cv, 566 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0); 567 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock); 568 } 569 mutex_exit(&dbuf_evict_lock); 570 571 /* 572 * Keep evicting as long as we're above the low water mark 573 * for the cache. We do this without holding the locks to 574 * minimize lock contention. 575 */ 576 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) { 577 dbuf_evict_one(); 578 } 579 580 mutex_enter(&dbuf_evict_lock); 581 } 582 583 dbuf_evict_thread_exit = B_FALSE; 584 cv_broadcast(&dbuf_evict_cv); 585 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */ 586 thread_exit(); 587 } 588 589 /* 590 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
591 * If the dbuf cache is at its high water mark, then evict a dbuf from the 592 * dbuf cache using the caller's context. 593 */ 594 static void 595 dbuf_evict_notify(void) 596 { 597 /* 598 * We check if we should evict without holding the dbuf_evict_lock, 599 * because it's OK to occasionally make the wrong decision here, 600 * and grabbing the lock results in massive lock contention. 601 */ 602 if (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) > 603 dbuf_cache_max_bytes) { 604 if (dbuf_cache_above_hiwater()) 605 dbuf_evict_one(); 606 cv_signal(&dbuf_evict_cv); 607 } 608 } 609 610 void 611 dbuf_init(void) 612 { 613 uint64_t hsize = 1ULL << 16; 614 dbuf_hash_table_t *h = &dbuf_hash_table; 615 int i; 616 617 /* 618 * The hash table is big enough to fill all of physical memory 619 * with an average 4K block size. The table will take up 620 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers). 621 */ 622 while (hsize * 4096 < physmem * PAGESIZE) 623 hsize <<= 1; 624 625 retry: 626 h->hash_table_mask = hsize - 1; 627 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); 628 if (h->hash_table == NULL) { 629 /* XXX - we should really return an error instead of assert */ 630 ASSERT(hsize > (1ULL << 10)); 631 hsize >>= 1; 632 goto retry; 633 } 634 635 dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t", 636 sizeof (dmu_buf_impl_t), 637 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0); 638 639 for (i = 0; i < DBUF_MUTEXES; i++) 640 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL); 641 642 /* 643 * Set up the parameters for the dbuf caches. We set the sizes of the 644 * dbuf cache and the metadata cache to 1/32nd and 1/16th (default) 645 * of the size of the ARC, respectively. If the values are set in 646 * /etc/system and they're not greater than the size of the ARC, then 647 * we honor that value. 648 */ 649 if (dbuf_cache_max_bytes == 0 || 650 dbuf_cache_max_bytes >= arc_max_bytes()) { 651 dbuf_cache_max_bytes = arc_max_bytes() >> dbuf_cache_shift; 652 } 653 if (dbuf_metadata_cache_max_bytes == 0 || 654 dbuf_metadata_cache_max_bytes >= arc_max_bytes()) { 655 dbuf_metadata_cache_max_bytes = 656 arc_max_bytes() >> dbuf_metadata_cache_shift; 657 } 658 659 /* 660 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc 661 * configuration is not required.
662 */ 663 dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0); 664 665 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) { 666 dbuf_caches[dcs].cache = 667 multilist_create(sizeof (dmu_buf_impl_t), 668 offsetof(dmu_buf_impl_t, db_cache_link), 669 dbuf_cache_multilist_index_func); 670 refcount_create(&dbuf_caches[dcs].size); 671 } 672 673 dbuf_evict_thread_exit = B_FALSE; 674 mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL); 675 cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL); 676 dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread, 677 NULL, 0, &p0, TS_RUN, minclsyspri); 678 } 679 680 void 681 dbuf_fini(void) 682 { 683 dbuf_hash_table_t *h = &dbuf_hash_table; 684 int i; 685 686 for (i = 0; i < DBUF_MUTEXES; i++) 687 mutex_destroy(&h->hash_mutexes[i]); 688 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 689 kmem_cache_destroy(dbuf_kmem_cache); 690 taskq_destroy(dbu_evict_taskq); 691 692 mutex_enter(&dbuf_evict_lock); 693 dbuf_evict_thread_exit = B_TRUE; 694 while (dbuf_evict_thread_exit) { 695 cv_signal(&dbuf_evict_cv); 696 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock); 697 } 698 mutex_exit(&dbuf_evict_lock); 699 700 mutex_destroy(&dbuf_evict_lock); 701 cv_destroy(&dbuf_evict_cv); 702 703 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) { 704 refcount_destroy(&dbuf_caches[dcs].size); 705 multilist_destroy(dbuf_caches[dcs].cache); 706 } 707 } 708 709 /* 710 * Other stuff. 711 */ 712 713 #ifdef ZFS_DEBUG 714 static void 715 dbuf_verify(dmu_buf_impl_t *db) 716 { 717 dnode_t *dn; 718 dbuf_dirty_record_t *dr; 719 720 ASSERT(MUTEX_HELD(&db->db_mtx)); 721 722 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) 723 return; 724 725 ASSERT(db->db_objset != NULL); 726 DB_DNODE_ENTER(db); 727 dn = DB_DNODE(db); 728 if (dn == NULL) { 729 ASSERT(db->db_parent == NULL); 730 ASSERT(db->db_blkptr == NULL); 731 } else { 732 ASSERT3U(db->db.db_object, ==, dn->dn_object); 733 ASSERT3P(db->db_objset, ==, dn->dn_objset); 734 ASSERT3U(db->db_level, <, dn->dn_nlevels); 735 ASSERT(db->db_blkid == DMU_BONUS_BLKID || 736 db->db_blkid == DMU_SPILL_BLKID || 737 !avl_is_empty(&dn->dn_dbufs)); 738 } 739 if (db->db_blkid == DMU_BONUS_BLKID) { 740 ASSERT(dn != NULL); 741 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 742 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID); 743 } else if (db->db_blkid == DMU_SPILL_BLKID) { 744 ASSERT(dn != NULL); 745 ASSERT0(db->db.db_offset); 746 } else { 747 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); 748 } 749 750 for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next) 751 ASSERT(dr->dr_dbuf == db); 752 753 for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next) 754 ASSERT(dr->dr_dbuf == db); 755 756 /* 757 * We can't assert that db_size matches dn_datablksz because it 758 * can be momentarily different when another thread is doing 759 * dnode_set_blksz(). 760 */ 761 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { 762 dr = db->db_data_pending; 763 /* 764 * It should only be modified in syncing context, so 765 * make sure we only have one copy of the data. 
766 */ 767 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); 768 } 769 770 /* verify db->db_blkptr */ 771 if (db->db_blkptr) { 772 if (db->db_parent == dn->dn_dbuf) { 773 /* db is pointed to by the dnode */ 774 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ 775 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object)) 776 ASSERT(db->db_parent == NULL); 777 else 778 ASSERT(db->db_parent != NULL); 779 if (db->db_blkid != DMU_SPILL_BLKID) 780 ASSERT3P(db->db_blkptr, ==, 781 &dn->dn_phys->dn_blkptr[db->db_blkid]); 782 } else { 783 /* db is pointed to by an indirect block */ 784 int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; 785 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); 786 ASSERT3U(db->db_parent->db.db_object, ==, 787 db->db.db_object); 788 /* 789 * dnode_grow_indblksz() can make this fail if we don't 790 * have the struct_rwlock. XXX indblksz no longer 791 * grows. safe to do this now? 792 */ 793 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 794 ASSERT3P(db->db_blkptr, ==, 795 ((blkptr_t *)db->db_parent->db.db_data + 796 db->db_blkid % epb)); 797 } 798 } 799 } 800 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && 801 (db->db_buf == NULL || db->db_buf->b_data) && 802 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID && 803 db->db_state != DB_FILL && !dn->dn_free_txg) { 804 /* 805 * If the blkptr isn't set but they have nonzero data, 806 * it had better be dirty, otherwise we'll lose that 807 * data when we evict this buffer. 808 * 809 * There is an exception to this rule for indirect blocks; in 810 * this case, if the indirect block is a hole, we fill in a few 811 * fields on each of the child blocks (importantly, birth time) 812 * to prevent hole birth times from being lost when you 813 * partially fill in a hole. 814 */ 815 if (db->db_dirtycnt == 0) { 816 if (db->db_level == 0) { 817 uint64_t *buf = db->db.db_data; 818 int i; 819 820 for (i = 0; i < db->db.db_size >> 3; i++) { 821 ASSERT(buf[i] == 0); 822 } 823 } else { 824 blkptr_t *bps = db->db.db_data; 825 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==, 826 db->db.db_size); 827 /* 828 * We want to verify that all the blkptrs in the 829 * indirect block are holes, but we may have 830 * automatically set up a few fields for them. 831 * We iterate through each blkptr and verify 832 * they only have those fields set. 833 */ 834 for (int i = 0; 835 i < db->db.db_size / sizeof (blkptr_t); 836 i++) { 837 blkptr_t *bp = &bps[i]; 838 ASSERT(ZIO_CHECKSUM_IS_ZERO( 839 &bp->blk_cksum)); 840 ASSERT( 841 DVA_IS_EMPTY(&bp->blk_dva[0]) && 842 DVA_IS_EMPTY(&bp->blk_dva[1]) && 843 DVA_IS_EMPTY(&bp->blk_dva[2])); 844 ASSERT0(bp->blk_fill); 845 ASSERT0(bp->blk_pad[0]); 846 ASSERT0(bp->blk_pad[1]); 847 ASSERT(!BP_IS_EMBEDDED(bp)); 848 ASSERT(BP_IS_HOLE(bp)); 849 ASSERT0(bp->blk_phys_birth); 850 } 851 } 852 } 853 } 854 DB_DNODE_EXIT(db); 855 } 856 #endif 857 858 static void 859 dbuf_clear_data(dmu_buf_impl_t *db) 860 { 861 ASSERT(MUTEX_HELD(&db->db_mtx)); 862 dbuf_evict_user(db); 863 ASSERT3P(db->db_buf, ==, NULL); 864 db->db.db_data = NULL; 865 if (db->db_state != DB_NOFILL) 866 db->db_state = DB_UNCACHED; 867 } 868 869 static void 870 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) 871 { 872 ASSERT(MUTEX_HELD(&db->db_mtx)); 873 ASSERT(buf != NULL); 874 875 db->db_buf = buf; 876 ASSERT(buf->b_data != NULL); 877 db->db.db_data = buf->b_data; 878 } 879 880 /* 881 * Loan out an arc_buf for read. Return the loaned arc_buf. 
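 * If the dbuf's buffer is shared (additional holds exist or the arc_buf has already been released), a freshly allocated copy is loaned instead; otherwise the dbuf gives up its own buffer and clears its data pointer.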
882 */ 883 arc_buf_t * 884 dbuf_loan_arcbuf(dmu_buf_impl_t *db) 885 { 886 arc_buf_t *abuf; 887 888 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 889 mutex_enter(&db->db_mtx); 890 if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) { 891 int blksz = db->db.db_size; 892 spa_t *spa = db->db_objset->os_spa; 893 894 mutex_exit(&db->db_mtx); 895 abuf = arc_loan_buf(spa, B_FALSE, blksz); 896 bcopy(db->db.db_data, abuf->b_data, blksz); 897 } else { 898 abuf = db->db_buf; 899 arc_loan_inuse_buf(abuf, db); 900 db->db_buf = NULL; 901 dbuf_clear_data(db); 902 mutex_exit(&db->db_mtx); 903 } 904 return (abuf); 905 } 906 907 /* 908 * Calculate which level n block references the data at the level 0 offset 909 * provided. 910 */ 911 uint64_t 912 dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset) 913 { 914 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) { 915 /* 916 * The level n blkid is equal to the level 0 blkid divided by 917 * the number of level 0s in a level n block. 918 * 919 * The level 0 blkid is offset >> datablkshift = 920 * offset / 2^datablkshift. 921 * 922 * The number of level 0s in a level n is the number of block 923 * pointers in an indirect block, raised to the power of level. 924 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level = 925 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)). 926 * 927 * Thus, the level n blkid is: offset / 928 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT))) 929 * = offset / 2^(datablkshift + level * 930 * (indblkshift - SPA_BLKPTRSHIFT)) 931 * = offset >> (datablkshift + level * 932 * (indblkshift - SPA_BLKPTRSHIFT)) 933 */ 934 return (offset >> (dn->dn_datablkshift + level * 935 (dn->dn_indblkshift - SPA_BLKPTRSHIFT))); 936 } else { 937 ASSERT3U(offset, <, dn->dn_datablksz); 938 return (0); 939 } 940 } 941 942 static void 943 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb) 944 { 945 dmu_buf_impl_t *db = vdb; 946 947 mutex_enter(&db->db_mtx); 948 ASSERT3U(db->db_state, ==, DB_READ); 949 /* 950 * All reads are synchronous, so we must have a hold on the dbuf 951 */ 952 ASSERT(refcount_count(&db->db_holds) > 0); 953 ASSERT(db->db_buf == NULL); 954 ASSERT(db->db.db_data == NULL); 955 if (buf == NULL) { 956 /* i/o error */ 957 ASSERT(zio == NULL || zio->io_error != 0); 958 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 959 ASSERT3P(db->db_buf, ==, NULL); 960 db->db_state = DB_UNCACHED; 961 } else if (db->db_level == 0 && db->db_freed_in_flight) { 962 /* freed in flight */ 963 ASSERT(zio == NULL || zio->io_error == 0); 964 arc_release(buf, db); 965 bzero(buf->b_data, db->db.db_size); 966 arc_buf_freeze(buf); 967 db->db_freed_in_flight = FALSE; 968 dbuf_set_data(db, buf); 969 db->db_state = DB_CACHED; 970 } else { 971 /* success */ 972 ASSERT(zio == NULL || zio->io_error == 0); 973 dbuf_set_data(db, buf); 974 db->db_state = DB_CACHED; 975 } 976 cv_broadcast(&db->db_changed); 977 dbuf_rele_and_unlock(db, NULL, B_FALSE); 978 } 979 980 static void 981 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 982 { 983 dnode_t *dn; 984 zbookmark_phys_t zb; 985 arc_flags_t aflags = ARC_FLAG_NOWAIT; 986 987 DB_DNODE_ENTER(db); 988 dn = DB_DNODE(db); 989 ASSERT(!refcount_is_zero(&db->db_holds)); 990 /* We need the struct_rwlock to prevent db_blkptr from changing. 
*/ 991 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 992 ASSERT(MUTEX_HELD(&db->db_mtx)); 993 ASSERT(db->db_state == DB_UNCACHED); 994 ASSERT(db->db_buf == NULL); 995 996 if (db->db_blkid == DMU_BONUS_BLKID) { 997 /* 998 * The bonus length stored in the dnode may be less than 999 * the maximum available space in the bonus buffer. 1000 */ 1001 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 1002 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 1003 1004 ASSERT3U(bonuslen, <=, db->db.db_size); 1005 db->db.db_data = zio_buf_alloc(max_bonuslen); 1006 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS); 1007 if (bonuslen < max_bonuslen) 1008 bzero(db->db.db_data, max_bonuslen); 1009 if (bonuslen) 1010 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen); 1011 DB_DNODE_EXIT(db); 1012 db->db_state = DB_CACHED; 1013 mutex_exit(&db->db_mtx); 1014 return; 1015 } 1016 1017 /* 1018 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 1019 * processes the delete record and clears the bp while we are waiting 1020 * for the dn_mtx (resulting in a "no" from block_freed). 1021 */ 1022 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 1023 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 1024 BP_IS_HOLE(db->db_blkptr)))) { 1025 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1026 1027 dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type, 1028 db->db.db_size)); 1029 bzero(db->db.db_data, db->db.db_size); 1030 1031 if (db->db_blkptr != NULL && db->db_level > 0 && 1032 BP_IS_HOLE(db->db_blkptr) && 1033 db->db_blkptr->blk_birth != 0) { 1034 blkptr_t *bps = db->db.db_data; 1035 for (int i = 0; i < ((1 << 1036 DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t)); 1037 i++) { 1038 blkptr_t *bp = &bps[i]; 1039 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 1040 1 << dn->dn_indblkshift); 1041 BP_SET_LSIZE(bp, 1042 BP_GET_LEVEL(db->db_blkptr) == 1 ? 1043 dn->dn_datablksz : 1044 BP_GET_LSIZE(db->db_blkptr)); 1045 BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr)); 1046 BP_SET_LEVEL(bp, 1047 BP_GET_LEVEL(db->db_blkptr) - 1); 1048 BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0); 1049 } 1050 } 1051 DB_DNODE_EXIT(db); 1052 db->db_state = DB_CACHED; 1053 mutex_exit(&db->db_mtx); 1054 return; 1055 } 1056 1057 DB_DNODE_EXIT(db); 1058 1059 db->db_state = DB_READ; 1060 mutex_exit(&db->db_mtx); 1061 1062 if (DBUF_IS_L2CACHEABLE(db)) 1063 aflags |= ARC_FLAG_L2CACHE; 1064 1065 SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ? 1066 db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET, 1067 db->db.db_object, db->db_level, db->db_blkid); 1068 1069 dbuf_add_ref(db, NULL); 1070 1071 (void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr, 1072 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, 1073 (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED, 1074 &aflags, &zb); 1075 } 1076 1077 /* 1078 * This is our just-in-time copy function. It makes a copy of buffers that 1079 * have been modified in a previous transaction group before we access them in 1080 * the current active group. 1081 * 1082 * This function is used in three places: when we are dirtying a buffer for the 1083 * first time in a txg, when we are freeing a range in a dnode that includes 1084 * this buffer, and when we are accessing a buffer which was received compressed 1085 * and later referenced in a WRITE_BYREF record. 1086 * 1087 * Note that when we are called from dbuf_free_range() we do not put a hold on 1088 * the buffer, we just traverse the active dbuf list for the dnode. 
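 * If the most recent dirty record still points at the dbuf's current buffer, that record is given a private copy of the data (or, when no other holders remain, the dbuf's data pointer is simply cleared).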
1089 */ 1090 static void 1091 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) 1092 { 1093 dbuf_dirty_record_t *dr = db->db_last_dirty; 1094 1095 ASSERT(MUTEX_HELD(&db->db_mtx)); 1096 ASSERT(db->db.db_data != NULL); 1097 ASSERT(db->db_level == 0); 1098 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT); 1099 1100 if (dr == NULL || 1101 (dr->dt.dl.dr_data != 1102 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf))) 1103 return; 1104 1105 /* 1106 * If the last dirty record for this dbuf has not yet synced 1107 * and it's referencing the dbuf data, either: 1108 * reset the reference to point to a new copy, 1109 * or (if there are no active holders) 1110 * just null out the current db_data pointer. 1111 */ 1112 ASSERT(dr->dr_txg >= txg - 2); 1113 if (db->db_blkid == DMU_BONUS_BLKID) { 1114 /* Note that the data bufs here are zio_bufs */ 1115 dnode_t *dn = DB_DNODE(db); 1116 int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 1117 dr->dt.dl.dr_data = zio_buf_alloc(bonuslen); 1118 arc_space_consume(bonuslen, ARC_SPACE_BONUS); 1119 bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen); 1120 } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) { 1121 int size = arc_buf_size(db->db_buf); 1122 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1123 spa_t *spa = db->db_objset->os_spa; 1124 enum zio_compress compress_type = 1125 arc_get_compression(db->db_buf); 1126 1127 if (compress_type == ZIO_COMPRESS_OFF) { 1128 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size); 1129 } else { 1130 ASSERT3U(type, ==, ARC_BUFC_DATA); 1131 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db, 1132 size, arc_buf_lsize(db->db_buf), compress_type); 1133 } 1134 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size); 1135 } else { 1136 db->db_buf = NULL; 1137 dbuf_clear_data(db); 1138 } 1139 } 1140 1141 int 1142 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 1143 { 1144 int err = 0; 1145 boolean_t prefetch; 1146 dnode_t *dn; 1147 1148 /* 1149 * We don't have to hold the mutex to check db_state because it 1150 * can't be freed while we have a hold on the buffer. 1151 */ 1152 ASSERT(!refcount_is_zero(&db->db_holds)); 1153 1154 if (db->db_state == DB_NOFILL) 1155 return (SET_ERROR(EIO)); 1156 1157 DB_DNODE_ENTER(db); 1158 dn = DB_DNODE(db); 1159 if ((flags & DB_RF_HAVESTRUCT) == 0) 1160 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1161 1162 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1163 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL && 1164 DBUF_IS_CACHEABLE(db); 1165 1166 mutex_enter(&db->db_mtx); 1167 if (db->db_state == DB_CACHED) { 1168 /* 1169 * If the arc buf is compressed, we need to decompress it to 1170 * read the data. This could happen during the "zfs receive" of 1171 * a stream which is compressed and deduplicated.
1172 */ 1173 if (db->db_buf != NULL && 1174 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) { 1175 dbuf_fix_old_data(db, 1176 spa_syncing_txg(dmu_objset_spa(db->db_objset))); 1177 err = arc_decompress(db->db_buf); 1178 dbuf_set_data(db, db->db_buf); 1179 } 1180 mutex_exit(&db->db_mtx); 1181 if (prefetch) 1182 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1183 if ((flags & DB_RF_HAVESTRUCT) == 0) 1184 rw_exit(&dn->dn_struct_rwlock); 1185 DB_DNODE_EXIT(db); 1186 } else if (db->db_state == DB_UNCACHED) { 1187 spa_t *spa = dn->dn_objset->os_spa; 1188 boolean_t need_wait = B_FALSE; 1189 1190 if (zio == NULL && 1191 db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1192 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1193 need_wait = B_TRUE; 1194 } 1195 dbuf_read_impl(db, zio, flags); 1196 1197 /* dbuf_read_impl has dropped db_mtx for us */ 1198 1199 if (prefetch) 1200 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1201 1202 if ((flags & DB_RF_HAVESTRUCT) == 0) 1203 rw_exit(&dn->dn_struct_rwlock); 1204 DB_DNODE_EXIT(db); 1205 1206 if (need_wait) 1207 err = zio_wait(zio); 1208 } else { 1209 /* 1210 * Another reader came in while the dbuf was in flight 1211 * between UNCACHED and CACHED. Either a writer will finish 1212 * writing the buffer (sending the dbuf to CACHED) or the 1213 * first reader's request will reach the read_done callback 1214 * and send the dbuf to CACHED. Otherwise, a failure 1215 * occurred and the dbuf went to UNCACHED. 1216 */ 1217 mutex_exit(&db->db_mtx); 1218 if (prefetch) 1219 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1220 if ((flags & DB_RF_HAVESTRUCT) == 0) 1221 rw_exit(&dn->dn_struct_rwlock); 1222 DB_DNODE_EXIT(db); 1223 1224 /* Skip the wait per the caller's request. */ 1225 mutex_enter(&db->db_mtx); 1226 if ((flags & DB_RF_NEVERWAIT) == 0) { 1227 while (db->db_state == DB_READ || 1228 db->db_state == DB_FILL) { 1229 ASSERT(db->db_state == DB_READ || 1230 (flags & DB_RF_HAVESTRUCT) == 0); 1231 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1232 db, zio_t *, zio); 1233 cv_wait(&db->db_changed, &db->db_mtx); 1234 } 1235 if (db->db_state == DB_UNCACHED) 1236 err = SET_ERROR(EIO); 1237 } 1238 mutex_exit(&db->db_mtx); 1239 } 1240 1241 return (err); 1242 } 1243 1244 static void 1245 dbuf_noread(dmu_buf_impl_t *db) 1246 { 1247 ASSERT(!refcount_is_zero(&db->db_holds)); 1248 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1249 mutex_enter(&db->db_mtx); 1250 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1251 cv_wait(&db->db_changed, &db->db_mtx); 1252 if (db->db_state == DB_UNCACHED) { 1253 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1254 spa_t *spa = db->db_objset->os_spa; 1255 1256 ASSERT(db->db_buf == NULL); 1257 ASSERT(db->db.db_data == NULL); 1258 dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size)); 1259 db->db_state = DB_FILL; 1260 } else if (db->db_state == DB_NOFILL) { 1261 dbuf_clear_data(db); 1262 } else { 1263 ASSERT3U(db->db_state, ==, DB_CACHED); 1264 } 1265 mutex_exit(&db->db_mtx); 1266 } 1267 1268 void 1269 dbuf_unoverride(dbuf_dirty_record_t *dr) 1270 { 1271 dmu_buf_impl_t *db = dr->dr_dbuf; 1272 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1273 uint64_t txg = dr->dr_txg; 1274 1275 ASSERT(MUTEX_HELD(&db->db_mtx)); 1276 /* 1277 * This assert is valid because dmu_sync() expects to be called by 1278 * a zilog's get_data while holding a range lock. This call only 1279 * comes from dbuf_dirty() callers who must also hold a range lock. 
1280 */ 1281 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1282 ASSERT(db->db_level == 0); 1283 1284 if (db->db_blkid == DMU_BONUS_BLKID || 1285 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1286 return; 1287 1288 ASSERT(db->db_data_pending != dr); 1289 1290 /* free this block */ 1291 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 1292 zio_free(db->db_objset->os_spa, txg, bp); 1293 1294 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1295 dr->dt.dl.dr_nopwrite = B_FALSE; 1296 1297 /* 1298 * Release the already-written buffer, so we leave it in 1299 * a consistent dirty state. Note that all callers are 1300 * modifying the buffer, so they will immediately do 1301 * another (redundant) arc_release(). Therefore, leave 1302 * the buf thawed to save the effort of freezing & 1303 * immediately re-thawing it. 1304 */ 1305 arc_release(dr->dt.dl.dr_data, db); 1306 } 1307 1308 /* 1309 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0 1310 * data blocks in the free range, so that any future readers will find 1311 * empty blocks. 1312 */ 1313 void 1314 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 1315 dmu_tx_t *tx) 1316 { 1317 dmu_buf_impl_t db_search; 1318 dmu_buf_impl_t *db, *db_next; 1319 uint64_t txg = tx->tx_txg; 1320 avl_index_t where; 1321 1322 if (end_blkid > dn->dn_maxblkid && 1323 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 1324 end_blkid = dn->dn_maxblkid; 1325 dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); 1326 1327 db_search.db_level = 0; 1328 db_search.db_blkid = start_blkid; 1329 db_search.db_state = DB_SEARCH; 1330 1331 mutex_enter(&dn->dn_dbufs_mtx); 1332 db = avl_find(&dn->dn_dbufs, &db_search, &where); 1333 ASSERT3P(db, ==, NULL); 1334 1335 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 1336 1337 for (; db != NULL; db = db_next) { 1338 db_next = AVL_NEXT(&dn->dn_dbufs, db); 1339 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1340 1341 if (db->db_level != 0 || db->db_blkid > end_blkid) { 1342 break; 1343 } 1344 ASSERT3U(db->db_blkid, >=, start_blkid); 1345 1346 /* found a level 0 buffer in the range */ 1347 mutex_enter(&db->db_mtx); 1348 if (dbuf_undirty(db, tx)) { 1349 /* mutex has been dropped and dbuf destroyed */ 1350 continue; 1351 } 1352 1353 if (db->db_state == DB_UNCACHED || 1354 db->db_state == DB_NOFILL || 1355 db->db_state == DB_EVICTING) { 1356 ASSERT(db->db.db_data == NULL); 1357 mutex_exit(&db->db_mtx); 1358 continue; 1359 } 1360 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1361 /* will be handled in dbuf_read_done or dbuf_rele */ 1362 db->db_freed_in_flight = TRUE; 1363 mutex_exit(&db->db_mtx); 1364 continue; 1365 } 1366 if (refcount_count(&db->db_holds) == 0) { 1367 ASSERT(db->db_buf); 1368 dbuf_destroy(db); 1369 continue; 1370 } 1371 /* The dbuf is referenced */ 1372 1373 if (db->db_last_dirty != NULL) { 1374 dbuf_dirty_record_t *dr = db->db_last_dirty; 1375 1376 if (dr->dr_txg == txg) { 1377 /* 1378 * This buffer is "in-use", re-adjust the file 1379 * size to reflect that this buffer may 1380 * contain new data when we sync. 1381 */ 1382 if (db->db_blkid != DMU_SPILL_BLKID && 1383 db->db_blkid > dn->dn_maxblkid) 1384 dn->dn_maxblkid = db->db_blkid; 1385 dbuf_unoverride(dr); 1386 } else { 1387 /* 1388 * This dbuf is not dirty in the open context. 1389 * Either uncache it (if it's not referenced in 1390 * the open context) or reset its contents to 1391 * empty.
1392 */ 1393 dbuf_fix_old_data(db, txg); 1394 } 1395 } 1396 /* clear the contents if it's cached */ 1397 if (db->db_state == DB_CACHED) { 1398 ASSERT(db->db.db_data != NULL); 1399 arc_release(db->db_buf, db); 1400 bzero(db->db.db_data, db->db.db_size); 1401 arc_buf_freeze(db->db_buf); 1402 } 1403 1404 mutex_exit(&db->db_mtx); 1405 } 1406 mutex_exit(&dn->dn_dbufs_mtx); 1407 } 1408 1409 void 1410 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 1411 { 1412 arc_buf_t *buf, *obuf; 1413 int osize = db->db.db_size; 1414 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1415 dnode_t *dn; 1416 1417 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1418 1419 DB_DNODE_ENTER(db); 1420 dn = DB_DNODE(db); 1421 1422 /* XXX does *this* func really need the lock? */ 1423 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 1424 1425 /* 1426 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held 1427 * is OK, because there can be no other references to the db 1428 * when we are changing its size, so no concurrent DB_FILL can 1429 * be happening. 1430 */ 1431 /* 1432 * XXX we should be doing a dbuf_read, checking the return 1433 * value and returning that up to our callers 1434 */ 1435 dmu_buf_will_dirty(&db->db, tx); 1436 1437 /* create the data buffer for the new block */ 1438 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 1439 1440 /* copy old block data to the new block */ 1441 obuf = db->db_buf; 1442 bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 1443 /* zero the remainder */ 1444 if (size > osize) 1445 bzero((uint8_t *)buf->b_data + osize, size - osize); 1446 1447 mutex_enter(&db->db_mtx); 1448 dbuf_set_data(db, buf); 1449 arc_buf_destroy(obuf, db); 1450 db->db.db_size = size; 1451 1452 if (db->db_level == 0) { 1453 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1454 db->db_last_dirty->dt.dl.dr_data = buf; 1455 } 1456 mutex_exit(&db->db_mtx); 1457 1458 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 1459 DB_DNODE_EXIT(db); 1460 } 1461 1462 void 1463 dbuf_release_bp(dmu_buf_impl_t *db) 1464 { 1465 objset_t *os = db->db_objset; 1466 1467 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 1468 ASSERT(arc_released(os->os_phys_buf) || 1469 list_link_active(&os->os_dsl_dataset->ds_synced_link)); 1470 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 1471 1472 (void) arc_release(db->db_buf, db); 1473 } 1474 1475 /* 1476 * We already have a dirty record for this TXG, and we are being 1477 * dirtied again. 1478 */ 1479 static void 1480 dbuf_redirty(dbuf_dirty_record_t *dr) 1481 { 1482 dmu_buf_impl_t *db = dr->dr_dbuf; 1483 1484 ASSERT(MUTEX_HELD(&db->db_mtx)); 1485 1486 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 1487 /* 1488 * If this buffer has already been written out, 1489 * we now need to reset its state. 1490 */ 1491 dbuf_unoverride(dr); 1492 if (db->db.db_object != DMU_META_DNODE_OBJECT && 1493 db->db_state != DB_NOFILL) { 1494 /* Already released on initial dirty, so just thaw.
*/ 1495 ASSERT(arc_released(db->db_buf)); 1496 arc_buf_thaw(db->db_buf); 1497 } 1498 } 1499 } 1500 1501 dbuf_dirty_record_t * 1502 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1503 { 1504 dnode_t *dn; 1505 objset_t *os; 1506 dbuf_dirty_record_t **drp, *dr; 1507 int drop_struct_lock = FALSE; 1508 int txgoff = tx->tx_txg & TXG_MASK; 1509 1510 ASSERT(tx->tx_txg != 0); 1511 ASSERT(!refcount_is_zero(&db->db_holds)); 1512 DMU_TX_DIRTY_BUF(tx, db); 1513 1514 DB_DNODE_ENTER(db); 1515 dn = DB_DNODE(db); 1516 /* 1517 * Shouldn't dirty a regular buffer in syncing context. Private 1518 * objects may be dirtied in syncing context, but only if they 1519 * were already pre-dirtied in open context. 1520 */ 1521 #ifdef DEBUG 1522 if (dn->dn_objset->os_dsl_dataset != NULL) { 1523 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1524 RW_READER, FTAG); 1525 } 1526 ASSERT(!dmu_tx_is_syncing(tx) || 1527 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 1528 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 1529 dn->dn_objset->os_dsl_dataset == NULL); 1530 if (dn->dn_objset->os_dsl_dataset != NULL) 1531 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 1532 #endif 1533 /* 1534 * We make this assert for private objects as well, but after we 1535 * check if we're already dirty. They are allowed to re-dirty 1536 * in syncing context. 1537 */ 1538 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1539 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1540 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1541 1542 mutex_enter(&db->db_mtx); 1543 /* 1544 * XXX make this true for indirects too? The problem is that 1545 * transactions created with dmu_tx_create_assigned() from 1546 * syncing context don't bother holding ahead. 1547 */ 1548 ASSERT(db->db_level != 0 || 1549 db->db_state == DB_CACHED || db->db_state == DB_FILL || 1550 db->db_state == DB_NOFILL); 1551 1552 mutex_enter(&dn->dn_mtx); 1553 /* 1554 * Don't set dirtyctx to SYNC if we're just modifying this as we 1555 * initialize the objset. 1556 */ 1557 if (dn->dn_dirtyctx == DN_UNDIRTIED) { 1558 if (dn->dn_objset->os_dsl_dataset != NULL) { 1559 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1560 RW_READER, FTAG); 1561 } 1562 if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1563 dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? 1564 DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1565 ASSERT(dn->dn_dirtyctx_firstset == NULL); 1566 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1567 } 1568 if (dn->dn_objset->os_dsl_dataset != NULL) { 1569 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1570 FTAG); 1571 } 1572 } 1573 mutex_exit(&dn->dn_mtx); 1574 1575 if (db->db_blkid == DMU_SPILL_BLKID) 1576 dn->dn_have_spill = B_TRUE; 1577 1578 /* 1579 * If this buffer is already dirty, we're done. 1580 */ 1581 drp = &db->db_last_dirty; 1582 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 1583 db->db.db_object == DMU_META_DNODE_OBJECT); 1584 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 1585 drp = &dr->dr_next; 1586 if (dr && dr->dr_txg == tx->tx_txg) { 1587 DB_DNODE_EXIT(db); 1588 1589 dbuf_redirty(dr); 1590 mutex_exit(&db->db_mtx); 1591 return (dr); 1592 } 1593 1594 /* 1595 * Only valid if not already dirty. 1596 */ 1597 ASSERT(dn->dn_object == 0 || 1598 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1599 (dmu_tx_is_syncing(tx) ? 
DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1600 1601 ASSERT3U(dn->dn_nlevels, >, db->db_level); 1602 1603 /* 1604 * We should only be dirtying in syncing context if it's the 1605 * mos or we're initializing the os or it's a special object. 1606 * However, we are allowed to dirty in syncing context provided 1607 * we already dirtied it in open context. Hence we must make 1608 * this assertion only if we're not already dirty. 1609 */ 1610 os = dn->dn_objset; 1611 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 1612 #ifdef DEBUG 1613 if (dn->dn_objset->os_dsl_dataset != NULL) 1614 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 1615 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 1616 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 1617 if (dn->dn_objset->os_dsl_dataset != NULL) 1618 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1619 #endif 1620 ASSERT(db->db.db_size != 0); 1621 1622 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1623 1624 if (db->db_blkid != DMU_BONUS_BLKID) { 1625 dmu_objset_willuse_space(os, db->db.db_size, tx); 1626 } 1627 1628 /* 1629 * If this buffer is dirty in an old transaction group we need 1630 * to make a copy of it so that the changes we make in this 1631 * transaction group won't leak out when we sync the older txg. 1632 */ 1633 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1634 if (db->db_level == 0) { 1635 void *data_old = db->db_buf; 1636 1637 if (db->db_state != DB_NOFILL) { 1638 if (db->db_blkid == DMU_BONUS_BLKID) { 1639 dbuf_fix_old_data(db, tx->tx_txg); 1640 data_old = db->db.db_data; 1641 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 1642 /* 1643 * Release the data buffer from the cache so 1644 * that we can modify it without impacting 1645 * possible other users of this cached data 1646 * block. Note that indirect blocks and 1647 * private objects are not released until the 1648 * syncing state (since they are only modified 1649 * then). 1650 */ 1651 arc_release(db->db_buf, db); 1652 dbuf_fix_old_data(db, tx->tx_txg); 1653 data_old = db->db_buf; 1654 } 1655 ASSERT(data_old != NULL); 1656 } 1657 dr->dt.dl.dr_data = data_old; 1658 } else { 1659 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1660 list_create(&dr->dt.di.dr_children, 1661 sizeof (dbuf_dirty_record_t), 1662 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1663 } 1664 if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) 1665 dr->dr_accounted = db->db.db_size; 1666 dr->dr_dbuf = db; 1667 dr->dr_txg = tx->tx_txg; 1668 dr->dr_next = *drp; 1669 *drp = dr; 1670 1671 /* 1672 * We could have been freed_in_flight between the dbuf_noread 1673 * and dbuf_dirty. We win, as though the dbuf_noread() had 1674 * happened after the free. 
1675 */ 1676 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1677 db->db_blkid != DMU_SPILL_BLKID) { 1678 mutex_enter(&dn->dn_mtx); 1679 if (dn->dn_free_ranges[txgoff] != NULL) { 1680 range_tree_clear(dn->dn_free_ranges[txgoff], 1681 db->db_blkid, 1); 1682 } 1683 mutex_exit(&dn->dn_mtx); 1684 db->db_freed_in_flight = FALSE; 1685 } 1686 1687 /* 1688 * This buffer is now part of this txg 1689 */ 1690 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1691 db->db_dirtycnt += 1; 1692 ASSERT3U(db->db_dirtycnt, <=, 3); 1693 1694 mutex_exit(&db->db_mtx); 1695 1696 if (db->db_blkid == DMU_BONUS_BLKID || 1697 db->db_blkid == DMU_SPILL_BLKID) { 1698 mutex_enter(&dn->dn_mtx); 1699 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1700 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1701 mutex_exit(&dn->dn_mtx); 1702 dnode_setdirty(dn, tx); 1703 DB_DNODE_EXIT(db); 1704 return (dr); 1705 } 1706 1707 /* 1708 * The dn_struct_rwlock prevents db_blkptr from changing 1709 * due to a write from syncing context completing 1710 * while we are running, so we want to acquire it before 1711 * looking at db_blkptr. 1712 */ 1713 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 1714 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1715 drop_struct_lock = TRUE; 1716 } 1717 1718 /* 1719 * We need to hold the dn_struct_rwlock to make this assertion, 1720 * because it protects dn_phys / dn_next_nlevels from changing. 1721 */ 1722 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 1723 dn->dn_phys->dn_nlevels > db->db_level || 1724 dn->dn_next_nlevels[txgoff] > db->db_level || 1725 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 1726 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 1727 1728 /* 1729 * If we are overwriting a dedup BP, then unless it is snapshotted, 1730 * when we get to syncing context we will need to decrement its 1731 * refcount in the DDT. Prefetch the relevant DDT block so that 1732 * syncing context won't have to wait for the i/o. 1733 */ 1734 ddt_prefetch(os->os_spa, db->db_blkptr); 1735 1736 if (db->db_level == 0) { 1737 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); 1738 ASSERT(dn->dn_maxblkid >= db->db_blkid); 1739 } 1740 1741 if (db->db_level+1 < dn->dn_nlevels) { 1742 dmu_buf_impl_t *parent = db->db_parent; 1743 dbuf_dirty_record_t *di; 1744 int parent_held = FALSE; 1745 1746 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1747 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1748 1749 parent = dbuf_hold_level(dn, db->db_level+1, 1750 db->db_blkid >> epbs, FTAG); 1751 ASSERT(parent != NULL); 1752 parent_held = TRUE; 1753 } 1754 if (drop_struct_lock) 1755 rw_exit(&dn->dn_struct_rwlock); 1756 ASSERT3U(db->db_level+1, ==, parent->db_level); 1757 di = dbuf_dirty(parent, tx); 1758 if (parent_held) 1759 dbuf_rele(parent, FTAG); 1760 1761 mutex_enter(&db->db_mtx); 1762 /* 1763 * Since we've dropped the mutex, it's possible that 1764 * dbuf_undirty() might have changed this out from under us. 
1765 */ 1766 if (db->db_last_dirty == dr || 1767 dn->dn_object == DMU_META_DNODE_OBJECT) { 1768 mutex_enter(&di->dt.di.dr_mtx); 1769 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1770 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1771 list_insert_tail(&di->dt.di.dr_children, dr); 1772 mutex_exit(&di->dt.di.dr_mtx); 1773 dr->dr_parent = di; 1774 } 1775 mutex_exit(&db->db_mtx); 1776 } else { 1777 ASSERT(db->db_level+1 == dn->dn_nlevels); 1778 ASSERT(db->db_blkid < dn->dn_nblkptr); 1779 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 1780 mutex_enter(&dn->dn_mtx); 1781 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1782 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1783 mutex_exit(&dn->dn_mtx); 1784 if (drop_struct_lock) 1785 rw_exit(&dn->dn_struct_rwlock); 1786 } 1787 1788 dnode_setdirty(dn, tx); 1789 DB_DNODE_EXIT(db); 1790 return (dr); 1791 } 1792 1793 /* 1794 * Undirty a buffer in the transaction group referenced by the given 1795 * transaction. Return whether this evicted the dbuf. 1796 */ 1797 static boolean_t 1798 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1799 { 1800 dnode_t *dn; 1801 uint64_t txg = tx->tx_txg; 1802 dbuf_dirty_record_t *dr, **drp; 1803 1804 ASSERT(txg != 0); 1805 1806 /* 1807 * Due to our use of dn_nlevels below, this can only be called 1808 * in open context, unless we are operating on the MOS. 1809 * From syncing context, dn_nlevels may be different from the 1810 * dn_nlevels used when dbuf was dirtied. 1811 */ 1812 ASSERT(db->db_objset == 1813 dmu_objset_pool(db->db_objset)->dp_meta_objset || 1814 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 1815 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1816 ASSERT0(db->db_level); 1817 ASSERT(MUTEX_HELD(&db->db_mtx)); 1818 1819 /* 1820 * If this buffer is not dirty, we're done. 1821 */ 1822 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1823 if (dr->dr_txg <= txg) 1824 break; 1825 if (dr == NULL || dr->dr_txg < txg) 1826 return (B_FALSE); 1827 ASSERT(dr->dr_txg == txg); 1828 ASSERT(dr->dr_dbuf == db); 1829 1830 DB_DNODE_ENTER(db); 1831 dn = DB_DNODE(db); 1832 1833 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1834 1835 ASSERT(db->db.db_size != 0); 1836 1837 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 1838 dr->dr_accounted, txg); 1839 1840 *drp = dr->dr_next; 1841 1842 /* 1843 * Note that there are three places in dbuf_dirty() 1844 * where this dirty record may be put on a list. 1845 * Make sure to do a list_remove corresponding to 1846 * every one of those list_insert calls. 
1847 */ 1848 if (dr->dr_parent) { 1849 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1850 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1851 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 1852 } else if (db->db_blkid == DMU_SPILL_BLKID || 1853 db->db_level + 1 == dn->dn_nlevels) { 1854 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1855 mutex_enter(&dn->dn_mtx); 1856 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1857 mutex_exit(&dn->dn_mtx); 1858 } 1859 DB_DNODE_EXIT(db); 1860 1861 if (db->db_state != DB_NOFILL) { 1862 dbuf_unoverride(dr); 1863 1864 ASSERT(db->db_buf != NULL); 1865 ASSERT(dr->dt.dl.dr_data != NULL); 1866 if (dr->dt.dl.dr_data != db->db_buf) 1867 arc_buf_destroy(dr->dt.dl.dr_data, db); 1868 } 1869 1870 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1871 1872 ASSERT(db->db_dirtycnt > 0); 1873 db->db_dirtycnt -= 1; 1874 1875 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1876 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 1877 dbuf_destroy(db); 1878 return (B_TRUE); 1879 } 1880 1881 return (B_FALSE); 1882 } 1883 1884 void 1885 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 1886 { 1887 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1888 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1889 1890 ASSERT(tx->tx_txg != 0); 1891 ASSERT(!refcount_is_zero(&db->db_holds)); 1892 1893 /* 1894 * Quick check for dirtyness. For already dirty blocks, this 1895 * reduces runtime of this function by >90%, and overall performance 1896 * by 50% for some workloads (e.g. file deletion with indirect blocks 1897 * cached). 1898 */ 1899 mutex_enter(&db->db_mtx); 1900 dbuf_dirty_record_t *dr; 1901 for (dr = db->db_last_dirty; 1902 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 1903 /* 1904 * It's possible that it is already dirty but not cached, 1905 * because there are some calls to dbuf_dirty() that don't 1906 * go through dmu_buf_will_dirty(). 1907 */ 1908 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 1909 /* This dbuf is already dirty and cached. */ 1910 dbuf_redirty(dr); 1911 mutex_exit(&db->db_mtx); 1912 return; 1913 } 1914 } 1915 mutex_exit(&db->db_mtx); 1916 1917 DB_DNODE_ENTER(db); 1918 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 1919 rf |= DB_RF_HAVESTRUCT; 1920 DB_DNODE_EXIT(db); 1921 (void) dbuf_read(db, NULL, rf); 1922 (void) dbuf_dirty(db, tx); 1923 } 1924 1925 void 1926 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1927 { 1928 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1929 1930 db->db_state = DB_NOFILL; 1931 1932 dmu_buf_will_fill(db_fake, tx); 1933 } 1934 1935 void 1936 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1937 { 1938 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1939 1940 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1941 ASSERT(tx->tx_txg != 0); 1942 ASSERT(db->db_level == 0); 1943 ASSERT(!refcount_is_zero(&db->db_holds)); 1944 1945 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1946 dmu_tx_private_ok(tx)); 1947 1948 dbuf_noread(db); 1949 (void) dbuf_dirty(db, tx); 1950 } 1951 1952 #pragma weak dmu_buf_fill_done = dbuf_fill_done 1953 /* ARGSUSED */ 1954 void 1955 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1956 { 1957 mutex_enter(&db->db_mtx); 1958 DBUF_VERIFY(db); 1959 1960 if (db->db_state == DB_FILL) { 1961 if (db->db_level == 0 && db->db_freed_in_flight) { 1962 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1963 /* we were freed while filling */ 1964 /* XXX dbuf_undirty? 
*/ 1965 bzero(db->db.db_data, db->db.db_size); 1966 db->db_freed_in_flight = FALSE; 1967 } 1968 db->db_state = DB_CACHED; 1969 cv_broadcast(&db->db_changed); 1970 } 1971 mutex_exit(&db->db_mtx); 1972 } 1973 1974 void 1975 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 1976 bp_embedded_type_t etype, enum zio_compress comp, 1977 int uncompressed_size, int compressed_size, int byteorder, 1978 dmu_tx_t *tx) 1979 { 1980 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 1981 struct dirty_leaf *dl; 1982 dmu_object_type_t type; 1983 1984 if (etype == BP_EMBEDDED_TYPE_DATA) { 1985 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 1986 SPA_FEATURE_EMBEDDED_DATA)); 1987 } 1988 1989 DB_DNODE_ENTER(db); 1990 type = DB_DNODE(db)->dn_type; 1991 DB_DNODE_EXIT(db); 1992 1993 ASSERT0(db->db_level); 1994 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1995 1996 dmu_buf_will_not_fill(dbuf, tx); 1997 1998 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1999 dl = &db->db_last_dirty->dt.dl; 2000 encode_embedded_bp_compressed(&dl->dr_overridden_by, 2001 data, comp, uncompressed_size, compressed_size); 2002 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 2003 BP_SET_TYPE(&dl->dr_overridden_by, type); 2004 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 2005 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 2006 2007 dl->dr_override_state = DR_OVERRIDDEN; 2008 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 2009 } 2010 2011 /* 2012 * Directly assign a provided arc buf to a given dbuf if it's not referenced 2013 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 2014 */ 2015 void 2016 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 2017 { 2018 ASSERT(!refcount_is_zero(&db->db_holds)); 2019 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2020 ASSERT(db->db_level == 0); 2021 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 2022 ASSERT(buf != NULL); 2023 ASSERT(arc_buf_lsize(buf) == db->db.db_size); 2024 ASSERT(tx->tx_txg != 0); 2025 2026 arc_return_buf(buf, db); 2027 ASSERT(arc_released(buf)); 2028 2029 mutex_enter(&db->db_mtx); 2030 2031 while (db->db_state == DB_READ || db->db_state == DB_FILL) 2032 cv_wait(&db->db_changed, &db->db_mtx); 2033 2034 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 2035 2036 if (db->db_state == DB_CACHED && 2037 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 2038 mutex_exit(&db->db_mtx); 2039 (void) dbuf_dirty(db, tx); 2040 bcopy(buf->b_data, db->db.db_data, db->db.db_size); 2041 arc_buf_destroy(buf, db); 2042 xuio_stat_wbuf_copied(); 2043 return; 2044 } 2045 2046 xuio_stat_wbuf_nocopy(); 2047 if (db->db_state == DB_CACHED) { 2048 dbuf_dirty_record_t *dr = db->db_last_dirty; 2049 2050 ASSERT(db->db_buf != NULL); 2051 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 2052 ASSERT(dr->dt.dl.dr_data == db->db_buf); 2053 if (!arc_released(db->db_buf)) { 2054 ASSERT(dr->dt.dl.dr_override_state == 2055 DR_OVERRIDDEN); 2056 arc_release(db->db_buf, db); 2057 } 2058 dr->dt.dl.dr_data = buf; 2059 arc_buf_destroy(db->db_buf, db); 2060 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 2061 arc_release(db->db_buf, db); 2062 arc_buf_destroy(db->db_buf, db); 2063 } 2064 db->db_buf = NULL; 2065 } 2066 ASSERT(db->db_buf == NULL); 2067 dbuf_set_data(db, buf); 2068 db->db_state = DB_FILL; 2069 mutex_exit(&db->db_mtx); 2070 (void) dbuf_dirty(db, tx); 2071 dmu_buf_fill_done(&db->db, tx); 2072 } 2073 2074 void 2075 dbuf_destroy(dmu_buf_impl_t *db) 2076 { 2077 dnode_t *dn; 2078 dmu_buf_impl_t *parent = db->db_parent; 2079 
dmu_buf_impl_t *dndb; 2080 2081 ASSERT(MUTEX_HELD(&db->db_mtx)); 2082 ASSERT(refcount_is_zero(&db->db_holds)); 2083 2084 if (db->db_buf != NULL) { 2085 arc_buf_destroy(db->db_buf, db); 2086 db->db_buf = NULL; 2087 } 2088 2089 if (db->db_blkid == DMU_BONUS_BLKID) { 2090 int slots = DB_DNODE(db)->dn_num_slots; 2091 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 2092 if (db->db.db_data != NULL) { 2093 zio_buf_free(db->db.db_data, bonuslen); 2094 arc_space_return(bonuslen, ARC_SPACE_BONUS); 2095 db->db_state = DB_UNCACHED; 2096 } 2097 } 2098 2099 dbuf_clear_data(db); 2100 2101 if (multilist_link_active(&db->db_cache_link)) { 2102 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2103 db->db_caching_status == DB_DBUF_METADATA_CACHE); 2104 2105 multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2106 (void) refcount_remove_many( 2107 &dbuf_caches[db->db_caching_status].size, 2108 db->db.db_size, db); 2109 2110 db->db_caching_status = DB_NO_CACHE; 2111 } 2112 2113 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2114 ASSERT(db->db_data_pending == NULL); 2115 2116 db->db_state = DB_EVICTING; 2117 db->db_blkptr = NULL; 2118 2119 /* 2120 * Now that db_state is DB_EVICTING, nobody else can find this via 2121 * the hash table. We can now drop db_mtx, which allows us to 2122 * acquire the dn_dbufs_mtx. 2123 */ 2124 mutex_exit(&db->db_mtx); 2125 2126 DB_DNODE_ENTER(db); 2127 dn = DB_DNODE(db); 2128 dndb = dn->dn_dbuf; 2129 if (db->db_blkid != DMU_BONUS_BLKID) { 2130 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2131 if (needlock) 2132 mutex_enter(&dn->dn_dbufs_mtx); 2133 avl_remove(&dn->dn_dbufs, db); 2134 atomic_dec_32(&dn->dn_dbufs_count); 2135 membar_producer(); 2136 DB_DNODE_EXIT(db); 2137 if (needlock) 2138 mutex_exit(&dn->dn_dbufs_mtx); 2139 /* 2140 * Decrementing the dbuf count means that the hold corresponding 2141 * to the removed dbuf is no longer discounted in dnode_move(), 2142 * so the dnode cannot be moved until after we release the hold. 2143 * The membar_producer() ensures visibility of the decremented 2144 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2145 * release any lock. 2146 */ 2147 mutex_enter(&dn->dn_mtx); 2148 dnode_rele_and_unlock(dn, db, B_TRUE); 2149 db->db_dnode_handle = NULL; 2150 2151 dbuf_hash_remove(db); 2152 } else { 2153 DB_DNODE_EXIT(db); 2154 } 2155 2156 ASSERT(refcount_is_zero(&db->db_holds)); 2157 2158 db->db_parent = NULL; 2159 2160 ASSERT(db->db_buf == NULL); 2161 ASSERT(db->db.db_data == NULL); 2162 ASSERT(db->db_hash_next == NULL); 2163 ASSERT(db->db_blkptr == NULL); 2164 ASSERT(db->db_data_pending == NULL); 2165 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 2166 ASSERT(!multilist_link_active(&db->db_cache_link)); 2167 2168 kmem_cache_free(dbuf_kmem_cache, db); 2169 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2170 2171 /* 2172 * If this dbuf is referenced from an indirect dbuf, 2173 * decrement the ref count on the indirect dbuf. 2174 */ 2175 if (parent && parent != dndb) { 2176 mutex_enter(&parent->db_mtx); 2177 dbuf_rele_and_unlock(parent, db, B_TRUE); 2178 } 2179 } 2180 2181 /* 2182 * Note: While bpp will always be updated if the function returns success, 2183 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2184 * this happens when the dnode is the meta-dnode, or a userused or groupused 2185 * object. 
2186 */ 2187 static int 2188 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2189 dmu_buf_impl_t **parentp, blkptr_t **bpp) 2190 { 2191 *parentp = NULL; 2192 *bpp = NULL; 2193 2194 ASSERT(blkid != DMU_BONUS_BLKID); 2195 2196 if (blkid == DMU_SPILL_BLKID) { 2197 mutex_enter(&dn->dn_mtx); 2198 if (dn->dn_have_spill && 2199 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 2200 *bpp = DN_SPILL_BLKPTR(dn->dn_phys); 2201 else 2202 *bpp = NULL; 2203 dbuf_add_ref(dn->dn_dbuf, NULL); 2204 *parentp = dn->dn_dbuf; 2205 mutex_exit(&dn->dn_mtx); 2206 return (0); 2207 } 2208 2209 int nlevels = 2210 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 2211 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2212 2213 ASSERT3U(level * epbs, <, 64); 2214 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2215 /* 2216 * This assertion shouldn't trip as long as the max indirect block size 2217 * is less than 1M. The reason for this is that up to that point, 2218 * the number of levels required to address an entire object with blocks 2219 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 2220 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 2221 * (i.e. we can address the entire object), objects will all use at most 2222 * N-1 levels and the assertion won't overflow. However, once epbs is 2223 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 2224 * enough to address an entire object, so objects will have 5 levels, 2225 * but then this assertion will overflow. 2226 * 2227 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 2228 * need to redo this logic to handle overflows. 2229 */ 2230 ASSERT(level >= nlevels || 2231 ((nlevels - level - 1) * epbs) + 2232 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2233 if (level >= nlevels || 2234 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 2235 ((nlevels - level - 1) * epbs)) || 2236 (fail_sparse && 2237 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2238 /* the buffer has no parent yet */ 2239 return (SET_ERROR(ENOENT)); 2240 } else if (level < nlevels-1) { 2241 /* this block is referenced from an indirect block */ 2242 int err = dbuf_hold_impl(dn, level+1, 2243 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2244 if (err) 2245 return (err); 2246 err = dbuf_read(*parentp, NULL, 2247 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2248 if (err) { 2249 dbuf_rele(*parentp, NULL); 2250 *parentp = NULL; 2251 return (err); 2252 } 2253 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2254 (blkid & ((1ULL << epbs) - 1)); 2255 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 2256 ASSERT(BP_IS_HOLE(*bpp)); 2257 return (0); 2258 } else { 2259 /* the block is referenced from the dnode */ 2260 ASSERT3U(level, ==, nlevels-1); 2261 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2262 blkid < dn->dn_phys->dn_nblkptr); 2263 if (dn->dn_dbuf) { 2264 dbuf_add_ref(dn->dn_dbuf, NULL); 2265 *parentp = dn->dn_dbuf; 2266 } 2267 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2268 return (0); 2269 } 2270 } 2271 2272 static dmu_buf_impl_t * 2273 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2274 dmu_buf_impl_t *parent, blkptr_t *blkptr) 2275 { 2276 objset_t *os = dn->dn_objset; 2277 dmu_buf_impl_t *db, *odb; 2278 2279 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2280 ASSERT(dn->dn_type != DMU_OT_NONE); 2281 2282 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2283 2284 db->db_objset = os; 2285 db->db.db_object = dn->dn_object; 2286 db->db_level = level; 2287 db->db_blkid = blkid; 
2288 db->db_last_dirty = NULL; 2289 db->db_dirtycnt = 0; 2290 db->db_dnode_handle = dn->dn_handle; 2291 db->db_parent = parent; 2292 db->db_blkptr = blkptr; 2293 2294 db->db_user = NULL; 2295 db->db_user_immediate_evict = FALSE; 2296 db->db_freed_in_flight = FALSE; 2297 db->db_pending_evict = FALSE; 2298 2299 if (blkid == DMU_BONUS_BLKID) { 2300 ASSERT3P(parent, ==, dn->dn_dbuf); 2301 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - 2302 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 2303 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 2304 db->db.db_offset = DMU_BONUS_BLKID; 2305 db->db_state = DB_UNCACHED; 2306 db->db_caching_status = DB_NO_CACHE; 2307 /* the bonus dbuf is not placed in the hash table */ 2308 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2309 return (db); 2310 } else if (blkid == DMU_SPILL_BLKID) { 2311 db->db.db_size = (blkptr != NULL) ? 2312 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 2313 db->db.db_offset = 0; 2314 } else { 2315 int blocksize = 2316 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2317 db->db.db_size = blocksize; 2318 db->db.db_offset = db->db_blkid * blocksize; 2319 } 2320 2321 /* 2322 * Hold the dn_dbufs_mtx while we insert the new dbuf 2323 * into the hash table *and* add it to the dn_dbufs list. 2324 * This prevents a possible deadlock with someone 2325 * trying to look up this dbuf before it's added to the 2326 * dn_dbufs list. 2327 */ 2328 mutex_enter(&dn->dn_dbufs_mtx); 2329 db->db_state = DB_EVICTING; 2330 if ((odb = dbuf_hash_insert(db)) != NULL) { 2331 /* someone else inserted it first */ 2332 kmem_cache_free(dbuf_kmem_cache, db); 2333 mutex_exit(&dn->dn_dbufs_mtx); 2334 return (odb); 2335 } 2336 avl_add(&dn->dn_dbufs, db); 2337 2338 db->db_state = DB_UNCACHED; 2339 db->db_caching_status = DB_NO_CACHE; 2340 mutex_exit(&dn->dn_dbufs_mtx); 2341 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2342 2343 if (parent && parent != dn->dn_dbuf) 2344 dbuf_add_ref(parent, db); 2345 2346 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2347 refcount_count(&dn->dn_holds) > 0); 2348 (void) refcount_add(&dn->dn_holds, db); 2349 atomic_inc_32(&dn->dn_dbufs_count); 2350 2351 dprintf_dbuf(db, "db=%p\n", db); 2352 2353 return (db); 2354 } 2355 2356 typedef struct dbuf_prefetch_arg { 2357 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2358 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2359 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2360 int dpa_curlevel; /* The current level that we're reading */ 2361 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2362 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2363 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2364 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2365 } dbuf_prefetch_arg_t; 2366 2367 /* 2368 * Actually issue the prefetch read for the block given.
2369 */ 2370 static void 2371 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2372 { 2373 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2374 return; 2375 2376 arc_flags_t aflags = 2377 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2378 2379 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2380 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2381 ASSERT(dpa->dpa_zio != NULL); 2382 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2383 dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2384 &aflags, &dpa->dpa_zb); 2385 } 2386 2387 /* 2388 * Called when an indirect block above our prefetch target is read in. This 2389 * will either read in the next indirect block down the tree or issue the actual 2390 * prefetch if the next block down is our target. 2391 */ 2392 static void 2393 dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) 2394 { 2395 dbuf_prefetch_arg_t *dpa = private; 2396 2397 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2398 ASSERT3S(dpa->dpa_curlevel, >, 0); 2399 2400 if (abuf == NULL) { 2401 ASSERT(zio == NULL || zio->io_error != 0); 2402 kmem_free(dpa, sizeof (*dpa)); 2403 return; 2404 } 2405 ASSERT(zio == NULL || zio->io_error == 0); 2406 2407 /* 2408 * The dpa_dnode is only valid if we are called with a NULL 2409 * zio. This indicates that the arc_read() returned without 2410 * first calling zio_read() to issue a physical read. Once 2411 * a physical read is made the dpa_dnode must be invalidated 2412 * as the locks guarding it may have been dropped. If the 2413 * dpa_dnode is still valid, then we want to add it to the dbuf 2414 * cache. To do so, we must hold the dbuf associated with the block 2415 * we just prefetched, read its contents so that we associate it 2416 * with an arc_buf_t, and then release it. 
2417 */ 2418 if (zio != NULL) { 2419 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2420 if (zio->io_flags & ZIO_FLAG_RAW) { 2421 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2422 } else { 2423 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2424 } 2425 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2426 2427 dpa->dpa_dnode = NULL; 2428 } else if (dpa->dpa_dnode != NULL) { 2429 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2430 (dpa->dpa_epbs * (dpa->dpa_curlevel - 2431 dpa->dpa_zb.zb_level)); 2432 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2433 dpa->dpa_curlevel, curblkid, FTAG); 2434 (void) dbuf_read(db, NULL, 2435 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2436 dbuf_rele(db, FTAG); 2437 } 2438 2439 dpa->dpa_curlevel--; 2440 2441 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2442 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2443 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2444 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2445 if (BP_IS_HOLE(bp)) { 2446 kmem_free(dpa, sizeof (*dpa)); 2447 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2448 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2449 dbuf_issue_final_prefetch(dpa, bp); 2450 kmem_free(dpa, sizeof (*dpa)); 2451 } else { 2452 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2453 zbookmark_phys_t zb; 2454 2455 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2456 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 2457 iter_aflags |= ARC_FLAG_L2CACHE; 2458 2459 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2460 2461 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2462 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2463 2464 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2465 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2466 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2467 &iter_aflags, &zb); 2468 } 2469 2470 arc_buf_destroy(abuf, private); 2471 } 2472 2473 /* 2474 * Issue prefetch reads for the given block on the given level. If the indirect 2475 * blocks above that block are not in memory, we will read them in 2476 * asynchronously. As a result, this call never blocks waiting for a read to 2477 * complete. 2478 */ 2479 void 2480 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2481 arc_flags_t aflags) 2482 { 2483 blkptr_t bp; 2484 int epbs, nlevels, curlevel; 2485 uint64_t curblkid; 2486 2487 ASSERT(blkid != DMU_BONUS_BLKID); 2488 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2489 2490 if (blkid > dn->dn_maxblkid) 2491 return; 2492 2493 if (dnode_block_freed(dn, blkid)) 2494 return; 2495 2496 /* 2497 * This dnode hasn't been written to disk yet, so there's nothing to 2498 * prefetch. 2499 */ 2500 nlevels = dn->dn_phys->dn_nlevels; 2501 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2502 return; 2503 2504 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2505 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2506 return; 2507 2508 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2509 level, blkid); 2510 if (db != NULL) { 2511 mutex_exit(&db->db_mtx); 2512 /* 2513 * This dbuf already exists. It is either CACHED, or 2514 * (we assume) about to be read or filled. 2515 */ 2516 return; 2517 } 2518 2519 /* 2520 * Find the closest ancestor (indirect block) of the target block 2521 * that is present in the cache. In this indirect block, we will 2522 * find the bp that is at curlevel, curblkid. 
2523 */ 2524 curlevel = level; 2525 curblkid = blkid; 2526 while (curlevel < nlevels - 1) { 2527 int parent_level = curlevel + 1; 2528 uint64_t parent_blkid = curblkid >> epbs; 2529 dmu_buf_impl_t *db; 2530 2531 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2532 FALSE, TRUE, FTAG, &db) == 0) { 2533 blkptr_t *bpp = db->db_buf->b_data; 2534 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2535 dbuf_rele(db, FTAG); 2536 break; 2537 } 2538 2539 curlevel = parent_level; 2540 curblkid = parent_blkid; 2541 } 2542 2543 if (curlevel == nlevels - 1) { 2544 /* No cached indirect blocks found. */ 2545 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2546 bp = dn->dn_phys->dn_blkptr[curblkid]; 2547 } 2548 if (BP_IS_HOLE(&bp)) 2549 return; 2550 2551 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2552 2553 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2554 ZIO_FLAG_CANFAIL); 2555 2556 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2557 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2558 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2559 dn->dn_object, level, blkid); 2560 dpa->dpa_curlevel = curlevel; 2561 dpa->dpa_prio = prio; 2562 dpa->dpa_aflags = aflags; 2563 dpa->dpa_spa = dn->dn_objset->os_spa; 2564 dpa->dpa_dnode = dn; 2565 dpa->dpa_epbs = epbs; 2566 dpa->dpa_zio = pio; 2567 2568 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2569 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 2570 dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 2571 2572 /* 2573 * If we have the indirect just above us, no need to do the asynchronous 2574 * prefetch chain; we'll just run the last step ourselves. If we're at 2575 * a higher level, though, we want to issue the prefetches for all the 2576 * indirect blocks asynchronously, so we can go on with whatever we were 2577 * doing. 2578 */ 2579 if (curlevel == level) { 2580 ASSERT3U(curblkid, ==, blkid); 2581 dbuf_issue_final_prefetch(dpa, &bp); 2582 kmem_free(dpa, sizeof (*dpa)); 2583 } else { 2584 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2585 zbookmark_phys_t zb; 2586 2587 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2588 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 2589 iter_aflags |= ARC_FLAG_L2CACHE; 2590 2591 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2592 dn->dn_object, curlevel, curblkid); 2593 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2594 &bp, dbuf_prefetch_indirect_done, dpa, prio, 2595 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2596 &iter_aflags, &zb); 2597 } 2598 /* 2599 * We use pio here instead of dpa_zio since it's possible that 2600 * dpa may have already been freed. 2601 */ 2602 zio_nowait(pio); 2603 } 2604 2605 /* 2606 * Returns with db_holds incremented, and db_mtx not held. 2607 * Note: dn_struct_rwlock must be held. 
2608 */ 2609 int 2610 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2611 boolean_t fail_sparse, boolean_t fail_uncached, 2612 void *tag, dmu_buf_impl_t **dbp) 2613 { 2614 dmu_buf_impl_t *db, *parent = NULL; 2615 2616 ASSERT(blkid != DMU_BONUS_BLKID); 2617 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2618 ASSERT3U(dn->dn_nlevels, >, level); 2619 2620 *dbp = NULL; 2621 top: 2622 /* dbuf_find() returns with db_mtx held */ 2623 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 2624 2625 if (db == NULL) { 2626 blkptr_t *bp = NULL; 2627 int err; 2628 2629 if (fail_uncached) 2630 return (SET_ERROR(ENOENT)); 2631 2632 ASSERT3P(parent, ==, NULL); 2633 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 2634 if (fail_sparse) { 2635 if (err == 0 && bp && BP_IS_HOLE(bp)) 2636 err = SET_ERROR(ENOENT); 2637 if (err) { 2638 if (parent) 2639 dbuf_rele(parent, NULL); 2640 return (err); 2641 } 2642 } 2643 if (err && err != ENOENT) 2644 return (err); 2645 db = dbuf_create(dn, level, blkid, parent, bp); 2646 } 2647 2648 if (fail_uncached && db->db_state != DB_CACHED) { 2649 mutex_exit(&db->db_mtx); 2650 return (SET_ERROR(ENOENT)); 2651 } 2652 2653 if (db->db_buf != NULL) 2654 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 2655 2656 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 2657 2658 /* 2659 * If this buffer is currently syncing out, and we are 2660 * still referencing it from db_data, we need to make a copy 2661 * of it in case we decide we want to dirty it again in this txg. 2662 */ 2663 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2664 dn->dn_object != DMU_META_DNODE_OBJECT && 2665 db->db_state == DB_CACHED && db->db_data_pending) { 2666 dbuf_dirty_record_t *dr = db->db_data_pending; 2667 2668 if (dr->dt.dl.dr_data == db->db_buf) { 2669 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2670 2671 dbuf_set_data(db, 2672 arc_alloc_buf(dn->dn_objset->os_spa, db, type, 2673 db->db.db_size)); 2674 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 2675 db->db.db_size); 2676 } 2677 } 2678 2679 if (multilist_link_active(&db->db_cache_link)) { 2680 ASSERT(refcount_is_zero(&db->db_holds)); 2681 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2682 db->db_caching_status == DB_DBUF_METADATA_CACHE); 2683 2684 multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2685 (void) refcount_remove_many( 2686 &dbuf_caches[db->db_caching_status].size, 2687 db->db.db_size, db); 2688 2689 db->db_caching_status = DB_NO_CACHE; 2690 } 2691 (void) refcount_add(&db->db_holds, tag); 2692 DBUF_VERIFY(db); 2693 mutex_exit(&db->db_mtx); 2694 2695 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2696 if (parent) 2697 dbuf_rele(parent, NULL); 2698 2699 ASSERT3P(DB_DNODE(db), ==, dn); 2700 ASSERT3U(db->db_blkid, ==, blkid); 2701 ASSERT3U(db->db_level, ==, level); 2702 *dbp = db; 2703 2704 return (0); 2705 } 2706 2707 dmu_buf_impl_t * 2708 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2709 { 2710 return (dbuf_hold_level(dn, 0, blkid, tag)); 2711 } 2712 2713 dmu_buf_impl_t * 2714 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2715 { 2716 dmu_buf_impl_t *db; 2717 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2718 return (err ?
NULL : db); 2719 } 2720 2721 void 2722 dbuf_create_bonus(dnode_t *dn) 2723 { 2724 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2725 2726 ASSERT(dn->dn_bonus == NULL); 2727 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 2728 } 2729 2730 int 2731 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 2732 { 2733 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2734 dnode_t *dn; 2735 2736 if (db->db_blkid != DMU_SPILL_BLKID) 2737 return (SET_ERROR(ENOTSUP)); 2738 if (blksz == 0) 2739 blksz = SPA_MINBLOCKSIZE; 2740 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 2741 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 2742 2743 DB_DNODE_ENTER(db); 2744 dn = DB_DNODE(db); 2745 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2746 dbuf_new_size(db, blksz, tx); 2747 rw_exit(&dn->dn_struct_rwlock); 2748 DB_DNODE_EXIT(db); 2749 2750 return (0); 2751 } 2752 2753 void 2754 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 2755 { 2756 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2757 } 2758 2759 #pragma weak dmu_buf_add_ref = dbuf_add_ref 2760 void 2761 dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2762 { 2763 int64_t holds = refcount_add(&db->db_holds, tag); 2764 ASSERT3S(holds, >, 1); 2765 } 2766 2767 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2768 boolean_t 2769 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2770 void *tag) 2771 { 2772 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2773 dmu_buf_impl_t *found_db; 2774 boolean_t result = B_FALSE; 2775 2776 if (db->db_blkid == DMU_BONUS_BLKID) 2777 found_db = dbuf_find_bonus(os, obj); 2778 else 2779 found_db = dbuf_find(os, obj, 0, blkid); 2780 2781 if (found_db != NULL) { 2782 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 2783 (void) refcount_add(&db->db_holds, tag); 2784 result = B_TRUE; 2785 } 2786 mutex_exit(&db->db_mtx); 2787 } 2788 return (result); 2789 } 2790 2791 /* 2792 * If you call dbuf_rele() you had better not be referencing the dnode handle 2793 * unless you have some other direct or indirect hold on the dnode. (An indirect 2794 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 2795 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 2796 * dnode's parent dbuf evicting its dnode handles. 2797 */ 2798 void 2799 dbuf_rele(dmu_buf_impl_t *db, void *tag) 2800 { 2801 mutex_enter(&db->db_mtx); 2802 dbuf_rele_and_unlock(db, tag, B_FALSE); 2803 } 2804 2805 void 2806 dmu_buf_rele(dmu_buf_t *db, void *tag) 2807 { 2808 dbuf_rele((dmu_buf_impl_t *)db, tag); 2809 } 2810 2811 /* 2812 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 2813 * db_dirtycnt and db_holds to be updated atomically. The 'evicting' 2814 * argument should be set if we are already in the dbuf-evicting code 2815 * path, in which case we don't want to recursively evict. This allows us to 2816 * avoid deeply nested stacks that would have a call flow similar to this: 2817 * 2818 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() 2819 * ^ | 2820 * | | 2821 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ 2822 * 2823 */ 2824 void 2825 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting) 2826 { 2827 int64_t holds; 2828 2829 ASSERT(MUTEX_HELD(&db->db_mtx)); 2830 DBUF_VERIFY(db); 2831 2832 /* 2833 * Remove the reference to the dbuf before removing its hold on the 2834 * dnode so we can guarantee in dnode_move() that a referenced bonus 2835 * buffer has a corresponding dnode hold. 
2836 */ 2837 holds = refcount_remove(&db->db_holds, tag); 2838 ASSERT(holds >= 0); 2839 2840 /* 2841 * We can't freeze indirects if there is a possibility that they 2842 * may be modified in the current syncing context. 2843 */ 2844 if (db->db_buf != NULL && 2845 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 2846 arc_buf_freeze(db->db_buf); 2847 } 2848 2849 if (holds == db->db_dirtycnt && 2850 db->db_level == 0 && db->db_user_immediate_evict) 2851 dbuf_evict_user(db); 2852 2853 if (holds == 0) { 2854 if (db->db_blkid == DMU_BONUS_BLKID) { 2855 dnode_t *dn; 2856 boolean_t evict_dbuf = db->db_pending_evict; 2857 2858 /* 2859 * If the dnode moves here, we cannot cross this 2860 * barrier until the move completes. 2861 */ 2862 DB_DNODE_ENTER(db); 2863 2864 dn = DB_DNODE(db); 2865 atomic_dec_32(&dn->dn_dbufs_count); 2866 2867 /* 2868 * Decrementing the dbuf count means that the bonus 2869 * buffer's dnode hold is no longer discounted in 2870 * dnode_move(). The dnode cannot move until after 2871 * the dnode_rele() below. 2872 */ 2873 DB_DNODE_EXIT(db); 2874 2875 /* 2876 * Do not reference db after its lock is dropped. 2877 * Another thread may evict it. 2878 */ 2879 mutex_exit(&db->db_mtx); 2880 2881 if (evict_dbuf) 2882 dnode_evict_bonus(dn); 2883 2884 dnode_rele(dn, db); 2885 } else if (db->db_buf == NULL) { 2886 /* 2887 * This is a special case: we never associated this 2888 * dbuf with any data allocated from the ARC. 2889 */ 2890 ASSERT(db->db_state == DB_UNCACHED || 2891 db->db_state == DB_NOFILL); 2892 dbuf_destroy(db); 2893 } else if (arc_released(db->db_buf)) { 2894 /* 2895 * This dbuf has anonymous data associated with it. 2896 */ 2897 dbuf_destroy(db); 2898 } else { 2899 boolean_t do_arc_evict = B_FALSE; 2900 blkptr_t bp; 2901 spa_t *spa = dmu_objset_spa(db->db_objset); 2902 2903 if (!DBUF_IS_CACHEABLE(db) && 2904 db->db_blkptr != NULL && 2905 !BP_IS_HOLE(db->db_blkptr) && 2906 !BP_IS_EMBEDDED(db->db_blkptr)) { 2907 do_arc_evict = B_TRUE; 2908 bp = *db->db_blkptr; 2909 } 2910 2911 if (!DBUF_IS_CACHEABLE(db) || 2912 db->db_pending_evict) { 2913 dbuf_destroy(db); 2914 } else if (!multilist_link_active(&db->db_cache_link)) { 2915 ASSERT3U(db->db_caching_status, ==, 2916 DB_NO_CACHE); 2917 2918 dbuf_cached_state_t dcs = 2919 dbuf_include_in_metadata_cache(db) ? 
2920 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; 2921 db->db_caching_status = dcs; 2922 2923 multilist_insert(dbuf_caches[dcs].cache, db); 2924 (void) refcount_add_many(&dbuf_caches[dcs].size, 2925 db->db.db_size, db); 2926 mutex_exit(&db->db_mtx); 2927 2928 if (db->db_caching_status == DB_DBUF_CACHE && 2929 !evicting) { 2930 dbuf_evict_notify(); 2931 } 2932 } 2933 2934 if (do_arc_evict) 2935 arc_freed(spa, &bp); 2936 } 2937 } else { 2938 mutex_exit(&db->db_mtx); 2939 } 2940 2941 } 2942 2943 #pragma weak dmu_buf_refcount = dbuf_refcount 2944 uint64_t 2945 dbuf_refcount(dmu_buf_impl_t *db) 2946 { 2947 return (refcount_count(&db->db_holds)); 2948 } 2949 2950 void * 2951 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 2952 dmu_buf_user_t *new_user) 2953 { 2954 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2955 2956 mutex_enter(&db->db_mtx); 2957 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2958 if (db->db_user == old_user) 2959 db->db_user = new_user; 2960 else 2961 old_user = db->db_user; 2962 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2963 mutex_exit(&db->db_mtx); 2964 2965 return (old_user); 2966 } 2967 2968 void * 2969 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2970 { 2971 return (dmu_buf_replace_user(db_fake, NULL, user)); 2972 } 2973 2974 void * 2975 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2976 { 2977 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2978 2979 db->db_user_immediate_evict = TRUE; 2980 return (dmu_buf_set_user(db_fake, user)); 2981 } 2982 2983 void * 2984 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2985 { 2986 return (dmu_buf_replace_user(db_fake, user, NULL)); 2987 } 2988 2989 void * 2990 dmu_buf_get_user(dmu_buf_t *db_fake) 2991 { 2992 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2993 2994 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2995 return (db->db_user); 2996 } 2997 2998 void 2999 dmu_buf_user_evict_wait() 3000 { 3001 taskq_wait(dbu_evict_taskq); 3002 } 3003 3004 blkptr_t * 3005 dmu_buf_get_blkptr(dmu_buf_t *db) 3006 { 3007 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3008 return (dbi->db_blkptr); 3009 } 3010 3011 objset_t * 3012 dmu_buf_get_objset(dmu_buf_t *db) 3013 { 3014 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3015 return (dbi->db_objset); 3016 } 3017 3018 dnode_t * 3019 dmu_buf_dnode_enter(dmu_buf_t *db) 3020 { 3021 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3022 DB_DNODE_ENTER(dbi); 3023 return (DB_DNODE(dbi)); 3024 } 3025 3026 void 3027 dmu_buf_dnode_exit(dmu_buf_t *db) 3028 { 3029 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3030 DB_DNODE_EXIT(dbi); 3031 } 3032 3033 static void 3034 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 3035 { 3036 /* ASSERT(dmu_tx_is_syncing(tx) */ 3037 ASSERT(MUTEX_HELD(&db->db_mtx)); 3038 3039 if (db->db_blkptr != NULL) 3040 return; 3041 3042 if (db->db_blkid == DMU_SPILL_BLKID) { 3043 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); 3044 BP_ZERO(db->db_blkptr); 3045 return; 3046 } 3047 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 3048 /* 3049 * This buffer was allocated at a time when there was 3050 * no available blkptrs from the dnode, or it was 3051 * inappropriate to hook it in (i.e., nlevels mis-match). 
3052 */ 3053 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 3054 ASSERT(db->db_parent == NULL); 3055 db->db_parent = dn->dn_dbuf; 3056 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 3057 DBUF_VERIFY(db); 3058 } else { 3059 dmu_buf_impl_t *parent = db->db_parent; 3060 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3061 3062 ASSERT(dn->dn_phys->dn_nlevels > 1); 3063 if (parent == NULL) { 3064 mutex_exit(&db->db_mtx); 3065 rw_enter(&dn->dn_struct_rwlock, RW_READER); 3066 parent = dbuf_hold_level(dn, db->db_level + 1, 3067 db->db_blkid >> epbs, db); 3068 rw_exit(&dn->dn_struct_rwlock); 3069 mutex_enter(&db->db_mtx); 3070 db->db_parent = parent; 3071 } 3072 db->db_blkptr = (blkptr_t *)parent->db.db_data + 3073 (db->db_blkid & ((1ULL << epbs) - 1)); 3074 DBUF_VERIFY(db); 3075 } 3076 } 3077 3078 static void 3079 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3080 { 3081 dmu_buf_impl_t *db = dr->dr_dbuf; 3082 dnode_t *dn; 3083 zio_t *zio; 3084 3085 ASSERT(dmu_tx_is_syncing(tx)); 3086 3087 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3088 3089 mutex_enter(&db->db_mtx); 3090 3091 ASSERT(db->db_level > 0); 3092 DBUF_VERIFY(db); 3093 3094 /* Read the block if it hasn't been read yet. */ 3095 if (db->db_buf == NULL) { 3096 mutex_exit(&db->db_mtx); 3097 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 3098 mutex_enter(&db->db_mtx); 3099 } 3100 ASSERT3U(db->db_state, ==, DB_CACHED); 3101 ASSERT(db->db_buf != NULL); 3102 3103 DB_DNODE_ENTER(db); 3104 dn = DB_DNODE(db); 3105 /* Indirect block size must match what the dnode thinks it is. */ 3106 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3107 dbuf_check_blkptr(dn, db); 3108 DB_DNODE_EXIT(db); 3109 3110 /* Provide the pending dirty record to child dbufs */ 3111 db->db_data_pending = dr; 3112 3113 mutex_exit(&db->db_mtx); 3114 3115 dbuf_write(dr, db->db_buf, tx); 3116 3117 zio = dr->dr_zio; 3118 mutex_enter(&dr->dt.di.dr_mtx); 3119 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3120 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3121 mutex_exit(&dr->dt.di.dr_mtx); 3122 zio_nowait(zio); 3123 } 3124 3125 static void 3126 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3127 { 3128 arc_buf_t **datap = &dr->dt.dl.dr_data; 3129 dmu_buf_impl_t *db = dr->dr_dbuf; 3130 dnode_t *dn; 3131 objset_t *os; 3132 uint64_t txg = tx->tx_txg; 3133 3134 ASSERT(dmu_tx_is_syncing(tx)); 3135 3136 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3137 3138 mutex_enter(&db->db_mtx); 3139 /* 3140 * To be synced, we must be dirtied. But we 3141 * might have been freed after the dirty. 3142 */ 3143 if (db->db_state == DB_UNCACHED) { 3144 /* This buffer has been freed since it was dirtied */ 3145 ASSERT(db->db.db_data == NULL); 3146 } else if (db->db_state == DB_FILL) { 3147 /* This buffer was freed and is now being re-filled */ 3148 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3149 } else { 3150 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3151 } 3152 DBUF_VERIFY(db); 3153 3154 DB_DNODE_ENTER(db); 3155 dn = DB_DNODE(db); 3156 3157 if (db->db_blkid == DMU_SPILL_BLKID) { 3158 mutex_enter(&dn->dn_mtx); 3159 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 3160 mutex_exit(&dn->dn_mtx); 3161 } 3162 3163 /* 3164 * If this is a bonus buffer, simply copy the bonus data into the 3165 * dnode. It will be written out when the dnode is synced (and it 3166 * will be synced, since it must have been dirty for dbuf_sync to 3167 * be called). 
3168 */ 3169 if (db->db_blkid == DMU_BONUS_BLKID) { 3170 dbuf_dirty_record_t **drp; 3171 3172 ASSERT(*datap != NULL); 3173 ASSERT0(db->db_level); 3174 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=, 3175 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); 3176 bcopy(*datap, DN_BONUS(dn->dn_phys), 3177 DN_MAX_BONUS_LEN(dn->dn_phys)); 3178 DB_DNODE_EXIT(db); 3179 3180 if (*datap != db->db.db_data) { 3181 int slots = DB_DNODE(db)->dn_num_slots; 3182 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 3183 zio_buf_free(*datap, bonuslen); 3184 arc_space_return(bonuslen, ARC_SPACE_BONUS); 3185 } 3186 db->db_data_pending = NULL; 3187 drp = &db->db_last_dirty; 3188 while (*drp != dr) 3189 drp = &(*drp)->dr_next; 3190 ASSERT(dr->dr_next == NULL); 3191 ASSERT(dr->dr_dbuf == db); 3192 *drp = dr->dr_next; 3193 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3194 ASSERT(db->db_dirtycnt > 0); 3195 db->db_dirtycnt -= 1; 3196 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE); 3197 return; 3198 } 3199 3200 os = dn->dn_objset; 3201 3202 /* 3203 * This function may have dropped the db_mtx lock allowing a dmu_sync 3204 * operation to sneak in. As a result, we need to ensure that we 3205 * don't check the dr_override_state until we have returned from 3206 * dbuf_check_blkptr. 3207 */ 3208 dbuf_check_blkptr(dn, db); 3209 3210 /* 3211 * If this buffer is in the middle of an immediate write, 3212 * wait for the synchronous IO to complete. 3213 */ 3214 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 3215 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 3216 cv_wait(&db->db_changed, &db->db_mtx); 3217 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 3218 } 3219 3220 if (db->db_state != DB_NOFILL && 3221 dn->dn_object != DMU_META_DNODE_OBJECT && 3222 refcount_count(&db->db_holds) > 1 && 3223 dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 3224 *datap == db->db_buf) { 3225 /* 3226 * If this buffer is currently "in use" (i.e., there 3227 * are active holds and db_data still references it), 3228 * then make a copy before we start the write so that 3229 * any modifications from the open txg will not leak 3230 * into this write. 3231 * 3232 * NOTE: this copy does not need to be made for 3233 * objects only modified in the syncing context (e.g. 3234 * DNONE_DNODE blocks). 3235 */ 3236 int psize = arc_buf_size(*datap); 3237 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 3238 enum zio_compress compress_type = arc_get_compression(*datap); 3239 3240 if (compress_type == ZIO_COMPRESS_OFF) { 3241 *datap = arc_alloc_buf(os->os_spa, db, type, psize); 3242 } else { 3243 ASSERT3U(type, ==, ARC_BUFC_DATA); 3244 int lsize = arc_buf_lsize(*datap); 3245 *datap = arc_alloc_compressed_buf(os->os_spa, db, 3246 psize, lsize, compress_type); 3247 } 3248 bcopy(db->db.db_data, (*datap)->b_data, psize); 3249 } 3250 db->db_data_pending = dr; 3251 3252 mutex_exit(&db->db_mtx); 3253 3254 dbuf_write(dr, *datap, tx); 3255 3256 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3257 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 3258 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 3259 DB_DNODE_EXIT(db); 3260 } else { 3261 /* 3262 * Although zio_nowait() does not "wait for an IO", it does 3263 * initiate the IO. If this is an empty write it seems plausible 3264 * that the IO could actually be completed before the nowait 3265 * returns. We need to DB_DNODE_EXIT() first in case 3266 * zio_nowait() invalidates the dbuf. 
3267 */ 3268 DB_DNODE_EXIT(db); 3269 zio_nowait(dr->dr_zio); 3270 } 3271 } 3272 3273 void 3274 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3275 { 3276 dbuf_dirty_record_t *dr; 3277 3278 while (dr = list_head(list)) { 3279 if (dr->dr_zio != NULL) { 3280 /* 3281 * If we find an already initialized zio then we 3282 * are processing the meta-dnode, and we have finished. 3283 * The dbufs for all dnodes are put back on the list 3284 * during processing, so that we can zio_wait() 3285 * these IOs after initiating all child IOs. 3286 */ 3287 ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3288 DMU_META_DNODE_OBJECT); 3289 break; 3290 } 3291 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 3292 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 3293 VERIFY3U(dr->dr_dbuf->db_level, ==, level); 3294 } 3295 list_remove(list, dr); 3296 if (dr->dr_dbuf->db_level > 0) 3297 dbuf_sync_indirect(dr, tx); 3298 else 3299 dbuf_sync_leaf(dr, tx); 3300 } 3301 } 3302 3303 /* ARGSUSED */ 3304 static void 3305 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3306 { 3307 dmu_buf_impl_t *db = vdb; 3308 dnode_t *dn; 3309 blkptr_t *bp = zio->io_bp; 3310 blkptr_t *bp_orig = &zio->io_bp_orig; 3311 spa_t *spa = zio->io_spa; 3312 int64_t delta; 3313 uint64_t fill = 0; 3314 int i; 3315 3316 ASSERT3P(db->db_blkptr, !=, NULL); 3317 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3318 3319 DB_DNODE_ENTER(db); 3320 dn = DB_DNODE(db); 3321 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3322 dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3323 zio->io_prev_space_delta = delta; 3324 3325 if (bp->blk_birth != 0) { 3326 ASSERT((db->db_blkid != DMU_SPILL_BLKID && 3327 BP_GET_TYPE(bp) == dn->dn_type) || 3328 (db->db_blkid == DMU_SPILL_BLKID && 3329 BP_GET_TYPE(bp) == dn->dn_bonustype) || 3330 BP_IS_EMBEDDED(bp)); 3331 ASSERT(BP_GET_LEVEL(bp) == db->db_level); 3332 } 3333 3334 mutex_enter(&db->db_mtx); 3335 3336 #ifdef ZFS_DEBUG 3337 if (db->db_blkid == DMU_SPILL_BLKID) { 3338 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3339 ASSERT(!(BP_IS_HOLE(bp)) && 3340 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 3341 } 3342 #endif 3343 3344 if (db->db_level == 0) { 3345 mutex_enter(&dn->dn_mtx); 3346 if (db->db_blkid > dn->dn_phys->dn_maxblkid && 3347 db->db_blkid != DMU_SPILL_BLKID) 3348 dn->dn_phys->dn_maxblkid = db->db_blkid; 3349 mutex_exit(&dn->dn_mtx); 3350 3351 if (dn->dn_type == DMU_OT_DNODE) { 3352 i = 0; 3353 while (i < db->db.db_size) { 3354 dnode_phys_t *dnp = 3355 (void *)(((char *)db->db.db_data) + i); 3356 3357 i += DNODE_MIN_SIZE; 3358 if (dnp->dn_type != DMU_OT_NONE) { 3359 fill++; 3360 i += dnp->dn_extra_slots * 3361 DNODE_MIN_SIZE; 3362 } 3363 } 3364 } else { 3365 if (BP_IS_HOLE(bp)) { 3366 fill = 0; 3367 } else { 3368 fill = 1; 3369 } 3370 } 3371 } else { 3372 blkptr_t *ibp = db->db.db_data; 3373 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3374 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 3375 if (BP_IS_HOLE(ibp)) 3376 continue; 3377 fill += BP_GET_FILL(ibp); 3378 } 3379 } 3380 DB_DNODE_EXIT(db); 3381 3382 if (!BP_IS_EMBEDDED(bp)) 3383 bp->blk_fill = fill; 3384 3385 mutex_exit(&db->db_mtx); 3386 3387 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3388 *db->db_blkptr = *bp; 3389 rw_exit(&dn->dn_struct_rwlock); 3390 } 3391 3392 /* ARGSUSED */ 3393 /* 3394 * This function gets called just prior to running through the compression 3395 * stage of the zio pipeline. 
If we're an indirect block comprised of only 3396 * holes, then we want this indirect to be compressed away to a hole. In 3397 * order to do that we must zero out any information about the holes that 3398 * this indirect points to before we try to compress it. 3399 */ 3400 static void 3401 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3402 { 3403 dmu_buf_impl_t *db = vdb; 3404 dnode_t *dn; 3405 blkptr_t *bp; 3406 unsigned int epbs, i; 3407 3408 ASSERT3U(db->db_level, >, 0); 3409 DB_DNODE_ENTER(db); 3410 dn = DB_DNODE(db); 3411 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3412 ASSERT3U(epbs, <, 31); 3413 3414 /* Determine if all our children are holes */ 3415 for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) { 3416 if (!BP_IS_HOLE(bp)) 3417 break; 3418 } 3419 3420 /* 3421 * If all the children are holes, then zero them all out so that 3422 * we may get compressed away. 3423 */ 3424 if (i == 1 << epbs) { 3425 /* 3426 * We only found holes. Grab the rwlock to prevent 3427 * anybody from reading the blocks we're about to 3428 * zero out. 3429 */ 3430 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3431 bzero(db->db.db_data, db->db.db_size); 3432 rw_exit(&dn->dn_struct_rwlock); 3433 } 3434 DB_DNODE_EXIT(db); 3435 } 3436 3437 /* 3438 * The SPA will call this callback several times for each zio - once 3439 * for every physical child i/o (zio->io_phys_children times). This 3440 * allows the DMU to monitor the progress of each logical i/o. For example, 3441 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 3442 * block. There may be a long delay before all copies/fragments are completed, 3443 * so this callback allows us to retire dirty space gradually, as the physical 3444 * i/os complete. 3445 */ 3446 /* ARGSUSED */ 3447 static void 3448 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 3449 { 3450 dmu_buf_impl_t *db = arg; 3451 objset_t *os = db->db_objset; 3452 dsl_pool_t *dp = dmu_objset_pool(os); 3453 dbuf_dirty_record_t *dr; 3454 int delta = 0; 3455 3456 dr = db->db_data_pending; 3457 ASSERT3U(dr->dr_txg, ==, zio->io_txg); 3458 3459 /* 3460 * The callback will be called io_phys_children times. Retire one 3461 * portion of our dirty space each time we are called. Any rounding 3462 * error will be cleaned up by dsl_pool_sync()'s call to 3463 * dsl_pool_undirty_space(). 3464 */ 3465 delta = dr->dr_accounted / zio->io_phys_children; 3466 dsl_pool_undirty_space(dp, delta, zio->io_txg); 3467 } 3468 3469 /* ARGSUSED */ 3470 static void 3471 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 3472 { 3473 dmu_buf_impl_t *db = vdb; 3474 blkptr_t *bp_orig = &zio->io_bp_orig; 3475 blkptr_t *bp = db->db_blkptr; 3476 objset_t *os = db->db_objset; 3477 dmu_tx_t *tx = os->os_synctx; 3478 dbuf_dirty_record_t **drp, *dr; 3479 3480 ASSERT0(zio->io_error); 3481 ASSERT(db->db_blkptr == bp); 3482 3483 /* 3484 * For nopwrites and rewrites we ensure that the bp matches our 3485 * original and bypass all the accounting.
3486 */ 3487 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 3488 ASSERT(BP_EQUAL(bp, bp_orig)); 3489 } else { 3490 dsl_dataset_t *ds = os->os_dsl_dataset; 3491 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3492 dsl_dataset_block_born(ds, bp, tx); 3493 } 3494 3495 mutex_enter(&db->db_mtx); 3496 3497 DBUF_VERIFY(db); 3498 3499 drp = &db->db_last_dirty; 3500 while ((dr = *drp) != db->db_data_pending) 3501 drp = &dr->dr_next; 3502 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3503 ASSERT(dr->dr_dbuf == db); 3504 ASSERT(dr->dr_next == NULL); 3505 *drp = dr->dr_next; 3506 3507 #ifdef ZFS_DEBUG 3508 if (db->db_blkid == DMU_SPILL_BLKID) { 3509 dnode_t *dn; 3510 3511 DB_DNODE_ENTER(db); 3512 dn = DB_DNODE(db); 3513 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3514 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 3515 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 3516 DB_DNODE_EXIT(db); 3517 } 3518 #endif 3519 3520 if (db->db_level == 0) { 3521 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3522 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 3523 if (db->db_state != DB_NOFILL) { 3524 if (dr->dt.dl.dr_data != db->db_buf) 3525 arc_buf_destroy(dr->dt.dl.dr_data, db); 3526 } 3527 } else { 3528 dnode_t *dn; 3529 3530 DB_DNODE_ENTER(db); 3531 dn = DB_DNODE(db); 3532 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3533 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3534 if (!BP_IS_HOLE(db->db_blkptr)) { 3535 int epbs = 3536 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3537 ASSERT3U(db->db_blkid, <=, 3538 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3539 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3540 db->db.db_size); 3541 } 3542 DB_DNODE_EXIT(db); 3543 mutex_destroy(&dr->dt.di.dr_mtx); 3544 list_destroy(&dr->dt.di.dr_children); 3545 } 3546 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3547 3548 cv_broadcast(&db->db_changed); 3549 ASSERT(db->db_dirtycnt > 0); 3550 db->db_dirtycnt -= 1; 3551 db->db_data_pending = NULL; 3552 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 3553 } 3554 3555 static void 3556 dbuf_write_nofill_ready(zio_t *zio) 3557 { 3558 dbuf_write_ready(zio, NULL, zio->io_private); 3559 } 3560 3561 static void 3562 dbuf_write_nofill_done(zio_t *zio) 3563 { 3564 dbuf_write_done(zio, NULL, zio->io_private); 3565 } 3566 3567 static void 3568 dbuf_write_override_ready(zio_t *zio) 3569 { 3570 dbuf_dirty_record_t *dr = zio->io_private; 3571 dmu_buf_impl_t *db = dr->dr_dbuf; 3572 3573 dbuf_write_ready(zio, NULL, db); 3574 } 3575 3576 static void 3577 dbuf_write_override_done(zio_t *zio) 3578 { 3579 dbuf_dirty_record_t *dr = zio->io_private; 3580 dmu_buf_impl_t *db = dr->dr_dbuf; 3581 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3582 3583 mutex_enter(&db->db_mtx); 3584 if (!BP_EQUAL(zio->io_bp, obp)) { 3585 if (!BP_IS_HOLE(obp)) 3586 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3587 arc_release(dr->dt.dl.dr_data, db); 3588 } 3589 mutex_exit(&db->db_mtx); 3590 dbuf_write_done(zio, NULL, db); 3591 3592 if (zio->io_abd != NULL) 3593 abd_put(zio->io_abd); 3594 } 3595 3596 typedef struct dbuf_remap_impl_callback_arg { 3597 objset_t *drica_os; 3598 uint64_t drica_blk_birth; 3599 dmu_tx_t *drica_tx; 3600 } dbuf_remap_impl_callback_arg_t; 3601 3602 static void 3603 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 3604 void *arg) 3605 { 3606 dbuf_remap_impl_callback_arg_t *drica = arg; 3607 objset_t *os = drica->drica_os; 3608 spa_t *spa = dmu_objset_spa(os); 3609 dmu_tx_t *tx = drica->drica_tx; 
3610 3611 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3612 3613 if (os == spa_meta_objset(spa)) { 3614 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 3615 } else { 3616 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 3617 size, drica->drica_blk_birth, tx); 3618 } 3619 } 3620 3621 static void 3622 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx) 3623 { 3624 blkptr_t bp_copy = *bp; 3625 spa_t *spa = dmu_objset_spa(dn->dn_objset); 3626 dbuf_remap_impl_callback_arg_t drica; 3627 3628 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3629 3630 drica.drica_os = dn->dn_objset; 3631 drica.drica_blk_birth = bp->blk_birth; 3632 drica.drica_tx = tx; 3633 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 3634 &drica)) { 3635 /* 3636 * The struct_rwlock prevents dbuf_read_impl() from 3637 * dereferencing the BP while we are changing it. To 3638 * avoid lock contention, only grab it when we are actually 3639 * changing the BP. 3640 */ 3641 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3642 *bp = bp_copy; 3643 rw_exit(&dn->dn_struct_rwlock); 3644 } 3645 } 3646 3647 /* 3648 * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting 3649 * to remap a copy of every bp in the dbuf. 3650 */ 3651 boolean_t 3652 dbuf_can_remap(const dmu_buf_impl_t *db) 3653 { 3654 spa_t *spa = dmu_objset_spa(db->db_objset); 3655 blkptr_t *bp = db->db.db_data; 3656 boolean_t ret = B_FALSE; 3657 3658 ASSERT3U(db->db_level, >, 0); 3659 ASSERT3S(db->db_state, ==, DB_CACHED); 3660 3661 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 3662 3663 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3664 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 3665 blkptr_t bp_copy = bp[i]; 3666 if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 3667 ret = B_TRUE; 3668 break; 3669 } 3670 } 3671 spa_config_exit(spa, SCL_VDEV, FTAG); 3672 3673 return (ret); 3674 } 3675 3676 boolean_t 3677 dnode_needs_remap(const dnode_t *dn) 3678 { 3679 spa_t *spa = dmu_objset_spa(dn->dn_objset); 3680 boolean_t ret = B_FALSE; 3681 3682 if (dn->dn_phys->dn_nlevels == 0) { 3683 return (B_FALSE); 3684 } 3685 3686 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 3687 3688 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3689 for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) { 3690 blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j]; 3691 if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 3692 ret = B_TRUE; 3693 break; 3694 } 3695 } 3696 spa_config_exit(spa, SCL_VDEV, FTAG); 3697 3698 return (ret); 3699 } 3700 3701 /* 3702 * Remap any existing BP's to concrete vdevs, if possible. 3703 */ 3704 static void 3705 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 3706 { 3707 spa_t *spa = dmu_objset_spa(db->db_objset); 3708 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3709 3710 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 3711 return; 3712 3713 if (db->db_level > 0) { 3714 blkptr_t *bp = db->db.db_data; 3715 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 3716 dbuf_remap_impl(dn, &bp[i], tx); 3717 } 3718 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 3719 dnode_phys_t *dnp = db->db.db_data; 3720 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 3721 DMU_OT_DNODE); 3722 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) { 3723 for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 3724 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx); 3725 } 3726 } 3727 } 3728 } 3729 3730 3731 /* Issue I/O to commit a dirty buffer to disk. 

/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	ASSERT(dmu_tx_is_syncing(tx));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
			dbuf_remap(dn, db, tx);
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * We copy the blkptr now (rather than when we instantiate the dirty
	 * record), because its value can change between open context and
	 * syncing context. We do not need to hold dn_struct_rwlock to read
	 * db_blkptr because we are in syncing context.
	 */
	dr->dr_bp_copy = *db->db_blkptr;

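	/*
	 * Three cases follow: the block pointer was already provided by
	 * open context (DR_OVERRIDDEN, e.g. by dmu_sync() or
	 * dmu_buf_write_embedded()), the buffer is DB_NOFILL and carries no
	 * data, or this is an ordinary write issued through arc_write().
	 */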
	if (db->db_level == 0 &&
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * The BP for this block has been provided by open context
		 * (by dmu_sync() or dmu_buf_write_embedded()).
		 */
		abd_t *contents = (data != NULL) ?
		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;

		dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy,
		    contents, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_override_ready, NULL, NULL,
		    dbuf_write_override_done,
		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, NULL, NULL,
		    dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));

		/*
		 * For indirect blocks, we want to set up the children
		 * ready callback so that we can properly handle an indirect
		 * block that only contains holes.
		 */
		arc_done_func_t *children_ready_cb = NULL;
		if (db->db_level != 0)
			children_ready_cb = dbuf_write_children_ready;

		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
		    &zp, dbuf_write_ready, children_ready_cb,
		    dbuf_write_physdone, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}
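
/*
 * Summary of the callbacks wired up by dbuf_write() above (descriptive only,
 * derived from the three branches of that function):
 *
 *	overridden BP (dmu_sync()/embedded):
 *		zio_write(..., dbuf_write_override_ready, NULL, NULL,
 *		    dbuf_write_override_done, dr, ...)
 *	DB_NOFILL:
 *		zio_write(..., dbuf_write_nofill_ready, NULL, NULL,
 *		    dbuf_write_nofill_done, db, ...)
 *	normal write:
 *		arc_write(..., dbuf_write_ready, children_ready_cb,
 *		    dbuf_write_physdone, dbuf_write_done, db, ...)
 *
 * The override and nofill wrappers simply forward to dbuf_write_ready() and
 * dbuf_write_done() with a NULL buffer argument.
 */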