/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <sys/cityhash.h>

uint_t zfs_dbuf_evict_key;

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);
#endif /* ! __lint */

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 * are not currently held but have been recently released. These dbufs
 * are not eligible for arc eviction until they are aged out of the cache.
 * Dbufs are added to the dbuf cache once the last hold is released. If a
 * dbuf is later accessed and still exists in the dbuf cache, then it will
 * be removed from the cache and later re-added to the head of the cache.
 * Dbufs that are aged out of the cache will be immediately destroyed and
 * become eligible for arc eviction.
 */
static multilist_t *dbuf_cache;
static refcount_t dbuf_cache_size;
uint64_t dbuf_cache_max_bytes = 100 * 1024 * 1024;

/* Cap the size of the dbuf cache to log2 fraction of arc size. */
int dbuf_cache_max_shift = 5;

/*
 * The dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;
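/*
 * Worked example with the defaults above: a dbuf_cache_max_bytes of
 * 100MB with both percentages at 10 gives a low water mark of
 * 100MB - 10MB = 90MB and a high water mark of 100MB + 10MB = 110MB,
 * so direct eviction by callers only begins once the cache exceeds
 * 110MB. (Note that dbuf_init() below may lower the maximum to a
 * fraction of the ARC size.)
 */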
/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}
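/*
 * For example (illustrative): if the multilist has 64 sublists, the
 * modulo above reduces to taking the low 6 bits of the hash, so a hash
 * function with poorly distributed low-order bits would leave some
 * sublists much longer than others.
 */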
static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	uint64_t dbuf_cache_hiwater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	uint64_t dbuf_cache_lowater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_cache);
	multilist_sublist_t *mls = multilist_sublist_lock(dbuf_cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	/*
	 * Set the thread's tsd to indicate that it's processing evictions.
	 * Once a thread stops evicting from the dbuf cache it will
	 * reset its tsd to NULL.
	 */
	ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
	(void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) refcount_remove_many(&dbuf_cache_size,
		    db->db.db_size, db);
		dbuf_destroy(db);
	} else {
		multilist_sublist_unlock(mls);
	}
	(void) tsd_set(zfs_dbuf_evict_key, NULL);
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{

	/*
	 * We use thread specific data to track when a thread has
	 * started processing evictions. This allows us to avoid deeply
	 * nested stacks that would have a call flow similar to this:
	 *
	 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
	 *	^						|
	 *	|						|
	 *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
	 *
	 * The dbuf_eviction_thread will always have its tsd set until
	 * that thread exits. All other threads will only set their tsd
	 * if they are participating in the eviction process. This only
	 * happens if the eviction thread is unable to process evictions
	 * fast enough. To keep the dbuf cache size in check, other threads
	 * can evict from the dbuf cache directly. Those threads will set
	 * their tsd values so that we ensure that they only evict one dbuf
	 * from the dbuf cache.
	 */
	if (tsd_get(zfs_dbuf_evict_key) != NULL)
		return;

	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (refcount_count(&dbuf_cache_size) > dbuf_cache_max_bytes) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size. The table will take up
	 * totalmem * sizeof (void *) / 4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;
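	/*
	 * Worked example (hypothetical machine): with 16GB of physical
	 * memory, the loop above stops when hsize * 4096 >= 16GB, i.e.
	 * at hsize = 2^22 = 4M buckets, which consumes 4M * 8 bytes =
	 * 32MB -- the "2MB per GB" figure from the comment above.
	 */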
retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Set up the parameters for the dbuf cache. We cap the size of the
	 * dbuf cache to 1/32nd (default) of the size of the ARC.
	 */
	dbuf_cache_max_bytes = MIN(dbuf_cache_max_bytes,
	    arc_max_bytes() >> dbuf_cache_max_shift);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);

	dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_cache_link),
	    dbuf_cache_multilist_index_func);
	refcount_create(&dbuf_cache_size);

	tsd_create(&zfs_dbuf_evict_key, NULL);
	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);
	tsd_destroy(&zfs_dbuf_evict_key);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	refcount_destroy(&dbuf_cache_size);
	multilist_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 */
		return (offset >> (dn->dn_datablkshift + level *
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
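/*
 * Worked example (hypothetical values): for a 128K data block size
 * (datablkshift = 17), 128K indirect blocks (indblkshift = 17), and
 * SPA_BLKPTRSHIFT = 7, each indirect block holds 2^(17 - 7) = 1024
 * block pointers.  The level 1 blkid covering offset 1GB is then
 * (1 << 30) >> (17 + 1 * 10) = (1 << 30) >> 27 = 8.
 */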
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		arc_buf_destroy(buf, db);
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	arc_flags_t aflags = ARC_FLAG_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
		    db->db.db_size));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			blkptr_t *bps = db->db.db_data;
			for (int i = 0; i < ((1 <<
			    DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
			    i++) {
				blkptr_t *bp = &bps[i];
				ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
				    1 << dn->dn_indblkshift);
				BP_SET_LSIZE(bp,
				    BP_GET_LEVEL(db->db_blkptr) == 1 ?
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
}
/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
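	/*
	 * At most three txgs can have dirty data in flight at once (the
	 * open, quiescing, and syncing txgs), which is why the unsynced
	 * dirty record checked below can be no older than txg - 2 (and
	 * why db_dirtycnt is asserted to be <= 3 in dbuf_dirty()).
	 */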
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);

		if (compress_type == ZIO_COMPRESS_OFF) {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		} else {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type);
		}
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		/*
		 * If the arc buf is compressed, we need to decompress it to
		 * read the data. This could happen during the "zfs receive" of
		 * a stream which is compressed and deduplicated.
		 */
		if (db->db_buf != NULL &&
		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) {
			dbuf_fix_old_data(db,
			    spa_syncing_txg(dmu_objset_spa(db->db_objset)));
			err = arc_decompress(db->db_buf);
			dbuf_set_data(db, db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;
		boolean_t need_wait = B_FALSE;

		if (zio == NULL &&
		    db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
			need_wait = B_TRUE;
		}
		dbuf_read_impl(db, zio, flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (need_wait)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED. Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED. Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	return (err);
}

static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_clear_data(db);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	/*
	 * This assert is valid because dmu_sync() expects to be called by
	 * a zilog's get_data while holding a range lock. This call only
	 * comes from dbuf_dirty() callers who must also hold a range lock.
	 */
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state. Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release(). Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t db_search;
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	avl_index_t where;

	if (end_blkid > dn->dn_maxblkid &&
	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
		end_blkid = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);

	db_search.db_level = 0;
	db_search.db_blkid = start_blkid;
	db_search.db_state = DB_SEARCH;

	mutex_enter(&dn->dn_dbufs_mtx);
	db = avl_find(&dn->dn_dbufs, &db_search, &where);
	ASSERT3P(db, ==, NULL);

	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);

	for (; db != NULL; db = db_next) {
		db_next = AVL_NEXT(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0 || db->db_blkid > end_blkid) {
			break;
		}
		ASSERT3U(db->db_blkid, >=, start_blkid);

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_destroy(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	arc_buf_destroy(obuf, db);
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os = db->db_objset;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

/*
 * We already have a dirty record for this TXG, and we are being
 * dirtied again.
 */
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this buffer has already been written out,
		 * we now need to reset its state.
		 */
		dbuf_unoverride(dr);
		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
		    db->db_state != DB_NOFILL) {
			/* Already released on initial dirty, so just thaw. */
			ASSERT(arc_released(db->db_buf));
			arc_buf_thaw(db->db_buf);
		}
	}
}

dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context. Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
#ifdef DEBUG
	if (dn->dn_objset->os_dsl_dataset != NULL) {
		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
		    RW_READER, FTAG);
	}
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	if (dn->dn_objset->os_dsl_dataset != NULL)
		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty. They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too? The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED) {
		if (dn->dn_objset->os_dsl_dataset != NULL) {
			rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
			    RW_READER, FTAG);
		}
		if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
			dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ?
			    DN_DIRTY_SYNC : DN_DIRTY_OPEN);
			ASSERT(dn->dn_dirtyctx_firstset == NULL);
			dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
		}
		if (dn->dn_objset->os_dsl_dataset != NULL) {
			rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
			    FTAG);
		}
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		dbuf_redirty(dr);
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context. Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef DEBUG
	if (dn->dn_objset->os_dsl_dataset != NULL)
		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	if (dn->dn_objset->os_dsl_dataset != NULL)
		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		dmu_objset_willuse_space(os, db->db.db_size, tx);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block. Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
		dr->dr_accounted = db->db.db_size;
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty. We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_free_ranges[txgoff] != NULL) {
			range_tree_clear(dn->dn_free_ranges[txgoff],
			    db->db_blkid, 1);
		}
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	}

	/*
	 * The dn_struct_rwlock prevents db_blkptr from changing
	 * due to a write from syncing context completing
	 * while we are running, so we want to acquire it before
	 * looking at db_blkptr.
	 */
	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	/*
	 * We need to hold the dn_struct_rwlock to make this assertion,
	 * because it protects dn_phys / dn_next_nlevels from changing.
	 */
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * If we are overwriting a dedup BP, then unless it is snapshotted,
	 * when we get to syncing context we will need to decrement its
	 * refcount in the DDT. Prefetch the relevant DDT block so that
	 * syncing context won't have to wait for the i/o.
	 */
	ddt_prefetch(os->os_spa, db->db_blkptr);

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*
		 * Since we've dropped the mutex, it's possible that
		 * dbuf_undirty() might have changed this out from under us.
		 */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction. Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);

	/*
	 * Due to our use of dn_nlevels below, this can only be called
	 * in open context, unless we are operating on the MOS.
	 * From syncing context, dn_nlevels may be different from the
	 * dn_nlevels used when dbuf was dirtied.
	 */
	ASSERT(db->db_objset ==
	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
	    dr->dr_accounted, txg);

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
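	/*
	 * (For reference: the three list_insert_tail() calls in
	 * dbuf_dirty() are the bonus/spill case and the top-level case,
	 * both of which insert onto dn_dirty_records[], and the child
	 * case, which inserts onto the parent's dr_children.)
	 */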
1782 * Make sure to do a list_remove corresponding to 1783 * every one of those list_insert calls. 1784 */ 1785 if (dr->dr_parent) { 1786 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1787 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1788 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 1789 } else if (db->db_blkid == DMU_SPILL_BLKID || 1790 db->db_level + 1 == dn->dn_nlevels) { 1791 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1792 mutex_enter(&dn->dn_mtx); 1793 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1794 mutex_exit(&dn->dn_mtx); 1795 } 1796 DB_DNODE_EXIT(db); 1797 1798 if (db->db_state != DB_NOFILL) { 1799 dbuf_unoverride(dr); 1800 1801 ASSERT(db->db_buf != NULL); 1802 ASSERT(dr->dt.dl.dr_data != NULL); 1803 if (dr->dt.dl.dr_data != db->db_buf) 1804 arc_buf_destroy(dr->dt.dl.dr_data, db); 1805 } 1806 1807 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1808 1809 ASSERT(db->db_dirtycnt > 0); 1810 db->db_dirtycnt -= 1; 1811 1812 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1813 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 1814 dbuf_destroy(db); 1815 return (B_TRUE); 1816 } 1817 1818 return (B_FALSE); 1819 } 1820 1821 void 1822 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 1823 { 1824 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1825 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1826 1827 ASSERT(tx->tx_txg != 0); 1828 ASSERT(!refcount_is_zero(&db->db_holds)); 1829 1830 /* 1831 * Quick check for dirtiness. For already dirty blocks, this 1832 * reduces runtime of this function by >90%, and overall performance 1833 * by 50% for some workloads (e.g. file deletion with indirect blocks 1834 * cached). 1835 */ 1836 mutex_enter(&db->db_mtx); 1837 dbuf_dirty_record_t *dr; 1838 for (dr = db->db_last_dirty; 1839 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 1840 /* 1841 * It's possible that it is already dirty but not cached, 1842 * because there are some calls to dbuf_dirty() that don't 1843 * go through dmu_buf_will_dirty(). 1844 */ 1845 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 1846 /* This dbuf is already dirty and cached.
*/ 1847 dbuf_redirty(dr); 1848 mutex_exit(&db->db_mtx); 1849 return; 1850 } 1851 } 1852 mutex_exit(&db->db_mtx); 1853 1854 DB_DNODE_ENTER(db); 1855 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 1856 rf |= DB_RF_HAVESTRUCT; 1857 DB_DNODE_EXIT(db); 1858 (void) dbuf_read(db, NULL, rf); 1859 (void) dbuf_dirty(db, tx); 1860 } 1861 1862 void 1863 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1864 { 1865 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1866 1867 db->db_state = DB_NOFILL; 1868 1869 dmu_buf_will_fill(db_fake, tx); 1870 } 1871 1872 void 1873 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1874 { 1875 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1876 1877 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1878 ASSERT(tx->tx_txg != 0); 1879 ASSERT(db->db_level == 0); 1880 ASSERT(!refcount_is_zero(&db->db_holds)); 1881 1882 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1883 dmu_tx_private_ok(tx)); 1884 1885 dbuf_noread(db); 1886 (void) dbuf_dirty(db, tx); 1887 } 1888 1889 #pragma weak dmu_buf_fill_done = dbuf_fill_done 1890 /* ARGSUSED */ 1891 void 1892 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1893 { 1894 mutex_enter(&db->db_mtx); 1895 DBUF_VERIFY(db); 1896 1897 if (db->db_state == DB_FILL) { 1898 if (db->db_level == 0 && db->db_freed_in_flight) { 1899 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1900 /* we were freed while filling */ 1901 /* XXX dbuf_undirty? */ 1902 bzero(db->db.db_data, db->db.db_size); 1903 db->db_freed_in_flight = FALSE; 1904 } 1905 db->db_state = DB_CACHED; 1906 cv_broadcast(&db->db_changed); 1907 } 1908 mutex_exit(&db->db_mtx); 1909 } 1910 1911 void 1912 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 1913 bp_embedded_type_t etype, enum zio_compress comp, 1914 int uncompressed_size, int compressed_size, int byteorder, 1915 dmu_tx_t *tx) 1916 { 1917 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 1918 struct dirty_leaf *dl; 1919 dmu_object_type_t type; 1920 1921 if (etype == BP_EMBEDDED_TYPE_DATA) { 1922 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 1923 SPA_FEATURE_EMBEDDED_DATA)); 1924 } 1925 1926 DB_DNODE_ENTER(db); 1927 type = DB_DNODE(db)->dn_type; 1928 DB_DNODE_EXIT(db); 1929 1930 ASSERT0(db->db_level); 1931 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1932 1933 dmu_buf_will_not_fill(dbuf, tx); 1934 1935 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1936 dl = &db->db_last_dirty->dt.dl; 1937 encode_embedded_bp_compressed(&dl->dr_overridden_by, 1938 data, comp, uncompressed_size, compressed_size); 1939 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 1940 BP_SET_TYPE(&dl->dr_overridden_by, type); 1941 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 1942 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 1943 1944 dl->dr_override_state = DR_OVERRIDDEN; 1945 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 1946 } 1947 1948 /* 1949 * Directly assign a provided arc buf to a given dbuf if it's not referenced 1950 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 
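 *
 * A minimal caller sketch (editorial illustration only; "dn", "blkid"
 * and "tx" are assumed to be set up elsewhere, error handling is
 * elided, and dmu_request_arcbuf() is assumed to be the loaning
 * interface that produces a buf suitable for assignment):
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	arc_buf_t *buf = dmu_request_arcbuf(&db->db, db->db.db_size);
 *	-- fill buf->b_data with the new contents, then:
 *	dbuf_assign_arcbuf(db, buf, tx);
 *	dbuf_rele(db, FTAG);
 *
 * Either way the dbuf layer takes ownership of "buf": it is installed
 * as db_buf, or its contents are copied and the buf is destroyed.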
1951 */ 1952 void 1953 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 1954 { 1955 ASSERT(!refcount_is_zero(&db->db_holds)); 1956 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1957 ASSERT(db->db_level == 0); 1958 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 1959 ASSERT(buf != NULL); 1960 ASSERT(arc_buf_lsize(buf) == db->db.db_size); 1961 ASSERT(tx->tx_txg != 0); 1962 1963 arc_return_buf(buf, db); 1964 ASSERT(arc_released(buf)); 1965 1966 mutex_enter(&db->db_mtx); 1967 1968 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1969 cv_wait(&db->db_changed, &db->db_mtx); 1970 1971 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 1972 1973 if (db->db_state == DB_CACHED && 1974 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 1975 mutex_exit(&db->db_mtx); 1976 (void) dbuf_dirty(db, tx); 1977 bcopy(buf->b_data, db->db.db_data, db->db.db_size); 1978 arc_buf_destroy(buf, db); 1979 xuio_stat_wbuf_copied(); 1980 return; 1981 } 1982 1983 xuio_stat_wbuf_nocopy(); 1984 if (db->db_state == DB_CACHED) { 1985 dbuf_dirty_record_t *dr = db->db_last_dirty; 1986 1987 ASSERT(db->db_buf != NULL); 1988 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 1989 ASSERT(dr->dt.dl.dr_data == db->db_buf); 1990 if (!arc_released(db->db_buf)) { 1991 ASSERT(dr->dt.dl.dr_override_state == 1992 DR_OVERRIDDEN); 1993 arc_release(db->db_buf, db); 1994 } 1995 dr->dt.dl.dr_data = buf; 1996 arc_buf_destroy(db->db_buf, db); 1997 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 1998 arc_release(db->db_buf, db); 1999 arc_buf_destroy(db->db_buf, db); 2000 } 2001 db->db_buf = NULL; 2002 } 2003 ASSERT(db->db_buf == NULL); 2004 dbuf_set_data(db, buf); 2005 db->db_state = DB_FILL; 2006 mutex_exit(&db->db_mtx); 2007 (void) dbuf_dirty(db, tx); 2008 dmu_buf_fill_done(&db->db, tx); 2009 } 2010 2011 void 2012 dbuf_destroy(dmu_buf_impl_t *db) 2013 { 2014 dnode_t *dn; 2015 dmu_buf_impl_t *parent = db->db_parent; 2016 dmu_buf_impl_t *dndb; 2017 2018 ASSERT(MUTEX_HELD(&db->db_mtx)); 2019 ASSERT(refcount_is_zero(&db->db_holds)); 2020 2021 if (db->db_buf != NULL) { 2022 arc_buf_destroy(db->db_buf, db); 2023 db->db_buf = NULL; 2024 } 2025 2026 if (db->db_blkid == DMU_BONUS_BLKID) { 2027 ASSERT(db->db.db_data != NULL); 2028 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); 2029 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 2030 db->db_state = DB_UNCACHED; 2031 } 2032 2033 dbuf_clear_data(db); 2034 2035 if (multilist_link_active(&db->db_cache_link)) { 2036 multilist_remove(dbuf_cache, db); 2037 (void) refcount_remove_many(&dbuf_cache_size, 2038 db->db.db_size, db); 2039 } 2040 2041 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2042 ASSERT(db->db_data_pending == NULL); 2043 2044 db->db_state = DB_EVICTING; 2045 db->db_blkptr = NULL; 2046 2047 /* 2048 * Now that db_state is DB_EVICTING, nobody else can find this via 2049 * the hash table. We can now drop db_mtx, which allows us to 2050 * acquire the dn_dbufs_mtx. 
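 *
 * The resulting lock order, sketched (editorial illustration):
 *
 *	mutex_exit(&db->db_mtx);	-- DB_EVICTING hides db from lookups
 *	mutex_enter(&dn->dn_dbufs_mtx);
 *	avl_remove(&dn->dn_dbufs, db);
 *
 * Entering dn_dbufs_mtx while still holding db_mtx would invert the
 * dn_dbufs_mtx -> db_mtx order that dbuf_create() uses, risking
 * deadlock.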
2051 */ 2052 mutex_exit(&db->db_mtx); 2053 2054 DB_DNODE_ENTER(db); 2055 dn = DB_DNODE(db); 2056 dndb = dn->dn_dbuf; 2057 if (db->db_blkid != DMU_BONUS_BLKID) { 2058 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2059 if (needlock) 2060 mutex_enter(&dn->dn_dbufs_mtx); 2061 avl_remove(&dn->dn_dbufs, db); 2062 atomic_dec_32(&dn->dn_dbufs_count); 2063 membar_producer(); 2064 DB_DNODE_EXIT(db); 2065 if (needlock) 2066 mutex_exit(&dn->dn_dbufs_mtx); 2067 /* 2068 * Decrementing the dbuf count means that the hold corresponding 2069 * to the removed dbuf is no longer discounted in dnode_move(), 2070 * so the dnode cannot be moved until after we release the hold. 2071 * The membar_producer() ensures visibility of the decremented 2072 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2073 * release any lock. 2074 */ 2075 dnode_rele(dn, db); 2076 db->db_dnode_handle = NULL; 2077 2078 dbuf_hash_remove(db); 2079 } else { 2080 DB_DNODE_EXIT(db); 2081 } 2082 2083 ASSERT(refcount_is_zero(&db->db_holds)); 2084 2085 db->db_parent = NULL; 2086 2087 ASSERT(db->db_buf == NULL); 2088 ASSERT(db->db.db_data == NULL); 2089 ASSERT(db->db_hash_next == NULL); 2090 ASSERT(db->db_blkptr == NULL); 2091 ASSERT(db->db_data_pending == NULL); 2092 ASSERT(!multilist_link_active(&db->db_cache_link)); 2093 2094 kmem_cache_free(dbuf_kmem_cache, db); 2095 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2096 2097 /* 2098 * If this dbuf is referenced from an indirect dbuf, 2099 * decrement the ref count on the indirect dbuf. 2100 */ 2101 if (parent && parent != dndb) 2102 dbuf_rele(parent, db); 2103 } 2104 2105 /* 2106 * Note: While bpp will always be updated if the function returns success, 2107 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2108 * this happens when the dnode is the meta-dnode, or a userused or groupused 2109 * object. 2110 */ 2111 static int 2112 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2113 dmu_buf_impl_t **parentp, blkptr_t **bpp) 2114 { 2115 *parentp = NULL; 2116 *bpp = NULL; 2117 2118 ASSERT(blkid != DMU_BONUS_BLKID); 2119 2120 if (blkid == DMU_SPILL_BLKID) { 2121 mutex_enter(&dn->dn_mtx); 2122 if (dn->dn_have_spill && 2123 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 2124 *bpp = &dn->dn_phys->dn_spill; 2125 else 2126 *bpp = NULL; 2127 dbuf_add_ref(dn->dn_dbuf, NULL); 2128 *parentp = dn->dn_dbuf; 2129 mutex_exit(&dn->dn_mtx); 2130 return (0); 2131 } 2132 2133 int nlevels = 2134 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 2135 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2136 2137 ASSERT3U(level * epbs, <, 64); 2138 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2139 /* 2140 * This assertion shouldn't trip as long as the max indirect block size 2141 * is less than 1M. The reason for this is that up to that point, 2142 * the number of levels required to address an entire object with blocks 2143 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 2144 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 2145 * (i.e. we can address the entire object), objects will all use at most 2146 * N-1 levels and the assertion won't overflow. However, once epbs is 2147 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 2148 * enough to address an entire object, so objects will have 5 levels, 2149 * but then this assertion will overflow. 
2150 * 2151 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 2152 * need to redo this logic to handle overflows. 2153 */ 2154 ASSERT(level >= nlevels || 2155 ((nlevels - level - 1) * epbs) + 2156 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2157 if (level >= nlevels || 2158 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 2159 ((nlevels - level - 1) * epbs)) || 2160 (fail_sparse && 2161 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2162 /* the buffer has no parent yet */ 2163 return (SET_ERROR(ENOENT)); 2164 } else if (level < nlevels-1) { 2165 /* this block is referenced from an indirect block */ 2166 int err = dbuf_hold_impl(dn, level+1, 2167 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2168 if (err) 2169 return (err); 2170 err = dbuf_read(*parentp, NULL, 2171 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2172 if (err) { 2173 dbuf_rele(*parentp, NULL); 2174 *parentp = NULL; 2175 return (err); 2176 } 2177 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2178 (blkid & ((1ULL << epbs) - 1)); 2179 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 2180 ASSERT(BP_IS_HOLE(*bpp)); 2181 return (0); 2182 } else { 2183 /* the block is referenced from the dnode */ 2184 ASSERT3U(level, ==, nlevels-1); 2185 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2186 blkid < dn->dn_phys->dn_nblkptr); 2187 if (dn->dn_dbuf) { 2188 dbuf_add_ref(dn->dn_dbuf, NULL); 2189 *parentp = dn->dn_dbuf; 2190 } 2191 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2192 return (0); 2193 } 2194 } 2195 2196 static dmu_buf_impl_t * 2197 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2198 dmu_buf_impl_t *parent, blkptr_t *blkptr) 2199 { 2200 objset_t *os = dn->dn_objset; 2201 dmu_buf_impl_t *db, *odb; 2202 2203 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2204 ASSERT(dn->dn_type != DMU_OT_NONE); 2205 2206 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2207 2208 db->db_objset = os; 2209 db->db.db_object = dn->dn_object; 2210 db->db_level = level; 2211 db->db_blkid = blkid; 2212 db->db_last_dirty = NULL; 2213 db->db_dirtycnt = 0; 2214 db->db_dnode_handle = dn->dn_handle; 2215 db->db_parent = parent; 2216 db->db_blkptr = blkptr; 2217 2218 db->db_user = NULL; 2219 db->db_user_immediate_evict = FALSE; 2220 db->db_freed_in_flight = FALSE; 2221 db->db_pending_evict = FALSE; 2222 2223 if (blkid == DMU_BONUS_BLKID) { 2224 ASSERT3P(parent, ==, dn->dn_dbuf); 2225 db->db.db_size = DN_MAX_BONUSLEN - 2226 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 2227 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 2228 db->db.db_offset = DMU_BONUS_BLKID; 2229 db->db_state = DB_UNCACHED; 2230 /* the bonus dbuf is not placed in the hash table */ 2231 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2232 return (db); 2233 } else if (blkid == DMU_SPILL_BLKID) { 2234 db->db.db_size = (blkptr != NULL) ? 2235 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 2236 db->db.db_offset = 0; 2237 } else { 2238 int blocksize = 2239 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2240 db->db.db_size = blocksize; 2241 db->db.db_offset = db->db_blkid * blocksize; 2242 } 2243 2244 /* 2245 * Hold the dn_dbufs_mtx while we get the new dbuf 2246 * in the hash table *and* added to the dbufs list. 2247 * This prevents a possible deadlock with someone 2248 * trying to look up this dbuf before it's added to the 2249 * dn_dbufs list.
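 *
 * Sketched (editorial illustration), the window being closed is:
 *
 *	mutex_enter(&dn->dn_dbufs_mtx);
 *	dbuf_hash_insert(db);	-- lookups can now find db ...
 *	avl_add(&dn->dn_dbufs, db);	-- ... and the list agrees
 *	mutex_exit(&dn->dn_dbufs_mtx);
 *
 * Without one lock covering both steps, a concurrent dbuf_find()
 * could see the dbuf in the hash table while a walker of dn_dbufs
 * (e.g. dbuf_free_range()) would not see it at all.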
2250 */ 2251 mutex_enter(&dn->dn_dbufs_mtx); 2252 db->db_state = DB_EVICTING; 2253 if ((odb = dbuf_hash_insert(db)) != NULL) { 2254 /* someone else inserted it first */ 2255 kmem_cache_free(dbuf_kmem_cache, db); 2256 mutex_exit(&dn->dn_dbufs_mtx); 2257 return (odb); 2258 } 2259 avl_add(&dn->dn_dbufs, db); 2260 2261 db->db_state = DB_UNCACHED; 2262 mutex_exit(&dn->dn_dbufs_mtx); 2263 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2264 2265 if (parent && parent != dn->dn_dbuf) 2266 dbuf_add_ref(parent, db); 2267 2268 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2269 refcount_count(&dn->dn_holds) > 0); 2270 (void) refcount_add(&dn->dn_holds, db); 2271 atomic_inc_32(&dn->dn_dbufs_count); 2272 2273 dprintf_dbuf(db, "db=%p\n", db); 2274 2275 return (db); 2276 } 2277 2278 typedef struct dbuf_prefetch_arg { 2279 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2280 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2281 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2282 int dpa_curlevel; /* The current level that we're reading */ 2283 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2284 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2285 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2286 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2287 } dbuf_prefetch_arg_t; 2288 2289 /* 2290 * Actually issue the prefetch read for the block given. 2291 */ 2292 static void 2293 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2294 { 2295 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2296 return; 2297 2298 arc_flags_t aflags = 2299 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2300 2301 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2302 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2303 ASSERT(dpa->dpa_zio != NULL); 2304 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2305 dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2306 &aflags, &dpa->dpa_zb); 2307 } 2308 2309 /* 2310 * Called when an indirect block above our prefetch target is read in. This 2311 * will either read in the next indirect block down the tree or issue the actual 2312 * prefetch if the next block down is our target. 2313 */ 2314 static void 2315 dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) 2316 { 2317 dbuf_prefetch_arg_t *dpa = private; 2318 2319 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2320 ASSERT3S(dpa->dpa_curlevel, >, 0); 2321 2322 /* 2323 * The dpa_dnode is only valid if we are called with a NULL 2324 * zio. This indicates that the arc_read() returned without 2325 * first calling zio_read() to issue a physical read. Once 2326 * a physical read is made the dpa_dnode must be invalidated 2327 * as the locks guarding it may have been dropped. If the 2328 * dpa_dnode is still valid, then we want to add it to the dbuf 2329 * cache. To do so, we must hold the dbuf associated with the block 2330 * we just prefetched, read its contents so that we associate it 2331 * with an arc_buf_t, and then release it. 
2332 */ 2333 if (zio != NULL) { 2334 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2335 if (zio->io_flags & ZIO_FLAG_RAW) { 2336 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2337 } else { 2338 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2339 } 2340 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2341 2342 dpa->dpa_dnode = NULL; 2343 } else if (dpa->dpa_dnode != NULL) { 2344 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2345 (dpa->dpa_epbs * (dpa->dpa_curlevel - 2346 dpa->dpa_zb.zb_level)); 2347 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2348 dpa->dpa_curlevel, curblkid, FTAG); 2349 (void) dbuf_read(db, NULL, 2350 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2351 dbuf_rele(db, FTAG); 2352 } 2353 2354 dpa->dpa_curlevel--; 2355 2356 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2357 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2358 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2359 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2360 if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) { 2361 kmem_free(dpa, sizeof (*dpa)); 2362 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2363 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2364 dbuf_issue_final_prefetch(dpa, bp); 2365 kmem_free(dpa, sizeof (*dpa)); 2366 } else { 2367 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2368 zbookmark_phys_t zb; 2369 2370 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2371 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 2372 iter_aflags |= ARC_FLAG_L2CACHE; 2373 2374 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2375 2376 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2377 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2378 2379 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2380 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2381 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2382 &iter_aflags, &zb); 2383 } 2384 2385 arc_buf_destroy(abuf, private); 2386 } 2387 2388 /* 2389 * Issue prefetch reads for the given block on the given level. If the indirect 2390 * blocks above that block are not in memory, we will read them in 2391 * asynchronously. As a result, this call never blocks waiting for a read to 2392 * complete. 2393 */ 2394 void 2395 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2396 arc_flags_t aflags) 2397 { 2398 blkptr_t bp; 2399 int epbs, nlevels, curlevel; 2400 uint64_t curblkid; 2401 2402 ASSERT(blkid != DMU_BONUS_BLKID); 2403 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2404 2405 if (blkid > dn->dn_maxblkid) 2406 return; 2407 2408 if (dnode_block_freed(dn, blkid)) 2409 return; 2410 2411 /* 2412 * This dnode hasn't been written to disk yet, so there's nothing to 2413 * prefetch. 2414 */ 2415 nlevels = dn->dn_phys->dn_nlevels; 2416 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2417 return; 2418 2419 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2420 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2421 return; 2422 2423 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2424 level, blkid); 2425 if (db != NULL) { 2426 mutex_exit(&db->db_mtx); 2427 /* 2428 * This dbuf already exists. It is either CACHED, or 2429 * (we assume) about to be read or filled. 2430 */ 2431 return; 2432 } 2433 2434 /* 2435 * Find the closest ancestor (indirect block) of the target block 2436 * that is present in the cache. In this indirect block, we will 2437 * find the bp that is at curlevel, curblkid. 
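 *
 * A worked example (editorial illustration): with 128K indirect
 * blocks, epbs = 17 - SPA_BLKPTRSHIFT = 10, so the level-1 ancestor
 * of L0 blkid 12345 is parent blkid 12345 >> 10 = 12, and the bp for
 * blkid 12345 lives at index P2PHASE(12345, 1 << 10) = 57 inside
 * that indirect block.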
2438 */ 2439 curlevel = level; 2440 curblkid = blkid; 2441 while (curlevel < nlevels - 1) { 2442 int parent_level = curlevel + 1; 2443 uint64_t parent_blkid = curblkid >> epbs; 2444 dmu_buf_impl_t *db; 2445 2446 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2447 FALSE, TRUE, FTAG, &db) == 0) { 2448 blkptr_t *bpp = db->db_buf->b_data; 2449 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2450 dbuf_rele(db, FTAG); 2451 break; 2452 } 2453 2454 curlevel = parent_level; 2455 curblkid = parent_blkid; 2456 } 2457 2458 if (curlevel == nlevels - 1) { 2459 /* No cached indirect blocks found. */ 2460 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2461 bp = dn->dn_phys->dn_blkptr[curblkid]; 2462 } 2463 if (BP_IS_HOLE(&bp)) 2464 return; 2465 2466 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2467 2468 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2469 ZIO_FLAG_CANFAIL); 2470 2471 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2472 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2473 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2474 dn->dn_object, level, blkid); 2475 dpa->dpa_curlevel = curlevel; 2476 dpa->dpa_prio = prio; 2477 dpa->dpa_aflags = aflags; 2478 dpa->dpa_spa = dn->dn_objset->os_spa; 2479 dpa->dpa_dnode = dn; 2480 dpa->dpa_epbs = epbs; 2481 dpa->dpa_zio = pio; 2482 2483 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2484 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 2485 dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 2486 2487 /* 2488 * If we have the indirect just above us, no need to do the asynchronous 2489 * prefetch chain; we'll just run the last step ourselves. If we're at 2490 * a higher level, though, we want to issue the prefetches for all the 2491 * indirect blocks asynchronously, so we can go on with whatever we were 2492 * doing. 2493 */ 2494 if (curlevel == level) { 2495 ASSERT3U(curblkid, ==, blkid); 2496 dbuf_issue_final_prefetch(dpa, &bp); 2497 kmem_free(dpa, sizeof (*dpa)); 2498 } else { 2499 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2500 zbookmark_phys_t zb; 2501 2502 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2503 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 2504 iter_aflags |= ARC_FLAG_L2CACHE; 2505 2506 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2507 dn->dn_object, curlevel, curblkid); 2508 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2509 &bp, dbuf_prefetch_indirect_done, dpa, prio, 2510 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2511 &iter_aflags, &zb); 2512 } 2513 /* 2514 * We use pio here instead of dpa_zio since it's possible that 2515 * dpa may have already been freed. 2516 */ 2517 zio_nowait(pio); 2518 } 2519 2520 /* 2521 * Returns with db_holds incremented, and db_mtx not held. 2522 * Note: dn_struct_rwlock must be held. 
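 *
 * A minimal caller sketch (editorial illustration; error handling
 * elided):
 *
 *	dmu_buf_impl_t *db;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	if (dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db) == 0) {
 *		-- read or dirty the buffer here, then:
 *		dbuf_rele(db, FTAG);
 *	}
 *	rw_exit(&dn->dn_struct_rwlock);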
2523 */ 2524 int 2525 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2526 boolean_t fail_sparse, boolean_t fail_uncached, 2527 void *tag, dmu_buf_impl_t **dbp) 2528 { 2529 dmu_buf_impl_t *db, *parent = NULL; 2530 2531 ASSERT(blkid != DMU_BONUS_BLKID); 2532 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2533 ASSERT3U(dn->dn_nlevels, >, level); 2534 2535 *dbp = NULL; 2536 top: 2537 /* dbuf_find() returns with db_mtx held */ 2538 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 2539 2540 if (db == NULL) { 2541 blkptr_t *bp = NULL; 2542 int err; 2543 2544 if (fail_uncached) 2545 return (SET_ERROR(ENOENT)); 2546 2547 ASSERT3P(parent, ==, NULL); 2548 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 2549 if (fail_sparse) { 2550 if (err == 0 && bp && BP_IS_HOLE(bp)) 2551 err = SET_ERROR(ENOENT); 2552 if (err) { 2553 if (parent) 2554 dbuf_rele(parent, NULL); 2555 return (err); 2556 } 2557 } 2558 if (err && err != ENOENT) 2559 return (err); 2560 db = dbuf_create(dn, level, blkid, parent, bp); 2561 } 2562 2563 if (fail_uncached && db->db_state != DB_CACHED) { 2564 mutex_exit(&db->db_mtx); 2565 return (SET_ERROR(ENOENT)); 2566 } 2567 2568 if (db->db_buf != NULL) 2569 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 2570 2571 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 2572 2573 /* 2574 * If this buffer is currently syncing out, and we are 2575 * still referencing it from db_data, we need to make a copy 2576 * of it in case we decide we want to dirty it again in this txg. 2577 */ 2578 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2579 dn->dn_object != DMU_META_DNODE_OBJECT && 2580 db->db_state == DB_CACHED && db->db_data_pending) { 2581 dbuf_dirty_record_t *dr = db->db_data_pending; 2582 2583 if (dr->dt.dl.dr_data == db->db_buf) { 2584 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2585 2586 dbuf_set_data(db, 2587 arc_alloc_buf(dn->dn_objset->os_spa, db, type, 2588 db->db.db_size)); 2589 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 2590 db->db.db_size); 2591 } 2592 } 2593 2594 if (multilist_link_active(&db->db_cache_link)) { 2595 ASSERT(refcount_is_zero(&db->db_holds)); 2596 multilist_remove(dbuf_cache, db); 2597 (void) refcount_remove_many(&dbuf_cache_size, 2598 db->db.db_size, db); 2599 } 2600 (void) refcount_add(&db->db_holds, tag); 2601 DBUF_VERIFY(db); 2602 mutex_exit(&db->db_mtx); 2603 2604 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2605 if (parent) 2606 dbuf_rele(parent, NULL); 2607 2608 ASSERT3P(DB_DNODE(db), ==, dn); 2609 ASSERT3U(db->db_blkid, ==, blkid); 2610 ASSERT3U(db->db_level, ==, level); 2611 *dbp = db; 2612 2613 return (0); 2614 } 2615 2616 dmu_buf_impl_t * 2617 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2618 { 2619 return (dbuf_hold_level(dn, 0, blkid, tag)); 2620 } 2621 2622 dmu_buf_impl_t * 2623 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2624 { 2625 dmu_buf_impl_t *db; 2626 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2627 return (err ?
NULL : db); 2628 } 2629 2630 void 2631 dbuf_create_bonus(dnode_t *dn) 2632 { 2633 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2634 2635 ASSERT(dn->dn_bonus == NULL); 2636 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 2637 } 2638 2639 int 2640 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 2641 { 2642 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2643 dnode_t *dn; 2644 2645 if (db->db_blkid != DMU_SPILL_BLKID) 2646 return (SET_ERROR(ENOTSUP)); 2647 if (blksz == 0) 2648 blksz = SPA_MINBLOCKSIZE; 2649 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 2650 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 2651 2652 DB_DNODE_ENTER(db); 2653 dn = DB_DNODE(db); 2654 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2655 dbuf_new_size(db, blksz, tx); 2656 rw_exit(&dn->dn_struct_rwlock); 2657 DB_DNODE_EXIT(db); 2658 2659 return (0); 2660 } 2661 2662 void 2663 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 2664 { 2665 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2666 } 2667 2668 #pragma weak dmu_buf_add_ref = dbuf_add_ref 2669 void 2670 dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2671 { 2672 int64_t holds = refcount_add(&db->db_holds, tag); 2673 ASSERT3S(holds, >, 1); 2674 } 2675 2676 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2677 boolean_t 2678 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2679 void *tag) 2680 { 2681 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2682 dmu_buf_impl_t *found_db; 2683 boolean_t result = B_FALSE; 2684 2685 if (db->db_blkid == DMU_BONUS_BLKID) 2686 found_db = dbuf_find_bonus(os, obj); 2687 else 2688 found_db = dbuf_find(os, obj, 0, blkid); 2689 2690 if (found_db != NULL) { 2691 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 2692 (void) refcount_add(&db->db_holds, tag); 2693 result = B_TRUE; 2694 } 2695 mutex_exit(&db->db_mtx); 2696 } 2697 return (result); 2698 } 2699 2700 /* 2701 * If you call dbuf_rele() you had better not be referencing the dnode handle 2702 * unless you have some other direct or indirect hold on the dnode. (An indirect 2703 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 2704 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 2705 * dnode's parent dbuf evicting its dnode handles. 2706 */ 2707 void 2708 dbuf_rele(dmu_buf_impl_t *db, void *tag) 2709 { 2710 mutex_enter(&db->db_mtx); 2711 dbuf_rele_and_unlock(db, tag); 2712 } 2713 2714 void 2715 dmu_buf_rele(dmu_buf_t *db, void *tag) 2716 { 2717 dbuf_rele((dmu_buf_impl_t *)db, tag); 2718 } 2719 2720 /* 2721 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 2722 * db_dirtycnt and db_holds to be updated atomically. 2723 */ 2724 void 2725 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) 2726 { 2727 int64_t holds; 2728 2729 ASSERT(MUTEX_HELD(&db->db_mtx)); 2730 DBUF_VERIFY(db); 2731 2732 /* 2733 * Remove the reference to the dbuf before removing its hold on the 2734 * dnode so we can guarantee in dnode_move() that a referenced bonus 2735 * buffer has a corresponding dnode hold. 2736 */ 2737 holds = refcount_remove(&db->db_holds, tag); 2738 ASSERT(holds >= 0); 2739 2740 /* 2741 * We can't freeze indirects if there is a possibility that they 2742 * may be modified in the current syncing context. 2743 */ 2744 if (db->db_buf != NULL && 2745 holds == (db->db_level == 0 ? 
db->db_dirtycnt : 0)) { 2746 arc_buf_freeze(db->db_buf); 2747 } 2748 2749 if (holds == db->db_dirtycnt && 2750 db->db_level == 0 && db->db_user_immediate_evict) 2751 dbuf_evict_user(db); 2752 2753 if (holds == 0) { 2754 if (db->db_blkid == DMU_BONUS_BLKID) { 2755 dnode_t *dn; 2756 boolean_t evict_dbuf = db->db_pending_evict; 2757 2758 /* 2759 * If the dnode moves here, we cannot cross this 2760 * barrier until the move completes. 2761 */ 2762 DB_DNODE_ENTER(db); 2763 2764 dn = DB_DNODE(db); 2765 atomic_dec_32(&dn->dn_dbufs_count); 2766 2767 /* 2768 * Decrementing the dbuf count means that the bonus 2769 * buffer's dnode hold is no longer discounted in 2770 * dnode_move(). The dnode cannot move until after 2771 * the dnode_rele() below. 2772 */ 2773 DB_DNODE_EXIT(db); 2774 2775 /* 2776 * Do not reference db after its lock is dropped. 2777 * Another thread may evict it. 2778 */ 2779 mutex_exit(&db->db_mtx); 2780 2781 if (evict_dbuf) 2782 dnode_evict_bonus(dn); 2783 2784 dnode_rele(dn, db); 2785 } else if (db->db_buf == NULL) { 2786 /* 2787 * This is a special case: we never associated this 2788 * dbuf with any data allocated from the ARC. 2789 */ 2790 ASSERT(db->db_state == DB_UNCACHED || 2791 db->db_state == DB_NOFILL); 2792 dbuf_destroy(db); 2793 } else if (arc_released(db->db_buf)) { 2794 /* 2795 * This dbuf has anonymous data associated with it. 2796 */ 2797 dbuf_destroy(db); 2798 } else { 2799 boolean_t do_arc_evict = B_FALSE; 2800 blkptr_t bp; 2801 spa_t *spa = dmu_objset_spa(db->db_objset); 2802 2803 if (!DBUF_IS_CACHEABLE(db) && 2804 db->db_blkptr != NULL && 2805 !BP_IS_HOLE(db->db_blkptr) && 2806 !BP_IS_EMBEDDED(db->db_blkptr)) { 2807 do_arc_evict = B_TRUE; 2808 bp = *db->db_blkptr; 2809 } 2810 2811 if (!DBUF_IS_CACHEABLE(db) || 2812 db->db_pending_evict) { 2813 dbuf_destroy(db); 2814 } else if (!multilist_link_active(&db->db_cache_link)) { 2815 multilist_insert(dbuf_cache, db); 2816 (void) refcount_add_many(&dbuf_cache_size, 2817 db->db.db_size, db); 2818 mutex_exit(&db->db_mtx); 2819 2820 dbuf_evict_notify(); 2821 } 2822 2823 if (do_arc_evict) 2824 arc_freed(spa, &bp); 2825 } 2826 } else { 2827 mutex_exit(&db->db_mtx); 2828 } 2829 2830 } 2831 2832 #pragma weak dmu_buf_refcount = dbuf_refcount 2833 uint64_t 2834 dbuf_refcount(dmu_buf_impl_t *db) 2835 { 2836 return (refcount_count(&db->db_holds)); 2837 } 2838 2839 void * 2840 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 2841 dmu_buf_user_t *new_user) 2842 { 2843 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2844 2845 mutex_enter(&db->db_mtx); 2846 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2847 if (db->db_user == old_user) 2848 db->db_user = new_user; 2849 else 2850 old_user = db->db_user; 2851 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2852 mutex_exit(&db->db_mtx); 2853 2854 return (old_user); 2855 } 2856 2857 void * 2858 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2859 { 2860 return (dmu_buf_replace_user(db_fake, NULL, user)); 2861 } 2862 2863 void * 2864 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2865 { 2866 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2867 2868 db->db_user_immediate_evict = TRUE; 2869 return (dmu_buf_set_user(db_fake, user)); 2870 } 2871 2872 void * 2873 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2874 { 2875 return (dmu_buf_replace_user(db_fake, user, NULL)); 2876 } 2877 2878 void * 2879 dmu_buf_get_user(dmu_buf_t *db_fake) 2880 { 2881 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2882 2883 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2884 
return (db->db_user); 2885 } 2886 2887 void 2888 dmu_buf_user_evict_wait() 2889 { 2890 taskq_wait(dbu_evict_taskq); 2891 } 2892 2893 blkptr_t * 2894 dmu_buf_get_blkptr(dmu_buf_t *db) 2895 { 2896 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2897 return (dbi->db_blkptr); 2898 } 2899 2900 objset_t * 2901 dmu_buf_get_objset(dmu_buf_t *db) 2902 { 2903 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2904 return (dbi->db_objset); 2905 } 2906 2907 dnode_t * 2908 dmu_buf_dnode_enter(dmu_buf_t *db) 2909 { 2910 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2911 DB_DNODE_ENTER(dbi); 2912 return (DB_DNODE(dbi)); 2913 } 2914 2915 void 2916 dmu_buf_dnode_exit(dmu_buf_t *db) 2917 { 2918 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2919 DB_DNODE_EXIT(dbi); 2920 } 2921 2922 static void 2923 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 2924 { 2925 /* ASSERT(dmu_tx_is_syncing(tx)) */ 2926 ASSERT(MUTEX_HELD(&db->db_mtx)); 2927 2928 if (db->db_blkptr != NULL) 2929 return; 2930 2931 if (db->db_blkid == DMU_SPILL_BLKID) { 2932 db->db_blkptr = &dn->dn_phys->dn_spill; 2933 BP_ZERO(db->db_blkptr); 2934 return; 2935 } 2936 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 2937 /* 2938 * This buffer was allocated at a time when there were 2939 * no available blkptrs from the dnode, or it was 2940 * inappropriate to hook it in (i.e., nlevels mismatch). 2941 */ 2942 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 2943 ASSERT(db->db_parent == NULL); 2944 db->db_parent = dn->dn_dbuf; 2945 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 2946 DBUF_VERIFY(db); 2947 } else { 2948 dmu_buf_impl_t *parent = db->db_parent; 2949 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2950 2951 ASSERT(dn->dn_phys->dn_nlevels > 1); 2952 if (parent == NULL) { 2953 mutex_exit(&db->db_mtx); 2954 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2955 parent = dbuf_hold_level(dn, db->db_level + 1, 2956 db->db_blkid >> epbs, db); 2957 rw_exit(&dn->dn_struct_rwlock); 2958 mutex_enter(&db->db_mtx); 2959 db->db_parent = parent; 2960 } 2961 db->db_blkptr = (blkptr_t *)parent->db.db_data + 2962 (db->db_blkid & ((1ULL << epbs) - 1)); 2963 DBUF_VERIFY(db); 2964 } 2965 } 2966 2967 static void 2968 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 2969 { 2970 dmu_buf_impl_t *db = dr->dr_dbuf; 2971 dnode_t *dn; 2972 zio_t *zio; 2973 2974 ASSERT(dmu_tx_is_syncing(tx)); 2975 2976 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 2977 2978 mutex_enter(&db->db_mtx); 2979 2980 ASSERT(db->db_level > 0); 2981 DBUF_VERIFY(db); 2982 2983 /* Read the block if it hasn't been read yet. */ 2984 if (db->db_buf == NULL) { 2985 mutex_exit(&db->db_mtx); 2986 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 2987 mutex_enter(&db->db_mtx); 2988 } 2989 ASSERT3U(db->db_state, ==, DB_CACHED); 2990 ASSERT(db->db_buf != NULL); 2991 2992 DB_DNODE_ENTER(db); 2993 dn = DB_DNODE(db); 2994 /* Indirect block size must match what the dnode thinks it is.
*/ 2995 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 2996 dbuf_check_blkptr(dn, db); 2997 DB_DNODE_EXIT(db); 2998 2999 /* Provide the pending dirty record to child dbufs */ 3000 db->db_data_pending = dr; 3001 3002 mutex_exit(&db->db_mtx); 3003 3004 dbuf_write(dr, db->db_buf, tx); 3005 3006 zio = dr->dr_zio; 3007 mutex_enter(&dr->dt.di.dr_mtx); 3008 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3009 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3010 mutex_exit(&dr->dt.di.dr_mtx); 3011 zio_nowait(zio); 3012 } 3013 3014 static void 3015 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3016 { 3017 arc_buf_t **datap = &dr->dt.dl.dr_data; 3018 dmu_buf_impl_t *db = dr->dr_dbuf; 3019 dnode_t *dn; 3020 objset_t *os; 3021 uint64_t txg = tx->tx_txg; 3022 3023 ASSERT(dmu_tx_is_syncing(tx)); 3024 3025 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3026 3027 mutex_enter(&db->db_mtx); 3028 /* 3029 * To be synced, we must be dirtied. But we 3030 * might have been freed after the dirty. 3031 */ 3032 if (db->db_state == DB_UNCACHED) { 3033 /* This buffer has been freed since it was dirtied */ 3034 ASSERT(db->db.db_data == NULL); 3035 } else if (db->db_state == DB_FILL) { 3036 /* This buffer was freed and is now being re-filled */ 3037 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3038 } else { 3039 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3040 } 3041 DBUF_VERIFY(db); 3042 3043 DB_DNODE_ENTER(db); 3044 dn = DB_DNODE(db); 3045 3046 if (db->db_blkid == DMU_SPILL_BLKID) { 3047 mutex_enter(&dn->dn_mtx); 3048 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 3049 mutex_exit(&dn->dn_mtx); 3050 } 3051 3052 /* 3053 * If this is a bonus buffer, simply copy the bonus data into the 3054 * dnode. It will be written out when the dnode is synced (and it 3055 * will be synced, since it must have been dirty for dbuf_sync to 3056 * be called). 3057 */ 3058 if (db->db_blkid == DMU_BONUS_BLKID) { 3059 dbuf_dirty_record_t **drp; 3060 3061 ASSERT(*datap != NULL); 3062 ASSERT0(db->db_level); 3063 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN); 3064 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen); 3065 DB_DNODE_EXIT(db); 3066 3067 if (*datap != db->db.db_data) { 3068 zio_buf_free(*datap, DN_MAX_BONUSLEN); 3069 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 3070 } 3071 db->db_data_pending = NULL; 3072 drp = &db->db_last_dirty; 3073 while (*drp != dr) 3074 drp = &(*drp)->dr_next; 3075 ASSERT(dr->dr_next == NULL); 3076 ASSERT(dr->dr_dbuf == db); 3077 *drp = dr->dr_next; 3078 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3079 ASSERT(db->db_dirtycnt > 0); 3080 db->db_dirtycnt -= 1; 3081 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg); 3082 return; 3083 } 3084 3085 os = dn->dn_objset; 3086 3087 /* 3088 * This function may have dropped the db_mtx lock allowing a dmu_sync 3089 * operation to sneak in. As a result, we need to ensure that we 3090 * don't check the dr_override_state until we have returned from 3091 * dbuf_check_blkptr. 3092 */ 3093 dbuf_check_blkptr(dn, db); 3094 3095 /* 3096 * If this buffer is in the middle of an immediate write, 3097 * wait for the synchronous IO to complete. 
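 *
 * (Editorial note, hedged: DR_IN_DMU_SYNC is assumed to be set while
 * a dmu_sync() initiated from open context is still in flight; its
 * completion callback is assumed to update dr_override_state and
 * broadcast db_changed, which is what the cv_wait() below pairs
 * with:
 *
 *	DR_NOT_OVERRIDDEN -> DR_IN_DMU_SYNC -> resolved by callback)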
3098 */ 3099 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 3100 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 3101 cv_wait(&db->db_changed, &db->db_mtx); 3102 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 3103 } 3104 3105 if (db->db_state != DB_NOFILL && 3106 dn->dn_object != DMU_META_DNODE_OBJECT && 3107 refcount_count(&db->db_holds) > 1 && 3108 dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 3109 *datap == db->db_buf) { 3110 /* 3111 * If this buffer is currently "in use" (i.e., there 3112 * are active holds and db_data still references it), 3113 * then make a copy before we start the write so that 3114 * any modifications from the open txg will not leak 3115 * into this write. 3116 * 3117 * NOTE: this copy does not need to be made for 3118 * objects only modified in the syncing context (e.g. 3119 * meta-dnode blocks). 3120 */ 3121 int psize = arc_buf_size(*datap); 3122 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 3123 enum zio_compress compress_type = arc_get_compression(*datap); 3124 3125 if (compress_type == ZIO_COMPRESS_OFF) { 3126 *datap = arc_alloc_buf(os->os_spa, db, type, psize); 3127 } else { 3128 ASSERT3U(type, ==, ARC_BUFC_DATA); 3129 int lsize = arc_buf_lsize(*datap); 3130 *datap = arc_alloc_compressed_buf(os->os_spa, db, 3131 psize, lsize, compress_type); 3132 } 3133 bcopy(db->db.db_data, (*datap)->b_data, psize); 3134 } 3135 db->db_data_pending = dr; 3136 3137 mutex_exit(&db->db_mtx); 3138 3139 dbuf_write(dr, *datap, tx); 3140 3141 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3142 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 3143 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 3144 DB_DNODE_EXIT(db); 3145 } else { 3146 /* 3147 * Although zio_nowait() does not "wait for an IO", it does 3148 * initiate the IO. If this is an empty write it seems plausible 3149 * that the IO could actually be completed before the nowait 3150 * returns. We need to DB_DNODE_EXIT() first in case 3151 * zio_nowait() invalidates the dbuf. 3152 */ 3153 DB_DNODE_EXIT(db); 3154 zio_nowait(dr->dr_zio); 3155 } 3156 } 3157 3158 void 3159 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3160 { 3161 dbuf_dirty_record_t *dr; 3162 3163 while ((dr = list_head(list)) != NULL) { 3164 if (dr->dr_zio != NULL) { 3165 /* 3166 * If we find an already initialized zio then we 3167 * are processing the meta-dnode, and we have finished. 3168 * The dbufs for all dnodes are put back on the list 3169 * during processing, so that we can zio_wait() 3170 * these IOs after initiating all child IOs.
3171 */ 3172 ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3173 DMU_META_DNODE_OBJECT); 3174 break; 3175 } 3176 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 3177 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 3178 VERIFY3U(dr->dr_dbuf->db_level, ==, level); 3179 } 3180 list_remove(list, dr); 3181 if (dr->dr_dbuf->db_level > 0) 3182 dbuf_sync_indirect(dr, tx); 3183 else 3184 dbuf_sync_leaf(dr, tx); 3185 } 3186 } 3187 3188 /* ARGSUSED */ 3189 static void 3190 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3191 { 3192 dmu_buf_impl_t *db = vdb; 3193 dnode_t *dn; 3194 blkptr_t *bp = zio->io_bp; 3195 blkptr_t *bp_orig = &zio->io_bp_orig; 3196 spa_t *spa = zio->io_spa; 3197 int64_t delta; 3198 uint64_t fill = 0; 3199 int i; 3200 3201 ASSERT3P(db->db_blkptr, !=, NULL); 3202 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3203 3204 DB_DNODE_ENTER(db); 3205 dn = DB_DNODE(db); 3206 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3207 dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3208 zio->io_prev_space_delta = delta; 3209 3210 if (bp->blk_birth != 0) { 3211 ASSERT((db->db_blkid != DMU_SPILL_BLKID && 3212 BP_GET_TYPE(bp) == dn->dn_type) || 3213 (db->db_blkid == DMU_SPILL_BLKID && 3214 BP_GET_TYPE(bp) == dn->dn_bonustype) || 3215 BP_IS_EMBEDDED(bp)); 3216 ASSERT(BP_GET_LEVEL(bp) == db->db_level); 3217 } 3218 3219 mutex_enter(&db->db_mtx); 3220 3221 #ifdef ZFS_DEBUG 3222 if (db->db_blkid == DMU_SPILL_BLKID) { 3223 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3224 ASSERT(!(BP_IS_HOLE(bp)) && 3225 db->db_blkptr == &dn->dn_phys->dn_spill); 3226 } 3227 #endif 3228 3229 if (db->db_level == 0) { 3230 mutex_enter(&dn->dn_mtx); 3231 if (db->db_blkid > dn->dn_phys->dn_maxblkid && 3232 db->db_blkid != DMU_SPILL_BLKID) 3233 dn->dn_phys->dn_maxblkid = db->db_blkid; 3234 mutex_exit(&dn->dn_mtx); 3235 3236 if (dn->dn_type == DMU_OT_DNODE) { 3237 dnode_phys_t *dnp = db->db.db_data; 3238 for (i = db->db.db_size >> DNODE_SHIFT; i > 0; 3239 i--, dnp++) { 3240 if (dnp->dn_type != DMU_OT_NONE) 3241 fill++; 3242 } 3243 } else { 3244 if (BP_IS_HOLE(bp)) { 3245 fill = 0; 3246 } else { 3247 fill = 1; 3248 } 3249 } 3250 } else { 3251 blkptr_t *ibp = db->db.db_data; 3252 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3253 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 3254 if (BP_IS_HOLE(ibp)) 3255 continue; 3256 fill += BP_GET_FILL(ibp); 3257 } 3258 } 3259 DB_DNODE_EXIT(db); 3260 3261 if (!BP_IS_EMBEDDED(bp)) 3262 bp->blk_fill = fill; 3263 3264 mutex_exit(&db->db_mtx); 3265 3266 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3267 *db->db_blkptr = *bp; 3268 rw_exit(&dn->dn_struct_rwlock); 3269 } 3270 3271 /* ARGSUSED */ 3272 /* 3273 * This function gets called just prior to running through the compression 3274 * stage of the zio pipeline. If we're an indirect block comprised of only 3275 * holes, then we want this indirect to be compressed away to a hole. In 3276 * order to do that we must zero out any information about the holes that 3277 * this indirect points to before we try to compress it.
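 *
 * For example (editorial illustration): a 128K indirect block holds
 * 1 << 10 = 1024 blkptrs; if every one of them is a hole, bzero()ing
 * the buffer clears any residual metadata in those hole blkptrs
 * (e.g. hole birth txgs), so the all-zero block can compress away to
 * a hole instead of being written out.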
3278 */ 3279 static void 3280 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3281 { 3282 dmu_buf_impl_t *db = vdb; 3283 dnode_t *dn; 3284 blkptr_t *bp; 3285 unsigned int epbs, i; 3286 3287 ASSERT3U(db->db_level, >, 0); 3288 DB_DNODE_ENTER(db); 3289 dn = DB_DNODE(db); 3290 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3291 ASSERT3U(epbs, <, 31); 3292 3293 /* Determine if all our children are holes */ 3294 for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) { 3295 if (!BP_IS_HOLE(bp)) 3296 break; 3297 } 3298 3299 /* 3300 * If all the children are holes, then zero them all out so that 3301 * we may get compressed away. 3302 */ 3303 if (i == 1 << epbs) { 3304 /* 3305 * We only found holes. Grab the rwlock to prevent 3306 * anybody from reading the blocks we're about to 3307 * zero out. 3308 */ 3309 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3310 bzero(db->db.db_data, db->db.db_size); 3311 rw_exit(&dn->dn_struct_rwlock); 3312 } 3313 DB_DNODE_EXIT(db); 3314 } 3315 3316 /* 3317 * The SPA will call this callback several times for each zio - once 3318 * for every physical child i/o (zio->io_phys_children times). This 3319 * allows the DMU to monitor the progress of each logical i/o. For example, 3320 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 3321 * block. There may be a long delay before all copies/fragments are completed, 3322 * so this callback allows us to retire dirty space gradually, as the physical 3323 * i/os complete. 3324 */ 3325 /* ARGSUSED */ 3326 static void 3327 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 3328 { 3329 dmu_buf_impl_t *db = arg; 3330 objset_t *os = db->db_objset; 3331 dsl_pool_t *dp = dmu_objset_pool(os); 3332 dbuf_dirty_record_t *dr; 3333 int delta = 0; 3334 3335 dr = db->db_data_pending; 3336 ASSERT3U(dr->dr_txg, ==, zio->io_txg); 3337 3338 /* 3339 * The callback will be called io_phys_children times. Retire one 3340 * portion of our dirty space each time we are called. Any rounding 3341 * error will be cleaned up by dsl_pool_sync()'s call to 3342 * dsl_pool_undirty_space(). 3343 */ 3344 delta = dr->dr_accounted / zio->io_phys_children; 3345 dsl_pool_undirty_space(dp, delta, zio->io_txg); 3346 } 3347 3348 /* ARGSUSED */ 3349 static void 3350 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 3351 { 3352 dmu_buf_impl_t *db = vdb; 3353 blkptr_t *bp_orig = &zio->io_bp_orig; 3354 blkptr_t *bp = db->db_blkptr; 3355 objset_t *os = db->db_objset; 3356 dmu_tx_t *tx = os->os_synctx; 3357 dbuf_dirty_record_t **drp, *dr; 3358 3359 ASSERT0(zio->io_error); 3360 ASSERT(db->db_blkptr == bp); 3361 3362 /* 3363 * For nopwrites and rewrites we ensure that the bp matches our 3364 * original and bypass all the accounting. 
3365 */ 3366 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 3367 ASSERT(BP_EQUAL(bp, bp_orig)); 3368 } else { 3369 dsl_dataset_t *ds = os->os_dsl_dataset; 3370 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3371 dsl_dataset_block_born(ds, bp, tx); 3372 } 3373 3374 mutex_enter(&db->db_mtx); 3375 3376 DBUF_VERIFY(db); 3377 3378 drp = &db->db_last_dirty; 3379 while ((dr = *drp) != db->db_data_pending) 3380 drp = &dr->dr_next; 3381 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3382 ASSERT(dr->dr_dbuf == db); 3383 ASSERT(dr->dr_next == NULL); 3384 *drp = dr->dr_next; 3385 3386 #ifdef ZFS_DEBUG 3387 if (db->db_blkid == DMU_SPILL_BLKID) { 3388 dnode_t *dn; 3389 3390 DB_DNODE_ENTER(db); 3391 dn = DB_DNODE(db); 3392 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3393 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 3394 db->db_blkptr == &dn->dn_phys->dn_spill); 3395 DB_DNODE_EXIT(db); 3396 } 3397 #endif 3398 3399 if (db->db_level == 0) { 3400 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3401 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 3402 if (db->db_state != DB_NOFILL) { 3403 if (dr->dt.dl.dr_data != db->db_buf) 3404 arc_buf_destroy(dr->dt.dl.dr_data, db); 3405 } 3406 } else { 3407 dnode_t *dn; 3408 3409 DB_DNODE_ENTER(db); 3410 dn = DB_DNODE(db); 3411 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3412 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3413 if (!BP_IS_HOLE(db->db_blkptr)) { 3414 int epbs = 3415 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3416 ASSERT3U(db->db_blkid, <=, 3417 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3418 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3419 db->db.db_size); 3420 } 3421 DB_DNODE_EXIT(db); 3422 mutex_destroy(&dr->dt.di.dr_mtx); 3423 list_destroy(&dr->dt.di.dr_children); 3424 } 3425 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3426 3427 cv_broadcast(&db->db_changed); 3428 ASSERT(db->db_dirtycnt > 0); 3429 db->db_dirtycnt -= 1; 3430 db->db_data_pending = NULL; 3431 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg); 3432 } 3433 3434 static void 3435 dbuf_write_nofill_ready(zio_t *zio) 3436 { 3437 dbuf_write_ready(zio, NULL, zio->io_private); 3438 } 3439 3440 static void 3441 dbuf_write_nofill_done(zio_t *zio) 3442 { 3443 dbuf_write_done(zio, NULL, zio->io_private); 3444 } 3445 3446 static void 3447 dbuf_write_override_ready(zio_t *zio) 3448 { 3449 dbuf_dirty_record_t *dr = zio->io_private; 3450 dmu_buf_impl_t *db = dr->dr_dbuf; 3451 3452 dbuf_write_ready(zio, NULL, db); 3453 } 3454 3455 static void 3456 dbuf_write_override_done(zio_t *zio) 3457 { 3458 dbuf_dirty_record_t *dr = zio->io_private; 3459 dmu_buf_impl_t *db = dr->dr_dbuf; 3460 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3461 3462 mutex_enter(&db->db_mtx); 3463 if (!BP_EQUAL(zio->io_bp, obp)) { 3464 if (!BP_IS_HOLE(obp)) 3465 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3466 arc_release(dr->dt.dl.dr_data, db); 3467 } 3468 mutex_exit(&db->db_mtx); 3469 dbuf_write_done(zio, NULL, db); 3470 3471 if (zio->io_abd != NULL) 3472 abd_put(zio->io_abd); 3473 } 3474 3475 typedef struct dbuf_remap_impl_callback_arg { 3476 objset_t *drica_os; 3477 uint64_t drica_blk_birth; 3478 dmu_tx_t *drica_tx; 3479 } dbuf_remap_impl_callback_arg_t; 3480 3481 static void 3482 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 3483 void *arg) 3484 { 3485 dbuf_remap_impl_callback_arg_t *drica = arg; 3486 objset_t *os = drica->drica_os; 3487 spa_t *spa = dmu_objset_spa(os); 3488 dmu_tx_t *tx = drica->drica_tx; 3489 3490 
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3491 3492 if (os == spa_meta_objset(spa)) { 3493 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 3494 } else { 3495 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 3496 size, drica->drica_blk_birth, tx); 3497 } 3498 } 3499 3500 static void 3501 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx) 3502 { 3503 blkptr_t bp_copy = *bp; 3504 spa_t *spa = dmu_objset_spa(dn->dn_objset); 3505 dbuf_remap_impl_callback_arg_t drica; 3506 3507 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3508 3509 drica.drica_os = dn->dn_objset; 3510 drica.drica_blk_birth = bp->blk_birth; 3511 drica.drica_tx = tx; 3512 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 3513 &drica)) { 3514 /* 3515 * The struct_rwlock prevents dbuf_read_impl() from 3516 * dereferencing the BP while we are changing it. To 3517 * avoid lock contention, only grab it when we are actually 3518 * changing the BP. 3519 */ 3520 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3521 *bp = bp_copy; 3522 rw_exit(&dn->dn_struct_rwlock); 3523 } 3524 } 3525 3526 /* 3527 * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting 3528 * to remap a copy of every bp in the dbuf. 3529 */ 3530 boolean_t 3531 dbuf_can_remap(const dmu_buf_impl_t *db) 3532 { 3533 spa_t *spa = dmu_objset_spa(db->db_objset); 3534 blkptr_t *bp = db->db.db_data; 3535 boolean_t ret = B_FALSE; 3536 3537 ASSERT3U(db->db_level, >, 0); 3538 ASSERT3S(db->db_state, ==, DB_CACHED); 3539 3540 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 3541 3542 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3543 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 3544 blkptr_t bp_copy = bp[i]; 3545 if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 3546 ret = B_TRUE; 3547 break; 3548 } 3549 } 3550 spa_config_exit(spa, SCL_VDEV, FTAG); 3551 3552 return (ret); 3553 } 3554 3555 boolean_t 3556 dnode_needs_remap(const dnode_t *dn) 3557 { 3558 spa_t *spa = dmu_objset_spa(dn->dn_objset); 3559 boolean_t ret = B_FALSE; 3560 3561 if (dn->dn_phys->dn_nlevels == 0) { 3562 return (B_FALSE); 3563 } 3564 3565 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 3566 3567 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3568 for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) { 3569 blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j]; 3570 if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 3571 ret = B_TRUE; 3572 break; 3573 } 3574 } 3575 spa_config_exit(spa, SCL_VDEV, FTAG); 3576 3577 return (ret); 3578 } 3579 3580 /* 3581 * Remap any existing BP's to concrete vdevs, if possible. 3582 */ 3583 static void 3584 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 3585 { 3586 spa_t *spa = dmu_objset_spa(db->db_objset); 3587 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3588 3589 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 3590 return; 3591 3592 if (db->db_level > 0) { 3593 blkptr_t *bp = db->db.db_data; 3594 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 3595 dbuf_remap_impl(dn, &bp[i], tx); 3596 } 3597 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 3598 dnode_phys_t *dnp = db->db.db_data; 3599 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 3600 DMU_OT_DNODE); 3601 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) { 3602 for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 3603 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx); 3604 } 3605 } 3606 } 3607 } 3608 3609 3610 /* Issue I/O to commit a dirty buffer to disk. 
*/ 3611 static void 3612 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 3613 { 3614 dmu_buf_impl_t *db = dr->dr_dbuf; 3615 dnode_t *dn; 3616 objset_t *os; 3617 dmu_buf_impl_t *parent = db->db_parent; 3618 uint64_t txg = tx->tx_txg; 3619 zbookmark_phys_t zb; 3620 zio_prop_t zp; 3621 zio_t *zio; 3622 int wp_flag = 0; 3623 3624 ASSERT(dmu_tx_is_syncing(tx)); 3625 3626 DB_DNODE_ENTER(db); 3627 dn = DB_DNODE(db); 3628 os = dn->dn_objset; 3629 3630 if (db->db_state != DB_NOFILL) { 3631 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 3632 /* 3633 * Private object buffers are released here rather 3634 * than in dbuf_dirty() since they are only modified 3635 * in the syncing context and we don't want the 3636 * overhead of making multiple copies of the data. 3637 */ 3638 if (BP_IS_HOLE(db->db_blkptr)) { 3639 arc_buf_thaw(data); 3640 } else { 3641 dbuf_release_bp(db); 3642 } 3643 dbuf_remap(dn, db, tx); 3644 } 3645 } 3646 3647 if (parent != dn->dn_dbuf) { 3648 /* Our parent is an indirect block. */ 3649 /* We have a dirty parent that has been scheduled for write. */ 3650 ASSERT(parent && parent->db_data_pending); 3651 /* Our parent's buffer is one level closer to the dnode. */ 3652 ASSERT(db->db_level == parent->db_level-1); 3653 /* 3654 * We're about to modify our parent's db_data by modifying 3655 * our block pointer, so the parent must be released. 3656 */ 3657 ASSERT(arc_released(parent->db_buf)); 3658 zio = parent->db_data_pending->dr_zio; 3659 } else { 3660 /* Our parent is the dnode itself. */ 3661 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 3662 db->db_blkid != DMU_SPILL_BLKID) || 3663 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 3664 if (db->db_blkid != DMU_SPILL_BLKID) 3665 ASSERT3P(db->db_blkptr, ==, 3666 &dn->dn_phys->dn_blkptr[db->db_blkid]); 3667 zio = dn->dn_zio; 3668 } 3669 3670 ASSERT(db->db_level == 0 || data == db->db_buf); 3671 ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 3672 ASSERT(zio); 3673 3674 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 3675 os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 3676 db->db.db_object, db->db_level, db->db_blkid); 3677 3678 if (db->db_blkid == DMU_SPILL_BLKID) 3679 wp_flag = WP_SPILL; 3680 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 3681 3682 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 3683 DB_DNODE_EXIT(db); 3684 3685 /* 3686 * We copy the blkptr now (rather than when we instantiate the dirty 3687 * record), because its value can change between open context and 3688 * syncing context. We do not need to hold dn_struct_rwlock to read 3689 * db_blkptr because we are in syncing context. 3690 */ 3691 dr->dr_bp_copy = *db->db_blkptr; 3692 3693 if (db->db_level == 0 && 3694 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 3695 /* 3696 * The BP for this block has been provided by open context 3697 * (by dmu_sync() or dmu_buf_write_embedded()). 3698 */ 3699 abd_t *contents = (data != NULL) ? 
3700 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 3701 3702 dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy, 3703 contents, db->db.db_size, db->db.db_size, &zp, 3704 dbuf_write_override_ready, NULL, NULL, 3705 dbuf_write_override_done, 3706 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3707 mutex_enter(&db->db_mtx); 3708 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 3709 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 3710 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 3711 mutex_exit(&db->db_mtx); 3712 } else if (db->db_state == DB_NOFILL) { 3713 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 3714 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 3715 dr->dr_zio = zio_write(zio, os->os_spa, txg, 3716 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 3717 dbuf_write_nofill_ready, NULL, NULL, 3718 dbuf_write_nofill_done, db, 3719 ZIO_PRIORITY_ASYNC_WRITE, 3720 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 3721 } else { 3722 ASSERT(arc_released(data)); 3723 3724 /* 3725 * For indirect blocks, we want to setup the children 3726 * ready callback so that we can properly handle an indirect 3727 * block that only contains holes. 3728 */ 3729 arc_done_func_t *children_ready_cb = NULL; 3730 if (db->db_level != 0) 3731 children_ready_cb = dbuf_write_children_ready; 3732 3733 dr->dr_zio = arc_write(zio, os->os_spa, txg, 3734 &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), 3735 &zp, dbuf_write_ready, children_ready_cb, 3736 dbuf_write_physdone, dbuf_write_done, db, 3737 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3738 } 3739 } 3740
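
/*
 * Editorial summary (added for orientation; not part of the original
 * source): dbuf_write() above picks one of three zio flavors for the
 * dirty record, roughly:
 *
 *	if (level == 0 && dr_override_state == DR_OVERRIDDEN)
 *		zio_write() + zio_write_override()	-- bp was produced
 *						           in open context
 *	else if (db_state == DB_NOFILL)
 *		zio_write() with ZIO_FLAG_NODATA	-- no payload
 *	else
 *		arc_write()				-- the normal path
 *
 * The resulting dr->dr_zio is not issued here; dbuf_sync_leaf() and
 * dbuf_sync_indirect() zio_nowait() it once the dirty record has been
 * hooked into its parent's zio.
 */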