1fa9e4066Sahrens /* 2fa9e4066Sahrens * CDDL HEADER START 3fa9e4066Sahrens * 4fa9e4066Sahrens * The contents of this file are subject to the terms of the 5f65e61c0Sahrens * Common Development and Distribution License (the "License"). 6f65e61c0Sahrens * You may not use this file except in compliance with the License. 7fa9e4066Sahrens * 8fa9e4066Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9fa9e4066Sahrens * or http://www.opensolaris.org/os/licensing. 10fa9e4066Sahrens * See the License for the specific language governing permissions 11fa9e4066Sahrens * and limitations under the License. 12fa9e4066Sahrens * 13fa9e4066Sahrens * When distributing Covered Code, include this CDDL HEADER in each 14fa9e4066Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15fa9e4066Sahrens * If applicable, add the following below this CDDL HEADER, with the 16fa9e4066Sahrens * fields enclosed by brackets "[]" replaced with your own identifying 17fa9e4066Sahrens * information: Portions Copyright [yyyy] [name of copyright owner] 18fa9e4066Sahrens * 19fa9e4066Sahrens * CDDL HEADER END 20fa9e4066Sahrens */ 21fa9e4066Sahrens /* 2206e0070dSMark Shellenbaum * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 233f2366c2SGordon Ross * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 2494c2d0ebSMatthew Ahrens * Copyright (c) 2012, 2017 by Delphix. All rights reserved. 25aad02571SSaso Kiselkov * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 26810e43b2SBill Pijewski * Copyright (c) 2013, Joyent, Inc. All rights reserved. 27bc9014e6SJustin Gibbs * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 28c3d26abcSMatthew Ahrens * Copyright (c) 2014 Integros [integros.com] 29fa9e4066Sahrens */ 30fa9e4066Sahrens 31fa9e4066Sahrens #include <sys/zfs_context.h> 32fa9e4066Sahrens #include <sys/dmu.h> 332f3d8780SMatthew Ahrens #include <sys/dmu_send.h> 34fa9e4066Sahrens #include <sys/dmu_impl.h> 35fa9e4066Sahrens #include <sys/dbuf.h> 36fa9e4066Sahrens #include <sys/dmu_objset.h> 37fa9e4066Sahrens #include <sys/dsl_dataset.h> 38fa9e4066Sahrens #include <sys/dsl_dir.h> 39fa9e4066Sahrens #include <sys/dmu_tx.h> 40fa9e4066Sahrens #include <sys/spa.h> 41fa9e4066Sahrens #include <sys/zio.h> 42fa9e4066Sahrens #include <sys/dmu_zfetch.h> 430a586ceaSMark Shellenbaum #include <sys/sa.h> 440a586ceaSMark Shellenbaum #include <sys/sa_impl.h> 455d7b4d43SMatthew Ahrens #include <sys/zfeature.h> 465d7b4d43SMatthew Ahrens #include <sys/blkptr.h> 47bf16b11eSMatthew Ahrens #include <sys/range_tree.h> 48dcbf3bd6SGeorge Wilson #include <sys/callb.h> 49770499e1SDan Kimmel #include <sys/abd.h> 50*5cabbc6bSPrashanth Sreenivasa #include <sys/vdev.h> 51dcbf3bd6SGeorge Wilson 52dcbf3bd6SGeorge Wilson uint_t zfs_dbuf_evict_key; 53fa9e4066Sahrens 543b2aab18SMatthew Ahrens static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); 55088f3894Sahrens static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); 56fa9e4066Sahrens 57bc9014e6SJustin Gibbs #ifndef __lint 58bc9014e6SJustin Gibbs extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu, 5940510e8eSJosef 'Jeff' Sipek dmu_buf_evict_func_t *evict_func_sync, 6040510e8eSJosef 'Jeff' Sipek dmu_buf_evict_func_t *evict_func_async, 6140510e8eSJosef 'Jeff' Sipek dmu_buf_t **clear_on_evict_dbufp); 62bc9014e6SJustin Gibbs #endif /* ! __lint */ 63bc9014e6SJustin Gibbs 64fa9e4066Sahrens /* 65fa9e4066Sahrens * Global data structures and functions for the dbuf cache. 
66fa9e4066Sahrens */ 67dcbf3bd6SGeorge Wilson static kmem_cache_t *dbuf_kmem_cache; 68bc9014e6SJustin Gibbs static taskq_t *dbu_evict_taskq; 69fa9e4066Sahrens 70dcbf3bd6SGeorge Wilson static kthread_t *dbuf_cache_evict_thread; 71dcbf3bd6SGeorge Wilson static kmutex_t dbuf_evict_lock; 72dcbf3bd6SGeorge Wilson static kcondvar_t dbuf_evict_cv; 73dcbf3bd6SGeorge Wilson static boolean_t dbuf_evict_thread_exit; 74dcbf3bd6SGeorge Wilson 75dcbf3bd6SGeorge Wilson /* 76dcbf3bd6SGeorge Wilson * LRU cache of dbufs. The dbuf cache maintains a list of dbufs that 77dcbf3bd6SGeorge Wilson * are not currently held but have been recently released. These dbufs 78dcbf3bd6SGeorge Wilson * are not eligible for arc eviction until they are aged out of the cache. 79dcbf3bd6SGeorge Wilson * Dbufs are added to the dbuf cache once the last hold is released. If a 80dcbf3bd6SGeorge Wilson * dbuf is later accessed and still exists in the dbuf cache, then it will 81dcbf3bd6SGeorge Wilson * be removed from the cache and later re-added to the head of the cache. 82dcbf3bd6SGeorge Wilson * Dbufs that are aged out of the cache will be immediately destroyed and 83dcbf3bd6SGeorge Wilson * become eligible for arc eviction. 84dcbf3bd6SGeorge Wilson */ 8594c2d0ebSMatthew Ahrens static multilist_t *dbuf_cache; 86dcbf3bd6SGeorge Wilson static refcount_t dbuf_cache_size; 87dcbf3bd6SGeorge Wilson uint64_t dbuf_cache_max_bytes = 100 * 1024 * 1024; 88dcbf3bd6SGeorge Wilson 89dcbf3bd6SGeorge Wilson /* Cap the size of the dbuf cache to log2 fraction of arc size. */ 90dcbf3bd6SGeorge Wilson int dbuf_cache_max_shift = 5; 91dcbf3bd6SGeorge Wilson 92dcbf3bd6SGeorge Wilson /* 93dcbf3bd6SGeorge Wilson * The dbuf cache uses a three-stage eviction policy: 94dcbf3bd6SGeorge Wilson * - A low water marker designates when the dbuf eviction thread 95dcbf3bd6SGeorge Wilson * should stop evicting from the dbuf cache. 96dcbf3bd6SGeorge Wilson * - When we reach the maximum size (aka mid water mark), we 97dcbf3bd6SGeorge Wilson * signal the eviction thread to run. 98dcbf3bd6SGeorge Wilson * - The high water mark indicates when the eviction thread 99dcbf3bd6SGeorge Wilson * is unable to keep up with the incoming load and eviction must 100dcbf3bd6SGeorge Wilson * happen in the context of the calling thread. 101dcbf3bd6SGeorge Wilson * 102dcbf3bd6SGeorge Wilson * The dbuf cache: 103dcbf3bd6SGeorge Wilson * (max size) 104dcbf3bd6SGeorge Wilson * low water mid water hi water 105dcbf3bd6SGeorge Wilson * +----------------------------------------+----------+----------+ 106dcbf3bd6SGeorge Wilson * | | | | 107dcbf3bd6SGeorge Wilson * | | | | 108dcbf3bd6SGeorge Wilson * | | | | 109dcbf3bd6SGeorge Wilson * | | | | 110dcbf3bd6SGeorge Wilson * +----------------------------------------+----------+----------+ 111dcbf3bd6SGeorge Wilson * stop signal evict 112dcbf3bd6SGeorge Wilson * evicting eviction directly 113dcbf3bd6SGeorge Wilson * thread 114dcbf3bd6SGeorge Wilson * 115dcbf3bd6SGeorge Wilson * The high and low water marks indicate the operating range for the eviction 116dcbf3bd6SGeorge Wilson * thread. The low water mark is, by default, 90% of the total size of the 117dcbf3bd6SGeorge Wilson * cache and the high water mark is at 110% (both of these percentages can be 118dcbf3bd6SGeorge Wilson * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct, 119dcbf3bd6SGeorge Wilson * respectively). 
The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
        dmu_buf_impl_t *db = vdb;
        bzero(db, sizeof (dmu_buf_impl_t));

        mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
        multilist_link_init(&db->db_cache_link);
        refcount_create(&db->db_holds);

        return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
        dmu_buf_impl_t *db = vdb;
        mutex_destroy(&db->db_mtx);
        cv_destroy(&db->db_changed);
        ASSERT(!multilist_link_active(&db->db_cache_link));
        refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
        uintptr_t osv = (uintptr_t)os;
        uint64_t crc = -1ULL;

        ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

        crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);

        return (crc);
}
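
/*
 * Illustrative sketch (for exposition only, not compiled code): the 64-bit
 * value returned by dbuf_hash() is consumed in two ways in this file.  The
 * hash table below masks it down to a bucket index, and the dbuf cache
 * multilist reduces it modulo the number of sublists (see
 * dbuf_cache_multilist_index_func()):
 *
 *      uint64_t hv = dbuf_hash(os, obj, level, blkid);
 *      uint64_t bucket = hv & dbuf_hash_table.hash_table_mask;
 *      unsigned int sublist = hv % multilist_get_num_sublists(dbuf_cache);
 *
 * Because a dbuf's (objset, object, level, blkid) tuple never changes, both
 * indices can be recomputed at any time and need not be stored in the
 * dmu_buf_impl_t.
 */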
188fa9e4066Sahrens 189fa9e4066Sahrens #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \ 190fa9e4066Sahrens ((dbuf)->db.db_object == (obj) && \ 191fa9e4066Sahrens (dbuf)->db_objset == (os) && \ 192fa9e4066Sahrens (dbuf)->db_level == (level) && \ 193fa9e4066Sahrens (dbuf)->db_blkid == (blkid)) 194fa9e4066Sahrens 195fa9e4066Sahrens dmu_buf_impl_t * 196e57a022bSJustin T. Gibbs dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid) 197fa9e4066Sahrens { 198fa9e4066Sahrens dbuf_hash_table_t *h = &dbuf_hash_table; 199dcbf3bd6SGeorge Wilson uint64_t hv = dbuf_hash(os, obj, level, blkid); 200fa9e4066Sahrens uint64_t idx = hv & h->hash_table_mask; 201fa9e4066Sahrens dmu_buf_impl_t *db; 202fa9e4066Sahrens 203fa9e4066Sahrens mutex_enter(DBUF_HASH_MUTEX(h, idx)); 204fa9e4066Sahrens for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) { 205fa9e4066Sahrens if (DBUF_EQUAL(db, os, obj, level, blkid)) { 206fa9e4066Sahrens mutex_enter(&db->db_mtx); 207ea8dc4b6Seschrock if (db->db_state != DB_EVICTING) { 208fa9e4066Sahrens mutex_exit(DBUF_HASH_MUTEX(h, idx)); 209fa9e4066Sahrens return (db); 210fa9e4066Sahrens } 211fa9e4066Sahrens mutex_exit(&db->db_mtx); 212fa9e4066Sahrens } 213fa9e4066Sahrens } 214fa9e4066Sahrens mutex_exit(DBUF_HASH_MUTEX(h, idx)); 215fa9e4066Sahrens return (NULL); 216fa9e4066Sahrens } 217fa9e4066Sahrens 218e57a022bSJustin T. Gibbs static dmu_buf_impl_t * 219e57a022bSJustin T. Gibbs dbuf_find_bonus(objset_t *os, uint64_t object) 220e57a022bSJustin T. Gibbs { 221e57a022bSJustin T. Gibbs dnode_t *dn; 222e57a022bSJustin T. Gibbs dmu_buf_impl_t *db = NULL; 223e57a022bSJustin T. Gibbs 224e57a022bSJustin T. Gibbs if (dnode_hold(os, object, FTAG, &dn) == 0) { 225e57a022bSJustin T. Gibbs rw_enter(&dn->dn_struct_rwlock, RW_READER); 226e57a022bSJustin T. Gibbs if (dn->dn_bonus != NULL) { 227e57a022bSJustin T. Gibbs db = dn->dn_bonus; 228e57a022bSJustin T. Gibbs mutex_enter(&db->db_mtx); 229e57a022bSJustin T. Gibbs } 230e57a022bSJustin T. Gibbs rw_exit(&dn->dn_struct_rwlock); 231e57a022bSJustin T. Gibbs dnode_rele(dn, FTAG); 232e57a022bSJustin T. Gibbs } 233e57a022bSJustin T. Gibbs return (db); 234e57a022bSJustin T. Gibbs } 235e57a022bSJustin T. Gibbs 236fa9e4066Sahrens /* 237fa9e4066Sahrens * Insert an entry into the hash table. If there is already an element 238fa9e4066Sahrens * equal to elem in the hash table, then the already existing element 239fa9e4066Sahrens * will be returned and the new element will not be inserted. 240fa9e4066Sahrens * Otherwise returns NULL. 
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        objset_t *os = db->db_objset;
        uint64_t obj = db->db.db_object;
        int level = db->db_level;
        uint64_t blkid = db->db_blkid;
        uint64_t hv = dbuf_hash(os, obj, level, blkid);
        uint64_t idx = hv & h->hash_table_mask;
        dmu_buf_impl_t *dbf;

        mutex_enter(DBUF_HASH_MUTEX(h, idx));
        for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
                if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
                        mutex_enter(&dbf->db_mtx);
                        if (dbf->db_state != DB_EVICTING) {
                                mutex_exit(DBUF_HASH_MUTEX(h, idx));
                                return (dbf);
                        }
                        mutex_exit(&dbf->db_mtx);
                }
        }

        mutex_enter(&db->db_mtx);
        db->db_hash_next = h->hash_table[idx];
        h->hash_table[idx] = db;
        mutex_exit(DBUF_HASH_MUTEX(h, idx));
        atomic_inc_64(&dbuf_hash_count);

        return (NULL);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object,
            db->db_level, db->db_blkid);
        uint64_t idx = hv & h->hash_table_mask;
        dmu_buf_impl_t *dbf, **dbp;

        /*
         * We mustn't hold db_mtx to maintain lock ordering:
         * DBUF_HASH_MUTEX > db_mtx.
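         *
         * As an illustrative sketch of that ordering (this mirrors what
         * dbuf_find() and dbuf_hash_insert() already do), a thread that
         * needs both locks must acquire the hash mutex first:
         *
         *      mutex_enter(DBUF_HASH_MUTEX(h, idx));   (hash mutex first)
         *      mutex_enter(&db->db_mtx);               (dbuf mutex second)
         *
         * Taking the hash mutex while already holding db_mtx here could
         * therefore deadlock against such a thread.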
290fa9e4066Sahrens */ 291fa9e4066Sahrens ASSERT(refcount_is_zero(&db->db_holds)); 292ea8dc4b6Seschrock ASSERT(db->db_state == DB_EVICTING); 293fa9e4066Sahrens ASSERT(!MUTEX_HELD(&db->db_mtx)); 294fa9e4066Sahrens 295fa9e4066Sahrens mutex_enter(DBUF_HASH_MUTEX(h, idx)); 296fa9e4066Sahrens dbp = &h->hash_table[idx]; 297fa9e4066Sahrens while ((dbf = *dbp) != db) { 298fa9e4066Sahrens dbp = &dbf->db_hash_next; 299fa9e4066Sahrens ASSERT(dbf != NULL); 300fa9e4066Sahrens } 301fa9e4066Sahrens *dbp = db->db_hash_next; 302fa9e4066Sahrens db->db_hash_next = NULL; 303fa9e4066Sahrens mutex_exit(DBUF_HASH_MUTEX(h, idx)); 3041a5e258fSJosef 'Jeff' Sipek atomic_dec_64(&dbuf_hash_count); 305fa9e4066Sahrens } 306fa9e4066Sahrens 307bc9014e6SJustin Gibbs typedef enum { 308bc9014e6SJustin Gibbs DBVU_EVICTING, 309bc9014e6SJustin Gibbs DBVU_NOT_EVICTING 310bc9014e6SJustin Gibbs } dbvu_verify_type_t; 311bc9014e6SJustin Gibbs 312bc9014e6SJustin Gibbs static void 313bc9014e6SJustin Gibbs dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type) 314bc9014e6SJustin Gibbs { 315bc9014e6SJustin Gibbs #ifdef ZFS_DEBUG 316bc9014e6SJustin Gibbs int64_t holds; 317bc9014e6SJustin Gibbs 318bc9014e6SJustin Gibbs if (db->db_user == NULL) 319bc9014e6SJustin Gibbs return; 320bc9014e6SJustin Gibbs 321bc9014e6SJustin Gibbs /* Only data blocks support the attachment of user data. */ 322bc9014e6SJustin Gibbs ASSERT(db->db_level == 0); 323bc9014e6SJustin Gibbs 324bc9014e6SJustin Gibbs /* Clients must resolve a dbuf before attaching user data. */ 325bc9014e6SJustin Gibbs ASSERT(db->db.db_data != NULL); 326bc9014e6SJustin Gibbs ASSERT3U(db->db_state, ==, DB_CACHED); 327bc9014e6SJustin Gibbs 328bc9014e6SJustin Gibbs holds = refcount_count(&db->db_holds); 329bc9014e6SJustin Gibbs if (verify_type == DBVU_EVICTING) { 330bc9014e6SJustin Gibbs /* 331bc9014e6SJustin Gibbs * Immediate eviction occurs when holds == dirtycnt. 332bc9014e6SJustin Gibbs * For normal eviction buffers, holds is zero on 333bc9014e6SJustin Gibbs * eviction, except when dbuf_fix_old_data() calls 334bc9014e6SJustin Gibbs * dbuf_clear_data(). However, the hold count can grow 335bc9014e6SJustin Gibbs * during eviction even though db_mtx is held (see 336bc9014e6SJustin Gibbs * dmu_bonus_hold() for an example), so we can only 337bc9014e6SJustin Gibbs * test the generic invariant that holds >= dirtycnt. 338bc9014e6SJustin Gibbs */ 339bc9014e6SJustin Gibbs ASSERT3U(holds, >=, db->db_dirtycnt); 340bc9014e6SJustin Gibbs } else { 341d2058105SJustin T. 
Gibbs if (db->db_user_immediate_evict == TRUE) 342bc9014e6SJustin Gibbs ASSERT3U(holds, >=, db->db_dirtycnt); 343bc9014e6SJustin Gibbs else 344bc9014e6SJustin Gibbs ASSERT3U(holds, >, 0); 345bc9014e6SJustin Gibbs } 346bc9014e6SJustin Gibbs #endif 347bc9014e6SJustin Gibbs } 348bc9014e6SJustin Gibbs 349fa9e4066Sahrens static void 350fa9e4066Sahrens dbuf_evict_user(dmu_buf_impl_t *db) 351fa9e4066Sahrens { 352bc9014e6SJustin Gibbs dmu_buf_user_t *dbu = db->db_user; 353bc9014e6SJustin Gibbs 354fa9e4066Sahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 355fa9e4066Sahrens 356bc9014e6SJustin Gibbs if (dbu == NULL) 357fa9e4066Sahrens return; 358fa9e4066Sahrens 359bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_EVICTING); 360bc9014e6SJustin Gibbs db->db_user = NULL; 361bc9014e6SJustin Gibbs 362bc9014e6SJustin Gibbs #ifdef ZFS_DEBUG 363bc9014e6SJustin Gibbs if (dbu->dbu_clear_on_evict_dbufp != NULL) 364bc9014e6SJustin Gibbs *dbu->dbu_clear_on_evict_dbufp = NULL; 365bc9014e6SJustin Gibbs #endif 366bc9014e6SJustin Gibbs 367bc9014e6SJustin Gibbs /* 36840510e8eSJosef 'Jeff' Sipek * There are two eviction callbacks - one that we call synchronously 36940510e8eSJosef 'Jeff' Sipek * and one that we invoke via a taskq. The async one is useful for 37040510e8eSJosef 'Jeff' Sipek * avoiding lock order reversals and limiting stack depth. 37140510e8eSJosef 'Jeff' Sipek * 37240510e8eSJosef 'Jeff' Sipek * Note that if we have a sync callback but no async callback, 37340510e8eSJosef 'Jeff' Sipek * it's likely that the sync callback will free the structure 37440510e8eSJosef 'Jeff' Sipek * containing the dbu. In that case we need to take care to not 37540510e8eSJosef 'Jeff' Sipek * dereference dbu after calling the sync evict func. 376bc9014e6SJustin Gibbs */ 37740510e8eSJosef 'Jeff' Sipek boolean_t has_async = (dbu->dbu_evict_func_async != NULL); 37840510e8eSJosef 'Jeff' Sipek 37940510e8eSJosef 'Jeff' Sipek if (dbu->dbu_evict_func_sync != NULL) 38040510e8eSJosef 'Jeff' Sipek dbu->dbu_evict_func_sync(dbu); 38140510e8eSJosef 'Jeff' Sipek 38240510e8eSJosef 'Jeff' Sipek if (has_async) { 38340510e8eSJosef 'Jeff' Sipek taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async, 38440510e8eSJosef 'Jeff' Sipek dbu, 0, &dbu->dbu_tqent); 38540510e8eSJosef 'Jeff' Sipek } 386fa9e4066Sahrens } 387fa9e4066Sahrens 388744947dcSTom Erickson boolean_t 389744947dcSTom Erickson dbuf_is_metadata(dmu_buf_impl_t *db) 390744947dcSTom Erickson { 391744947dcSTom Erickson if (db->db_level > 0) { 392744947dcSTom Erickson return (B_TRUE); 393744947dcSTom Erickson } else { 394744947dcSTom Erickson boolean_t is_metadata; 395744947dcSTom Erickson 396744947dcSTom Erickson DB_DNODE_ENTER(db); 397ad135b5dSChristopher Siden is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type); 398744947dcSTom Erickson DB_DNODE_EXIT(db); 399744947dcSTom Erickson 400744947dcSTom Erickson return (is_metadata); 401744947dcSTom Erickson } 402744947dcSTom Erickson } 403744947dcSTom Erickson 404dcbf3bd6SGeorge Wilson /* 405dcbf3bd6SGeorge Wilson * This function *must* return indices evenly distributed between all 406dcbf3bd6SGeorge Wilson * sublists of the multilist. This is needed due to how the dbuf eviction 407dcbf3bd6SGeorge Wilson * code is laid out; dbuf_evict_thread() assumes dbufs are evenly 408dcbf3bd6SGeorge Wilson * distributed between all sublists and uses this assumption when 409dcbf3bd6SGeorge Wilson * deciding which sublist to evict from and how much to evict from it. 
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
        dmu_buf_impl_t *db = obj;

        /*
         * The assumption here is that the hash value for a given
         * dmu_buf_impl_t will remain constant throughout its lifetime
         * (i.e. its objset, object, level and blkid fields don't change).
         * Thus, we don't need to store the dbuf's sublist index
         * on insertion, as this index can be recalculated on removal.
         *
         * Also, the low order bits of the hash value are thought to be
         * distributed evenly. Otherwise, in the case that the multilist
         * has a power of two number of sublists, each sublist's usage
         * would not be evenly distributed.
         */
        return (dbuf_hash(db->db_objset, db->db.db_object,
            db->db_level, db->db_blkid) %
            multilist_get_num_sublists(ml));
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
        uint64_t dbuf_cache_hiwater_bytes =
            (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

        return (refcount_count(&dbuf_cache_size) >
            dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
        uint64_t dbuf_cache_lowater_bytes =
            (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

        return (refcount_count(&dbuf_cache_size) >
            dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
        int idx = multilist_get_random_index(dbuf_cache);
        multilist_sublist_t *mls = multilist_sublist_lock(dbuf_cache, idx);

        ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

        /*
         * Set the thread's tsd to indicate that it's processing evictions.
         * Once a thread stops evicting from the dbuf cache it will
         * reset its tsd to NULL.
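         *
         * A minimal sketch of the guard pattern used by this function and
         * by dbuf_evict_notify() (illustrative only):
         *
         *      if (tsd_get(zfs_dbuf_evict_key) != NULL)
         *              return;         (this thread is already evicting)
         *      (void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);
         *      ... evict a single dbuf ...
         *      (void) tsd_set(zfs_dbuf_evict_key, NULL);
         *
         * This keeps the dbuf_rele() calls made from dbuf_destroy() from
         * re-entering eviction via dbuf_evict_notify() on the same stack.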
         */
        ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
        (void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);

        dmu_buf_impl_t *db = multilist_sublist_tail(mls);
        while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
                db = multilist_sublist_prev(mls, db);
        }

        DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
            multilist_sublist_t *, mls);

        if (db != NULL) {
                multilist_sublist_remove(mls, db);
                multilist_sublist_unlock(mls);
                (void) refcount_remove_many(&dbuf_cache_size,
                    db->db.db_size, db);
                dbuf_destroy(db);
        } else {
                multilist_sublist_unlock(mls);
        }
        (void) tsd_set(zfs_dbuf_evict_key, NULL);
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once a dbuf is aged
 * out of the cache, it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
        callb_cpr_t cpr;

        CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

        mutex_enter(&dbuf_evict_lock);
        while (!dbuf_evict_thread_exit) {
                while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
                        CALLB_CPR_SAFE_BEGIN(&cpr);
                        (void) cv_timedwait_hires(&dbuf_evict_cv,
                            &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
                        CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
                }
                mutex_exit(&dbuf_evict_lock);

                /*
                 * Keep evicting as long as we're above the low water mark
                 * for the cache. We do this without holding the locks to
                 * minimize lock contention.
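                 *
                 * As a worked example (using the tunables' defaults and
                 * ignoring the ARC-based cap applied in dbuf_init()): with
                 * dbuf_cache_max_bytes at 100MB and a lowater/hiwater pct
                 * of 10, this loop keeps evicting until the cache drops
                 * below 100MB - 10MB = 90MB, while dbuf_cache_above_hiwater()
                 * makes callers evict directly once the cache exceeds
                 * 100MB + 10MB = 110MB.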
                 */
                while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
                        dbuf_evict_one();
                }

                mutex_enter(&dbuf_evict_lock);
        }

        dbuf_evict_thread_exit = B_FALSE;
        cv_broadcast(&dbuf_evict_cv);
        CALLB_CPR_EXIT(&cpr);   /* drops dbuf_evict_lock */
        thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{

        /*
         * We use thread-specific data to track when a thread has
         * started processing evictions. This allows us to avoid deeply
         * nested stacks that would have a call flow similar to this:
         *
         * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
         *      ^                                               |
         *      |                                               |
         *      +-----dbuf_destroy()<--dbuf_evict_one()<--------+
         *
         * The dbuf_eviction_thread will always have its tsd set until
         * that thread exits. All other threads will only set their tsd
         * if they are participating in the eviction process. This only
         * happens if the eviction thread is unable to process evictions
         * fast enough. To keep the dbuf cache size in check, other threads
         * can evict from the dbuf cache directly. Those threads will set
         * their tsd values so that they evict only one dbuf from the
         * dbuf cache.
         */
        if (tsd_get(zfs_dbuf_evict_key) != NULL)
                return;

        /*
         * We check if we should evict without holding the dbuf_evict_lock,
         * because it's OK to occasionally make the wrong decision here,
         * and grabbing the lock results in massive lock contention.
         */
        if (refcount_count(&dbuf_cache_size) > dbuf_cache_max_bytes) {
                if (dbuf_cache_above_hiwater())
                        dbuf_evict_one();
                cv_signal(&dbuf_evict_cv);
        }
}

void
dbuf_init(void)
{
        uint64_t hsize = 1ULL << 16;
        dbuf_hash_table_t *h = &dbuf_hash_table;
        int i;

        /*
         * The hash table is big enough to fill all of physical memory
         * with an average 4K block size.
The table will take up 588ea8dc4b6Seschrock * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers). 589fa9e4066Sahrens */ 590ea8dc4b6Seschrock while (hsize * 4096 < physmem * PAGESIZE) 591fa9e4066Sahrens hsize <<= 1; 592fa9e4066Sahrens 593ea8dc4b6Seschrock retry: 594fa9e4066Sahrens h->hash_table_mask = hsize - 1; 595ea8dc4b6Seschrock h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); 596ea8dc4b6Seschrock if (h->hash_table == NULL) { 597ea8dc4b6Seschrock /* XXX - we should really return an error instead of assert */ 598ea8dc4b6Seschrock ASSERT(hsize > (1ULL << 10)); 599ea8dc4b6Seschrock hsize >>= 1; 600ea8dc4b6Seschrock goto retry; 601ea8dc4b6Seschrock } 602fa9e4066Sahrens 603dcbf3bd6SGeorge Wilson dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t", 604fa9e4066Sahrens sizeof (dmu_buf_impl_t), 605fa9e4066Sahrens 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0); 606fa9e4066Sahrens 607fa9e4066Sahrens for (i = 0; i < DBUF_MUTEXES; i++) 608fa9e4066Sahrens mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL); 609bc9014e6SJustin Gibbs 610dcbf3bd6SGeorge Wilson /* 611dcbf3bd6SGeorge Wilson * Setup the parameters for the dbuf cache. We cap the size of the 612dcbf3bd6SGeorge Wilson * dbuf cache to 1/32nd (default) of the size of the ARC. 613dcbf3bd6SGeorge Wilson */ 614dcbf3bd6SGeorge Wilson dbuf_cache_max_bytes = MIN(dbuf_cache_max_bytes, 615dcbf3bd6SGeorge Wilson arc_max_bytes() >> dbuf_cache_max_shift); 616dcbf3bd6SGeorge Wilson 617bc9014e6SJustin Gibbs /* 618bc9014e6SJustin Gibbs * All entries are queued via taskq_dispatch_ent(), so min/maxalloc 619bc9014e6SJustin Gibbs * configuration is not required. 620bc9014e6SJustin Gibbs */ 621bc9014e6SJustin Gibbs dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0); 622dcbf3bd6SGeorge Wilson 62394c2d0ebSMatthew Ahrens dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t), 624dcbf3bd6SGeorge Wilson offsetof(dmu_buf_impl_t, db_cache_link), 625dcbf3bd6SGeorge Wilson dbuf_cache_multilist_index_func); 626dcbf3bd6SGeorge Wilson refcount_create(&dbuf_cache_size); 627dcbf3bd6SGeorge Wilson 628dcbf3bd6SGeorge Wilson tsd_create(&zfs_dbuf_evict_key, NULL); 629dcbf3bd6SGeorge Wilson dbuf_evict_thread_exit = B_FALSE; 630dcbf3bd6SGeorge Wilson mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL); 631dcbf3bd6SGeorge Wilson cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL); 632dcbf3bd6SGeorge Wilson dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread, 633dcbf3bd6SGeorge Wilson NULL, 0, &p0, TS_RUN, minclsyspri); 634fa9e4066Sahrens } 635fa9e4066Sahrens 636fa9e4066Sahrens void 637fa9e4066Sahrens dbuf_fini(void) 638fa9e4066Sahrens { 639fa9e4066Sahrens dbuf_hash_table_t *h = &dbuf_hash_table; 640fa9e4066Sahrens int i; 641fa9e4066Sahrens 642fa9e4066Sahrens for (i = 0; i < DBUF_MUTEXES; i++) 643fa9e4066Sahrens mutex_destroy(&h->hash_mutexes[i]); 644fa9e4066Sahrens kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 645dcbf3bd6SGeorge Wilson kmem_cache_destroy(dbuf_kmem_cache); 646bc9014e6SJustin Gibbs taskq_destroy(dbu_evict_taskq); 647dcbf3bd6SGeorge Wilson 648dcbf3bd6SGeorge Wilson mutex_enter(&dbuf_evict_lock); 649dcbf3bd6SGeorge Wilson dbuf_evict_thread_exit = B_TRUE; 650dcbf3bd6SGeorge Wilson while (dbuf_evict_thread_exit) { 651dcbf3bd6SGeorge Wilson cv_signal(&dbuf_evict_cv); 652dcbf3bd6SGeorge Wilson cv_wait(&dbuf_evict_cv, &dbuf_evict_lock); 653dcbf3bd6SGeorge Wilson } 654dcbf3bd6SGeorge Wilson mutex_exit(&dbuf_evict_lock); 655dcbf3bd6SGeorge Wilson tsd_destroy(&zfs_dbuf_evict_key); 
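
        /*
         * At this point the eviction thread has acknowledged the exit
         * request (see the loop above, which waits for it to clear
         * dbuf_evict_thread_exit), so the lock, cv, refcount and
         * multilist it used can be torn down below.
         */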
656dcbf3bd6SGeorge Wilson 657dcbf3bd6SGeorge Wilson mutex_destroy(&dbuf_evict_lock); 658dcbf3bd6SGeorge Wilson cv_destroy(&dbuf_evict_cv); 659dcbf3bd6SGeorge Wilson 660dcbf3bd6SGeorge Wilson refcount_destroy(&dbuf_cache_size); 66194c2d0ebSMatthew Ahrens multilist_destroy(dbuf_cache); 662fa9e4066Sahrens } 663fa9e4066Sahrens 664fa9e4066Sahrens /* 665fa9e4066Sahrens * Other stuff. 666fa9e4066Sahrens */ 667fa9e4066Sahrens 6689c9dc39aSek #ifdef ZFS_DEBUG 669fa9e4066Sahrens static void 670fa9e4066Sahrens dbuf_verify(dmu_buf_impl_t *db) 671fa9e4066Sahrens { 672744947dcSTom Erickson dnode_t *dn; 673b24ab676SJeff Bonwick dbuf_dirty_record_t *dr; 674fa9e4066Sahrens 675fa9e4066Sahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 676fa9e4066Sahrens 677fa9e4066Sahrens if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) 678fa9e4066Sahrens return; 679fa9e4066Sahrens 680fa9e4066Sahrens ASSERT(db->db_objset != NULL); 681744947dcSTom Erickson DB_DNODE_ENTER(db); 682744947dcSTom Erickson dn = DB_DNODE(db); 683fa9e4066Sahrens if (dn == NULL) { 684fa9e4066Sahrens ASSERT(db->db_parent == NULL); 685fa9e4066Sahrens ASSERT(db->db_blkptr == NULL); 686fa9e4066Sahrens } else { 687fa9e4066Sahrens ASSERT3U(db->db.db_object, ==, dn->dn_object); 688fa9e4066Sahrens ASSERT3P(db->db_objset, ==, dn->dn_objset); 689fa9e4066Sahrens ASSERT3U(db->db_level, <, dn->dn_nlevels); 690744947dcSTom Erickson ASSERT(db->db_blkid == DMU_BONUS_BLKID || 691744947dcSTom Erickson db->db_blkid == DMU_SPILL_BLKID || 6920f6d88adSAlex Reece !avl_is_empty(&dn->dn_dbufs)); 693fa9e4066Sahrens } 6940a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 695fa9e4066Sahrens ASSERT(dn != NULL); 6961934e92fSmaybee ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 6970a586ceaSMark Shellenbaum ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID); 6980a586ceaSMark Shellenbaum } else if (db->db_blkid == DMU_SPILL_BLKID) { 6990a586ceaSMark Shellenbaum ASSERT(dn != NULL); 7000a586ceaSMark Shellenbaum ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 701fb09f5aaSMadhav Suresh ASSERT0(db->db.db_offset); 702fa9e4066Sahrens } else { 703fa9e4066Sahrens ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); 704fa9e4066Sahrens } 705fa9e4066Sahrens 706b24ab676SJeff Bonwick for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next) 707b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 708b24ab676SJeff Bonwick 709b24ab676SJeff Bonwick for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next) 710b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 711b24ab676SJeff Bonwick 71288b7b0f2SMatthew Ahrens /* 71388b7b0f2SMatthew Ahrens * We can't assert that db_size matches dn_datablksz because it 71488b7b0f2SMatthew Ahrens * can be momentarily different when another thread is doing 71588b7b0f2SMatthew Ahrens * dnode_set_blksz(). 71688b7b0f2SMatthew Ahrens */ 71788b7b0f2SMatthew Ahrens if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { 718b24ab676SJeff Bonwick dr = db->db_data_pending; 71988b7b0f2SMatthew Ahrens /* 72088b7b0f2SMatthew Ahrens * It should only be modified in syncing context, so 72188b7b0f2SMatthew Ahrens * make sure we only have one copy of the data. 
72288b7b0f2SMatthew Ahrens */ 72388b7b0f2SMatthew Ahrens ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); 724fa9e4066Sahrens } 725fa9e4066Sahrens 726fa9e4066Sahrens /* verify db->db_blkptr */ 727fa9e4066Sahrens if (db->db_blkptr) { 728fa9e4066Sahrens if (db->db_parent == dn->dn_dbuf) { 729fa9e4066Sahrens /* db is pointed to by the dnode */ 730fa9e4066Sahrens /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ 73114843421SMatthew Ahrens if (DMU_OBJECT_IS_SPECIAL(db->db.db_object)) 732fa9e4066Sahrens ASSERT(db->db_parent == NULL); 733fa9e4066Sahrens else 734fa9e4066Sahrens ASSERT(db->db_parent != NULL); 7350a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 7360a586ceaSMark Shellenbaum ASSERT3P(db->db_blkptr, ==, 7370a586ceaSMark Shellenbaum &dn->dn_phys->dn_blkptr[db->db_blkid]); 738fa9e4066Sahrens } else { 739fa9e4066Sahrens /* db is pointed to by an indirect block */ 740fa9e4066Sahrens int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; 741fa9e4066Sahrens ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); 742fa9e4066Sahrens ASSERT3U(db->db_parent->db.db_object, ==, 743fa9e4066Sahrens db->db.db_object); 744fa9e4066Sahrens /* 745fa9e4066Sahrens * dnode_grow_indblksz() can make this fail if we don't 746fa9e4066Sahrens * have the struct_rwlock. XXX indblksz no longer 747fa9e4066Sahrens * grows. safe to do this now? 748fa9e4066Sahrens */ 749744947dcSTom Erickson if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 750fa9e4066Sahrens ASSERT3P(db->db_blkptr, ==, 751fa9e4066Sahrens ((blkptr_t *)db->db_parent->db.db_data + 752fa9e4066Sahrens db->db_blkid % epb)); 753fa9e4066Sahrens } 754fa9e4066Sahrens } 755fa9e4066Sahrens } 756fa9e4066Sahrens if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && 7573f9d6ad7SLin Ling (db->db_buf == NULL || db->db_buf->b_data) && 7580a586ceaSMark Shellenbaum db->db.db_data && db->db_blkid != DMU_BONUS_BLKID && 759fa9e4066Sahrens db->db_state != DB_FILL && !dn->dn_free_txg) { 760fa9e4066Sahrens /* 761fa9e4066Sahrens * If the blkptr isn't set but they have nonzero data, 762fa9e4066Sahrens * it had better be dirty, otherwise we'll lose that 763fa9e4066Sahrens * data when we evict this buffer. 7648df0bcf0SPaul Dagnelie * 7658df0bcf0SPaul Dagnelie * There is an exception to this rule for indirect blocks; in 7668df0bcf0SPaul Dagnelie * this case, if the indirect block is a hole, we fill in a few 7678df0bcf0SPaul Dagnelie * fields on each of the child blocks (importantly, birth time) 7688df0bcf0SPaul Dagnelie * to prevent hole birth times from being lost when you 7698df0bcf0SPaul Dagnelie * partially fill in a hole. 770fa9e4066Sahrens */ 771fa9e4066Sahrens if (db->db_dirtycnt == 0) { 7728df0bcf0SPaul Dagnelie if (db->db_level == 0) { 7738df0bcf0SPaul Dagnelie uint64_t *buf = db->db.db_data; 7748df0bcf0SPaul Dagnelie int i; 775fa9e4066Sahrens 7768df0bcf0SPaul Dagnelie for (i = 0; i < db->db.db_size >> 3; i++) { 7778df0bcf0SPaul Dagnelie ASSERT(buf[i] == 0); 7788df0bcf0SPaul Dagnelie } 7798df0bcf0SPaul Dagnelie } else { 7808df0bcf0SPaul Dagnelie blkptr_t *bps = db->db.db_data; 7818df0bcf0SPaul Dagnelie ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==, 7828df0bcf0SPaul Dagnelie db->db.db_size); 7838df0bcf0SPaul Dagnelie /* 7848df0bcf0SPaul Dagnelie * We want to verify that all the blkptrs in the 7858df0bcf0SPaul Dagnelie * indirect block are holes, but we may have 7868df0bcf0SPaul Dagnelie * automatically set up a few fields for them. 
7878df0bcf0SPaul Dagnelie * We iterate through each blkptr and verify 7888df0bcf0SPaul Dagnelie * they only have those fields set. 7898df0bcf0SPaul Dagnelie */ 7908df0bcf0SPaul Dagnelie for (int i = 0; 7918df0bcf0SPaul Dagnelie i < db->db.db_size / sizeof (blkptr_t); 7928df0bcf0SPaul Dagnelie i++) { 7938df0bcf0SPaul Dagnelie blkptr_t *bp = &bps[i]; 7948df0bcf0SPaul Dagnelie ASSERT(ZIO_CHECKSUM_IS_ZERO( 7958df0bcf0SPaul Dagnelie &bp->blk_cksum)); 7968df0bcf0SPaul Dagnelie ASSERT( 7978df0bcf0SPaul Dagnelie DVA_IS_EMPTY(&bp->blk_dva[0]) && 7988df0bcf0SPaul Dagnelie DVA_IS_EMPTY(&bp->blk_dva[1]) && 7998df0bcf0SPaul Dagnelie DVA_IS_EMPTY(&bp->blk_dva[2])); 8008df0bcf0SPaul Dagnelie ASSERT0(bp->blk_fill); 8018df0bcf0SPaul Dagnelie ASSERT0(bp->blk_pad[0]); 8028df0bcf0SPaul Dagnelie ASSERT0(bp->blk_pad[1]); 8038df0bcf0SPaul Dagnelie ASSERT(!BP_IS_EMBEDDED(bp)); 8048df0bcf0SPaul Dagnelie ASSERT(BP_IS_HOLE(bp)); 8058df0bcf0SPaul Dagnelie ASSERT0(bp->blk_phys_birth); 8068df0bcf0SPaul Dagnelie } 807fa9e4066Sahrens } 808fa9e4066Sahrens } 809fa9e4066Sahrens } 810744947dcSTom Erickson DB_DNODE_EXIT(db); 811fa9e4066Sahrens } 8129c9dc39aSek #endif 813fa9e4066Sahrens 814bc9014e6SJustin Gibbs static void 815bc9014e6SJustin Gibbs dbuf_clear_data(dmu_buf_impl_t *db) 816bc9014e6SJustin Gibbs { 817bc9014e6SJustin Gibbs ASSERT(MUTEX_HELD(&db->db_mtx)); 818bc9014e6SJustin Gibbs dbuf_evict_user(db); 819dcbf3bd6SGeorge Wilson ASSERT3P(db->db_buf, ==, NULL); 820bc9014e6SJustin Gibbs db->db.db_data = NULL; 821bc9014e6SJustin Gibbs if (db->db_state != DB_NOFILL) 822bc9014e6SJustin Gibbs db->db_state = DB_UNCACHED; 823bc9014e6SJustin Gibbs } 824bc9014e6SJustin Gibbs 825fa9e4066Sahrens static void 826fa9e4066Sahrens dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) 827fa9e4066Sahrens { 828fa9e4066Sahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 829bc9014e6SJustin Gibbs ASSERT(buf != NULL); 830bc9014e6SJustin Gibbs 831fa9e4066Sahrens db->db_buf = buf; 832bc9014e6SJustin Gibbs ASSERT(buf->b_data != NULL); 833bc9014e6SJustin Gibbs db->db.db_data = buf->b_data; 834fa9e4066Sahrens } 835fa9e4066Sahrens 836c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /* 837c242f9a0Schunli zhang - Sun Microsystems - Irvine United States * Loan out an arc_buf for read. Return the loaned arc_buf. 
838c242f9a0Schunli zhang - Sun Microsystems - Irvine United States */ 839c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t * 840c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dbuf_loan_arcbuf(dmu_buf_impl_t *db) 841c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 842c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t *abuf; 843c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 844dcbf3bd6SGeorge Wilson ASSERT(db->db_blkid != DMU_BONUS_BLKID); 845c242f9a0Schunli zhang - Sun Microsystems - Irvine United States mutex_enter(&db->db_mtx); 846c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) { 847c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int blksz = db->db.db_size; 84843466aaeSMax Grossman spa_t *spa = db->db_objset->os_spa; 849744947dcSTom Erickson 850c242f9a0Schunli zhang - Sun Microsystems - Irvine United States mutex_exit(&db->db_mtx); 8515602294fSDan Kimmel abuf = arc_loan_buf(spa, B_FALSE, blksz); 852c242f9a0Schunli zhang - Sun Microsystems - Irvine United States bcopy(db->db.db_data, abuf->b_data, blksz); 853c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } else { 854c242f9a0Schunli zhang - Sun Microsystems - Irvine United States abuf = db->db_buf; 855c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_loan_inuse_buf(abuf, db); 856dcbf3bd6SGeorge Wilson db->db_buf = NULL; 857bc9014e6SJustin Gibbs dbuf_clear_data(db); 858c242f9a0Schunli zhang - Sun Microsystems - Irvine United States mutex_exit(&db->db_mtx); 859c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 860c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (abuf); 861c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 862c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 863a2cdcdd2SPaul Dagnelie /* 864a2cdcdd2SPaul Dagnelie * Calculate which level n block references the data at the level 0 offset 865a2cdcdd2SPaul Dagnelie * provided. 866a2cdcdd2SPaul Dagnelie */ 867fa9e4066Sahrens uint64_t 868a2cdcdd2SPaul Dagnelie dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset) 869fa9e4066Sahrens { 870a2cdcdd2SPaul Dagnelie if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) { 871a2cdcdd2SPaul Dagnelie /* 872a2cdcdd2SPaul Dagnelie * The level n blkid is equal to the level 0 blkid divided by 873a2cdcdd2SPaul Dagnelie * the number of level 0s in a level n block. 874a2cdcdd2SPaul Dagnelie * 875a2cdcdd2SPaul Dagnelie * The level 0 blkid is offset >> datablkshift = 876a2cdcdd2SPaul Dagnelie * offset / 2^datablkshift. 877a2cdcdd2SPaul Dagnelie * 878a2cdcdd2SPaul Dagnelie * The number of level 0s in a level n is the number of block 879a2cdcdd2SPaul Dagnelie * pointers in an indirect block, raised to the power of level. 880a2cdcdd2SPaul Dagnelie * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level = 881a2cdcdd2SPaul Dagnelie * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)). 
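                 *
                 * For example, assuming (purely for illustration) 16K
                 * indirect blocks (indblkshift = 14) and 128-byte block
                 * pointers (SPA_BLKPTRSHIFT = 7), an indirect block holds
                 * 2^(14 - 7) = 128 block pointers, so a level 1 block
                 * covers 128 level 0 blocks and a level 2 block covers
                 * 128^2 of them.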
882a2cdcdd2SPaul Dagnelie * 883a2cdcdd2SPaul Dagnelie * Thus, the level n blkid is: offset / 884a2cdcdd2SPaul Dagnelie * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT))) 885a2cdcdd2SPaul Dagnelie * = offset / 2^(datablkshift + level * 886a2cdcdd2SPaul Dagnelie * (indblkshift - SPA_BLKPTRSHIFT)) 887a2cdcdd2SPaul Dagnelie * = offset >> (datablkshift + level * 888a2cdcdd2SPaul Dagnelie * (indblkshift - SPA_BLKPTRSHIFT)) 889a2cdcdd2SPaul Dagnelie */ 890a2cdcdd2SPaul Dagnelie return (offset >> (dn->dn_datablkshift + level * 891a2cdcdd2SPaul Dagnelie (dn->dn_indblkshift - SPA_BLKPTRSHIFT))); 892fa9e4066Sahrens } else { 893fa9e4066Sahrens ASSERT3U(offset, <, dn->dn_datablksz); 894fa9e4066Sahrens return (0); 895fa9e4066Sahrens } 896fa9e4066Sahrens } 897fa9e4066Sahrens 898fa9e4066Sahrens static void 899fa9e4066Sahrens dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb) 900fa9e4066Sahrens { 901fa9e4066Sahrens dmu_buf_impl_t *db = vdb; 902fa9e4066Sahrens 903fa9e4066Sahrens mutex_enter(&db->db_mtx); 904fa9e4066Sahrens ASSERT3U(db->db_state, ==, DB_READ); 905fa9e4066Sahrens /* 906fa9e4066Sahrens * All reads are synchronous, so we must have a hold on the dbuf 907fa9e4066Sahrens */ 908fa9e4066Sahrens ASSERT(refcount_count(&db->db_holds) > 0); 909ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 910fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 911c717a561Smaybee if (db->db_level == 0 && db->db_freed_in_flight) { 912fa9e4066Sahrens /* we were freed in flight; disregard any error */ 913fa9e4066Sahrens arc_release(buf, db); 914fa9e4066Sahrens bzero(buf->b_data, db->db.db_size); 9156b4acc8bSahrens arc_buf_freeze(buf); 916c717a561Smaybee db->db_freed_in_flight = FALSE; 917fa9e4066Sahrens dbuf_set_data(db, buf); 918fa9e4066Sahrens db->db_state = DB_CACHED; 919fa9e4066Sahrens } else if (zio == NULL || zio->io_error == 0) { 920fa9e4066Sahrens dbuf_set_data(db, buf); 921fa9e4066Sahrens db->db_state = DB_CACHED; 922fa9e4066Sahrens } else { 9230a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 924fa9e4066Sahrens ASSERT3P(db->db_buf, ==, NULL); 925dcbf3bd6SGeorge Wilson arc_buf_destroy(buf, db); 926ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 927fa9e4066Sahrens } 928fa9e4066Sahrens cv_broadcast(&db->db_changed); 9293f9d6ad7SLin Ling dbuf_rele_and_unlock(db, NULL); 930fa9e4066Sahrens } 931fa9e4066Sahrens 932ea8dc4b6Seschrock static void 933cf6106c8SMatthew Ahrens dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 934fa9e4066Sahrens { 935744947dcSTom Erickson dnode_t *dn; 9367802d7bfSMatthew Ahrens zbookmark_phys_t zb; 9377adb730bSGeorge Wilson arc_flags_t aflags = ARC_FLAG_NOWAIT; 938fa9e4066Sahrens 939744947dcSTom Erickson DB_DNODE_ENTER(db); 940744947dcSTom Erickson dn = DB_DNODE(db); 941fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 942fa9e4066Sahrens /* We need the struct_rwlock to prevent db_blkptr from changing. 
*/ 943088f3894Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 944ea8dc4b6Seschrock ASSERT(MUTEX_HELD(&db->db_mtx)); 945ea8dc4b6Seschrock ASSERT(db->db_state == DB_UNCACHED); 946ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 947fa9e4066Sahrens 9480a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 949cf04dda1SMark Maybee int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 9501934e92fSmaybee 9511934e92fSmaybee ASSERT3U(bonuslen, <=, db->db.db_size); 952ea8dc4b6Seschrock db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN); 9535a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 9541934e92fSmaybee if (bonuslen < DN_MAX_BONUSLEN) 955ea8dc4b6Seschrock bzero(db->db.db_data, DN_MAX_BONUSLEN); 956cf04dda1SMark Maybee if (bonuslen) 957cf04dda1SMark Maybee bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen); 958744947dcSTom Erickson DB_DNODE_EXIT(db); 959fa9e4066Sahrens db->db_state = DB_CACHED; 960fa9e4066Sahrens mutex_exit(&db->db_mtx); 961fa9e4066Sahrens return; 962fa9e4066Sahrens } 963fa9e4066Sahrens 9641c8564a7SMark Maybee /* 9651c8564a7SMark Maybee * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 9661c8564a7SMark Maybee * processes the delete record and clears the bp while we are waiting 9671c8564a7SMark Maybee * for the dn_mtx (resulting in a "no" from block_freed). 9681c8564a7SMark Maybee */ 969088f3894Sahrens if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 9701c8564a7SMark Maybee (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 9711c8564a7SMark Maybee BP_IS_HOLE(db->db_blkptr)))) { 972ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 973ad23a2dbSjohansen 9745602294fSDan Kimmel dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type, 9755602294fSDan Kimmel db->db.db_size)); 976fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 9778df0bcf0SPaul Dagnelie 9788df0bcf0SPaul Dagnelie if (db->db_blkptr != NULL && db->db_level > 0 && 9798df0bcf0SPaul Dagnelie BP_IS_HOLE(db->db_blkptr) && 9808df0bcf0SPaul Dagnelie db->db_blkptr->blk_birth != 0) { 9818df0bcf0SPaul Dagnelie blkptr_t *bps = db->db.db_data; 9828df0bcf0SPaul Dagnelie for (int i = 0; i < ((1 << 9838df0bcf0SPaul Dagnelie DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t)); 9848df0bcf0SPaul Dagnelie i++) { 9858df0bcf0SPaul Dagnelie blkptr_t *bp = &bps[i]; 9868df0bcf0SPaul Dagnelie ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 9878df0bcf0SPaul Dagnelie 1 << dn->dn_indblkshift); 9888df0bcf0SPaul Dagnelie BP_SET_LSIZE(bp, 9898df0bcf0SPaul Dagnelie BP_GET_LEVEL(db->db_blkptr) == 1 ? 
9908df0bcf0SPaul Dagnelie dn->dn_datablksz : 9918df0bcf0SPaul Dagnelie BP_GET_LSIZE(db->db_blkptr)); 9928df0bcf0SPaul Dagnelie BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr)); 9938df0bcf0SPaul Dagnelie BP_SET_LEVEL(bp, 9948df0bcf0SPaul Dagnelie BP_GET_LEVEL(db->db_blkptr) - 1); 9958df0bcf0SPaul Dagnelie BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0); 9968df0bcf0SPaul Dagnelie } 9978df0bcf0SPaul Dagnelie } 9988df0bcf0SPaul Dagnelie DB_DNODE_EXIT(db); 999fa9e4066Sahrens db->db_state = DB_CACHED; 1000fa9e4066Sahrens mutex_exit(&db->db_mtx); 1001fa9e4066Sahrens return; 1002fa9e4066Sahrens } 1003fa9e4066Sahrens 1004744947dcSTom Erickson DB_DNODE_EXIT(db); 1005744947dcSTom Erickson 1006fa9e4066Sahrens db->db_state = DB_READ; 1007fa9e4066Sahrens mutex_exit(&db->db_mtx); 1008fa9e4066Sahrens 10093baa08fcSek if (DBUF_IS_L2CACHEABLE(db)) 10107adb730bSGeorge Wilson aflags |= ARC_FLAG_L2CACHE; 10113baa08fcSek 1012b24ab676SJeff Bonwick SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ? 1013b24ab676SJeff Bonwick db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET, 1014b24ab676SJeff Bonwick db->db.db_object, db->db_level, db->db_blkid); 1015ea8dc4b6Seschrock 1016ea8dc4b6Seschrock dbuf_add_ref(db, NULL); 1017088f3894Sahrens 101843466aaeSMax Grossman (void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr, 1019fa9e4066Sahrens dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, 1020cf6106c8SMatthew Ahrens (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED, 102113506d1eSmaybee &aflags, &zb); 1022fa9e4066Sahrens } 1023fa9e4066Sahrens 10245602294fSDan Kimmel /* 10255602294fSDan Kimmel * This is our just-in-time copy function. It makes a copy of buffers that 10265602294fSDan Kimmel * have been modified in a previous transaction group before we access them in 10275602294fSDan Kimmel * the current active group. 10285602294fSDan Kimmel * 10295602294fSDan Kimmel * This function is used in three places: when we are dirtying a buffer for the 10305602294fSDan Kimmel * first time in a txg, when we are freeing a range in a dnode that includes 10315602294fSDan Kimmel * this buffer, and when we are accessing a buffer which was received compressed 10325602294fSDan Kimmel * and later referenced in a WRITE_BYREF record. 10335602294fSDan Kimmel * 10345602294fSDan Kimmel * Note that when we are called from dbuf_free_range() we do not put a hold on 10355602294fSDan Kimmel * the buffer, we just traverse the active dbuf list for the dnode. 10365602294fSDan Kimmel */ 10375602294fSDan Kimmel static void 10385602294fSDan Kimmel dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) 10395602294fSDan Kimmel { 10405602294fSDan Kimmel dbuf_dirty_record_t *dr = db->db_last_dirty; 10415602294fSDan Kimmel 10425602294fSDan Kimmel ASSERT(MUTEX_HELD(&db->db_mtx)); 10435602294fSDan Kimmel ASSERT(db->db.db_data != NULL); 10445602294fSDan Kimmel ASSERT(db->db_level == 0); 10455602294fSDan Kimmel ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT); 10465602294fSDan Kimmel 10475602294fSDan Kimmel if (dr == NULL || 10485602294fSDan Kimmel (dr->dt.dl.dr_data != 10495602294fSDan Kimmel ((db->db_blkid == DMU_BONUS_BLKID) ? 
db->db.db_data : db->db_buf)))
                return;

        /*
         * If the last dirty record for this dbuf has not yet synced
         * and it's referencing the dbuf data, either:
         *      reset the reference to point to a new copy,
         * or (if there are no active holders)
         *      just null out the current db_data pointer.
         */
        ASSERT(dr->dr_txg >= txg - 2);
        if (db->db_blkid == DMU_BONUS_BLKID) {
                /* Note that the data bufs here are zio_bufs */
                dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
                arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
                bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
        } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
                int size = arc_buf_size(db->db_buf);
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
                spa_t *spa = db->db_objset->os_spa;
                enum zio_compress compress_type =
                    arc_get_compression(db->db_buf);

                if (compress_type == ZIO_COMPRESS_OFF) {
                        dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
                } else {
                        ASSERT3U(type, ==, ARC_BUFC_DATA);
                        dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
                            size, arc_buf_lsize(db->db_buf), compress_type);
                }
                bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
        } else {
                db->db_buf = NULL;
                dbuf_clear_data(db);
        }
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
        int err = 0;
        boolean_t prefetch;
        dnode_t *dn;

        /*
         * We don't have to hold the mutex to check db_state because it
         * can't be freed while we have a hold on the buffer.
         */
        ASSERT(!refcount_is_zero(&db->db_holds));

        if (db->db_state == DB_NOFILL)
                return (SET_ERROR(EIO));

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        if ((flags & DB_RF_HAVESTRUCT) == 0)
                rw_enter(&dn->dn_struct_rwlock, RW_READER);

        prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
            (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
            DBUF_IS_CACHEABLE(db);

        mutex_enter(&db->db_mtx);
        if (db->db_state == DB_CACHED) {
                /*
                 * If the arc buf is compressed, we need to decompress it to
                 * read the data.
This could happen during the "zfs receive" of 11165602294fSDan Kimmel * a stream which is compressed and deduplicated. 11175602294fSDan Kimmel */ 11185602294fSDan Kimmel if (db->db_buf != NULL && 11195602294fSDan Kimmel arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) { 11205602294fSDan Kimmel dbuf_fix_old_data(db, 11215602294fSDan Kimmel spa_syncing_txg(dmu_objset_spa(db->db_objset))); 11225602294fSDan Kimmel err = arc_decompress(db->db_buf); 11235602294fSDan Kimmel dbuf_set_data(db, db->db_buf); 11245602294fSDan Kimmel } 1125ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 112613506d1eSmaybee if (prefetch) 1127cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1128fa9e4066Sahrens if ((flags & DB_RF_HAVESTRUCT) == 0) 1129744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1130744947dcSTom Erickson DB_DNODE_EXIT(db); 1131ea8dc4b6Seschrock } else if (db->db_state == DB_UNCACHED) { 1132744947dcSTom Erickson spa_t *spa = dn->dn_objset->os_spa; 1133def4fac5SMatthew Ahrens boolean_t need_wait = B_FALSE; 1134744947dcSTom Erickson 1135def4fac5SMatthew Ahrens if (zio == NULL && 1136def4fac5SMatthew Ahrens db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1137744947dcSTom Erickson zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1138def4fac5SMatthew Ahrens need_wait = B_TRUE; 1139def4fac5SMatthew Ahrens } 1140cf6106c8SMatthew Ahrens dbuf_read_impl(db, zio, flags); 114113506d1eSmaybee 1142ea8dc4b6Seschrock /* dbuf_read_impl has dropped db_mtx for us */ 1143ea8dc4b6Seschrock 114413506d1eSmaybee if (prefetch) 1145cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1146ea8dc4b6Seschrock 1147fa9e4066Sahrens if ((flags & DB_RF_HAVESTRUCT) == 0) 1148744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1149744947dcSTom Erickson DB_DNODE_EXIT(db); 1150fa9e4066Sahrens 1151def4fac5SMatthew Ahrens if (need_wait) 1152ea8dc4b6Seschrock err = zio_wait(zio); 1153ea8dc4b6Seschrock } else { 11543e30c24aSWill Andrews /* 11553e30c24aSWill Andrews * Another reader came in while the dbuf was in flight 11563e30c24aSWill Andrews * between UNCACHED and CACHED. Either a writer will finish 11573e30c24aSWill Andrews * writing the buffer (sending the dbuf to CACHED) or the 11583e30c24aSWill Andrews * first reader's request will reach the read_done callback 11593e30c24aSWill Andrews * and send the dbuf to CACHED. Otherwise, a failure 11603e30c24aSWill Andrews * occurred and the dbuf went to UNCACHED. 11613e30c24aSWill Andrews */ 116213506d1eSmaybee mutex_exit(&db->db_mtx); 116313506d1eSmaybee if (prefetch) 1164cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1165ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1166744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1167744947dcSTom Erickson DB_DNODE_EXIT(db); 116813506d1eSmaybee 11693e30c24aSWill Andrews /* Skip the wait per the caller's request. */ 117013506d1eSmaybee mutex_enter(&db->db_mtx); 1171ea8dc4b6Seschrock if ((flags & DB_RF_NEVERWAIT) == 0) { 1172ea8dc4b6Seschrock while (db->db_state == DB_READ || 1173ea8dc4b6Seschrock db->db_state == DB_FILL) { 1174ea8dc4b6Seschrock ASSERT(db->db_state == DB_READ || 1175ea8dc4b6Seschrock (flags & DB_RF_HAVESTRUCT) == 0); 1176f6164ad6SAdam H. Leventhal DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1177f6164ad6SAdam H. 
Leventhal db, zio_t *, zio); 1178ea8dc4b6Seschrock cv_wait(&db->db_changed, &db->db_mtx); 1179ea8dc4b6Seschrock } 1180ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED) 1181be6fd75aSMatthew Ahrens err = SET_ERROR(EIO); 1182ea8dc4b6Seschrock } 1183ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 1184fa9e4066Sahrens } 1185fa9e4066Sahrens 1186ea8dc4b6Seschrock return (err); 1187fa9e4066Sahrens } 1188fa9e4066Sahrens 1189fa9e4066Sahrens static void 1190fa9e4066Sahrens dbuf_noread(dmu_buf_impl_t *db) 1191fa9e4066Sahrens { 1192fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 11930a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1194fa9e4066Sahrens mutex_enter(&db->db_mtx); 1195fa9e4066Sahrens while (db->db_state == DB_READ || db->db_state == DB_FILL) 1196fa9e4066Sahrens cv_wait(&db->db_changed, &db->db_mtx); 1197fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 1198ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 119943466aaeSMax Grossman spa_t *spa = db->db_objset->os_spa; 1200ad23a2dbSjohansen 1201ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 1202fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 12035602294fSDan Kimmel dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size)); 1204fa9e4066Sahrens db->db_state = DB_FILL; 120582c9918fSTim Haley } else if (db->db_state == DB_NOFILL) { 1206bc9014e6SJustin Gibbs dbuf_clear_data(db); 1207fa9e4066Sahrens } else { 1208fa9e4066Sahrens ASSERT3U(db->db_state, ==, DB_CACHED); 1209fa9e4066Sahrens } 1210fa9e4066Sahrens mutex_exit(&db->db_mtx); 1211fa9e4066Sahrens } 1212fa9e4066Sahrens 1213c717a561Smaybee void 1214c717a561Smaybee dbuf_unoverride(dbuf_dirty_record_t *dr) 1215ea8dc4b6Seschrock { 1216c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 1217b24ab676SJeff Bonwick blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1218c717a561Smaybee uint64_t txg = dr->dr_txg; 1219ea8dc4b6Seschrock 1220ea8dc4b6Seschrock ASSERT(MUTEX_HELD(&db->db_mtx)); 122140713f2bSAlan Somers /* 122240713f2bSAlan Somers * This assert is valid because dmu_sync() expects to be called by 122340713f2bSAlan Somers * a zilog's get_data while holding a range lock. This call only 122440713f2bSAlan Somers * comes from dbuf_dirty() callers who must also hold a range lock. 122540713f2bSAlan Somers */ 1226c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1227c717a561Smaybee ASSERT(db->db_level == 0); 1228ea8dc4b6Seschrock 12290a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 1230c717a561Smaybee dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1231c717a561Smaybee return; 1232ea8dc4b6Seschrock 1233b24ab676SJeff Bonwick ASSERT(db->db_data_pending != dr); 1234b24ab676SJeff Bonwick 1235c717a561Smaybee /* free this block */ 123643466aaeSMax Grossman if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 123743466aaeSMax Grossman zio_free(db->db_objset->os_spa, txg, bp); 1238b24ab676SJeff Bonwick 1239c717a561Smaybee dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 124080901aeaSGeorge Wilson dr->dt.dl.dr_nopwrite = B_FALSE; 124180901aeaSGeorge Wilson 1242c717a561Smaybee /* 1243c717a561Smaybee * Release the already-written buffer, so we leave it in 1244c717a561Smaybee * a consistent dirty state. Note that all callers are 1245c717a561Smaybee * modifying the buffer, so they will immediately do 1246c717a561Smaybee * another (redundant) arc_release(). Therefore, leave 1247c717a561Smaybee * the buf thawed to save the effort of freezing & 1248c717a561Smaybee * immediately re-thawing it. 
1249c717a561Smaybee */ 1250c717a561Smaybee arc_release(dr->dt.dl.dr_data, db); 1251fa9e4066Sahrens } 1252fa9e4066Sahrens 1253cdb0ab79Smaybee /* 1254cdb0ab79Smaybee * Evict (if its unreferenced) or clear (if its referenced) any level-0 1255cdb0ab79Smaybee * data blocks in the free range, so that any future readers will find 125643466aaeSMax Grossman * empty blocks. 1257cdb0ab79Smaybee */ 1258fa9e4066Sahrens void 12590f6d88adSAlex Reece dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 12600f6d88adSAlex Reece dmu_tx_t *tx) 1261fa9e4066Sahrens { 1262bc9014e6SJustin Gibbs dmu_buf_impl_t db_search; 1263bc9014e6SJustin Gibbs dmu_buf_impl_t *db, *db_next; 1264fa9e4066Sahrens uint64_t txg = tx->tx_txg; 12650f6d88adSAlex Reece avl_index_t where; 1266fa9e4066Sahrens 1267653af1b8SStephen Blinick if (end_blkid > dn->dn_maxblkid && 1268653af1b8SStephen Blinick !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 12690f6d88adSAlex Reece end_blkid = dn->dn_maxblkid; 12700f6d88adSAlex Reece dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); 12710f6d88adSAlex Reece 12720f6d88adSAlex Reece db_search.db_level = 0; 12730f6d88adSAlex Reece db_search.db_blkid = start_blkid; 127486bb58aeSAlex Reece db_search.db_state = DB_SEARCH; 12752f3d8780SMatthew Ahrens 1276713d6c20SMatthew Ahrens mutex_enter(&dn->dn_dbufs_mtx); 12770f6d88adSAlex Reece db = avl_find(&dn->dn_dbufs, &db_search, &where); 12780f6d88adSAlex Reece ASSERT3P(db, ==, NULL); 1279653af1b8SStephen Blinick 12800f6d88adSAlex Reece db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 12810f6d88adSAlex Reece 12820f6d88adSAlex Reece for (; db != NULL; db = db_next) { 12830f6d88adSAlex Reece db_next = AVL_NEXT(&dn->dn_dbufs, db); 12840a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1285cdb0ab79Smaybee 12860f6d88adSAlex Reece if (db->db_level != 0 || db->db_blkid > end_blkid) { 12870f6d88adSAlex Reece break; 12880f6d88adSAlex Reece } 12890f6d88adSAlex Reece ASSERT3U(db->db_blkid, >=, start_blkid); 1290fa9e4066Sahrens 1291fa9e4066Sahrens /* found a level 0 buffer in the range */ 12923b2aab18SMatthew Ahrens mutex_enter(&db->db_mtx); 12933b2aab18SMatthew Ahrens if (dbuf_undirty(db, tx)) { 12943b2aab18SMatthew Ahrens /* mutex has been dropped and dbuf destroyed */ 1295fa9e4066Sahrens continue; 12963b2aab18SMatthew Ahrens } 1297fa9e4066Sahrens 1298ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED || 129982c9918fSTim Haley db->db_state == DB_NOFILL || 1300ea8dc4b6Seschrock db->db_state == DB_EVICTING) { 1301fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 1302fa9e4066Sahrens mutex_exit(&db->db_mtx); 1303fa9e4066Sahrens continue; 1304fa9e4066Sahrens } 1305c543ec06Sahrens if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1306c543ec06Sahrens /* will be handled in dbuf_read_done or dbuf_rele */ 1307c717a561Smaybee db->db_freed_in_flight = TRUE; 1308fa9e4066Sahrens mutex_exit(&db->db_mtx); 1309fa9e4066Sahrens continue; 1310fa9e4066Sahrens } 1311ea8dc4b6Seschrock if (refcount_count(&db->db_holds) == 0) { 1312ea8dc4b6Seschrock ASSERT(db->db_buf); 1313dcbf3bd6SGeorge Wilson dbuf_destroy(db); 1314ea8dc4b6Seschrock continue; 1315ea8dc4b6Seschrock } 1316c717a561Smaybee /* The dbuf is referenced */ 1317fa9e4066Sahrens 1318c717a561Smaybee if (db->db_last_dirty != NULL) { 1319c717a561Smaybee dbuf_dirty_record_t *dr = db->db_last_dirty; 1320c717a561Smaybee 1321c717a561Smaybee if (dr->dr_txg == txg) { 132244eda4d7Smaybee /* 1323c717a561Smaybee * This buffer is "in-use", re-adjust the file 1324c717a561Smaybee 
* size to reflect that this buffer may 1325c717a561Smaybee * contain new data when we sync. 132644eda4d7Smaybee */ 132706e0070dSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID && 132806e0070dSMark Shellenbaum db->db_blkid > dn->dn_maxblkid) 1329c717a561Smaybee dn->dn_maxblkid = db->db_blkid; 1330c717a561Smaybee dbuf_unoverride(dr); 1331c717a561Smaybee } else { 1332c717a561Smaybee /* 1333c717a561Smaybee * This dbuf is not dirty in the open context. 1334c717a561Smaybee * Either uncache it (if its not referenced in 1335c717a561Smaybee * the open context) or reset its contents to 1336c717a561Smaybee * empty. 1337c717a561Smaybee */ 1338c717a561Smaybee dbuf_fix_old_data(db, txg); 133944eda4d7Smaybee } 1340ea8dc4b6Seschrock } 1341c717a561Smaybee /* clear the contents if its cached */ 1342ea8dc4b6Seschrock if (db->db_state == DB_CACHED) { 1343ea8dc4b6Seschrock ASSERT(db->db.db_data != NULL); 1344fa9e4066Sahrens arc_release(db->db_buf, db); 1345fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 13466b4acc8bSahrens arc_buf_freeze(db->db_buf); 1347fa9e4066Sahrens } 1348ea8dc4b6Seschrock 1349fa9e4066Sahrens mutex_exit(&db->db_mtx); 1350fa9e4066Sahrens } 1351fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 1352fa9e4066Sahrens } 1353fa9e4066Sahrens 1354fa9e4066Sahrens void 1355fa9e4066Sahrens dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 1356fa9e4066Sahrens { 1357fa9e4066Sahrens arc_buf_t *buf, *obuf; 1358fa9e4066Sahrens int osize = db->db.db_size; 1359ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1360744947dcSTom Erickson dnode_t *dn; 1361fa9e4066Sahrens 13620a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1363ea8dc4b6Seschrock 1364744947dcSTom Erickson DB_DNODE_ENTER(db); 1365744947dcSTom Erickson dn = DB_DNODE(db); 1366744947dcSTom Erickson 1367fa9e4066Sahrens /* XXX does *this* func really need the lock? */ 1368744947dcSTom Erickson ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 1369fa9e4066Sahrens 1370fa9e4066Sahrens /* 137143466aaeSMax Grossman * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held 1372fa9e4066Sahrens * is OK, because there can be no other references to the db 1373fa9e4066Sahrens * when we are changing its size, so no concurrent DB_FILL can 1374fa9e4066Sahrens * be happening. 
1375fa9e4066Sahrens */ 1376ea8dc4b6Seschrock /* 1377ea8dc4b6Seschrock * XXX we should be doing a dbuf_read, checking the return 1378ea8dc4b6Seschrock * value and returning that up to our callers 1379ea8dc4b6Seschrock */ 138043466aaeSMax Grossman dmu_buf_will_dirty(&db->db, tx); 1381fa9e4066Sahrens 1382fa9e4066Sahrens /* create the data buffer for the new block */ 13835602294fSDan Kimmel buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 1384fa9e4066Sahrens 1385fa9e4066Sahrens /* copy old block data to the new block */ 1386fa9e4066Sahrens obuf = db->db_buf; 1387f65e61c0Sahrens bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 1388fa9e4066Sahrens /* zero the remainder */ 1389f65e61c0Sahrens if (size > osize) 1390f65e61c0Sahrens bzero((uint8_t *)buf->b_data + osize, size - osize); 1391fa9e4066Sahrens 1392fa9e4066Sahrens mutex_enter(&db->db_mtx); 1393fa9e4066Sahrens dbuf_set_data(db, buf); 1394dcbf3bd6SGeorge Wilson arc_buf_destroy(obuf, db); 1395fa9e4066Sahrens db->db.db_size = size; 1396fa9e4066Sahrens 1397c717a561Smaybee if (db->db_level == 0) { 1398c717a561Smaybee ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1399c717a561Smaybee db->db_last_dirty->dt.dl.dr_data = buf; 1400c717a561Smaybee } 1401fa9e4066Sahrens mutex_exit(&db->db_mtx); 1402fa9e4066Sahrens 140361e255ceSMatthew Ahrens dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 1404744947dcSTom Erickson DB_DNODE_EXIT(db); 1405fa9e4066Sahrens } 1406fa9e4066Sahrens 14073f9d6ad7SLin Ling void 14083f9d6ad7SLin Ling dbuf_release_bp(dmu_buf_impl_t *db) 14093f9d6ad7SLin Ling { 141043466aaeSMax Grossman objset_t *os = db->db_objset; 14113f9d6ad7SLin Ling 14123f9d6ad7SLin Ling ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 14133f9d6ad7SLin Ling ASSERT(arc_released(os->os_phys_buf) || 14143f9d6ad7SLin Ling list_link_active(&os->os_dsl_dataset->ds_synced_link)); 14153f9d6ad7SLin Ling ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 14163f9d6ad7SLin Ling 14171b912ec7SGeorge Wilson (void) arc_release(db->db_buf, db); 14183f9d6ad7SLin Ling } 14193f9d6ad7SLin Ling 14200f2e7d03SMatthew Ahrens /* 14210f2e7d03SMatthew Ahrens * We already have a dirty record for this TXG, and we are being 14220f2e7d03SMatthew Ahrens * dirtied again. 14230f2e7d03SMatthew Ahrens */ 14240f2e7d03SMatthew Ahrens static void 14250f2e7d03SMatthew Ahrens dbuf_redirty(dbuf_dirty_record_t *dr) 14260f2e7d03SMatthew Ahrens { 14270f2e7d03SMatthew Ahrens dmu_buf_impl_t *db = dr->dr_dbuf; 14280f2e7d03SMatthew Ahrens 14290f2e7d03SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 14300f2e7d03SMatthew Ahrens 14310f2e7d03SMatthew Ahrens if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 14320f2e7d03SMatthew Ahrens /* 14330f2e7d03SMatthew Ahrens * If this buffer has already been written out, 14340f2e7d03SMatthew Ahrens * we now need to reset its state. 14350f2e7d03SMatthew Ahrens */ 14360f2e7d03SMatthew Ahrens dbuf_unoverride(dr); 14370f2e7d03SMatthew Ahrens if (db->db.db_object != DMU_META_DNODE_OBJECT && 14380f2e7d03SMatthew Ahrens db->db_state != DB_NOFILL) { 14390f2e7d03SMatthew Ahrens /* Already released on initial dirty, so just thaw. 
*/ 14400f2e7d03SMatthew Ahrens ASSERT(arc_released(db->db_buf)); 14410f2e7d03SMatthew Ahrens arc_buf_thaw(db->db_buf); 14420f2e7d03SMatthew Ahrens } 14430f2e7d03SMatthew Ahrens } 14440f2e7d03SMatthew Ahrens } 14450f2e7d03SMatthew Ahrens 1446c717a561Smaybee dbuf_dirty_record_t * 1447fa9e4066Sahrens dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1448fa9e4066Sahrens { 1449744947dcSTom Erickson dnode_t *dn; 1450744947dcSTom Erickson objset_t *os; 1451c717a561Smaybee dbuf_dirty_record_t **drp, *dr; 1452fa9e4066Sahrens int drop_struct_lock = FALSE; 1453fa9e4066Sahrens int txgoff = tx->tx_txg & TXG_MASK; 1454fa9e4066Sahrens 1455fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1456fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 14579c9dc39aSek DMU_TX_DIRTY_BUF(tx, db); 1458fa9e4066Sahrens 1459744947dcSTom Erickson DB_DNODE_ENTER(db); 1460744947dcSTom Erickson dn = DB_DNODE(db); 1461fa9e4066Sahrens /* 1462fa9e4066Sahrens * Shouldn't dirty a regular buffer in syncing context. Private 1463fa9e4066Sahrens * objects may be dirtied in syncing context, but only if they 1464fa9e4066Sahrens * were already pre-dirtied in open context. 1465fa9e4066Sahrens */ 1466c166b69dSPaul Dagnelie #ifdef DEBUG 1467c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1468c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1469c166b69dSPaul Dagnelie RW_READER, FTAG); 1470c166b69dSPaul Dagnelie } 1471c717a561Smaybee ASSERT(!dmu_tx_is_syncing(tx) || 1472c717a561Smaybee BP_IS_HOLE(dn->dn_objset->os_rootbp) || 147314843421SMatthew Ahrens DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 147414843421SMatthew Ahrens dn->dn_objset->os_dsl_dataset == NULL); 1475c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1476c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 1477c166b69dSPaul Dagnelie #endif 1478fa9e4066Sahrens /* 1479fa9e4066Sahrens * We make this assert for private objects as well, but after we 1480fa9e4066Sahrens * check if we're already dirty. They are allowed to re-dirty 1481fa9e4066Sahrens * in syncing context. 1482fa9e4066Sahrens */ 1483ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1484c717a561Smaybee dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1485fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1486fa9e4066Sahrens 1487fa9e4066Sahrens mutex_enter(&db->db_mtx); 1488fa9e4066Sahrens /* 1489c717a561Smaybee * XXX make this true for indirects too? The problem is that 1490c717a561Smaybee * transactions created with dmu_tx_create_assigned() from 1491c717a561Smaybee * syncing context don't bother holding ahead. 1492fa9e4066Sahrens */ 1493c717a561Smaybee ASSERT(db->db_level != 0 || 149482c9918fSTim Haley db->db_state == DB_CACHED || db->db_state == DB_FILL || 149582c9918fSTim Haley db->db_state == DB_NOFILL); 1496fa9e4066Sahrens 1497fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 1498fa9e4066Sahrens /* 1499fa9e4066Sahrens * Don't set dirtyctx to SYNC if we're just modifying this as we 1500fa9e4066Sahrens * initialize the objset. 1501fa9e4066Sahrens */ 1502c166b69dSPaul Dagnelie if (dn->dn_dirtyctx == DN_UNDIRTIED) { 1503c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1504c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1505c166b69dSPaul Dagnelie RW_READER, FTAG); 1506c166b69dSPaul Dagnelie } 1507c166b69dSPaul Dagnelie if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1508c166b69dSPaul Dagnelie dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? 
1509c166b69dSPaul Dagnelie DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1510c166b69dSPaul Dagnelie ASSERT(dn->dn_dirtyctx_firstset == NULL); 1511c166b69dSPaul Dagnelie dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1512c166b69dSPaul Dagnelie } 1513c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1514c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1515c166b69dSPaul Dagnelie FTAG); 1516c166b69dSPaul Dagnelie } 1517fa9e4066Sahrens } 1518fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1519fa9e4066Sahrens 15200a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) 15210a586ceaSMark Shellenbaum dn->dn_have_spill = B_TRUE; 15220a586ceaSMark Shellenbaum 1523fa9e4066Sahrens /* 1524fa9e4066Sahrens * If this buffer is already dirty, we're done. 1525fa9e4066Sahrens */ 1526c717a561Smaybee drp = &db->db_last_dirty; 1527c717a561Smaybee ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 1528c717a561Smaybee db->db.db_object == DMU_META_DNODE_OBJECT); 15297e2186e3Sbonwick while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 15307e2186e3Sbonwick drp = &dr->dr_next; 15317e2186e3Sbonwick if (dr && dr->dr_txg == tx->tx_txg) { 1532744947dcSTom Erickson DB_DNODE_EXIT(db); 1533744947dcSTom Erickson 15340f2e7d03SMatthew Ahrens dbuf_redirty(dr); 1535fa9e4066Sahrens mutex_exit(&db->db_mtx); 15367e2186e3Sbonwick return (dr); 1537fa9e4066Sahrens } 1538fa9e4066Sahrens 1539fa9e4066Sahrens /* 1540fa9e4066Sahrens * Only valid if not already dirty. 1541fa9e4066Sahrens */ 154214843421SMatthew Ahrens ASSERT(dn->dn_object == 0 || 154314843421SMatthew Ahrens dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1544fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1545fa9e4066Sahrens 1546fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, db->db_level); 1547fa9e4066Sahrens 1548fa9e4066Sahrens /* 1549fa9e4066Sahrens * We should only be dirtying in syncing context if it's the 155014843421SMatthew Ahrens * mos or we're initializing the os or it's a special object. 155114843421SMatthew Ahrens * However, we are allowed to dirty in syncing context provided 155214843421SMatthew Ahrens * we already dirtied it in open context. Hence we must make 155314843421SMatthew Ahrens * this assertion only if we're not already dirty. 
1554fa9e4066Sahrens */ 1555744947dcSTom Erickson os = dn->dn_objset; 15563991b535SGeorge Wilson VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 1557c166b69dSPaul Dagnelie #ifdef DEBUG 1558c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1559c166b69dSPaul Dagnelie rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 156014843421SMatthew Ahrens ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 156114843421SMatthew Ahrens os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 1562c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1563c166b69dSPaul Dagnelie rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1564c166b69dSPaul Dagnelie #endif 1565fa9e4066Sahrens ASSERT(db->db.db_size != 0); 1566fa9e4066Sahrens 1567fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1568fa9e4066Sahrens 15690a586ceaSMark Shellenbaum if (db->db_blkid != DMU_BONUS_BLKID) { 157061e255ceSMatthew Ahrens dmu_objset_willuse_space(os, db->db.db_size, tx); 15711934e92fSmaybee } 15721934e92fSmaybee 1573ea8dc4b6Seschrock /* 1574ea8dc4b6Seschrock * If this buffer is dirty in an old transaction group we need 1575ea8dc4b6Seschrock * to make a copy of it so that the changes we make in this 1576ea8dc4b6Seschrock * transaction group won't leak out when we sync the older txg. 1577ea8dc4b6Seschrock */ 1578c717a561Smaybee dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1579c717a561Smaybee if (db->db_level == 0) { 1580c717a561Smaybee void *data_old = db->db_buf; 1581c717a561Smaybee 158282c9918fSTim Haley if (db->db_state != DB_NOFILL) { 15830a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 158482c9918fSTim Haley dbuf_fix_old_data(db, tx->tx_txg); 158582c9918fSTim Haley data_old = db->db.db_data; 158682c9918fSTim Haley } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 158782c9918fSTim Haley /* 158882c9918fSTim Haley * Release the data buffer from the cache so 158982c9918fSTim Haley * that we can modify it without impacting 159082c9918fSTim Haley * possible other users of this cached data 159182c9918fSTim Haley * block. Note that indirect blocks and 159282c9918fSTim Haley * private objects are not released until the 159382c9918fSTim Haley * syncing state (since they are only modified 159482c9918fSTim Haley * then). 159582c9918fSTim Haley */ 159682c9918fSTim Haley arc_release(db->db_buf, db); 159782c9918fSTim Haley dbuf_fix_old_data(db, tx->tx_txg); 159882c9918fSTim Haley data_old = db->db_buf; 159982c9918fSTim Haley } 160082c9918fSTim Haley ASSERT(data_old != NULL); 1601fa9e4066Sahrens } 1602c717a561Smaybee dr->dt.dl.dr_data = data_old; 1603c717a561Smaybee } else { 1604c717a561Smaybee mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1605c717a561Smaybee list_create(&dr->dt.di.dr_children, 1606c717a561Smaybee sizeof (dbuf_dirty_record_t), 1607c717a561Smaybee offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1608fa9e4066Sahrens } 160969962b56SMatthew Ahrens if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) 161069962b56SMatthew Ahrens dr->dr_accounted = db->db.db_size; 1611c717a561Smaybee dr->dr_dbuf = db; 1612c717a561Smaybee dr->dr_txg = tx->tx_txg; 1613c717a561Smaybee dr->dr_next = *drp; 1614c717a561Smaybee *drp = dr; 1615fa9e4066Sahrens 1616fa9e4066Sahrens /* 1617fa9e4066Sahrens * We could have been freed_in_flight between the dbuf_noread 1618fa9e4066Sahrens * and dbuf_dirty. We win, as though the dbuf_noread() had 1619fa9e4066Sahrens * happened after the free. 
1620fa9e4066Sahrens */ 16210a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 16220a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) { 1623c717a561Smaybee mutex_enter(&dn->dn_mtx); 1624bf16b11eSMatthew Ahrens if (dn->dn_free_ranges[txgoff] != NULL) { 1625bf16b11eSMatthew Ahrens range_tree_clear(dn->dn_free_ranges[txgoff], 1626bf16b11eSMatthew Ahrens db->db_blkid, 1); 1627bf16b11eSMatthew Ahrens } 1628c717a561Smaybee mutex_exit(&dn->dn_mtx); 1629c717a561Smaybee db->db_freed_in_flight = FALSE; 1630fa9e4066Sahrens } 1631fa9e4066Sahrens 1632fa9e4066Sahrens /* 1633fa9e4066Sahrens * This buffer is now part of this txg 1634fa9e4066Sahrens */ 1635fa9e4066Sahrens dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1636fa9e4066Sahrens db->db_dirtycnt += 1; 1637fa9e4066Sahrens ASSERT3U(db->db_dirtycnt, <=, 3); 1638fa9e4066Sahrens 1639fa9e4066Sahrens mutex_exit(&db->db_mtx); 1640fa9e4066Sahrens 16410a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 16420a586ceaSMark Shellenbaum db->db_blkid == DMU_SPILL_BLKID) { 1643c717a561Smaybee mutex_enter(&dn->dn_mtx); 1644c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1645c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1646c717a561Smaybee mutex_exit(&dn->dn_mtx); 1647fa9e4066Sahrens dnode_setdirty(dn, tx); 1648744947dcSTom Erickson DB_DNODE_EXIT(db); 1649c717a561Smaybee return (dr); 165092654925SMatthew Ahrens } 165192654925SMatthew Ahrens 165292654925SMatthew Ahrens /* 165392654925SMatthew Ahrens * The dn_struct_rwlock prevents db_blkptr from changing 165492654925SMatthew Ahrens * due to a write from syncing context completing 165592654925SMatthew Ahrens * while we are running, so we want to acquire it before 165692654925SMatthew Ahrens * looking at db_blkptr. 165792654925SMatthew Ahrens */ 165892654925SMatthew Ahrens if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 165992654925SMatthew Ahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 166092654925SMatthew Ahrens drop_struct_lock = TRUE; 166192654925SMatthew Ahrens } 166292654925SMatthew Ahrens 1663dcb6872cSMatthew Ahrens /* 1664dcb6872cSMatthew Ahrens * We need to hold the dn_struct_rwlock to make this assertion, 1665dcb6872cSMatthew Ahrens * because it protects dn_phys / dn_next_nlevels from changing. 1666dcb6872cSMatthew Ahrens */ 1667dcb6872cSMatthew Ahrens ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 1668dcb6872cSMatthew Ahrens dn->dn_phys->dn_nlevels > db->db_level || 1669dcb6872cSMatthew Ahrens dn->dn_next_nlevels[txgoff] > db->db_level || 1670dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 1671dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 1672dcb6872cSMatthew Ahrens 167361e255ceSMatthew Ahrens /* 167461e255ceSMatthew Ahrens * If we are overwriting a dedup BP, then unless it is snapshotted, 167561e255ceSMatthew Ahrens * when we get to syncing context we will need to decrement its 167661e255ceSMatthew Ahrens * refcount in the DDT. Prefetch the relevant DDT block so that 167761e255ceSMatthew Ahrens * syncing context won't have to wait for the i/o. 
167861e255ceSMatthew Ahrens */ 167961e255ceSMatthew Ahrens ddt_prefetch(os->os_spa, db->db_blkptr); 1680fa9e4066Sahrens 16818346f03fSJonathan W Adams if (db->db_level == 0) { 16828346f03fSJonathan W Adams dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); 16838346f03fSJonathan W Adams ASSERT(dn->dn_maxblkid >= db->db_blkid); 16848346f03fSJonathan W Adams } 16858346f03fSJonathan W Adams 168644eda4d7Smaybee if (db->db_level+1 < dn->dn_nlevels) { 1687c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 1688c717a561Smaybee dbuf_dirty_record_t *di; 1689c717a561Smaybee int parent_held = FALSE; 1690c717a561Smaybee 1691c717a561Smaybee if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1692c717a561Smaybee int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1693c717a561Smaybee 1694c717a561Smaybee parent = dbuf_hold_level(dn, db->db_level+1, 1695c717a561Smaybee db->db_blkid >> epbs, FTAG); 169601025c89SJohn Harres ASSERT(parent != NULL); 1697c717a561Smaybee parent_held = TRUE; 1698c717a561Smaybee } 1699fa9e4066Sahrens if (drop_struct_lock) 1700fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1701c717a561Smaybee ASSERT3U(db->db_level+1, ==, parent->db_level); 1702c717a561Smaybee di = dbuf_dirty(parent, tx); 1703c717a561Smaybee if (parent_held) 1704c717a561Smaybee dbuf_rele(parent, FTAG); 1705c717a561Smaybee 1706c717a561Smaybee mutex_enter(&db->db_mtx); 170769962b56SMatthew Ahrens /* 170869962b56SMatthew Ahrens * Since we've dropped the mutex, it's possible that 170969962b56SMatthew Ahrens * dbuf_undirty() might have changed this out from under us. 171069962b56SMatthew Ahrens */ 1711c717a561Smaybee if (db->db_last_dirty == dr || 1712c717a561Smaybee dn->dn_object == DMU_META_DNODE_OBJECT) { 1713c717a561Smaybee mutex_enter(&di->dt.di.dr_mtx); 1714c717a561Smaybee ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1715c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1716c717a561Smaybee list_insert_tail(&di->dt.di.dr_children, dr); 1717c717a561Smaybee mutex_exit(&di->dt.di.dr_mtx); 1718c717a561Smaybee dr->dr_parent = di; 1719c717a561Smaybee } 1720c717a561Smaybee mutex_exit(&db->db_mtx); 1721fa9e4066Sahrens } else { 1722c717a561Smaybee ASSERT(db->db_level+1 == dn->dn_nlevels); 1723c717a561Smaybee ASSERT(db->db_blkid < dn->dn_nblkptr); 1724744947dcSTom Erickson ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 1725c717a561Smaybee mutex_enter(&dn->dn_mtx); 1726c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1727c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1728c717a561Smaybee mutex_exit(&dn->dn_mtx); 1729fa9e4066Sahrens if (drop_struct_lock) 1730fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1731fa9e4066Sahrens } 1732fa9e4066Sahrens 1733fa9e4066Sahrens dnode_setdirty(dn, tx); 1734744947dcSTom Erickson DB_DNODE_EXIT(db); 1735c717a561Smaybee return (dr); 1736fa9e4066Sahrens } 1737fa9e4066Sahrens 17383b2aab18SMatthew Ahrens /* 17393e30c24aSWill Andrews * Undirty a buffer in the transaction group referenced by the given 17403e30c24aSWill Andrews * transaction. Return whether this evicted the dbuf. 
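 *
 * Locking contract (as the code below asserts): the caller must hold
 * db->db_mtx.  If B_TRUE is returned, the dbuf was destroyed and db_mtx
 * has already been dropped, so the caller must not touch db again; e.g.
 * dbuf_free_range() simply continues with the next dbuf in that case.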
17413b2aab18SMatthew Ahrens */ 17423b2aab18SMatthew Ahrens static boolean_t 1743fa9e4066Sahrens dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1744fa9e4066Sahrens { 1745744947dcSTom Erickson dnode_t *dn; 1746c717a561Smaybee uint64_t txg = tx->tx_txg; 174717f17c2dSbonwick dbuf_dirty_record_t *dr, **drp; 1748fa9e4066Sahrens 1749c717a561Smaybee ASSERT(txg != 0); 175046e1baa6SMatthew Ahrens 175146e1baa6SMatthew Ahrens /* 175246e1baa6SMatthew Ahrens * Due to our use of dn_nlevels below, this can only be called 175346e1baa6SMatthew Ahrens * in open context, unless we are operating on the MOS. 175446e1baa6SMatthew Ahrens * From syncing context, dn_nlevels may be different from the 175546e1baa6SMatthew Ahrens * dn_nlevels used when dbuf was dirtied. 175646e1baa6SMatthew Ahrens */ 175746e1baa6SMatthew Ahrens ASSERT(db->db_objset == 175846e1baa6SMatthew Ahrens dmu_objset_pool(db->db_objset)->dp_meta_objset || 175946e1baa6SMatthew Ahrens txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 17600a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 17613b2aab18SMatthew Ahrens ASSERT0(db->db_level); 17623b2aab18SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 1763fa9e4066Sahrens 1764fa9e4066Sahrens /* 1765fa9e4066Sahrens * If this buffer is not dirty, we're done. 1766fa9e4066Sahrens */ 176717f17c2dSbonwick for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1768c717a561Smaybee if (dr->dr_txg <= txg) 1769c717a561Smaybee break; 17703b2aab18SMatthew Ahrens if (dr == NULL || dr->dr_txg < txg) 17713b2aab18SMatthew Ahrens return (B_FALSE); 1772c717a561Smaybee ASSERT(dr->dr_txg == txg); 1773b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 1774fa9e4066Sahrens 1775744947dcSTom Erickson DB_DNODE_ENTER(db); 1776744947dcSTom Erickson dn = DB_DNODE(db); 1777744947dcSTom Erickson 1778fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1779fa9e4066Sahrens 1780fa9e4066Sahrens ASSERT(db->db.db_size != 0); 1781fa9e4066Sahrens 178246e1baa6SMatthew Ahrens dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 178346e1baa6SMatthew Ahrens dr->dr_accounted, txg); 1784fa9e4066Sahrens 178517f17c2dSbonwick *drp = dr->dr_next; 1786c717a561Smaybee 17873f2366c2SGordon Ross /* 17883f2366c2SGordon Ross * Note that there are three places in dbuf_dirty() 17893f2366c2SGordon Ross * where this dirty record may be put on a list. 17903f2366c2SGordon Ross * Make sure to do a list_remove corresponding to 17913f2366c2SGordon Ross * every one of those list_insert calls. 
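 *
 * For reference, the three insertion points in dbuf_dirty() are: bonus
 * and spill dbufs are appended to dn_dirty_records[] right away, dbufs
 * with an indirect parent are appended to the parent dirty record's
 * dt.di.dr_children list, and top-level dbufs are appended to
 * dn_dirty_records[] at the end of dbuf_dirty().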
17923f2366c2SGordon Ross */ 1793c717a561Smaybee if (dr->dr_parent) { 1794c717a561Smaybee mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1795c717a561Smaybee list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1796c717a561Smaybee mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 17973f2366c2SGordon Ross } else if (db->db_blkid == DMU_SPILL_BLKID || 179846e1baa6SMatthew Ahrens db->db_level + 1 == dn->dn_nlevels) { 1799cdb0ab79Smaybee ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1800c717a561Smaybee mutex_enter(&dn->dn_mtx); 1801c717a561Smaybee list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1802c717a561Smaybee mutex_exit(&dn->dn_mtx); 1803c717a561Smaybee } 1804744947dcSTom Erickson DB_DNODE_EXIT(db); 1805c717a561Smaybee 18063b2aab18SMatthew Ahrens if (db->db_state != DB_NOFILL) { 18073b2aab18SMatthew Ahrens dbuf_unoverride(dr); 1808c717a561Smaybee 1809c717a561Smaybee ASSERT(db->db_buf != NULL); 18103b2aab18SMatthew Ahrens ASSERT(dr->dt.dl.dr_data != NULL); 18113b2aab18SMatthew Ahrens if (dr->dt.dl.dr_data != db->db_buf) 1812dcbf3bd6SGeorge Wilson arc_buf_destroy(dr->dt.dl.dr_data, db); 1813c717a561Smaybee } 1814d2b3cbbdSJorgen Lundman 1815c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1816fa9e4066Sahrens 1817fa9e4066Sahrens ASSERT(db->db_dirtycnt > 0); 1818fa9e4066Sahrens db->db_dirtycnt -= 1; 1819fa9e4066Sahrens 1820c717a561Smaybee if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1821dcbf3bd6SGeorge Wilson ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 1822dcbf3bd6SGeorge Wilson dbuf_destroy(db); 18233b2aab18SMatthew Ahrens return (B_TRUE); 1824fa9e4066Sahrens } 1825fa9e4066Sahrens 18263b2aab18SMatthew Ahrens return (B_FALSE); 1827fa9e4066Sahrens } 1828fa9e4066Sahrens 1829fa9e4066Sahrens void 183043466aaeSMax Grossman dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 1831fa9e4066Sahrens { 183243466aaeSMax Grossman dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 18331ab7f2deSmaybee int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1834fa9e4066Sahrens 1835fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1836fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 1837fa9e4066Sahrens 18380f2e7d03SMatthew Ahrens /* 18390f2e7d03SMatthew Ahrens * Quick check for dirtyness. For already dirty blocks, this 18400f2e7d03SMatthew Ahrens * reduces runtime of this function by >90%, and overall performance 18410f2e7d03SMatthew Ahrens * by 50% for some workloads (e.g. file deletion with indirect blocks 18420f2e7d03SMatthew Ahrens * cached). 18430f2e7d03SMatthew Ahrens */ 18440f2e7d03SMatthew Ahrens mutex_enter(&db->db_mtx); 18450f2e7d03SMatthew Ahrens dbuf_dirty_record_t *dr; 18460f2e7d03SMatthew Ahrens for (dr = db->db_last_dirty; 18470f2e7d03SMatthew Ahrens dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 18480f2e7d03SMatthew Ahrens /* 18490f2e7d03SMatthew Ahrens * It's possible that it is already dirty but not cached, 18500f2e7d03SMatthew Ahrens * because there are some calls to dbuf_dirty() that don't 18510f2e7d03SMatthew Ahrens * go through dmu_buf_will_dirty(). 18520f2e7d03SMatthew Ahrens */ 18530f2e7d03SMatthew Ahrens if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 18540f2e7d03SMatthew Ahrens /* This dbuf is already dirty and cached. 
*/ 18550f2e7d03SMatthew Ahrens dbuf_redirty(dr); 18560f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 18570f2e7d03SMatthew Ahrens return; 18580f2e7d03SMatthew Ahrens } 18590f2e7d03SMatthew Ahrens } 18600f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 18610f2e7d03SMatthew Ahrens 1862744947dcSTom Erickson DB_DNODE_ENTER(db); 1863744947dcSTom Erickson if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 1864fa9e4066Sahrens rf |= DB_RF_HAVESTRUCT; 1865744947dcSTom Erickson DB_DNODE_EXIT(db); 1866ea8dc4b6Seschrock (void) dbuf_read(db, NULL, rf); 1867c717a561Smaybee (void) dbuf_dirty(db, tx); 1868fa9e4066Sahrens } 1869fa9e4066Sahrens 187082c9918fSTim Haley void 187182c9918fSTim Haley dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 187282c9918fSTim Haley { 187382c9918fSTim Haley dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 187482c9918fSTim Haley 187582c9918fSTim Haley db->db_state = DB_NOFILL; 187682c9918fSTim Haley 187782c9918fSTim Haley dmu_buf_will_fill(db_fake, tx); 187882c9918fSTim Haley } 187982c9918fSTim Haley 1880fa9e4066Sahrens void 1881ea8dc4b6Seschrock dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1882fa9e4066Sahrens { 1883ea8dc4b6Seschrock dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1884ea8dc4b6Seschrock 18850a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1886fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1887fa9e4066Sahrens ASSERT(db->db_level == 0); 1888fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 1889fa9e4066Sahrens 1890ea8dc4b6Seschrock ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1891fa9e4066Sahrens dmu_tx_private_ok(tx)); 1892fa9e4066Sahrens 1893fa9e4066Sahrens dbuf_noread(db); 1894c717a561Smaybee (void) dbuf_dirty(db, tx); 1895fa9e4066Sahrens } 1896fa9e4066Sahrens 1897fa9e4066Sahrens #pragma weak dmu_buf_fill_done = dbuf_fill_done 1898fa9e4066Sahrens /* ARGSUSED */ 1899fa9e4066Sahrens void 1900fa9e4066Sahrens dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1901fa9e4066Sahrens { 1902fa9e4066Sahrens mutex_enter(&db->db_mtx); 19039c9dc39aSek DBUF_VERIFY(db); 1904fa9e4066Sahrens 1905fa9e4066Sahrens if (db->db_state == DB_FILL) { 1906c717a561Smaybee if (db->db_level == 0 && db->db_freed_in_flight) { 19070a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1908fa9e4066Sahrens /* we were freed while filling */ 1909fa9e4066Sahrens /* XXX dbuf_undirty? 
*/ 1910fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 1911c717a561Smaybee db->db_freed_in_flight = FALSE; 1912fa9e4066Sahrens } 1913fa9e4066Sahrens db->db_state = DB_CACHED; 1914fa9e4066Sahrens cv_broadcast(&db->db_changed); 1915fa9e4066Sahrens } 1916fa9e4066Sahrens mutex_exit(&db->db_mtx); 1917fa9e4066Sahrens } 1918fa9e4066Sahrens 19195d7b4d43SMatthew Ahrens void 19205d7b4d43SMatthew Ahrens dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 19215d7b4d43SMatthew Ahrens bp_embedded_type_t etype, enum zio_compress comp, 19225d7b4d43SMatthew Ahrens int uncompressed_size, int compressed_size, int byteorder, 19235d7b4d43SMatthew Ahrens dmu_tx_t *tx) 19245d7b4d43SMatthew Ahrens { 19255d7b4d43SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 19265d7b4d43SMatthew Ahrens struct dirty_leaf *dl; 19275d7b4d43SMatthew Ahrens dmu_object_type_t type; 19285d7b4d43SMatthew Ahrens 1929ca0cc391SMatthew Ahrens if (etype == BP_EMBEDDED_TYPE_DATA) { 1930ca0cc391SMatthew Ahrens ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 1931ca0cc391SMatthew Ahrens SPA_FEATURE_EMBEDDED_DATA)); 1932ca0cc391SMatthew Ahrens } 1933ca0cc391SMatthew Ahrens 19345d7b4d43SMatthew Ahrens DB_DNODE_ENTER(db); 19355d7b4d43SMatthew Ahrens type = DB_DNODE(db)->dn_type; 19365d7b4d43SMatthew Ahrens DB_DNODE_EXIT(db); 19375d7b4d43SMatthew Ahrens 19385d7b4d43SMatthew Ahrens ASSERT0(db->db_level); 19395d7b4d43SMatthew Ahrens ASSERT(db->db_blkid != DMU_BONUS_BLKID); 19405d7b4d43SMatthew Ahrens 19415d7b4d43SMatthew Ahrens dmu_buf_will_not_fill(dbuf, tx); 19425d7b4d43SMatthew Ahrens 19435d7b4d43SMatthew Ahrens ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 19445d7b4d43SMatthew Ahrens dl = &db->db_last_dirty->dt.dl; 19455d7b4d43SMatthew Ahrens encode_embedded_bp_compressed(&dl->dr_overridden_by, 19465d7b4d43SMatthew Ahrens data, comp, uncompressed_size, compressed_size); 19475d7b4d43SMatthew Ahrens BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 19485d7b4d43SMatthew Ahrens BP_SET_TYPE(&dl->dr_overridden_by, type); 19495d7b4d43SMatthew Ahrens BP_SET_LEVEL(&dl->dr_overridden_by, 0); 19505d7b4d43SMatthew Ahrens BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 19515d7b4d43SMatthew Ahrens 19525d7b4d43SMatthew Ahrens dl->dr_override_state = DR_OVERRIDDEN; 19535d7b4d43SMatthew Ahrens dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 19545d7b4d43SMatthew Ahrens } 19555d7b4d43SMatthew Ahrens 19562fdbea25SAleksandr Guzovskiy /* 19572fdbea25SAleksandr Guzovskiy * Directly assign a provided arc buf to a given dbuf if it's not referenced 19582fdbea25SAleksandr Guzovskiy * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 
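 *
 * Callers normally reach this through the dmu_assign_arcbuf() wrapper.
 * A rough sketch of that zero-copy write path (illustration only;
 * db_handle, os, object, off and blksz are placeholders and error
 * handling is simplified):
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(db_handle, blksz);
 *	... fill abuf->b_data with the caller's data ...
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, blksz);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		dmu_assign_arcbuf(db_handle, off, abuf, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_return_arcbuf(abuf);
 *		dmu_tx_abort(tx);
 *	}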
19592fdbea25SAleksandr Guzovskiy */ 19602fdbea25SAleksandr Guzovskiy void 19612fdbea25SAleksandr Guzovskiy dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 19622fdbea25SAleksandr Guzovskiy { 19632fdbea25SAleksandr Guzovskiy ASSERT(!refcount_is_zero(&db->db_holds)); 19640a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 19652fdbea25SAleksandr Guzovskiy ASSERT(db->db_level == 0); 19665602294fSDan Kimmel ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 19672fdbea25SAleksandr Guzovskiy ASSERT(buf != NULL); 19685602294fSDan Kimmel ASSERT(arc_buf_lsize(buf) == db->db.db_size); 19692fdbea25SAleksandr Guzovskiy ASSERT(tx->tx_txg != 0); 19702fdbea25SAleksandr Guzovskiy 19712fdbea25SAleksandr Guzovskiy arc_return_buf(buf, db); 19722fdbea25SAleksandr Guzovskiy ASSERT(arc_released(buf)); 19732fdbea25SAleksandr Guzovskiy 19742fdbea25SAleksandr Guzovskiy mutex_enter(&db->db_mtx); 19752fdbea25SAleksandr Guzovskiy 19762fdbea25SAleksandr Guzovskiy while (db->db_state == DB_READ || db->db_state == DB_FILL) 19772fdbea25SAleksandr Guzovskiy cv_wait(&db->db_changed, &db->db_mtx); 19782fdbea25SAleksandr Guzovskiy 19792fdbea25SAleksandr Guzovskiy ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 19802fdbea25SAleksandr Guzovskiy 19812fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED && 19822fdbea25SAleksandr Guzovskiy refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 19832fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 19842fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 19852fdbea25SAleksandr Guzovskiy bcopy(buf->b_data, db->db.db_data, db->db.db_size); 1986dcbf3bd6SGeorge Wilson arc_buf_destroy(buf, db); 1987c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_copied(); 19882fdbea25SAleksandr Guzovskiy return; 19892fdbea25SAleksandr Guzovskiy } 19902fdbea25SAleksandr Guzovskiy 1991c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_nocopy(); 19922fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED) { 19932fdbea25SAleksandr Guzovskiy dbuf_dirty_record_t *dr = db->db_last_dirty; 19942fdbea25SAleksandr Guzovskiy 19952fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf != NULL); 19962fdbea25SAleksandr Guzovskiy if (dr != NULL && dr->dr_txg == tx->tx_txg) { 19972fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_data == db->db_buf); 19982fdbea25SAleksandr Guzovskiy if (!arc_released(db->db_buf)) { 19992fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_override_state == 20002fdbea25SAleksandr Guzovskiy DR_OVERRIDDEN); 20012fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 20022fdbea25SAleksandr Guzovskiy } 20032fdbea25SAleksandr Guzovskiy dr->dt.dl.dr_data = buf; 2004dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 20052fdbea25SAleksandr Guzovskiy } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 20062fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 2007dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 20082fdbea25SAleksandr Guzovskiy } 20092fdbea25SAleksandr Guzovskiy db->db_buf = NULL; 20102fdbea25SAleksandr Guzovskiy } 20112fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf == NULL); 20122fdbea25SAleksandr Guzovskiy dbuf_set_data(db, buf); 20132fdbea25SAleksandr Guzovskiy db->db_state = DB_FILL; 20142fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 20152fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 201643466aaeSMax Grossman dmu_buf_fill_done(&db->db, tx); 20172fdbea25SAleksandr Guzovskiy } 20182fdbea25SAleksandr Guzovskiy 
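/*
 * Summary of the locking in dbuf_destroy() below (descriptive only, no new
 * behavior): the dbuf is entered with db_mtx held and zero holds, marked
 * DB_EVICTING, and db_mtx is then dropped so that dn_dbufs_mtx can be taken
 * to remove the dbuf from the dnode's AVL tree before it is freed.
 */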
2019ea8dc4b6Seschrock void 2020dcbf3bd6SGeorge Wilson dbuf_destroy(dmu_buf_impl_t *db) 2021fa9e4066Sahrens { 2022744947dcSTom Erickson dnode_t *dn; 2023ea8dc4b6Seschrock dmu_buf_impl_t *parent = db->db_parent; 2024744947dcSTom Erickson dmu_buf_impl_t *dndb; 2025fa9e4066Sahrens 2026fa9e4066Sahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 2027fa9e4066Sahrens ASSERT(refcount_is_zero(&db->db_holds)); 2028fa9e4066Sahrens 2029dcbf3bd6SGeorge Wilson if (db->db_buf != NULL) { 2030dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 2031dcbf3bd6SGeorge Wilson db->db_buf = NULL; 2032dcbf3bd6SGeorge Wilson } 2033ea8dc4b6Seschrock 2034dcbf3bd6SGeorge Wilson if (db->db_blkid == DMU_BONUS_BLKID) { 2035ea8dc4b6Seschrock ASSERT(db->db.db_data != NULL); 2036dcbf3bd6SGeorge Wilson zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); 2037dcbf3bd6SGeorge Wilson arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 2038fa9e4066Sahrens db->db_state = DB_UNCACHED; 2039fa9e4066Sahrens } 2040fa9e4066Sahrens 2041dcbf3bd6SGeorge Wilson dbuf_clear_data(db); 2042dcbf3bd6SGeorge Wilson 2043dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) { 204494c2d0ebSMatthew Ahrens multilist_remove(dbuf_cache, db); 2045dcbf3bd6SGeorge Wilson (void) refcount_remove_many(&dbuf_cache_size, 2046dcbf3bd6SGeorge Wilson db->db.db_size, db); 2047dcbf3bd6SGeorge Wilson } 2048dcbf3bd6SGeorge Wilson 204982c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2050fa9e4066Sahrens ASSERT(db->db_data_pending == NULL); 2051fa9e4066Sahrens 2052ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2053ea8dc4b6Seschrock db->db_blkptr = NULL; 2054ea8dc4b6Seschrock 2055dcbf3bd6SGeorge Wilson /* 2056dcbf3bd6SGeorge Wilson * Now that db_state is DB_EVICTING, nobody else can find this via 2057dcbf3bd6SGeorge Wilson * the hash table. We can now drop db_mtx, which allows us to 2058dcbf3bd6SGeorge Wilson * acquire the dn_dbufs_mtx. 2059dcbf3bd6SGeorge Wilson */ 2060dcbf3bd6SGeorge Wilson mutex_exit(&db->db_mtx); 2061dcbf3bd6SGeorge Wilson 2062744947dcSTom Erickson DB_DNODE_ENTER(db); 2063744947dcSTom Erickson dn = DB_DNODE(db); 2064744947dcSTom Erickson dndb = dn->dn_dbuf; 2065dcbf3bd6SGeorge Wilson if (db->db_blkid != DMU_BONUS_BLKID) { 2066dcbf3bd6SGeorge Wilson boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2067dcbf3bd6SGeorge Wilson if (needlock) 2068dcbf3bd6SGeorge Wilson mutex_enter(&dn->dn_dbufs_mtx); 20690f6d88adSAlex Reece avl_remove(&dn->dn_dbufs, db); 2070640c1670SJosef 'Jeff' Sipek atomic_dec_32(&dn->dn_dbufs_count); 2071744947dcSTom Erickson membar_producer(); 2072744947dcSTom Erickson DB_DNODE_EXIT(db); 2073dcbf3bd6SGeorge Wilson if (needlock) 2074dcbf3bd6SGeorge Wilson mutex_exit(&dn->dn_dbufs_mtx); 2075744947dcSTom Erickson /* 2076744947dcSTom Erickson * Decrementing the dbuf count means that the hold corresponding 2077744947dcSTom Erickson * to the removed dbuf is no longer discounted in dnode_move(), 2078744947dcSTom Erickson * so the dnode cannot be moved until after we release the hold. 2079744947dcSTom Erickson * The membar_producer() ensures visibility of the decremented 2080744947dcSTom Erickson * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2081744947dcSTom Erickson * release any lock. 
2082744947dcSTom Erickson */ 2083ea8dc4b6Seschrock dnode_rele(dn, db); 2084744947dcSTom Erickson db->db_dnode_handle = NULL; 2085dcbf3bd6SGeorge Wilson 2086dcbf3bd6SGeorge Wilson dbuf_hash_remove(db); 2087744947dcSTom Erickson } else { 2088744947dcSTom Erickson DB_DNODE_EXIT(db); 2089ea8dc4b6Seschrock } 2090ea8dc4b6Seschrock 2091dcbf3bd6SGeorge Wilson ASSERT(refcount_is_zero(&db->db_holds)); 2092ea8dc4b6Seschrock 2093dcbf3bd6SGeorge Wilson db->db_parent = NULL; 2094dcbf3bd6SGeorge Wilson 2095dcbf3bd6SGeorge Wilson ASSERT(db->db_buf == NULL); 2096dcbf3bd6SGeorge Wilson ASSERT(db->db.db_data == NULL); 2097dcbf3bd6SGeorge Wilson ASSERT(db->db_hash_next == NULL); 2098dcbf3bd6SGeorge Wilson ASSERT(db->db_blkptr == NULL); 2099dcbf3bd6SGeorge Wilson ASSERT(db->db_data_pending == NULL); 2100dcbf3bd6SGeorge Wilson ASSERT(!multilist_link_active(&db->db_cache_link)); 2101dcbf3bd6SGeorge Wilson 2102dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2103dcbf3bd6SGeorge Wilson arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2104fa9e4066Sahrens 2105fa9e4066Sahrens /* 2106744947dcSTom Erickson * If this dbuf is referenced from an indirect dbuf, 2107fa9e4066Sahrens * decrement the ref count on the indirect dbuf. 2108fa9e4066Sahrens */ 2109c543ec06Sahrens if (parent && parent != dndb) 2110ea8dc4b6Seschrock dbuf_rele(parent, db); 2111fa9e4066Sahrens } 2112fa9e4066Sahrens 2113a2cdcdd2SPaul Dagnelie /* 2114a2cdcdd2SPaul Dagnelie * Note: While bpp will always be updated if the function returns success, 2115a2cdcdd2SPaul Dagnelie * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2116a2cdcdd2SPaul Dagnelie * this happens when the dnode is the meta-dnode, or a userused or groupused 2117a2cdcdd2SPaul Dagnelie * object. 2118a2cdcdd2SPaul Dagnelie */ 2119fa9e4066Sahrens static int 2120fa9e4066Sahrens dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2121fa9e4066Sahrens dmu_buf_impl_t **parentp, blkptr_t **bpp) 2122fa9e4066Sahrens { 21230b69c2f0Sahrens *parentp = NULL; 21240b69c2f0Sahrens *bpp = NULL; 21250b69c2f0Sahrens 21260a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 21270a586ceaSMark Shellenbaum 21280a586ceaSMark Shellenbaum if (blkid == DMU_SPILL_BLKID) { 21290a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 213006e0070dSMark Shellenbaum if (dn->dn_have_spill && 213106e0070dSMark Shellenbaum (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 21320a586ceaSMark Shellenbaum *bpp = &dn->dn_phys->dn_spill; 21330a586ceaSMark Shellenbaum else 21340a586ceaSMark Shellenbaum *bpp = NULL; 21350a586ceaSMark Shellenbaum dbuf_add_ref(dn->dn_dbuf, NULL); 21360a586ceaSMark Shellenbaum *parentp = dn->dn_dbuf; 21370a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 21380a586ceaSMark Shellenbaum return (0); 21390a586ceaSMark Shellenbaum } 2140ea8dc4b6Seschrock 21417de35a3eSPaul Dagnelie int nlevels = 21427de35a3eSPaul Dagnelie (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 21437de35a3eSPaul Dagnelie int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2144fa9e4066Sahrens 2145fa9e4066Sahrens ASSERT3U(level * epbs, <, 64); 2146fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 21477de35a3eSPaul Dagnelie /* 21487de35a3eSPaul Dagnelie * This assertion shouldn't trip as long as the max indirect block size 21497de35a3eSPaul Dagnelie * is less than 1M. 
The reason for this is that up to that point, 21507de35a3eSPaul Dagnelie * the number of levels required to address an entire object with blocks 21517de35a3eSPaul Dagnelie * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 21527de35a3eSPaul Dagnelie * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 21537de35a3eSPaul Dagnelie * (i.e. we can address the entire object), objects will all use at most 21547de35a3eSPaul Dagnelie * N-1 levels and the assertion won't overflow. However, once epbs is 21557de35a3eSPaul Dagnelie * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 21567de35a3eSPaul Dagnelie * enough to address an entire object, so objects will have 5 levels, 21577de35a3eSPaul Dagnelie * but then this assertion will overflow. 21587de35a3eSPaul Dagnelie * 21597de35a3eSPaul Dagnelie * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 21607de35a3eSPaul Dagnelie * need to redo this logic to handle overflows. 21617de35a3eSPaul Dagnelie */ 21627de35a3eSPaul Dagnelie ASSERT(level >= nlevels || 21637de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs) + 21647de35a3eSPaul Dagnelie highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2165ea8dc4b6Seschrock if (level >= nlevels || 21667de35a3eSPaul Dagnelie blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 21677de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs)) || 21687de35a3eSPaul Dagnelie (fail_sparse && 21697de35a3eSPaul Dagnelie blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2170fa9e4066Sahrens /* the buffer has no parent yet */ 2171be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 2172fa9e4066Sahrens } else if (level < nlevels-1) { 2173fa9e4066Sahrens /* this block is referenced from an indirect block */ 2174fa9e4066Sahrens int err = dbuf_hold_impl(dn, level+1, 2175a2cdcdd2SPaul Dagnelie blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2176fa9e4066Sahrens if (err) 2177fa9e4066Sahrens return (err); 2178ea8dc4b6Seschrock err = dbuf_read(*parentp, NULL, 2179ea8dc4b6Seschrock (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2180c543ec06Sahrens if (err) { 2181c543ec06Sahrens dbuf_rele(*parentp, NULL); 2182c543ec06Sahrens *parentp = NULL; 2183c543ec06Sahrens return (err); 2184ea8dc4b6Seschrock } 2185c543ec06Sahrens *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2186c543ec06Sahrens (blkid & ((1ULL << epbs) - 1)); 21877de35a3eSPaul Dagnelie if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 21887de35a3eSPaul Dagnelie ASSERT(BP_IS_HOLE(*bpp)); 2189c543ec06Sahrens return (0); 2190fa9e4066Sahrens } else { 2191fa9e4066Sahrens /* the block is referenced from the dnode */ 2192fa9e4066Sahrens ASSERT3U(level, ==, nlevels-1); 2193fa9e4066Sahrens ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2194fa9e4066Sahrens blkid < dn->dn_phys->dn_nblkptr); 2195c543ec06Sahrens if (dn->dn_dbuf) { 2196c543ec06Sahrens dbuf_add_ref(dn->dn_dbuf, NULL); 2197c543ec06Sahrens *parentp = dn->dn_dbuf; 2198c543ec06Sahrens } 2199fa9e4066Sahrens *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2200fa9e4066Sahrens return (0); 2201fa9e4066Sahrens } 2202fa9e4066Sahrens } 2203fa9e4066Sahrens 2204fa9e4066Sahrens static dmu_buf_impl_t * 2205fa9e4066Sahrens dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2206fa9e4066Sahrens dmu_buf_impl_t *parent, blkptr_t *blkptr) 2207fa9e4066Sahrens { 2208503ad85cSMatthew Ahrens objset_t *os = dn->dn_objset; 2209fa9e4066Sahrens dmu_buf_impl_t *db, *odb; 2210fa9e4066Sahrens 2211fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2212fa9e4066Sahrens ASSERT(dn->dn_type != 
DMU_OT_NONE); 2213fa9e4066Sahrens 2214dcbf3bd6SGeorge Wilson db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2215fa9e4066Sahrens 2216fa9e4066Sahrens db->db_objset = os; 2217fa9e4066Sahrens db->db.db_object = dn->dn_object; 2218fa9e4066Sahrens db->db_level = level; 2219fa9e4066Sahrens db->db_blkid = blkid; 2220c717a561Smaybee db->db_last_dirty = NULL; 2221ea8dc4b6Seschrock db->db_dirtycnt = 0; 2222744947dcSTom Erickson db->db_dnode_handle = dn->dn_handle; 2223ea8dc4b6Seschrock db->db_parent = parent; 2224ea8dc4b6Seschrock db->db_blkptr = blkptr; 2225fa9e4066Sahrens 2226bc9014e6SJustin Gibbs db->db_user = NULL; 2227d2058105SJustin T. Gibbs db->db_user_immediate_evict = FALSE; 2228d2058105SJustin T. Gibbs db->db_freed_in_flight = FALSE; 2229d2058105SJustin T. Gibbs db->db_pending_evict = FALSE; 2230ea8dc4b6Seschrock 22310a586ceaSMark Shellenbaum if (blkid == DMU_BONUS_BLKID) { 2232ea8dc4b6Seschrock ASSERT3P(parent, ==, dn->dn_dbuf); 22331934e92fSmaybee db->db.db_size = DN_MAX_BONUSLEN - 22341934e92fSmaybee (dn->dn_nblkptr-1) * sizeof (blkptr_t); 22351934e92fSmaybee ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 22360a586ceaSMark Shellenbaum db->db.db_offset = DMU_BONUS_BLKID; 2237ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2238ea8dc4b6Seschrock /* the bonus dbuf is not placed in the hash table */ 22395a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2240ea8dc4b6Seschrock return (db); 22410a586ceaSMark Shellenbaum } else if (blkid == DMU_SPILL_BLKID) { 22420a586ceaSMark Shellenbaum db->db.db_size = (blkptr != NULL) ? 22430a586ceaSMark Shellenbaum BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 22440a586ceaSMark Shellenbaum db->db.db_offset = 0; 2245fa9e4066Sahrens } else { 2246fa9e4066Sahrens int blocksize = 224769962b56SMatthew Ahrens db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2248fa9e4066Sahrens db->db.db_size = blocksize; 2249fa9e4066Sahrens db->db.db_offset = db->db_blkid * blocksize; 2250fa9e4066Sahrens } 2251fa9e4066Sahrens 2252fa9e4066Sahrens /* 2253fa9e4066Sahrens * Hold the dn_dbufs_mtx while we get the new dbuf 2254fa9e4066Sahrens * in the hash table *and* added to the dbufs list. 2255fa9e4066Sahrens * This prevents a possible deadlock with someone 2256fa9e4066Sahrens * trying to look up this dbuf before its added to the 2257fa9e4066Sahrens * dn_dbufs list. 
2258fa9e4066Sahrens */ 2259fa9e4066Sahrens mutex_enter(&dn->dn_dbufs_mtx); 2260ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2261fa9e4066Sahrens if ((odb = dbuf_hash_insert(db)) != NULL) { 2262fa9e4066Sahrens /* someone else inserted it first */ 2263dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2264fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 2265fa9e4066Sahrens return (odb); 2266fa9e4066Sahrens } 22670f6d88adSAlex Reece avl_add(&dn->dn_dbufs, db); 2268653af1b8SStephen Blinick 2269ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2270fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 22715a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2272fa9e4066Sahrens 2273fa9e4066Sahrens if (parent && parent != dn->dn_dbuf) 2274fa9e4066Sahrens dbuf_add_ref(parent, db); 2275fa9e4066Sahrens 2276ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2277ea8dc4b6Seschrock refcount_count(&dn->dn_holds) > 0); 2278fa9e4066Sahrens (void) refcount_add(&dn->dn_holds, db); 2279640c1670SJosef 'Jeff' Sipek atomic_inc_32(&dn->dn_dbufs_count); 2280fa9e4066Sahrens 2281fa9e4066Sahrens dprintf_dbuf(db, "db=%p\n", db); 2282fa9e4066Sahrens 2283fa9e4066Sahrens return (db); 2284fa9e4066Sahrens } 2285fa9e4066Sahrens 2286a2cdcdd2SPaul Dagnelie typedef struct dbuf_prefetch_arg { 2287a2cdcdd2SPaul Dagnelie spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2288a2cdcdd2SPaul Dagnelie zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2289a2cdcdd2SPaul Dagnelie int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2290a2cdcdd2SPaul Dagnelie int dpa_curlevel; /* The current level that we're reading */ 2291dcbf3bd6SGeorge Wilson dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2292a2cdcdd2SPaul Dagnelie zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2293a2cdcdd2SPaul Dagnelie zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2294a2cdcdd2SPaul Dagnelie arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2295a2cdcdd2SPaul Dagnelie } dbuf_prefetch_arg_t; 2296a2cdcdd2SPaul Dagnelie 2297a2cdcdd2SPaul Dagnelie /* 2298a2cdcdd2SPaul Dagnelie * Actually issue the prefetch read for the block given. 2299a2cdcdd2SPaul Dagnelie */ 2300a2cdcdd2SPaul Dagnelie static void 2301a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2302a2cdcdd2SPaul Dagnelie { 2303a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2304a2cdcdd2SPaul Dagnelie return; 2305a2cdcdd2SPaul Dagnelie 2306a2cdcdd2SPaul Dagnelie arc_flags_t aflags = 2307a2cdcdd2SPaul Dagnelie dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2308a2cdcdd2SPaul Dagnelie 2309a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2310a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2311a2cdcdd2SPaul Dagnelie ASSERT(dpa->dpa_zio != NULL); 2312a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2313a2cdcdd2SPaul Dagnelie dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2314a2cdcdd2SPaul Dagnelie &aflags, &dpa->dpa_zb); 2315a2cdcdd2SPaul Dagnelie } 2316a2cdcdd2SPaul Dagnelie 2317a2cdcdd2SPaul Dagnelie /* 2318a2cdcdd2SPaul Dagnelie * Called when an indirect block above our prefetch target is read in. This 2319a2cdcdd2SPaul Dagnelie * will either read in the next indirect block down the tree or issue the actual 2320a2cdcdd2SPaul Dagnelie * prefetch if the next block down is our target. 
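 *
 * For example (illustrative numbers), with epbs == 7 (128 blkptrs per
 * indirect block), suppose the target is the level-0 block at blkid 50000
 * and the closest cached ancestor was the L3 indirect, so the chain began
 * by reading the L2 (dpa_curlevel == 2).  When that read completes we drop
 * dpa_curlevel to 1, compute nextblkid = 50000 >> 7 == 390, and read the
 * L1 indirect whose blkptr sits at slot P2PHASE(390, 128) == 6 of the L2
 * we just read.  When the L1 read completes, dpa_curlevel drops to 0,
 * which equals dpa_zb.zb_level, so the blkptr at slot
 * P2PHASE(50000, 128) == 80 of the L1 is the target's blkptr and
 * dbuf_issue_final_prefetch() is called.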
2321a2cdcdd2SPaul Dagnelie */ 2322a2cdcdd2SPaul Dagnelie static void 2323a2cdcdd2SPaul Dagnelie dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) 2324a2cdcdd2SPaul Dagnelie { 2325a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = private; 2326a2cdcdd2SPaul Dagnelie 2327a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2328a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_curlevel, >, 0); 2329dcbf3bd6SGeorge Wilson 2330dcbf3bd6SGeorge Wilson /* 2331dcbf3bd6SGeorge Wilson * The dpa_dnode is only valid if we are called with a NULL 2332dcbf3bd6SGeorge Wilson * zio. This indicates that the arc_read() returned without 2333dcbf3bd6SGeorge Wilson * first calling zio_read() to issue a physical read. Once 2334dcbf3bd6SGeorge Wilson * a physical read is made the dpa_dnode must be invalidated 2335dcbf3bd6SGeorge Wilson * as the locks guarding it may have been dropped. If the 2336dcbf3bd6SGeorge Wilson * dpa_dnode is still valid, then we want to add it to the dbuf 2337dcbf3bd6SGeorge Wilson * cache. To do so, we must hold the dbuf associated with the block 2338dcbf3bd6SGeorge Wilson * we just prefetched, read its contents so that we associate it 2339dcbf3bd6SGeorge Wilson * with an arc_buf_t, and then release it. 2340dcbf3bd6SGeorge Wilson */ 2341a2cdcdd2SPaul Dagnelie if (zio != NULL) { 2342a2cdcdd2SPaul Dagnelie ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2343dcbf3bd6SGeorge Wilson if (zio->io_flags & ZIO_FLAG_RAW) { 2344dcbf3bd6SGeorge Wilson ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2345dcbf3bd6SGeorge Wilson } else { 2346dcbf3bd6SGeorge Wilson ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2347dcbf3bd6SGeorge Wilson } 2348a2cdcdd2SPaul Dagnelie ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2349dcbf3bd6SGeorge Wilson 2350dcbf3bd6SGeorge Wilson dpa->dpa_dnode = NULL; 2351dcbf3bd6SGeorge Wilson } else if (dpa->dpa_dnode != NULL) { 2352dcbf3bd6SGeorge Wilson uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2353dcbf3bd6SGeorge Wilson (dpa->dpa_epbs * (dpa->dpa_curlevel - 2354dcbf3bd6SGeorge Wilson dpa->dpa_zb.zb_level)); 2355dcbf3bd6SGeorge Wilson dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2356dcbf3bd6SGeorge Wilson dpa->dpa_curlevel, curblkid, FTAG); 2357dcbf3bd6SGeorge Wilson (void) dbuf_read(db, NULL, 2358dcbf3bd6SGeorge Wilson DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2359dcbf3bd6SGeorge Wilson dbuf_rele(db, FTAG); 2360a2cdcdd2SPaul Dagnelie } 2361a2cdcdd2SPaul Dagnelie 2362a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel--; 2363a2cdcdd2SPaul Dagnelie 2364a2cdcdd2SPaul Dagnelie uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2365a2cdcdd2SPaul Dagnelie (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2366a2cdcdd2SPaul Dagnelie blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2367a2cdcdd2SPaul Dagnelie P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2368a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) { 2369a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2370a2cdcdd2SPaul Dagnelie } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2371a2cdcdd2SPaul Dagnelie ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2372a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dpa, bp); 2373a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2374a2cdcdd2SPaul Dagnelie } else { 2375a2cdcdd2SPaul Dagnelie arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2376a2cdcdd2SPaul Dagnelie zbookmark_phys_t zb; 2377a2cdcdd2SPaul Dagnelie 237827295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then 
decides */ 237927295216Sbenrubson if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 238027295216Sbenrubson iter_aflags |= ARC_FLAG_L2CACHE; 238127295216Sbenrubson 2382a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2383a2cdcdd2SPaul Dagnelie 2384a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2385a2cdcdd2SPaul Dagnelie dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2386a2cdcdd2SPaul Dagnelie 2387a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2388a2cdcdd2SPaul Dagnelie bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2389a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2390a2cdcdd2SPaul Dagnelie &iter_aflags, &zb); 2391a2cdcdd2SPaul Dagnelie } 2392dcbf3bd6SGeorge Wilson 2393dcbf3bd6SGeorge Wilson arc_buf_destroy(abuf, private); 2394a2cdcdd2SPaul Dagnelie } 2395a2cdcdd2SPaul Dagnelie 2396a2cdcdd2SPaul Dagnelie /* 2397a2cdcdd2SPaul Dagnelie * Issue prefetch reads for the given block on the given level. If the indirect 2398a2cdcdd2SPaul Dagnelie * blocks above that block are not in memory, we will read them in 2399a2cdcdd2SPaul Dagnelie * asynchronously. As a result, this call never blocks waiting for a read to 2400a2cdcdd2SPaul Dagnelie * complete. 2401a2cdcdd2SPaul Dagnelie */ 2402fa9e4066Sahrens void 2403a2cdcdd2SPaul Dagnelie dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2404a2cdcdd2SPaul Dagnelie arc_flags_t aflags) 2405fa9e4066Sahrens { 2406a2cdcdd2SPaul Dagnelie blkptr_t bp; 2407a2cdcdd2SPaul Dagnelie int epbs, nlevels, curlevel; 2408a2cdcdd2SPaul Dagnelie uint64_t curblkid; 2409fa9e4066Sahrens 24100a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2411fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2412fa9e4066Sahrens 2413cf6106c8SMatthew Ahrens if (blkid > dn->dn_maxblkid) 2414cf6106c8SMatthew Ahrens return; 2415cf6106c8SMatthew Ahrens 2416fa9e4066Sahrens if (dnode_block_freed(dn, blkid)) 2417fa9e4066Sahrens return; 2418fa9e4066Sahrens 2419a2cdcdd2SPaul Dagnelie /* 2420a2cdcdd2SPaul Dagnelie * This dnode hasn't been written to disk yet, so there's nothing to 2421a2cdcdd2SPaul Dagnelie * prefetch. 2422a2cdcdd2SPaul Dagnelie */ 2423a2cdcdd2SPaul Dagnelie nlevels = dn->dn_phys->dn_nlevels; 2424a2cdcdd2SPaul Dagnelie if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2425a2cdcdd2SPaul Dagnelie return; 2426a2cdcdd2SPaul Dagnelie 2427a2cdcdd2SPaul Dagnelie epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2428a2cdcdd2SPaul Dagnelie if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2429a2cdcdd2SPaul Dagnelie return; 2430a2cdcdd2SPaul Dagnelie 2431a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2432a2cdcdd2SPaul Dagnelie level, blkid); 2433a2cdcdd2SPaul Dagnelie if (db != NULL) { 2434a2cdcdd2SPaul Dagnelie mutex_exit(&db->db_mtx); 24359e9c486fSGeorge Wilson /* 2436a2cdcdd2SPaul Dagnelie * This dbuf already exists. It is either CACHED, or 2437a2cdcdd2SPaul Dagnelie * (we assume) about to be read or filled. 24389e9c486fSGeorge Wilson */ 24399e9c486fSGeorge Wilson return; 2440fa9e4066Sahrens } 2441fa9e4066Sahrens 2442a2cdcdd2SPaul Dagnelie /* 2443a2cdcdd2SPaul Dagnelie * Find the closest ancestor (indirect block) of the target block 2444a2cdcdd2SPaul Dagnelie * that is present in the cache. In this indirect block, we will 2445a2cdcdd2SPaul Dagnelie * find the bp that is at curlevel, curblkid. 
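 *
 * For example (illustrative numbers), with epbs == 7 and a request for
 * the level-0 block at blkid 50000, we first look for a cached L1 dbuf at
 * blkid 50000 >> 7 == 390; if that is not cached we look for the L2 dbuf
 * at blkid 390 >> 7 == 3, and so on.  The first cached indirect we find
 * supplies the blkptr of its child on the path down to the target, and
 * that child becomes (curlevel, curblkid).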
2446a2cdcdd2SPaul Dagnelie */ 2447a2cdcdd2SPaul Dagnelie curlevel = level; 2448a2cdcdd2SPaul Dagnelie curblkid = blkid; 2449a2cdcdd2SPaul Dagnelie while (curlevel < nlevels - 1) { 2450a2cdcdd2SPaul Dagnelie int parent_level = curlevel + 1; 2451a2cdcdd2SPaul Dagnelie uint64_t parent_blkid = curblkid >> epbs; 2452a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db; 2453a2cdcdd2SPaul Dagnelie 2454a2cdcdd2SPaul Dagnelie if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2455a2cdcdd2SPaul Dagnelie FALSE, TRUE, FTAG, &db) == 0) { 2456a2cdcdd2SPaul Dagnelie blkptr_t *bpp = db->db_buf->b_data; 2457a2cdcdd2SPaul Dagnelie bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2458a2cdcdd2SPaul Dagnelie dbuf_rele(db, FTAG); 2459a2cdcdd2SPaul Dagnelie break; 2460a2cdcdd2SPaul Dagnelie } 2461b24ab676SJeff Bonwick 2462a2cdcdd2SPaul Dagnelie curlevel = parent_level; 2463a2cdcdd2SPaul Dagnelie curblkid = parent_blkid; 2464a2cdcdd2SPaul Dagnelie } 2465ea8dc4b6Seschrock 2466a2cdcdd2SPaul Dagnelie if (curlevel == nlevels - 1) { 2467a2cdcdd2SPaul Dagnelie /* No cached indirect blocks found. */ 2468a2cdcdd2SPaul Dagnelie ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2469a2cdcdd2SPaul Dagnelie bp = dn->dn_phys->dn_blkptr[curblkid]; 2470fa9e4066Sahrens } 2471a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(&bp)) 2472a2cdcdd2SPaul Dagnelie return; 2473a2cdcdd2SPaul Dagnelie 2474a2cdcdd2SPaul Dagnelie ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2475a2cdcdd2SPaul Dagnelie 2476a2cdcdd2SPaul Dagnelie zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2477a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL); 2478a2cdcdd2SPaul Dagnelie 2479a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2480a2cdcdd2SPaul Dagnelie dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2481a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2482a2cdcdd2SPaul Dagnelie dn->dn_object, level, blkid); 2483a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel = curlevel; 2484a2cdcdd2SPaul Dagnelie dpa->dpa_prio = prio; 2485a2cdcdd2SPaul Dagnelie dpa->dpa_aflags = aflags; 2486a2cdcdd2SPaul Dagnelie dpa->dpa_spa = dn->dn_objset->os_spa; 2487dcbf3bd6SGeorge Wilson dpa->dpa_dnode = dn; 2488a2cdcdd2SPaul Dagnelie dpa->dpa_epbs = epbs; 2489a2cdcdd2SPaul Dagnelie dpa->dpa_zio = pio; 2490a2cdcdd2SPaul Dagnelie 249127295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 249227295216Sbenrubson if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 249327295216Sbenrubson dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 249427295216Sbenrubson 2495a2cdcdd2SPaul Dagnelie /* 2496a2cdcdd2SPaul Dagnelie * If we have the indirect just above us, no need to do the asynchronous 2497a2cdcdd2SPaul Dagnelie * prefetch chain; we'll just run the last step ourselves. If we're at 2498a2cdcdd2SPaul Dagnelie * a higher level, though, we want to issue the prefetches for all the 2499a2cdcdd2SPaul Dagnelie * indirect blocks asynchronously, so we can go on with whatever we were 2500a2cdcdd2SPaul Dagnelie * doing. 
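 *
 * That is, when curlevel == level the bp found above is already the
 * target's blkptr and dbuf_issue_final_prefetch() is called from this
 * thread; otherwise we arc_read() the topmost missing indirect and let
 * dbuf_prefetch_indirect_done() walk the rest of the chain.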
2501a2cdcdd2SPaul Dagnelie */ 2502a2cdcdd2SPaul Dagnelie if (curlevel == level) { 2503a2cdcdd2SPaul Dagnelie ASSERT3U(curblkid, ==, blkid); 2504a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dpa, &bp); 2505a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2506a2cdcdd2SPaul Dagnelie } else { 2507a2cdcdd2SPaul Dagnelie arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2508a2cdcdd2SPaul Dagnelie zbookmark_phys_t zb; 2509a2cdcdd2SPaul Dagnelie 251027295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 251127295216Sbenrubson if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 251227295216Sbenrubson iter_aflags |= ARC_FLAG_L2CACHE; 251327295216Sbenrubson 2514a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2515a2cdcdd2SPaul Dagnelie dn->dn_object, curlevel, curblkid); 2516a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2517a2cdcdd2SPaul Dagnelie &bp, dbuf_prefetch_indirect_done, dpa, prio, 2518a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2519a2cdcdd2SPaul Dagnelie &iter_aflags, &zb); 2520a2cdcdd2SPaul Dagnelie } 2521a2cdcdd2SPaul Dagnelie /* 2522a2cdcdd2SPaul Dagnelie * We use pio here instead of dpa_zio since it's possible that 2523a2cdcdd2SPaul Dagnelie * dpa may have already been freed. 2524a2cdcdd2SPaul Dagnelie */ 2525a2cdcdd2SPaul Dagnelie zio_nowait(pio); 2526fa9e4066Sahrens } 2527fa9e4066Sahrens 2528fa9e4066Sahrens /* 2529fa9e4066Sahrens * Returns with db_holds incremented, and db_mtx not held. 2530fa9e4066Sahrens * Note: dn_struct_rwlock must be held. 2531fa9e4066Sahrens */ 2532fa9e4066Sahrens int 2533a2cdcdd2SPaul Dagnelie dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2534a2cdcdd2SPaul Dagnelie boolean_t fail_sparse, boolean_t fail_uncached, 2535fa9e4066Sahrens void *tag, dmu_buf_impl_t **dbp) 2536fa9e4066Sahrens { 2537fa9e4066Sahrens dmu_buf_impl_t *db, *parent = NULL; 2538fa9e4066Sahrens 25390a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2540fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2541fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, level); 2542fa9e4066Sahrens 2543fa9e4066Sahrens *dbp = NULL; 2544ea8dc4b6Seschrock top: 2545fa9e4066Sahrens /* dbuf_find() returns with db_mtx held */ 2546e57a022bSJustin T. 
Gibbs db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 2547fa9e4066Sahrens 2548fa9e4066Sahrens if (db == NULL) { 2549fa9e4066Sahrens blkptr_t *bp = NULL; 2550fa9e4066Sahrens int err; 2551fa9e4066Sahrens 2552a2cdcdd2SPaul Dagnelie if (fail_uncached) 2553a2cdcdd2SPaul Dagnelie return (SET_ERROR(ENOENT)); 2554a2cdcdd2SPaul Dagnelie 2555c543ec06Sahrens ASSERT3P(parent, ==, NULL); 2556fa9e4066Sahrens err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 2557fa9e4066Sahrens if (fail_sparse) { 2558fa9e4066Sahrens if (err == 0 && bp && BP_IS_HOLE(bp)) 2559be6fd75aSMatthew Ahrens err = SET_ERROR(ENOENT); 2560fa9e4066Sahrens if (err) { 2561c543ec06Sahrens if (parent) 2562ea8dc4b6Seschrock dbuf_rele(parent, NULL); 2563fa9e4066Sahrens return (err); 2564fa9e4066Sahrens } 2565fa9e4066Sahrens } 2566ea8dc4b6Seschrock if (err && err != ENOENT) 2567ea8dc4b6Seschrock return (err); 2568fa9e4066Sahrens db = dbuf_create(dn, level, blkid, parent, bp); 2569fa9e4066Sahrens } 2570fa9e4066Sahrens 2571a2cdcdd2SPaul Dagnelie if (fail_uncached && db->db_state != DB_CACHED) { 2572a2cdcdd2SPaul Dagnelie mutex_exit(&db->db_mtx); 2573a2cdcdd2SPaul Dagnelie return (SET_ERROR(ENOENT)); 2574a2cdcdd2SPaul Dagnelie } 2575a2cdcdd2SPaul Dagnelie 2576dcbf3bd6SGeorge Wilson if (db->db_buf != NULL) 2577ea8dc4b6Seschrock ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 2578ea8dc4b6Seschrock 2579ea8dc4b6Seschrock ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 2580ea8dc4b6Seschrock 2581fa9e4066Sahrens /* 2582c717a561Smaybee * If this buffer is currently syncing out, and we are 2583c717a561Smaybee * still referencing it from db_data, we need to make a copy 2584c717a561Smaybee * of it in case we decide we want to dirty it again in this txg. 2585fa9e4066Sahrens */ 25860a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2587ea8dc4b6Seschrock dn->dn_object != DMU_META_DNODE_OBJECT && 2588c717a561Smaybee db->db_state == DB_CACHED && db->db_data_pending) { 2589c717a561Smaybee dbuf_dirty_record_t *dr = db->db_data_pending; 2590fa9e4066Sahrens 2591c717a561Smaybee if (dr->dt.dl.dr_data == db->db_buf) { 2592c717a561Smaybee arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2593c717a561Smaybee 2594c717a561Smaybee dbuf_set_data(db, 25955602294fSDan Kimmel arc_alloc_buf(dn->dn_objset->os_spa, db, type, 25965602294fSDan Kimmel db->db.db_size)); 2597c717a561Smaybee bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 2598c717a561Smaybee db->db.db_size); 2599c717a561Smaybee } 2600fa9e4066Sahrens } 2601fa9e4066Sahrens 2602dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) { 2603dcbf3bd6SGeorge Wilson ASSERT(refcount_is_zero(&db->db_holds)); 260494c2d0ebSMatthew Ahrens multilist_remove(dbuf_cache, db); 2605dcbf3bd6SGeorge Wilson (void) refcount_remove_many(&dbuf_cache_size, 2606dcbf3bd6SGeorge Wilson db->db.db_size, db); 2607dcbf3bd6SGeorge Wilson } 2608ea8dc4b6Seschrock (void) refcount_add(&db->db_holds, tag); 26099c9dc39aSek DBUF_VERIFY(db); 2610fa9e4066Sahrens mutex_exit(&db->db_mtx); 2611fa9e4066Sahrens 2612fa9e4066Sahrens /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2613c543ec06Sahrens if (parent) 2614ea8dc4b6Seschrock dbuf_rele(parent, NULL); 2615fa9e4066Sahrens 2616744947dcSTom Erickson ASSERT3P(DB_DNODE(db), ==, dn); 2617fa9e4066Sahrens ASSERT3U(db->db_blkid, ==, blkid); 2618fa9e4066Sahrens ASSERT3U(db->db_level, ==, level); 2619fa9e4066Sahrens *dbp = db; 2620fa9e4066Sahrens 2621fa9e4066Sahrens return (0); 2622fa9e4066Sahrens } 
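/*
 * Example (illustrative sketch only): the typical pattern for using the
 * interface above is to hold a dbuf, make sure its contents are cached,
 * and drop the hold when done.  This assumes the caller already has a
 * hold on the dnode "dn" and holds dn_struct_rwlock (as dbuf_hold_impl()
 * requires), that "blkid" names a valid level-0 block, and that "buf" is
 * a caller-supplied buffer of at least db_size bytes:
 *
 *	dmu_buf_impl_t *db;
 *	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
 *	if (err == 0) {
 *		if (dbuf_read(db, NULL,
 *		    DB_RF_HAVESTRUCT | DB_RF_CANFAIL) == 0)
 *			bcopy(db->db.db_data, buf, db->db.db_size);
 *		dbuf_rele(db, FTAG);
 *	}
 */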
2623fa9e4066Sahrens 2624fa9e4066Sahrens dmu_buf_impl_t * 2625ea8dc4b6Seschrock dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2626fa9e4066Sahrens { 2627a2cdcdd2SPaul Dagnelie return (dbuf_hold_level(dn, 0, blkid, tag)); 2628fa9e4066Sahrens } 2629fa9e4066Sahrens 2630fa9e4066Sahrens dmu_buf_impl_t * 2631fa9e4066Sahrens dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2632fa9e4066Sahrens { 2633fa9e4066Sahrens dmu_buf_impl_t *db; 2634a2cdcdd2SPaul Dagnelie int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2635ea8dc4b6Seschrock return (err ? NULL : db); 2636fa9e4066Sahrens } 2637fa9e4066Sahrens 26381934e92fSmaybee void 2639ea8dc4b6Seschrock dbuf_create_bonus(dnode_t *dn) 2640fa9e4066Sahrens { 2641ea8dc4b6Seschrock ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2642ea8dc4b6Seschrock 2643ea8dc4b6Seschrock ASSERT(dn->dn_bonus == NULL); 26440a586ceaSMark Shellenbaum dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 26450a586ceaSMark Shellenbaum } 26460a586ceaSMark Shellenbaum 26470a586ceaSMark Shellenbaum int 26480a586ceaSMark Shellenbaum dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 26490a586ceaSMark Shellenbaum { 26500a586ceaSMark Shellenbaum dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2651744947dcSTom Erickson dnode_t *dn; 2652744947dcSTom Erickson 26530a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 2654be6fd75aSMatthew Ahrens return (SET_ERROR(ENOTSUP)); 26550a586ceaSMark Shellenbaum if (blksz == 0) 26560a586ceaSMark Shellenbaum blksz = SPA_MINBLOCKSIZE; 2657b5152584SMatthew Ahrens ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 2658b5152584SMatthew Ahrens blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 26590a586ceaSMark Shellenbaum 2660744947dcSTom Erickson DB_DNODE_ENTER(db); 2661744947dcSTom Erickson dn = DB_DNODE(db); 2662744947dcSTom Erickson rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 26630a586ceaSMark Shellenbaum dbuf_new_size(db, blksz, tx); 2664744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 2665744947dcSTom Erickson DB_DNODE_EXIT(db); 26660a586ceaSMark Shellenbaum 26670a586ceaSMark Shellenbaum return (0); 26680a586ceaSMark Shellenbaum } 26690a586ceaSMark Shellenbaum 26700a586ceaSMark Shellenbaum void 26710a586ceaSMark Shellenbaum dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 26720a586ceaSMark Shellenbaum { 26730a586ceaSMark Shellenbaum dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2674fa9e4066Sahrens } 2675fa9e4066Sahrens 2676ea8dc4b6Seschrock #pragma weak dmu_buf_add_ref = dbuf_add_ref 2677fa9e4066Sahrens void 2678fa9e4066Sahrens dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2679fa9e4066Sahrens { 2680ea8dc4b6Seschrock int64_t holds = refcount_add(&db->db_holds, tag); 2681dcbf3bd6SGeorge Wilson ASSERT3S(holds, >, 1); 2682fa9e4066Sahrens } 2683fa9e4066Sahrens 2684e57a022bSJustin T. Gibbs #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2685e57a022bSJustin T. Gibbs boolean_t 2686e57a022bSJustin T. Gibbs dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2687e57a022bSJustin T. Gibbs void *tag) 2688e57a022bSJustin T. Gibbs { 2689e57a022bSJustin T. Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2690e57a022bSJustin T. Gibbs dmu_buf_impl_t *found_db; 2691e57a022bSJustin T. Gibbs boolean_t result = B_FALSE; 2692e57a022bSJustin T. Gibbs 2693e57a022bSJustin T. Gibbs if (db->db_blkid == DMU_BONUS_BLKID) 2694e57a022bSJustin T. Gibbs found_db = dbuf_find_bonus(os, obj); 2695e57a022bSJustin T. Gibbs else 2696e57a022bSJustin T. 
Gibbs found_db = dbuf_find(os, obj, 0, blkid); 2697e57a022bSJustin T. Gibbs 2698e57a022bSJustin T. Gibbs if (found_db != NULL) { 2699e57a022bSJustin T. Gibbs if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 2700e57a022bSJustin T. Gibbs (void) refcount_add(&db->db_holds, tag); 2701e57a022bSJustin T. Gibbs result = B_TRUE; 2702e57a022bSJustin T. Gibbs } 2703e57a022bSJustin T. Gibbs mutex_exit(&db->db_mtx); 2704e57a022bSJustin T. Gibbs } 2705e57a022bSJustin T. Gibbs return (result); 2706e57a022bSJustin T. Gibbs } 2707e57a022bSJustin T. Gibbs 2708744947dcSTom Erickson /* 2709744947dcSTom Erickson * If you call dbuf_rele() you had better not be referencing the dnode handle 2710744947dcSTom Erickson * unless you have some other direct or indirect hold on the dnode. (An indirect 2711744947dcSTom Erickson * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 2712744947dcSTom Erickson * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 2713744947dcSTom Erickson * dnode's parent dbuf evicting its dnode handles. 2714744947dcSTom Erickson */ 2715fa9e4066Sahrens void 2716ea8dc4b6Seschrock dbuf_rele(dmu_buf_impl_t *db, void *tag) 2717b24ab676SJeff Bonwick { 2718b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 2719b24ab676SJeff Bonwick dbuf_rele_and_unlock(db, tag); 2720b24ab676SJeff Bonwick } 2721b24ab676SJeff Bonwick 272243466aaeSMax Grossman void 272343466aaeSMax Grossman dmu_buf_rele(dmu_buf_t *db, void *tag) 272443466aaeSMax Grossman { 272543466aaeSMax Grossman dbuf_rele((dmu_buf_impl_t *)db, tag); 272643466aaeSMax Grossman } 272743466aaeSMax Grossman 2728b24ab676SJeff Bonwick /* 2729b24ab676SJeff Bonwick * dbuf_rele() for an already-locked dbuf. This is necessary to allow 2730b24ab676SJeff Bonwick * db_dirtycnt and db_holds to be updated atomically. 2731b24ab676SJeff Bonwick */ 2732b24ab676SJeff Bonwick void 2733b24ab676SJeff Bonwick dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) 2734fa9e4066Sahrens { 2735fa9e4066Sahrens int64_t holds; 2736fa9e4066Sahrens 2737b24ab676SJeff Bonwick ASSERT(MUTEX_HELD(&db->db_mtx)); 27389c9dc39aSek DBUF_VERIFY(db); 2739fa9e4066Sahrens 2740744947dcSTom Erickson /* 2741744947dcSTom Erickson * Remove the reference to the dbuf before removing its hold on the 2742744947dcSTom Erickson * dnode so we can guarantee in dnode_move() that a referenced bonus 2743744947dcSTom Erickson * buffer has a corresponding dnode hold. 2744744947dcSTom Erickson */ 2745fa9e4066Sahrens holds = refcount_remove(&db->db_holds, tag); 2746ea8dc4b6Seschrock ASSERT(holds >= 0); 2747ea8dc4b6Seschrock 2748c717a561Smaybee /* 2749c717a561Smaybee * We can't freeze indirects if there is a possibility that they 2750c717a561Smaybee * may be modified in the current syncing context. 2751c717a561Smaybee */ 2752dcbf3bd6SGeorge Wilson if (db->db_buf != NULL && 2753dcbf3bd6SGeorge Wilson holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 27546b4acc8bSahrens arc_buf_freeze(db->db_buf); 2755dcbf3bd6SGeorge Wilson } 27566b4acc8bSahrens 2757ea8dc4b6Seschrock if (holds == db->db_dirtycnt && 2758d2058105SJustin T. Gibbs db->db_level == 0 && db->db_user_immediate_evict) 2759ea8dc4b6Seschrock dbuf_evict_user(db); 2760fa9e4066Sahrens 2761fa9e4066Sahrens if (holds == 0) { 27620a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 2763cd485b49SJustin T. Gibbs dnode_t *dn; 2764d2058105SJustin T. Gibbs boolean_t evict_dbuf = db->db_pending_evict; 2765744947dcSTom Erickson 2766744947dcSTom Erickson /* 2767cd485b49SJustin T. 
Gibbs * If the dnode moves here, we cannot cross this 2768cd485b49SJustin T. Gibbs * barrier until the move completes. 2769744947dcSTom Erickson */ 2770744947dcSTom Erickson DB_DNODE_ENTER(db); 2771cd485b49SJustin T. Gibbs 2772cd485b49SJustin T. Gibbs dn = DB_DNODE(db); 2773cd485b49SJustin T. Gibbs atomic_dec_32(&dn->dn_dbufs_count); 2774cd485b49SJustin T. Gibbs 2775cd485b49SJustin T. Gibbs /* 2776cd485b49SJustin T. Gibbs * Decrementing the dbuf count means that the bonus 2777cd485b49SJustin T. Gibbs * buffer's dnode hold is no longer discounted in 2778cd485b49SJustin T. Gibbs * dnode_move(). The dnode cannot move until after 2779d2058105SJustin T. Gibbs * the dnode_rele() below. 2780cd485b49SJustin T. Gibbs */ 2781744947dcSTom Erickson DB_DNODE_EXIT(db); 2782cd485b49SJustin T. Gibbs 2783cd485b49SJustin T. Gibbs /* 2784cd485b49SJustin T. Gibbs * Do not reference db after its lock is dropped. 2785cd485b49SJustin T. Gibbs * Another thread may evict it. 2786cd485b49SJustin T. Gibbs */ 2787cd485b49SJustin T. Gibbs mutex_exit(&db->db_mtx); 2788cd485b49SJustin T. Gibbs 2789d2058105SJustin T. Gibbs if (evict_dbuf) 2790cd485b49SJustin T. Gibbs dnode_evict_bonus(dn); 2791d2058105SJustin T. Gibbs 2792d2058105SJustin T. Gibbs dnode_rele(dn, db); 2793ea8dc4b6Seschrock } else if (db->db_buf == NULL) { 2794ea8dc4b6Seschrock /* 2795ea8dc4b6Seschrock * This is a special case: we never associated this 2796ea8dc4b6Seschrock * dbuf with any data allocated from the ARC. 2797ea8dc4b6Seschrock */ 279882c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || 279982c9918fSTim Haley db->db_state == DB_NOFILL); 2800dcbf3bd6SGeorge Wilson dbuf_destroy(db); 28016b4acc8bSahrens } else if (arc_released(db->db_buf)) { 2802ea8dc4b6Seschrock /* 2803ea8dc4b6Seschrock * This dbuf has anonymous data associated with it. 
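 * (Its ARC buffer has been released and is no longer associated with
 * any on-disk block, so there is nothing worth keeping in the dbuf
 * cache; the dbuf is simply destroyed.)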
2804ea8dc4b6Seschrock */ 2805dcbf3bd6SGeorge Wilson dbuf_destroy(db); 2806ea8dc4b6Seschrock } else { 2807dcbf3bd6SGeorge Wilson boolean_t do_arc_evict = B_FALSE; 2808dcbf3bd6SGeorge Wilson blkptr_t bp; 2809dcbf3bd6SGeorge Wilson spa_t *spa = dmu_objset_spa(db->db_objset); 2810dcbf3bd6SGeorge Wilson 2811dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) && 2812dcbf3bd6SGeorge Wilson db->db_blkptr != NULL && 2813dcbf3bd6SGeorge Wilson !BP_IS_HOLE(db->db_blkptr) && 2814dcbf3bd6SGeorge Wilson !BP_IS_EMBEDDED(db->db_blkptr)) { 2815dcbf3bd6SGeorge Wilson do_arc_evict = B_TRUE; 2816dcbf3bd6SGeorge Wilson bp = *db->db_blkptr; 2817dcbf3bd6SGeorge Wilson } 28189253d63dSGeorge Wilson 2819dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) || 2820dcbf3bd6SGeorge Wilson db->db_pending_evict) { 2821dcbf3bd6SGeorge Wilson dbuf_destroy(db); 2822dcbf3bd6SGeorge Wilson } else if (!multilist_link_active(&db->db_cache_link)) { 282394c2d0ebSMatthew Ahrens multilist_insert(dbuf_cache, db); 2824dcbf3bd6SGeorge Wilson (void) refcount_add_many(&dbuf_cache_size, 2825dcbf3bd6SGeorge Wilson db->db.db_size, db); 28263baa08fcSek mutex_exit(&db->db_mtx); 2827dcbf3bd6SGeorge Wilson 2828dcbf3bd6SGeorge Wilson dbuf_evict_notify(); 2829bbfa8ea8SMatthew Ahrens } 2830dcbf3bd6SGeorge Wilson 2831dcbf3bd6SGeorge Wilson if (do_arc_evict) 2832dcbf3bd6SGeorge Wilson arc_freed(spa, &bp); 2833ea8dc4b6Seschrock } 2834fa9e4066Sahrens } else { 2835fa9e4066Sahrens mutex_exit(&db->db_mtx); 2836fa9e4066Sahrens } 2837dcbf3bd6SGeorge Wilson 2838fa9e4066Sahrens } 2839fa9e4066Sahrens 2840fa9e4066Sahrens #pragma weak dmu_buf_refcount = dbuf_refcount 2841fa9e4066Sahrens uint64_t 2842fa9e4066Sahrens dbuf_refcount(dmu_buf_impl_t *db) 2843fa9e4066Sahrens { 2844fa9e4066Sahrens return (refcount_count(&db->db_holds)); 2845fa9e4066Sahrens } 2846fa9e4066Sahrens 2847fa9e4066Sahrens void * 2848bc9014e6SJustin Gibbs dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 2849bc9014e6SJustin Gibbs dmu_buf_user_t *new_user) 2850fa9e4066Sahrens { 2851bc9014e6SJustin Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2852bc9014e6SJustin Gibbs 2853bc9014e6SJustin Gibbs mutex_enter(&db->db_mtx); 2854bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 2855bc9014e6SJustin Gibbs if (db->db_user == old_user) 2856bc9014e6SJustin Gibbs db->db_user = new_user; 2857bc9014e6SJustin Gibbs else 2858bc9014e6SJustin Gibbs old_user = db->db_user; 2859bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 2860bc9014e6SJustin Gibbs mutex_exit(&db->db_mtx); 2861bc9014e6SJustin Gibbs 2862bc9014e6SJustin Gibbs return (old_user); 2863fa9e4066Sahrens } 2864fa9e4066Sahrens 2865fa9e4066Sahrens void * 2866bc9014e6SJustin Gibbs dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2867fa9e4066Sahrens { 2868bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, NULL, user)); 2869fa9e4066Sahrens } 2870fa9e4066Sahrens 2871fa9e4066Sahrens void * 2872bc9014e6SJustin Gibbs dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2873fa9e4066Sahrens { 2874fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2875fa9e4066Sahrens 2876d2058105SJustin T. 
Gibbs db->db_user_immediate_evict = TRUE; 2877bc9014e6SJustin Gibbs return (dmu_buf_set_user(db_fake, user)); 2878bc9014e6SJustin Gibbs } 2879fa9e4066Sahrens 2880bc9014e6SJustin Gibbs void * 2881bc9014e6SJustin Gibbs dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2882bc9014e6SJustin Gibbs { 2883bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, user, NULL)); 2884fa9e4066Sahrens } 2885fa9e4066Sahrens 2886fa9e4066Sahrens void * 2887fa9e4066Sahrens dmu_buf_get_user(dmu_buf_t *db_fake) 2888fa9e4066Sahrens { 2889fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2890fa9e4066Sahrens 2891bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 2892bc9014e6SJustin Gibbs return (db->db_user); 2893bc9014e6SJustin Gibbs } 2894bc9014e6SJustin Gibbs 2895bc9014e6SJustin Gibbs void 2896bc9014e6SJustin Gibbs dmu_buf_user_evict_wait() 2897bc9014e6SJustin Gibbs { 2898bc9014e6SJustin Gibbs taskq_wait(dbu_evict_taskq); 2899fa9e4066Sahrens } 2900fa9e4066Sahrens 290180901aeaSGeorge Wilson blkptr_t * 290280901aeaSGeorge Wilson dmu_buf_get_blkptr(dmu_buf_t *db) 290380901aeaSGeorge Wilson { 290480901aeaSGeorge Wilson dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 290580901aeaSGeorge Wilson return (dbi->db_blkptr); 290680901aeaSGeorge Wilson } 290780901aeaSGeorge Wilson 2908ae972795SMatthew Ahrens objset_t * 2909ae972795SMatthew Ahrens dmu_buf_get_objset(dmu_buf_t *db) 2910ae972795SMatthew Ahrens { 2911ae972795SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2912ae972795SMatthew Ahrens return (dbi->db_objset); 2913ae972795SMatthew Ahrens } 2914ae972795SMatthew Ahrens 291579d72832SMatthew Ahrens dnode_t * 291679d72832SMatthew Ahrens dmu_buf_dnode_enter(dmu_buf_t *db) 291779d72832SMatthew Ahrens { 291879d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 291979d72832SMatthew Ahrens DB_DNODE_ENTER(dbi); 292079d72832SMatthew Ahrens return (DB_DNODE(dbi)); 292179d72832SMatthew Ahrens } 292279d72832SMatthew Ahrens 292379d72832SMatthew Ahrens void 292479d72832SMatthew Ahrens dmu_buf_dnode_exit(dmu_buf_t *db) 292579d72832SMatthew Ahrens { 292679d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 292779d72832SMatthew Ahrens DB_DNODE_EXIT(dbi); 292879d72832SMatthew Ahrens } 292979d72832SMatthew Ahrens 2930c717a561Smaybee static void 2931c717a561Smaybee dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 2932fa9e4066Sahrens { 2933c717a561Smaybee /* ASSERT(dmu_tx_is_syncing(tx) */ 2934c717a561Smaybee ASSERT(MUTEX_HELD(&db->db_mtx)); 2935c717a561Smaybee 2936c717a561Smaybee if (db->db_blkptr != NULL) 2937c717a561Smaybee return; 2938c717a561Smaybee 29390a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 29400a586ceaSMark Shellenbaum db->db_blkptr = &dn->dn_phys->dn_spill; 29410a586ceaSMark Shellenbaum BP_ZERO(db->db_blkptr); 29420a586ceaSMark Shellenbaum return; 29430a586ceaSMark Shellenbaum } 2944c717a561Smaybee if (db->db_level == dn->dn_phys->dn_nlevels-1) { 2945c717a561Smaybee /* 2946c717a561Smaybee * This buffer was allocated at a time when there was 2947c717a561Smaybee * no available blkptrs from the dnode, or it was 2948c717a561Smaybee * inappropriate to hook it in (i.e., nlevels mis-match). 
2949c717a561Smaybee */ 2950c717a561Smaybee ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 2951c717a561Smaybee ASSERT(db->db_parent == NULL); 2952c717a561Smaybee db->db_parent = dn->dn_dbuf; 2953c717a561Smaybee db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 2954c717a561Smaybee DBUF_VERIFY(db); 2955c717a561Smaybee } else { 2956c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 2957c717a561Smaybee int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2958c717a561Smaybee 2959c717a561Smaybee ASSERT(dn->dn_phys->dn_nlevels > 1); 2960c717a561Smaybee if (parent == NULL) { 2961c717a561Smaybee mutex_exit(&db->db_mtx); 2962c717a561Smaybee rw_enter(&dn->dn_struct_rwlock, RW_READER); 2963a2cdcdd2SPaul Dagnelie parent = dbuf_hold_level(dn, db->db_level + 1, 2964a2cdcdd2SPaul Dagnelie db->db_blkid >> epbs, db); 2965c717a561Smaybee rw_exit(&dn->dn_struct_rwlock); 2966c717a561Smaybee mutex_enter(&db->db_mtx); 2967c717a561Smaybee db->db_parent = parent; 2968c717a561Smaybee } 2969c717a561Smaybee db->db_blkptr = (blkptr_t *)parent->db.db_data + 2970c717a561Smaybee (db->db_blkid & ((1ULL << epbs) - 1)); 2971c717a561Smaybee DBUF_VERIFY(db); 2972c717a561Smaybee } 2973c717a561Smaybee } 2974c717a561Smaybee 2975c717a561Smaybee static void 2976c717a561Smaybee dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 2977c717a561Smaybee { 2978c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 2979744947dcSTom Erickson dnode_t *dn; 2980c717a561Smaybee zio_t *zio; 2981c717a561Smaybee 2982c717a561Smaybee ASSERT(dmu_tx_is_syncing(tx)); 2983c717a561Smaybee 2984c717a561Smaybee dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 2985c717a561Smaybee 2986c717a561Smaybee mutex_enter(&db->db_mtx); 2987c717a561Smaybee 2988c717a561Smaybee ASSERT(db->db_level > 0); 2989c717a561Smaybee DBUF_VERIFY(db); 2990c717a561Smaybee 29913e30c24aSWill Andrews /* Read the block if it hasn't been read yet. */ 2992c717a561Smaybee if (db->db_buf == NULL) { 2993c717a561Smaybee mutex_exit(&db->db_mtx); 2994c717a561Smaybee (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 2995c717a561Smaybee mutex_enter(&db->db_mtx); 2996c717a561Smaybee } 2997c717a561Smaybee ASSERT3U(db->db_state, ==, DB_CACHED); 2998c717a561Smaybee ASSERT(db->db_buf != NULL); 2999c717a561Smaybee 3000744947dcSTom Erickson DB_DNODE_ENTER(db); 3001744947dcSTom Erickson dn = DB_DNODE(db); 30023e30c24aSWill Andrews /* Indirect block size must match what the dnode thinks it is. 
*/ 3003744947dcSTom Erickson ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3004c717a561Smaybee dbuf_check_blkptr(dn, db); 3005744947dcSTom Erickson DB_DNODE_EXIT(db); 3006c717a561Smaybee 30073e30c24aSWill Andrews /* Provide the pending dirty record to child dbufs */ 3008c717a561Smaybee db->db_data_pending = dr; 3009c717a561Smaybee 3010af2c4821Smaybee mutex_exit(&db->db_mtx); 3011*5cabbc6bSPrashanth Sreenivasa 3012088f3894Sahrens dbuf_write(dr, db->db_buf, tx); 3013c717a561Smaybee 3014c717a561Smaybee zio = dr->dr_zio; 3015c717a561Smaybee mutex_enter(&dr->dt.di.dr_mtx); 301646e1baa6SMatthew Ahrens dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3017c717a561Smaybee ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3018c717a561Smaybee mutex_exit(&dr->dt.di.dr_mtx); 3019c717a561Smaybee zio_nowait(zio); 3020c717a561Smaybee } 3021c717a561Smaybee 3022c717a561Smaybee static void 3023c717a561Smaybee dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3024c717a561Smaybee { 3025c717a561Smaybee arc_buf_t **datap = &dr->dt.dl.dr_data; 3026c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 3027744947dcSTom Erickson dnode_t *dn; 3028744947dcSTom Erickson objset_t *os; 3029c717a561Smaybee uint64_t txg = tx->tx_txg; 3030fa9e4066Sahrens 3031fa9e4066Sahrens ASSERT(dmu_tx_is_syncing(tx)); 3032fa9e4066Sahrens 3033fa9e4066Sahrens dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3034fa9e4066Sahrens 3035fa9e4066Sahrens mutex_enter(&db->db_mtx); 3036fa9e4066Sahrens /* 3037fa9e4066Sahrens * To be synced, we must be dirtied. But we 3038fa9e4066Sahrens * might have been freed after the dirty. 3039fa9e4066Sahrens */ 3040fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 3041fa9e4066Sahrens /* This buffer has been freed since it was dirtied */ 3042fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 3043fa9e4066Sahrens } else if (db->db_state == DB_FILL) { 3044fa9e4066Sahrens /* This buffer was freed and is now being re-filled */ 3045c717a561Smaybee ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3046fa9e4066Sahrens } else { 304782c9918fSTim Haley ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3048fa9e4066Sahrens } 30499c9dc39aSek DBUF_VERIFY(db); 3050fa9e4066Sahrens 3051744947dcSTom Erickson DB_DNODE_ENTER(db); 3052744947dcSTom Erickson dn = DB_DNODE(db); 3053744947dcSTom Erickson 30540a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 30550a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 30560a586ceaSMark Shellenbaum dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 30570a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 30580a586ceaSMark Shellenbaum } 30590a586ceaSMark Shellenbaum 3060fa9e4066Sahrens /* 3061c717a561Smaybee * If this is a bonus buffer, simply copy the bonus data into the 3062c717a561Smaybee * dnode. It will be written out when the dnode is synced (and it 3063c717a561Smaybee * will be synced, since it must have been dirty for dbuf_sync to 3064c717a561Smaybee * be called). 
3065fa9e4066Sahrens */ 30660a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 3067c717a561Smaybee dbuf_dirty_record_t **drp; 30681934e92fSmaybee 3069ea8dc4b6Seschrock ASSERT(*datap != NULL); 3070fb09f5aaSMadhav Suresh ASSERT0(db->db_level); 3071ea8dc4b6Seschrock ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN); 3072ea8dc4b6Seschrock bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen); 3073744947dcSTom Erickson DB_DNODE_EXIT(db); 3074744947dcSTom Erickson 30750e8c6158Smaybee if (*datap != db->db.db_data) { 3076ea8dc4b6Seschrock zio_buf_free(*datap, DN_MAX_BONUSLEN); 30775a98e54bSBrendan Gregg - Sun Microsystems arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 30780e8c6158Smaybee } 3079ea8dc4b6Seschrock db->db_data_pending = NULL; 3080c717a561Smaybee drp = &db->db_last_dirty; 3081c717a561Smaybee while (*drp != dr) 3082c717a561Smaybee drp = &(*drp)->dr_next; 308317f17c2dSbonwick ASSERT(dr->dr_next == NULL); 3084b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 308517f17c2dSbonwick *drp = dr->dr_next; 3086c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3087ea8dc4b6Seschrock ASSERT(db->db_dirtycnt > 0); 3088ea8dc4b6Seschrock db->db_dirtycnt -= 1; 3089b24ab676SJeff Bonwick dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg); 3090ea8dc4b6Seschrock return; 3091ea8dc4b6Seschrock } 3092ea8dc4b6Seschrock 3093744947dcSTom Erickson os = dn->dn_objset; 3094744947dcSTom Erickson 3095f82bfe17Sgw /* 3096f82bfe17Sgw * This function may have dropped the db_mtx lock allowing a dmu_sync 3097f82bfe17Sgw * operation to sneak in. As a result, we need to ensure that we 3098f82bfe17Sgw * don't check the dr_override_state until we have returned from 3099f82bfe17Sgw * dbuf_check_blkptr. 3100f82bfe17Sgw */ 3101f82bfe17Sgw dbuf_check_blkptr(dn, db); 3102f82bfe17Sgw 3103c717a561Smaybee /* 3104744947dcSTom Erickson * If this buffer is in the middle of an immediate write, 3105c717a561Smaybee * wait for the synchronous IO to complete. 3106c717a561Smaybee */ 3107c717a561Smaybee while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 3108c717a561Smaybee ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 3109c717a561Smaybee cv_wait(&db->db_changed, &db->db_mtx); 3110c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 3111c717a561Smaybee } 3112c5c6ffa0Smaybee 3113ab69d62fSMatthew Ahrens if (db->db_state != DB_NOFILL && 3114ab69d62fSMatthew Ahrens dn->dn_object != DMU_META_DNODE_OBJECT && 3115ab69d62fSMatthew Ahrens refcount_count(&db->db_holds) > 1 && 3116b24ab676SJeff Bonwick dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 3117ab69d62fSMatthew Ahrens *datap == db->db_buf) { 3118ab69d62fSMatthew Ahrens /* 3119ab69d62fSMatthew Ahrens * If this buffer is currently "in use" (i.e., there 3120ab69d62fSMatthew Ahrens * are active holds and db_data still references it), 3121ab69d62fSMatthew Ahrens * then make a copy before we start the write so that 3122ab69d62fSMatthew Ahrens * any modifications from the open txg will not leak 3123ab69d62fSMatthew Ahrens * into this write. 3124ab69d62fSMatthew Ahrens * 3125ab69d62fSMatthew Ahrens * NOTE: this copy does not need to be made for 3126ab69d62fSMatthew Ahrens * objects only modified in the syncing context (e.g. 3127ab69d62fSMatthew Ahrens * DNONE_DNODE blocks). 
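 *
 * For example, without this copy a thread modifying the buffer in
 * the open txg, while the syncing txg's write is still in flight,
 * could have its changes written out one txg too early.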
3128ab69d62fSMatthew Ahrens */ 31295602294fSDan Kimmel int psize = arc_buf_size(*datap); 3130ab69d62fSMatthew Ahrens arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 31315602294fSDan Kimmel enum zio_compress compress_type = arc_get_compression(*datap); 31325602294fSDan Kimmel 31335602294fSDan Kimmel if (compress_type == ZIO_COMPRESS_OFF) { 31345602294fSDan Kimmel *datap = arc_alloc_buf(os->os_spa, db, type, psize); 31355602294fSDan Kimmel } else { 31365602294fSDan Kimmel ASSERT3U(type, ==, ARC_BUFC_DATA); 31375602294fSDan Kimmel int lsize = arc_buf_lsize(*datap); 31385602294fSDan Kimmel *datap = arc_alloc_compressed_buf(os->os_spa, db, 31395602294fSDan Kimmel psize, lsize, compress_type); 31405602294fSDan Kimmel } 31415602294fSDan Kimmel bcopy(db->db.db_data, (*datap)->b_data, psize); 314282c9918fSTim Haley } 3143c717a561Smaybee db->db_data_pending = dr; 3144fa9e4066Sahrens 3145c717a561Smaybee mutex_exit(&db->db_mtx); 3146fa9e4066Sahrens 3147088f3894Sahrens dbuf_write(dr, *datap, tx); 3148fa9e4066Sahrens 3149c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 3150744947dcSTom Erickson if (dn->dn_object == DMU_META_DNODE_OBJECT) { 3151c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 3152744947dcSTom Erickson DB_DNODE_EXIT(db); 3153744947dcSTom Erickson } else { 3154744947dcSTom Erickson /* 3155744947dcSTom Erickson * Although zio_nowait() does not "wait for an IO", it does 3156744947dcSTom Erickson * initiate the IO. If this is an empty write it seems plausible 3157744947dcSTom Erickson * that the IO could actually be completed before the nowait 3158744947dcSTom Erickson * returns. We need to DB_DNODE_EXIT() first in case 3159744947dcSTom Erickson * zio_nowait() invalidates the dbuf. 3160744947dcSTom Erickson */ 3161744947dcSTom Erickson DB_DNODE_EXIT(db); 3162c717a561Smaybee zio_nowait(dr->dr_zio); 3163744947dcSTom Erickson } 3164c717a561Smaybee } 316523b11526Smaybee 3166c717a561Smaybee void 316746e1baa6SMatthew Ahrens dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3168c717a561Smaybee { 3169c717a561Smaybee dbuf_dirty_record_t *dr; 3170c717a561Smaybee 3171c717a561Smaybee while (dr = list_head(list)) { 3172c717a561Smaybee if (dr->dr_zio != NULL) { 3173c717a561Smaybee /* 3174c717a561Smaybee * If we find an already initialized zio then we 3175c717a561Smaybee * are processing the meta-dnode, and we have finished. 3176c717a561Smaybee * The dbufs for all dnodes are put back on the list 3177c717a561Smaybee * during processing, so that we can zio_wait() 3178c717a561Smaybee * these IOs after initiating all child IOs. 
3179c717a561Smaybee */ 3180c717a561Smaybee ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3181c717a561Smaybee DMU_META_DNODE_OBJECT); 3182c717a561Smaybee break; 318323b11526Smaybee } 318446e1baa6SMatthew Ahrens if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 318546e1baa6SMatthew Ahrens dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 318646e1baa6SMatthew Ahrens VERIFY3U(dr->dr_dbuf->db_level, ==, level); 318746e1baa6SMatthew Ahrens } 3188c717a561Smaybee list_remove(list, dr); 3189c717a561Smaybee if (dr->dr_dbuf->db_level > 0) 3190c717a561Smaybee dbuf_sync_indirect(dr, tx); 3191c717a561Smaybee else 3192c717a561Smaybee dbuf_sync_leaf(dr, tx); 319323b11526Smaybee } 3194c717a561Smaybee } 319523b11526Smaybee 3196fa9e4066Sahrens /* ARGSUSED */ 3197fa9e4066Sahrens static void 3198c717a561Smaybee dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3199fa9e4066Sahrens { 3200fa9e4066Sahrens dmu_buf_impl_t *db = vdb; 3201744947dcSTom Erickson dnode_t *dn; 3202e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3203c717a561Smaybee blkptr_t *bp_orig = &zio->io_bp_orig; 3204b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 3205b24ab676SJeff Bonwick int64_t delta; 3206fa9e4066Sahrens uint64_t fill = 0; 3207b24ab676SJeff Bonwick int i; 3208fa9e4066Sahrens 320911ceac77SAlex Reece ASSERT3P(db->db_blkptr, !=, NULL); 321011ceac77SAlex Reece ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3211e14bb325SJeff Bonwick 3212744947dcSTom Erickson DB_DNODE_ENTER(db); 3213744947dcSTom Erickson dn = DB_DNODE(db); 3214b24ab676SJeff Bonwick delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3215b24ab676SJeff Bonwick dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3216b24ab676SJeff Bonwick zio->io_prev_space_delta = delta; 3217fa9e4066Sahrens 321843466aaeSMax Grossman if (bp->blk_birth != 0) { 321943466aaeSMax Grossman ASSERT((db->db_blkid != DMU_SPILL_BLKID && 322043466aaeSMax Grossman BP_GET_TYPE(bp) == dn->dn_type) || 322143466aaeSMax Grossman (db->db_blkid == DMU_SPILL_BLKID && 32225d7b4d43SMatthew Ahrens BP_GET_TYPE(bp) == dn->dn_bonustype) || 32235d7b4d43SMatthew Ahrens BP_IS_EMBEDDED(bp)); 322443466aaeSMax Grossman ASSERT(BP_GET_LEVEL(bp) == db->db_level); 3225c717a561Smaybee } 3226c5c6ffa0Smaybee 3227c717a561Smaybee mutex_enter(&db->db_mtx); 3228fa9e4066Sahrens 32290a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG 32300a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 32310a586ceaSMark Shellenbaum ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 323211ceac77SAlex Reece ASSERT(!(BP_IS_HOLE(bp)) && 32330a586ceaSMark Shellenbaum db->db_blkptr == &dn->dn_phys->dn_spill); 32340a586ceaSMark Shellenbaum } 32350a586ceaSMark Shellenbaum #endif 32360a586ceaSMark Shellenbaum 3237fa9e4066Sahrens if (db->db_level == 0) { 3238fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 32390a586ceaSMark Shellenbaum if (db->db_blkid > dn->dn_phys->dn_maxblkid && 32400a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) 3241fa9e4066Sahrens dn->dn_phys->dn_maxblkid = db->db_blkid; 3242fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 3243fa9e4066Sahrens 3244fa9e4066Sahrens if (dn->dn_type == DMU_OT_DNODE) { 3245fa9e4066Sahrens dnode_phys_t *dnp = db->db.db_data; 3246fa9e4066Sahrens for (i = db->db.db_size >> DNODE_SHIFT; i > 0; 3247fa9e4066Sahrens i--, dnp++) { 3248fa9e4066Sahrens if (dnp->dn_type != DMU_OT_NONE) 3249fa9e4066Sahrens fill++; 3250fa9e4066Sahrens } 3251fa9e4066Sahrens } else { 325243466aaeSMax Grossman if (BP_IS_HOLE(bp)) { 325343466aaeSMax Grossman fill = 0; 325443466aaeSMax Grossman } else { 
325543466aaeSMax Grossman fill = 1; 325643466aaeSMax Grossman } 3257fa9e4066Sahrens } 3258fa9e4066Sahrens } else { 3259e14bb325SJeff Bonwick blkptr_t *ibp = db->db.db_data; 3260fa9e4066Sahrens ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3261e14bb325SJeff Bonwick for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 3262e14bb325SJeff Bonwick if (BP_IS_HOLE(ibp)) 3263fa9e4066Sahrens continue; 32645d7b4d43SMatthew Ahrens fill += BP_GET_FILL(ibp); 3265fa9e4066Sahrens } 3266fa9e4066Sahrens } 3267744947dcSTom Erickson DB_DNODE_EXIT(db); 3268fa9e4066Sahrens 32695d7b4d43SMatthew Ahrens if (!BP_IS_EMBEDDED(bp)) 32705d7b4d43SMatthew Ahrens bp->blk_fill = fill; 3271c717a561Smaybee 3272c717a561Smaybee mutex_exit(&db->db_mtx); 327311ceac77SAlex Reece 327411ceac77SAlex Reece rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 327511ceac77SAlex Reece *db->db_blkptr = *bp; 327611ceac77SAlex Reece rw_exit(&dn->dn_struct_rwlock); 3277c717a561Smaybee } 3278fa9e4066Sahrens 32798df0bcf0SPaul Dagnelie /* ARGSUSED */ 32808df0bcf0SPaul Dagnelie /* 32818df0bcf0SPaul Dagnelie * This function gets called just prior to running through the compression 32828df0bcf0SPaul Dagnelie * stage of the zio pipeline. If we're an indirect block comprised of only 32838df0bcf0SPaul Dagnelie * holes, then we want this indirect to be compressed away to a hole. In 32848df0bcf0SPaul Dagnelie * order to do that we must zero out any information about the holes that 32858df0bcf0SPaul Dagnelie * this indirect points to before we try to compress it. 32868df0bcf0SPaul Dagnelie */ 32878df0bcf0SPaul Dagnelie static void 32888df0bcf0SPaul Dagnelie dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 32898df0bcf0SPaul Dagnelie { 32908df0bcf0SPaul Dagnelie dmu_buf_impl_t *db = vdb; 32918df0bcf0SPaul Dagnelie dnode_t *dn; 32928df0bcf0SPaul Dagnelie blkptr_t *bp; 32931a01181fSGeorge Wilson unsigned int epbs, i; 32948df0bcf0SPaul Dagnelie 32958df0bcf0SPaul Dagnelie ASSERT3U(db->db_level, >, 0); 32968df0bcf0SPaul Dagnelie DB_DNODE_ENTER(db); 32978df0bcf0SPaul Dagnelie dn = DB_DNODE(db); 32988df0bcf0SPaul Dagnelie epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 32991a01181fSGeorge Wilson ASSERT3U(epbs, <, 31); 33008df0bcf0SPaul Dagnelie 33018df0bcf0SPaul Dagnelie /* Determine if all our children are holes */ 33028df0bcf0SPaul Dagnelie for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) { 33038df0bcf0SPaul Dagnelie if (!BP_IS_HOLE(bp)) 33048df0bcf0SPaul Dagnelie break; 33058df0bcf0SPaul Dagnelie } 33068df0bcf0SPaul Dagnelie 33078df0bcf0SPaul Dagnelie /* 33088df0bcf0SPaul Dagnelie * If all the children are holes, then zero them all out so that 33098df0bcf0SPaul Dagnelie * this indirect block may be compressed away. 33108df0bcf0SPaul Dagnelie */ 33118df0bcf0SPaul Dagnelie if (i == 1 << epbs) { 33121a01181fSGeorge Wilson /* 33131a01181fSGeorge Wilson * We only found holes. Grab the rwlock to prevent 33141a01181fSGeorge Wilson * anybody from reading the blocks we're about to 33151a01181fSGeorge Wilson * zero out. 
33161a01181fSGeorge Wilson */ 33171a01181fSGeorge Wilson rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 33188df0bcf0SPaul Dagnelie bzero(db->db.db_data, db->db.db_size); 33191a01181fSGeorge Wilson rw_exit(&dn->dn_struct_rwlock); 33208df0bcf0SPaul Dagnelie } 33218df0bcf0SPaul Dagnelie DB_DNODE_EXIT(db); 33228df0bcf0SPaul Dagnelie } 33238df0bcf0SPaul Dagnelie 332469962b56SMatthew Ahrens /* 332569962b56SMatthew Ahrens * The SPA will call this callback several times for each zio - once 332669962b56SMatthew Ahrens * for every physical child i/o (zio->io_phys_children times). This 332769962b56SMatthew Ahrens * allows the DMU to monitor the progress of each logical i/o. For example, 332869962b56SMatthew Ahrens * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 332969962b56SMatthew Ahrens * block. There may be a long delay before all copies/fragments are completed, 333069962b56SMatthew Ahrens * so this callback allows us to retire dirty space gradually, as the physical 333169962b56SMatthew Ahrens * i/os complete. 333269962b56SMatthew Ahrens */ 333369962b56SMatthew Ahrens /* ARGSUSED */ 333469962b56SMatthew Ahrens static void 333569962b56SMatthew Ahrens dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 333669962b56SMatthew Ahrens { 333769962b56SMatthew Ahrens dmu_buf_impl_t *db = arg; 333869962b56SMatthew Ahrens objset_t *os = db->db_objset; 333969962b56SMatthew Ahrens dsl_pool_t *dp = dmu_objset_pool(os); 334069962b56SMatthew Ahrens dbuf_dirty_record_t *dr; 334169962b56SMatthew Ahrens int delta = 0; 334269962b56SMatthew Ahrens 334369962b56SMatthew Ahrens dr = db->db_data_pending; 334469962b56SMatthew Ahrens ASSERT3U(dr->dr_txg, ==, zio->io_txg); 334569962b56SMatthew Ahrens 334669962b56SMatthew Ahrens /* 334769962b56SMatthew Ahrens * The callback will be called io_phys_children times. Retire one 334869962b56SMatthew Ahrens * portion of our dirty space each time we are called. Any rounding 334969962b56SMatthew Ahrens * error will be cleaned up by dsl_pool_sync()'s call to 335069962b56SMatthew Ahrens * dsl_pool_undirty_space(). 335169962b56SMatthew Ahrens */ 335269962b56SMatthew Ahrens delta = dr->dr_accounted / zio->io_phys_children; 335369962b56SMatthew Ahrens dsl_pool_undirty_space(dp, delta, zio->io_txg); 335469962b56SMatthew Ahrens } 335569962b56SMatthew Ahrens 3356c717a561Smaybee /* ARGSUSED */ 3357c717a561Smaybee static void 3358c717a561Smaybee dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 3359c717a561Smaybee { 3360c717a561Smaybee dmu_buf_impl_t *db = vdb; 3361b24ab676SJeff Bonwick blkptr_t *bp_orig = &zio->io_bp_orig; 336243466aaeSMax Grossman blkptr_t *bp = db->db_blkptr; 336343466aaeSMax Grossman objset_t *os = db->db_objset; 336443466aaeSMax Grossman dmu_tx_t *tx = os->os_synctx; 3365c717a561Smaybee dbuf_dirty_record_t **drp, *dr; 3366c717a561Smaybee 3367fb09f5aaSMadhav Suresh ASSERT0(zio->io_error); 3368b24ab676SJeff Bonwick ASSERT(db->db_blkptr == bp); 3369b24ab676SJeff Bonwick 337080901aeaSGeorge Wilson /* 337180901aeaSGeorge Wilson * For nopwrites and rewrites we ensure that the bp matches our 337280901aeaSGeorge Wilson * original and bypass all the accounting. 
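 * (A nopwrite means the zio layer detected, via checksum comparison,
 * that the data being written is identical to the block already on
 * disk, so bp is left equal to bp_orig and no block needs to be
 * freed or born.)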
337380901aeaSGeorge Wilson */ 337480901aeaSGeorge Wilson if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 3375b24ab676SJeff Bonwick ASSERT(BP_EQUAL(bp, bp_orig)); 3376b24ab676SJeff Bonwick } else { 337743466aaeSMax Grossman dsl_dataset_t *ds = os->os_dsl_dataset; 3378b24ab676SJeff Bonwick (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3379b24ab676SJeff Bonwick dsl_dataset_block_born(ds, bp, tx); 3380b24ab676SJeff Bonwick } 3381c717a561Smaybee 3382c717a561Smaybee mutex_enter(&db->db_mtx); 3383c717a561Smaybee 3384b24ab676SJeff Bonwick DBUF_VERIFY(db); 3385b24ab676SJeff Bonwick 3386c717a561Smaybee drp = &db->db_last_dirty; 338717f17c2dSbonwick while ((dr = *drp) != db->db_data_pending) 338817f17c2dSbonwick drp = &dr->dr_next; 338917f17c2dSbonwick ASSERT(!list_link_active(&dr->dr_dirty_node)); 3390b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 339117f17c2dSbonwick ASSERT(dr->dr_next == NULL); 339217f17c2dSbonwick *drp = dr->dr_next; 3393c717a561Smaybee 33940a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG 33950a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 3396744947dcSTom Erickson dnode_t *dn; 3397744947dcSTom Erickson 3398744947dcSTom Erickson DB_DNODE_ENTER(db); 3399744947dcSTom Erickson dn = DB_DNODE(db); 34000a586ceaSMark Shellenbaum ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 34010a586ceaSMark Shellenbaum ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 34020a586ceaSMark Shellenbaum db->db_blkptr == &dn->dn_phys->dn_spill); 3403744947dcSTom Erickson DB_DNODE_EXIT(db); 34040a586ceaSMark Shellenbaum } 34050a586ceaSMark Shellenbaum #endif 34060a586ceaSMark Shellenbaum 3407c717a561Smaybee if (db->db_level == 0) { 34080a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3409c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 341082c9918fSTim Haley if (db->db_state != DB_NOFILL) { 341182c9918fSTim Haley if (dr->dt.dl.dr_data != db->db_buf) 3412dcbf3bd6SGeorge Wilson arc_buf_destroy(dr->dt.dl.dr_data, db); 341382c9918fSTim Haley } 3414c717a561Smaybee } else { 3415744947dcSTom Erickson dnode_t *dn; 3416744947dcSTom Erickson 3417744947dcSTom Erickson DB_DNODE_ENTER(db); 3418744947dcSTom Erickson dn = DB_DNODE(db); 3419c717a561Smaybee ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 342043466aaeSMax Grossman ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3421c717a561Smaybee if (!BP_IS_HOLE(db->db_blkptr)) { 3422c717a561Smaybee int epbs = 3423c717a561Smaybee dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 342443466aaeSMax Grossman ASSERT3U(db->db_blkid, <=, 342543466aaeSMax Grossman dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3426c717a561Smaybee ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3427c717a561Smaybee db->db.db_size); 3428c717a561Smaybee } 3429744947dcSTom Erickson DB_DNODE_EXIT(db); 3430c25056deSgw mutex_destroy(&dr->dt.di.dr_mtx); 3431c25056deSgw list_destroy(&dr->dt.di.dr_children); 3432c717a561Smaybee } 3433c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3434fa9e4066Sahrens 3435fa9e4066Sahrens cv_broadcast(&db->db_changed); 3436fa9e4066Sahrens ASSERT(db->db_dirtycnt > 0); 3437fa9e4066Sahrens db->db_dirtycnt -= 1; 3438c717a561Smaybee db->db_data_pending = NULL; 343943466aaeSMax Grossman dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg); 3440b24ab676SJeff Bonwick } 3441b24ab676SJeff Bonwick 3442b24ab676SJeff Bonwick static void 3443b24ab676SJeff Bonwick dbuf_write_nofill_ready(zio_t *zio) 3444b24ab676SJeff Bonwick { 3445b24ab676SJeff Bonwick dbuf_write_ready(zio, NULL, 
zio->io_private); 3446b24ab676SJeff Bonwick } 3447b24ab676SJeff Bonwick 3448b24ab676SJeff Bonwick static void 3449b24ab676SJeff Bonwick dbuf_write_nofill_done(zio_t *zio) 3450b24ab676SJeff Bonwick { 3451b24ab676SJeff Bonwick dbuf_write_done(zio, NULL, zio->io_private); 3452b24ab676SJeff Bonwick } 3453b24ab676SJeff Bonwick 3454b24ab676SJeff Bonwick static void 3455b24ab676SJeff Bonwick dbuf_write_override_ready(zio_t *zio) 3456b24ab676SJeff Bonwick { 3457b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = zio->io_private; 3458b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3459b24ab676SJeff Bonwick 3460b24ab676SJeff Bonwick dbuf_write_ready(zio, NULL, db); 3461b24ab676SJeff Bonwick } 3462b24ab676SJeff Bonwick 3463b24ab676SJeff Bonwick static void 3464b24ab676SJeff Bonwick dbuf_write_override_done(zio_t *zio) 3465b24ab676SJeff Bonwick { 3466b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = zio->io_private; 3467b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3468b24ab676SJeff Bonwick blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3469b24ab676SJeff Bonwick 3470b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 3471b24ab676SJeff Bonwick if (!BP_EQUAL(zio->io_bp, obp)) { 3472b24ab676SJeff Bonwick if (!BP_IS_HOLE(obp)) 3473b24ab676SJeff Bonwick dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3474b24ab676SJeff Bonwick arc_release(dr->dt.dl.dr_data, db); 3475b24ab676SJeff Bonwick } 3476fa9e4066Sahrens mutex_exit(&db->db_mtx); 34774ee0199eSRobert Mustacchi dbuf_write_done(zio, NULL, db); 3478770499e1SDan Kimmel 3479770499e1SDan Kimmel if (zio->io_abd != NULL) 3480770499e1SDan Kimmel abd_put(zio->io_abd); 3481b24ab676SJeff Bonwick } 3482b24ab676SJeff Bonwick 3483*5cabbc6bSPrashanth Sreenivasa typedef struct dbuf_remap_impl_callback_arg { 3484*5cabbc6bSPrashanth Sreenivasa objset_t *drica_os; 3485*5cabbc6bSPrashanth Sreenivasa uint64_t drica_blk_birth; 3486*5cabbc6bSPrashanth Sreenivasa dmu_tx_t *drica_tx; 3487*5cabbc6bSPrashanth Sreenivasa } dbuf_remap_impl_callback_arg_t; 3488*5cabbc6bSPrashanth Sreenivasa 3489*5cabbc6bSPrashanth Sreenivasa static void 3490*5cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 3491*5cabbc6bSPrashanth Sreenivasa void *arg) 3492*5cabbc6bSPrashanth Sreenivasa { 3493*5cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback_arg_t *drica = arg; 3494*5cabbc6bSPrashanth Sreenivasa objset_t *os = drica->drica_os; 3495*5cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(os); 3496*5cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx = drica->drica_tx; 3497*5cabbc6bSPrashanth Sreenivasa 3498*5cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3499*5cabbc6bSPrashanth Sreenivasa 3500*5cabbc6bSPrashanth Sreenivasa if (os == spa_meta_objset(spa)) { 3501*5cabbc6bSPrashanth Sreenivasa spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 3502*5cabbc6bSPrashanth Sreenivasa } else { 3503*5cabbc6bSPrashanth Sreenivasa dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 3504*5cabbc6bSPrashanth Sreenivasa size, drica->drica_blk_birth, tx); 3505*5cabbc6bSPrashanth Sreenivasa } 3506*5cabbc6bSPrashanth Sreenivasa } 3507*5cabbc6bSPrashanth Sreenivasa 3508*5cabbc6bSPrashanth Sreenivasa static void 3509*5cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx) 3510*5cabbc6bSPrashanth Sreenivasa { 3511*5cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = *bp; 3512*5cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(dn->dn_objset); 3513*5cabbc6bSPrashanth Sreenivasa 
dbuf_remap_impl_callback_arg_t drica; 3514*5cabbc6bSPrashanth Sreenivasa 3515*5cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3516*5cabbc6bSPrashanth Sreenivasa 3517*5cabbc6bSPrashanth Sreenivasa drica.drica_os = dn->dn_objset; 3518*5cabbc6bSPrashanth Sreenivasa drica.drica_blk_birth = bp->blk_birth; 3519*5cabbc6bSPrashanth Sreenivasa drica.drica_tx = tx; 3520*5cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 3521*5cabbc6bSPrashanth Sreenivasa &drica)) { 3522*5cabbc6bSPrashanth Sreenivasa /* 3523*5cabbc6bSPrashanth Sreenivasa * The struct_rwlock prevents dbuf_read_impl() from 3524*5cabbc6bSPrashanth Sreenivasa * dereferencing the BP while we are changing it. To 3525*5cabbc6bSPrashanth Sreenivasa * avoid lock contention, only grab it when we are actually 3526*5cabbc6bSPrashanth Sreenivasa * changing the BP. 3527*5cabbc6bSPrashanth Sreenivasa */ 3528*5cabbc6bSPrashanth Sreenivasa rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3529*5cabbc6bSPrashanth Sreenivasa *bp = bp_copy; 3530*5cabbc6bSPrashanth Sreenivasa rw_exit(&dn->dn_struct_rwlock); 3531*5cabbc6bSPrashanth Sreenivasa } 3532*5cabbc6bSPrashanth Sreenivasa } 3533*5cabbc6bSPrashanth Sreenivasa 3534*5cabbc6bSPrashanth Sreenivasa /* 3535*5cabbc6bSPrashanth Sreenivasa * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting 3536*5cabbc6bSPrashanth Sreenivasa * to remap a copy of every bp in the dbuf. 3537*5cabbc6bSPrashanth Sreenivasa */ 3538*5cabbc6bSPrashanth Sreenivasa boolean_t 3539*5cabbc6bSPrashanth Sreenivasa dbuf_can_remap(const dmu_buf_impl_t *db) 3540*5cabbc6bSPrashanth Sreenivasa { 3541*5cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(db->db_objset); 3542*5cabbc6bSPrashanth Sreenivasa blkptr_t *bp = db->db.db_data; 3543*5cabbc6bSPrashanth Sreenivasa boolean_t ret = B_FALSE; 3544*5cabbc6bSPrashanth Sreenivasa 3545*5cabbc6bSPrashanth Sreenivasa ASSERT3U(db->db_level, >, 0); 3546*5cabbc6bSPrashanth Sreenivasa ASSERT3S(db->db_state, ==, DB_CACHED); 3547*5cabbc6bSPrashanth Sreenivasa 3548*5cabbc6bSPrashanth Sreenivasa ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 3549*5cabbc6bSPrashanth Sreenivasa 3550*5cabbc6bSPrashanth Sreenivasa spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3551*5cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 3552*5cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = bp[i]; 3553*5cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 3554*5cabbc6bSPrashanth Sreenivasa ret = B_TRUE; 3555*5cabbc6bSPrashanth Sreenivasa break; 3556*5cabbc6bSPrashanth Sreenivasa } 3557*5cabbc6bSPrashanth Sreenivasa } 3558*5cabbc6bSPrashanth Sreenivasa spa_config_exit(spa, SCL_VDEV, FTAG); 3559*5cabbc6bSPrashanth Sreenivasa 3560*5cabbc6bSPrashanth Sreenivasa return (ret); 3561*5cabbc6bSPrashanth Sreenivasa } 3562*5cabbc6bSPrashanth Sreenivasa 3563*5cabbc6bSPrashanth Sreenivasa boolean_t 3564*5cabbc6bSPrashanth Sreenivasa dnode_needs_remap(const dnode_t *dn) 3565*5cabbc6bSPrashanth Sreenivasa { 3566*5cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(dn->dn_objset); 3567*5cabbc6bSPrashanth Sreenivasa boolean_t ret = B_FALSE; 3568*5cabbc6bSPrashanth Sreenivasa 3569*5cabbc6bSPrashanth Sreenivasa if (dn->dn_phys->dn_nlevels == 0) { 3570*5cabbc6bSPrashanth Sreenivasa return (B_FALSE); 3571*5cabbc6bSPrashanth Sreenivasa } 3572*5cabbc6bSPrashanth Sreenivasa 3573*5cabbc6bSPrashanth Sreenivasa ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 
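	/*
	 * Added note: as in dbuf_can_remap() above, probe a copy of each of
	 * the dnode's block pointers. spa_remap_blkptr() with no callback
	 * only modifies the copy, so this reports whether a real remap would
	 * change anything without touching on-disk state.
	 */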
3574*5cabbc6bSPrashanth Sreenivasa 3575*5cabbc6bSPrashanth Sreenivasa spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3576*5cabbc6bSPrashanth Sreenivasa for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) { 3577*5cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j]; 3578*5cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 3579*5cabbc6bSPrashanth Sreenivasa ret = B_TRUE; 3580*5cabbc6bSPrashanth Sreenivasa break; 3581*5cabbc6bSPrashanth Sreenivasa } 3582*5cabbc6bSPrashanth Sreenivasa } 3583*5cabbc6bSPrashanth Sreenivasa spa_config_exit(spa, SCL_VDEV, FTAG); 3584*5cabbc6bSPrashanth Sreenivasa 3585*5cabbc6bSPrashanth Sreenivasa return (ret); 3586*5cabbc6bSPrashanth Sreenivasa } 3587*5cabbc6bSPrashanth Sreenivasa 3588*5cabbc6bSPrashanth Sreenivasa /* 3589*5cabbc6bSPrashanth Sreenivasa * Remap any existing BP's to concrete vdevs, if possible. 3590*5cabbc6bSPrashanth Sreenivasa */ 3591*5cabbc6bSPrashanth Sreenivasa static void 3592*5cabbc6bSPrashanth Sreenivasa dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 3593*5cabbc6bSPrashanth Sreenivasa { 3594*5cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(db->db_objset); 3595*5cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3596*5cabbc6bSPrashanth Sreenivasa 3597*5cabbc6bSPrashanth Sreenivasa if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 3598*5cabbc6bSPrashanth Sreenivasa return; 3599*5cabbc6bSPrashanth Sreenivasa 3600*5cabbc6bSPrashanth Sreenivasa if (db->db_level > 0) { 3601*5cabbc6bSPrashanth Sreenivasa blkptr_t *bp = db->db.db_data; 3602*5cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 3603*5cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dn, &bp[i], tx); 3604*5cabbc6bSPrashanth Sreenivasa } 3605*5cabbc6bSPrashanth Sreenivasa } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 3606*5cabbc6bSPrashanth Sreenivasa dnode_phys_t *dnp = db->db.db_data; 3607*5cabbc6bSPrashanth Sreenivasa ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 3608*5cabbc6bSPrashanth Sreenivasa DMU_OT_DNODE); 3609*5cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) { 3610*5cabbc6bSPrashanth Sreenivasa for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 3611*5cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx); 3612*5cabbc6bSPrashanth Sreenivasa } 3613*5cabbc6bSPrashanth Sreenivasa } 3614*5cabbc6bSPrashanth Sreenivasa } 3615*5cabbc6bSPrashanth Sreenivasa } 3616*5cabbc6bSPrashanth Sreenivasa 3617*5cabbc6bSPrashanth Sreenivasa 36183e30c24aSWill Andrews /* Issue I/O to commit a dirty buffer to disk. 
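 * Added summary of the code below: the dirty record's zio is chained under
 * the parent indirect block's pending zio (or the dnode's zio when the dnode
 * itself is the parent) and takes one of three forms: an override write when
 * the BP was already provided in open context (dmu_sync() or
 * dmu_buf_write_embedded()), a NOFILL write, or a normal arc_write().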
*/ 3619b24ab676SJeff Bonwick static void 3620b24ab676SJeff Bonwick dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 3621b24ab676SJeff Bonwick { 3622b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3623744947dcSTom Erickson dnode_t *dn; 3624744947dcSTom Erickson objset_t *os; 3625b24ab676SJeff Bonwick dmu_buf_impl_t *parent = db->db_parent; 3626b24ab676SJeff Bonwick uint64_t txg = tx->tx_txg; 36277802d7bfSMatthew Ahrens zbookmark_phys_t zb; 3628b24ab676SJeff Bonwick zio_prop_t zp; 3629b24ab676SJeff Bonwick zio_t *zio; 36300a586ceaSMark Shellenbaum int wp_flag = 0; 3631b24ab676SJeff Bonwick 363211ceac77SAlex Reece ASSERT(dmu_tx_is_syncing(tx)); 363311ceac77SAlex Reece 3634744947dcSTom Erickson DB_DNODE_ENTER(db); 3635744947dcSTom Erickson dn = DB_DNODE(db); 3636744947dcSTom Erickson os = dn->dn_objset; 3637744947dcSTom Erickson 3638b24ab676SJeff Bonwick if (db->db_state != DB_NOFILL) { 3639b24ab676SJeff Bonwick if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 3640b24ab676SJeff Bonwick /* 3641b24ab676SJeff Bonwick * Private object buffers are released here rather 3642b24ab676SJeff Bonwick * than in dbuf_dirty() since they are only modified 3643b24ab676SJeff Bonwick * in the syncing context and we don't want the 3644b24ab676SJeff Bonwick * overhead of making multiple copies of the data. 3645b24ab676SJeff Bonwick */ 3646b24ab676SJeff Bonwick if (BP_IS_HOLE(db->db_blkptr)) { 3647b24ab676SJeff Bonwick arc_buf_thaw(data); 3648b24ab676SJeff Bonwick } else { 36493f9d6ad7SLin Ling dbuf_release_bp(db); 3650b24ab676SJeff Bonwick } 3651*5cabbc6bSPrashanth Sreenivasa dbuf_remap(dn, db, tx); 3652b24ab676SJeff Bonwick } 3653b24ab676SJeff Bonwick } 3654b24ab676SJeff Bonwick 3655b24ab676SJeff Bonwick if (parent != dn->dn_dbuf) { 36563e30c24aSWill Andrews /* Our parent is an indirect block. */ 36573e30c24aSWill Andrews /* We have a dirty parent that has been scheduled for write. */ 3658b24ab676SJeff Bonwick ASSERT(parent && parent->db_data_pending); 36593e30c24aSWill Andrews /* Our parent's buffer is one level closer to the dnode. */ 3660b24ab676SJeff Bonwick ASSERT(db->db_level == parent->db_level-1); 36613e30c24aSWill Andrews /* 36623e30c24aSWill Andrews * We're about to modify our parent's db_data by modifying 36633e30c24aSWill Andrews * our block pointer, so the parent must be released. 36643e30c24aSWill Andrews */ 3665b24ab676SJeff Bonwick ASSERT(arc_released(parent->db_buf)); 3666b24ab676SJeff Bonwick zio = parent->db_data_pending->dr_zio; 3667b24ab676SJeff Bonwick } else { 36683e30c24aSWill Andrews /* Our parent is the dnode itself. */ 36690a586ceaSMark Shellenbaum ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 36700a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) || 36710a586ceaSMark Shellenbaum (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 36720a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 36730a586ceaSMark Shellenbaum ASSERT3P(db->db_blkptr, ==, 36740a586ceaSMark Shellenbaum &dn->dn_phys->dn_blkptr[db->db_blkid]); 3675b24ab676SJeff Bonwick zio = dn->dn_zio; 3676b24ab676SJeff Bonwick } 3677b24ab676SJeff Bonwick 3678b24ab676SJeff Bonwick ASSERT(db->db_level == 0 || data == db->db_buf); 3679b24ab676SJeff Bonwick ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 3680b24ab676SJeff Bonwick ASSERT(zio); 3681fa9e4066Sahrens 3682b24ab676SJeff Bonwick SET_BOOKMARK(&zb, os->os_dsl_dataset ? 
3683b24ab676SJeff Bonwick os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 3684b24ab676SJeff Bonwick db->db.db_object, db->db_level, db->db_blkid); 3685b24ab676SJeff Bonwick 36860a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) 36870a586ceaSMark Shellenbaum wp_flag = WP_SPILL; 36880a586ceaSMark Shellenbaum wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 36890a586ceaSMark Shellenbaum 3690adaec86aSMatthew Ahrens dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 3691744947dcSTom Erickson DB_DNODE_EXIT(db); 3692b24ab676SJeff Bonwick 369311ceac77SAlex Reece /* 369411ceac77SAlex Reece * We copy the blkptr now (rather than when we instantiate the dirty 369511ceac77SAlex Reece * record), because its value can change between open context and 369611ceac77SAlex Reece * syncing context. We do not need to hold dn_struct_rwlock to read 369711ceac77SAlex Reece * db_blkptr because we are in syncing context. 369811ceac77SAlex Reece */ 369911ceac77SAlex Reece dr->dr_bp_copy = *db->db_blkptr; 370011ceac77SAlex Reece 37015d7b4d43SMatthew Ahrens if (db->db_level == 0 && 37025d7b4d43SMatthew Ahrens dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 37035d7b4d43SMatthew Ahrens /* 37045d7b4d43SMatthew Ahrens * The BP for this block has been provided by open context 37055d7b4d43SMatthew Ahrens * (by dmu_sync() or dmu_buf_write_embedded()). 37065d7b4d43SMatthew Ahrens */ 3707770499e1SDan Kimmel abd_t *contents = (data != NULL) ? 3708770499e1SDan Kimmel abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 37095d7b4d43SMatthew Ahrens 37105602294fSDan Kimmel dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy, 37115602294fSDan Kimmel contents, db->db.db_size, db->db.db_size, &zp, 37128df0bcf0SPaul Dagnelie dbuf_write_override_ready, NULL, NULL, 37138df0bcf0SPaul Dagnelie dbuf_write_override_done, 371469962b56SMatthew Ahrens dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3715b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 3716b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 3717b24ab676SJeff Bonwick zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 371880901aeaSGeorge Wilson dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 3719b24ab676SJeff Bonwick mutex_exit(&db->db_mtx); 3720b24ab676SJeff Bonwick } else if (db->db_state == DB_NOFILL) { 3721810e43b2SBill Pijewski ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 3722810e43b2SBill Pijewski zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 3723b24ab676SJeff Bonwick dr->dr_zio = zio_write(zio, os->os_spa, txg, 37245602294fSDan Kimmel &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 37258df0bcf0SPaul Dagnelie dbuf_write_nofill_ready, NULL, NULL, 37268df0bcf0SPaul Dagnelie dbuf_write_nofill_done, db, 3727b24ab676SJeff Bonwick ZIO_PRIORITY_ASYNC_WRITE, 3728b24ab676SJeff Bonwick ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 3729b24ab676SJeff Bonwick } else { 3730b24ab676SJeff Bonwick ASSERT(arc_released(data)); 37318df0bcf0SPaul Dagnelie 37328df0bcf0SPaul Dagnelie /* 37338df0bcf0SPaul Dagnelie * For indirect blocks, we want to setup the children 37348df0bcf0SPaul Dagnelie * ready callback so that we can properly handle an indirect 37358df0bcf0SPaul Dagnelie * block that only contains holes. 
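 * (Added note: dbuf_write_children_ready() runs once the child writes have
 * filled in their block pointers; if every entry is still a hole it zeroes
 * this buffer, allowing the indirect block itself to be written as a hole.)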
37368df0bcf0SPaul Dagnelie */ 37378df0bcf0SPaul Dagnelie arc_done_func_t *children_ready_cb = NULL; 37388df0bcf0SPaul Dagnelie if (db->db_level != 0) 37398df0bcf0SPaul Dagnelie children_ready_cb = dbuf_write_children_ready; 37408df0bcf0SPaul Dagnelie 3741b24ab676SJeff Bonwick dr->dr_zio = arc_write(zio, os->os_spa, txg, 374211ceac77SAlex Reece &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), 3743dcbf3bd6SGeorge Wilson &zp, dbuf_write_ready, children_ready_cb, 374469962b56SMatthew Ahrens dbuf_write_physdone, dbuf_write_done, db, 374569962b56SMatthew Ahrens ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3746b24ab676SJeff Bonwick } 3747fa9e4066Sahrens } 3748
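/*
 * Added summary of the write path above: dbuf_write() builds dr->dr_zio and
 * chains it under the parent's zio. As the i/o progresses the SPA calls
 * dbuf_write_ready() (and dbuf_write_children_ready() for indirect blocks),
 * then dbuf_write_physdone() once per physical child to retire dirty space
 * incrementally, and finally dbuf_write_done(), which updates dataset block
 * accounting, frees the dirty record, and drops the dirty hold on the dbuf.
 */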