/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <sys/cityhash.h>
#include <sys/spa_impl.h>

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);
#endif /* !__lint */

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 * that represent the metadata that describes filesystems/snapshots/
 * bookmarks/properties/etc. We only evict from this cache when we export a
 * pool, to short-circuit as much I/O as possible for all administrative
 * commands that need the metadata. There is no eviction policy for this
 * cache, because we try to only include types in it which would occupy a
 * very small amount of space per object but create a large impact on the
 * performance of these commands. Instead, after it reaches a maximum size
 * (which should only happen on very small memory systems with a very large
 * number of filesystem objects), we stop taking new dbufs into the
 * metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The "dbuf cache" maintains a list of dbufs that
 * are not currently held but have been recently released. These dbufs
 * are not eligible for arc eviction until they are aged out of the cache.
 * Dbufs that are aged out of the cache will be immediately destroyed and
 * become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t *cache;
	zfs_refcount_t size;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
uint64_t dbuf_cache_max_bytes = 0;
uint64_t dbuf_metadata_cache_max_bytes = 0;
/* Set the default sizes of the caches to log2 fraction of arc size */
int dbuf_cache_shift = 5;
int dbuf_metadata_cache_shift = 6;

/*
 * For diagnostic purposes, this is incremented whenever we can't add
 * something to the metadata cache because it's full, and instead put
 * the data in the regular dbuf cache.
 */
uint64_t dbuf_metadata_cache_overflow;

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e.,
 * max cache size). Once the eviction thread is woken up and eviction is
 * required, it will continue evicting buffers until it's able to reduce the
 * cache size to the low water mark. If the cache size continues to grow and
 * hits the high water mark, then callers adding elements to the cache will
 * begin to evict directly from the cache until the cache is no longer above
 * the high water mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;
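
/*
 * For example, with dbuf_cache_max_bytes at 100MB and the default
 * percentages above, the mid water mark is 100MB, the low water mark
 * is 90MB, and the high water mark is 110MB.
 */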

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))
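
/*
 * Look up a dbuf in the hash table. If a matching dbuf is found (and is
 * not being evicted), it is returned with its db_mtx held; otherwise
 * NULL is returned.
 */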
dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}
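
/*
 * Find the bonus dbuf for the given object, if one is instantiated.
 * On success the bonus dbuf is returned with its db_mtx held.
 */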
static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
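	/*
	 * Unlink db by walking the chain through the link pointers
	 * themselves, so removing the head of the chain needs no
	 * special case.
	 */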
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}
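
/*
 * Detach the user from this dbuf: invoke the user's synchronous eviction
 * callback inline and dispatch the asynchronous callback, if any, to
 * dbu_evict_taskq.
 */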
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_max_bytes) {
			dbuf_metadata_cache_overflow++;
			DTRACE_PROBE1(dbuf__metadata__cache__overflow,
			    dmu_buf_impl_t *, db);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	uint64_t dbuf_cache_hiwater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	uint64_t dbuf_cache_lowater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

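	/*
	 * Start at the sublist tail (the coldest dbuf) and walk toward
	 * the head, skipping any dbuf whose mutex is currently contended.
	 */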
	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size,
		    db->db.db_size, db);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size. The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
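	/*
	 * For example, on a machine with 8GB of physical memory the loop
	 * below settles on 2^21 buckets (2^21 * 4K = 8GB), i.e. 16MB of
	 * hash headers at 8 bytes per pointer.
	 */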
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Set up the parameters for the dbuf caches. We set the sizes of the
	 * dbuf cache and the metadata cache to 1/32nd and 1/16th (default)
	 * of the size of the ARC, respectively. If the values are set in
	 * /etc/system and they're not greater than the size of the ARC, then
	 * we honor those values.
	 */
	if (dbuf_cache_max_bytes == 0 ||
	    dbuf_cache_max_bytes >= arc_max_bytes()) {
		dbuf_cache_max_bytes = arc_max_bytes() >> dbuf_cache_shift;
	}
	if (dbuf_metadata_cache_max_bytes == 0 ||
	    dbuf_metadata_cache_max_bytes >= arc_max_bytes()) {
		dbuf_metadata_cache_max_bytes =
		    arc_max_bytes() >> dbuf_metadata_cache_shift;
	}
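
	/*
	 * With the default shifts of 5 and 6, a 4GB ARC therefore yields
	 * a 128MB dbuf cache and a 64MB metadata cache.
	 */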

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		dbuf_caches[dcs].cache =
		    multilist_create(sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(dbuf_caches[dcs].cache);
	}
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

/*
 * This function is used to lock the parent of the provided dbuf. This should
 * be used when modifying or reading db_blkptr.
 */
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
{
	enum db_lock_type ret = DLT_NONE;
	if (db->db_parent != NULL) {
		rw_enter(&db->db_parent->db_rwlock, rw);
		ret = DLT_PARENT;
	} else if (dmu_objset_ds(db->db_objset) != NULL) {
		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
		    tag);
		ret = DLT_OBJSET;
	}
	/*
	 * We only return a DLT_NONE lock when it's the top-most indirect block
	 * of the meta-dnode of the MOS.
	 */
	return (ret);
}

/*
 * We need to pass the lock type in because it's possible that the block will
 * move from being the topmost indirect block in a dnode (and thus, have no
 * parent) to not the top-most via an indirection increase. This would cause a
 * panic if we didn't pass the lock type in.
 */
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag)
{
	if (type == DLT_PARENT)
		rw_exit(&db->db_parent->db_rwlock);
	else if (type == DLT_OBJSET)
		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
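
/*
 * A typical usage of this pair, e.g. on the dbuf_read() path, where the
 * caller takes the parent lock and dbuf_read_impl() (below) drops it with
 * the same tag:
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
 */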

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */
		return (offset >> (dn->dn_datablkshift + level *
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
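
/*
 * For example, with 128K data blocks (datablkshift = 17) and 128K
 * indirect blocks (indblkshift = 17, i.e. 2^(17 - SPA_BLKPTRSHIFT) =
 * 1024 block pointers each), the level 1 blkid covering offset 1GB is
 * 2^30 >> (17 + 1 * 10) = 8.
 */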

/* ARGSUSED */
static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (buf == NULL) {
		/* i/o error */
		ASSERT(zio == NULL || zio->io_error != 0);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		db->db_state = DB_UNCACHED;
	} else if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		ASSERT(zio == NULL || zio->io_error == 0);
		if (buf == NULL) {
			buf = arc_alloc_buf(db->db_objset->os_spa,
			    db, DBUF_GET_BUFC_TYPE(db), db->db.db_size);
		}
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (buf != NULL) {
		/* success */
		ASSERT(zio == NULL || zio->io_error == 0);
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL, B_FALSE);
}


/*
 * This function ensures that, when doing a decrypting read of a block,
 * we make sure we have decrypted the dnode associated with it. We must do
 * this so that we ensure we are fully authenticating the checksum-of-MACs
 * tree from the root of the objset down to this block. Indirect blocks are
 * always verified against their secure checksum-of-MACs assuming that the
 * dnode containing them is correct. Now that we are doing a decrypting read,
 * we can be sure that the key is loaded and verify that assumption. This is
 * especially important considering that we always read encrypted dnode
 * blocks as raw data (without verifying their MACs) to start, and
 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
 */
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
	int err = 0;
	objset_t *os = db->db_objset;
	arc_buf_t *dnode_abuf;
	dnode_t *dn;
	zbookmark_phys_t zb;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!os->os_encrypted || os->os_raw_receive ||
	    (flags & DB_RF_NO_DECRYPT) != 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;

	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
		DB_DNODE_EXIT(db);
		return (0);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(os),
	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);

	/*
	 * An error code of EACCES tells us that the key is still not
	 * available. This is ok if we are only reading authenticated
	 * (and therefore non-encrypted) blocks.
	 */
1073eb633035STom Caputi */ 1074eb633035STom Caputi if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID && 1075eb633035STom Caputi !DMU_OT_IS_ENCRYPTED(dn->dn_type)) || 1076eb633035STom Caputi (db->db_blkid == DMU_BONUS_BLKID && 1077eb633035STom Caputi !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)))) 1078eb633035STom Caputi err = 0; 1079eb633035STom Caputi 1080eb633035STom Caputi DB_DNODE_EXIT(db); 1081eb633035STom Caputi 1082eb633035STom Caputi return (err); 1083eb633035STom Caputi } 1084eb633035STom Caputi 1085*9704bf7fSPaul Dagnelie /* 1086*9704bf7fSPaul Dagnelie * Drops db_mtx and the parent lock specified by dblt and tag before 1087*9704bf7fSPaul Dagnelie * returning. 1088*9704bf7fSPaul Dagnelie */ 1089eb633035STom Caputi static int 1090*9704bf7fSPaul Dagnelie dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags, 1091*9704bf7fSPaul Dagnelie db_lock_type_t dblt, void *tag) 1092fa9e4066Sahrens { 1093744947dcSTom Erickson dnode_t *dn; 10947802d7bfSMatthew Ahrens zbookmark_phys_t zb; 10957adb730bSGeorge Wilson arc_flags_t aflags = ARC_FLAG_NOWAIT; 1096eb633035STom Caputi int err, zio_flags = 0; 1097fa9e4066Sahrens 1098744947dcSTom Erickson DB_DNODE_ENTER(db); 1099744947dcSTom Erickson dn = DB_DNODE(db); 1100e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1101ea8dc4b6Seschrock ASSERT(MUTEX_HELD(&db->db_mtx)); 1102ea8dc4b6Seschrock ASSERT(db->db_state == DB_UNCACHED); 1103ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 1104*9704bf7fSPaul Dagnelie ASSERT(db->db_parent == NULL || 1105*9704bf7fSPaul Dagnelie RW_LOCK_HELD(&db->db_parent->db_rwlock)); 1106fa9e4066Sahrens 11070a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 110854811da5SToomas Soome /* 110954811da5SToomas Soome * The bonus length stored in the dnode may be less than 111054811da5SToomas Soome * the maximum available space in the bonus buffer. 111154811da5SToomas Soome */ 1112cf04dda1SMark Maybee int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 111354811da5SToomas Soome int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 11141934e92fSmaybee 1115eb633035STom Caputi /* if the underlying dnode block is encrypted, decrypt it */ 1116eb633035STom Caputi err = dbuf_read_verify_dnode_crypt(db, flags); 1117eb633035STom Caputi if (err != 0) { 1118eb633035STom Caputi DB_DNODE_EXIT(db); 1119eb633035STom Caputi mutex_exit(&db->db_mtx); 1120eb633035STom Caputi return (err); 1121eb633035STom Caputi } 1122eb633035STom Caputi 11231934e92fSmaybee ASSERT3U(bonuslen, <=, db->db.db_size); 112454811da5SToomas Soome db->db.db_data = zio_buf_alloc(max_bonuslen); 112554811da5SToomas Soome arc_space_consume(max_bonuslen, ARC_SPACE_BONUS); 112654811da5SToomas Soome if (bonuslen < max_bonuslen) 112754811da5SToomas Soome bzero(db->db.db_data, max_bonuslen); 1128cf04dda1SMark Maybee if (bonuslen) 1129cf04dda1SMark Maybee bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen); 1130744947dcSTom Erickson DB_DNODE_EXIT(db); 1131fa9e4066Sahrens db->db_state = DB_CACHED; 1132fa9e4066Sahrens mutex_exit(&db->db_mtx); 1133*9704bf7fSPaul Dagnelie dmu_buf_unlock_parent(db, dblt, tag); 1134eb633035STom Caputi return (0); 1135fa9e4066Sahrens } 1136fa9e4066Sahrens 11371c8564a7SMark Maybee /* 11381c8564a7SMark Maybee * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 11391c8564a7SMark Maybee * processes the delete record and clears the bp while we are waiting 11401c8564a7SMark Maybee * for the dn_mtx (resulting in a "no" from block_freed). 
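*
* Editor's illustration of the window being closed here (an assumed
* interleaving, for exposition only):
*
*	open context (this thread)	syncing context
*	--------------------------	---------------
*	blocks on dn_mtx in
*	dnode_block_freed()
*					dnode_sync() processes the
*					delete record and clears the bp
*	dnode_block_freed() -> "no"
*	BP_IS_HOLE(db_blkptr) -> yes,
*	so the read is still treated
*	as a hole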
11411c8564a7SMark Maybee */ 1142088f3894Sahrens if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 11431c8564a7SMark Maybee (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 11441c8564a7SMark Maybee BP_IS_HOLE(db->db_blkptr)))) { 1145ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1146ad23a2dbSjohansen 11475602294fSDan Kimmel dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type, 11485602294fSDan Kimmel db->db.db_size)); 1149fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 11508df0bcf0SPaul Dagnelie 11518df0bcf0SPaul Dagnelie if (db->db_blkptr != NULL && db->db_level > 0 && 11528df0bcf0SPaul Dagnelie BP_IS_HOLE(db->db_blkptr) && 11538df0bcf0SPaul Dagnelie db->db_blkptr->blk_birth != 0) { 11548df0bcf0SPaul Dagnelie blkptr_t *bps = db->db.db_data; 11558df0bcf0SPaul Dagnelie for (int i = 0; i < ((1 << 11568df0bcf0SPaul Dagnelie DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t)); 11578df0bcf0SPaul Dagnelie i++) { 11588df0bcf0SPaul Dagnelie blkptr_t *bp = &bps[i]; 11598df0bcf0SPaul Dagnelie ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 11608df0bcf0SPaul Dagnelie 1 << dn->dn_indblkshift); 11618df0bcf0SPaul Dagnelie BP_SET_LSIZE(bp, 11628df0bcf0SPaul Dagnelie BP_GET_LEVEL(db->db_blkptr) == 1 ? 11638df0bcf0SPaul Dagnelie dn->dn_datablksz : 11648df0bcf0SPaul Dagnelie BP_GET_LSIZE(db->db_blkptr)); 11658df0bcf0SPaul Dagnelie BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr)); 11668df0bcf0SPaul Dagnelie BP_SET_LEVEL(bp, 11678df0bcf0SPaul Dagnelie BP_GET_LEVEL(db->db_blkptr) - 1); 11688df0bcf0SPaul Dagnelie BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0); 11698df0bcf0SPaul Dagnelie } 11708df0bcf0SPaul Dagnelie } 11718df0bcf0SPaul Dagnelie DB_DNODE_EXIT(db); 1172fa9e4066Sahrens db->db_state = DB_CACHED; 1173fa9e4066Sahrens mutex_exit(&db->db_mtx); 1174*9704bf7fSPaul Dagnelie dmu_buf_unlock_parent(db, dblt, tag); 1175eb633035STom Caputi return (0); 1176eb633035STom Caputi } 1177eb633035STom Caputi 1178eb633035STom Caputi SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1179eb633035STom Caputi db->db.db_object, db->db_level, db->db_blkid); 1180eb633035STom Caputi 1181eb633035STom Caputi /* 1182eb633035STom Caputi * All bps of an encrypted os should have the encryption bit set. 1183eb633035STom Caputi * If this is not true it indicates tampering and we report an error. 
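*
* (Editor's note, hedged: zfs_panic_recover() panics unless the
* zfs_recover tunable is set, in which case the event is only
* logged and the SET_ERROR(EIO) below is what the reader sees.)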
1184eb633035STom Caputi */ 1185eb633035STom Caputi if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) { 1186eb633035STom Caputi spa_log_error(db->db_objset->os_spa, &zb); 1187eb633035STom Caputi zfs_panic_recover("unencrypted block in encrypted " 1188eb633035STom Caputi "object set %llu", dmu_objset_id(db->db_objset)); 1189eb633035STom Caputi DB_DNODE_EXIT(db); 1190eb633035STom Caputi mutex_exit(&db->db_mtx); 1191*9704bf7fSPaul Dagnelie dmu_buf_unlock_parent(db, dblt, tag); 1192eb633035STom Caputi return (SET_ERROR(EIO)); 1193eb633035STom Caputi } 1194eb633035STom Caputi 1195eb633035STom Caputi err = dbuf_read_verify_dnode_crypt(db, flags); 1196eb633035STom Caputi if (err != 0) { 1197eb633035STom Caputi DB_DNODE_EXIT(db); 1198*9704bf7fSPaul Dagnelie dmu_buf_unlock_parent(db, dblt, tag); 1199eb633035STom Caputi mutex_exit(&db->db_mtx); 1200eb633035STom Caputi return (err); 1201fa9e4066Sahrens } 1202fa9e4066Sahrens 1203744947dcSTom Erickson DB_DNODE_EXIT(db); 1204744947dcSTom Erickson 1205fa9e4066Sahrens db->db_state = DB_READ; 1206fa9e4066Sahrens mutex_exit(&db->db_mtx); 1207fa9e4066Sahrens 12083baa08fcSek if (DBUF_IS_L2CACHEABLE(db)) 12097adb730bSGeorge Wilson aflags |= ARC_FLAG_L2CACHE; 12103baa08fcSek 1211ea8dc4b6Seschrock dbuf_add_ref(db, NULL); 1212088f3894Sahrens 1213eb633035STom Caputi zio_flags = (flags & DB_RF_CANFAIL) ? 1214eb633035STom Caputi ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED; 1215eb633035STom Caputi 1216eb633035STom Caputi if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr)) 1217eb633035STom Caputi zio_flags |= ZIO_FLAG_RAW; 1218*9704bf7fSPaul Dagnelie /* 1219*9704bf7fSPaul Dagnelie * The zio layer will copy the provided blkptr later, but we copy it 1220*9704bf7fSPaul Dagnelie * now so that we can release the parent's rwlock before issuing the 1221*9704bf7fSPaul Dagnelie * read. Otherwise, if dbuf_read_done is called synchronously (on 1222*9704bf7fSPaul Dagnelie * an l1 cache hit) it would acquire the db_mtx while we still hold 1223*9704bf7fSPaul Dagnelie * the parent's rwlock, which would be a lock ordering violation. 1224*9704bf7fSPaul Dagnelie */ 1225*9704bf7fSPaul Dagnelie blkptr_t bp = *db->db_blkptr; 1226*9704bf7fSPaul Dagnelie dmu_buf_unlock_parent(db, dblt, tag); 1227*9704bf7fSPaul Dagnelie (void) arc_read(zio, db->db_objset->os_spa, &bp, 1228eb633035STom Caputi dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags, 122913506d1eSmaybee &aflags, &zb); 1230eb633035STom Caputi return (err); 1231fa9e4066Sahrens } 1232fa9e4066Sahrens 12335602294fSDan Kimmel /* 12345602294fSDan Kimmel * This is our just-in-time copy function. It makes a copy of buffers that 12355602294fSDan Kimmel * have been modified in a previous transaction group before we access them in 12365602294fSDan Kimmel * the current active group. 12375602294fSDan Kimmel * 12385602294fSDan Kimmel * This function is used in three places: when we are dirtying a buffer for the 12395602294fSDan Kimmel * first time in a txg, when we are freeing a range in a dnode that includes 12405602294fSDan Kimmel * this buffer, and when we are accessing a buffer that was received compressed 12415602294fSDan Kimmel * and is later referenced in a WRITE_BYREF record. 12425602294fSDan Kimmel * 12435602294fSDan Kimmel * Note that when we are called from dbuf_free_range() we do not put a hold on 12445602294fSDan Kimmel * the buffer; we just traverse the active dbuf list for the dnode. 
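*
* Editor's sketch of the first case (an assumed timeline, for
* illustration only). Suppose the buffer was dirtied in txg 10 and
* is dirtied again in txg 11 before txg 10 has synced:
*
*	txg 10: dbuf_dirty()		dr(10)->dt.dl.dr_data == db_buf
*	txg 11: dbuf_dirty() calls
*		dbuf_fix_old_data()	dr(10) gets its own copy, so
*					txg 11's changes to db_buf
*					cannot leak into what txg 10
*					writes out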
12455602294fSDan Kimmel */ 12465602294fSDan Kimmel static void 12475602294fSDan Kimmel dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) 12485602294fSDan Kimmel { 12495602294fSDan Kimmel dbuf_dirty_record_t *dr = db->db_last_dirty; 12505602294fSDan Kimmel 12515602294fSDan Kimmel ASSERT(MUTEX_HELD(&db->db_mtx)); 12525602294fSDan Kimmel ASSERT(db->db.db_data != NULL); 12535602294fSDan Kimmel ASSERT(db->db_level == 0); 12545602294fSDan Kimmel ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT); 12555602294fSDan Kimmel 12565602294fSDan Kimmel if (dr == NULL || 12575602294fSDan Kimmel (dr->dt.dl.dr_data != 12585602294fSDan Kimmel ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf))) 12595602294fSDan Kimmel return; 12605602294fSDan Kimmel 12615602294fSDan Kimmel /* 12625602294fSDan Kimmel * If the last dirty record for this dbuf has not yet synced 12635602294fSDan Kimmel * and it's referencing the dbuf data, either: 12645602294fSDan Kimmel * reset the reference to point to a new copy, 12655602294fSDan Kimmel * or (if there are no active holders) 12665602294fSDan Kimmel * just null out the current db_data pointer. 12675602294fSDan Kimmel */ 1268eb633035STom Caputi ASSERT3U(dr->dr_txg, >=, txg - 2); 12695602294fSDan Kimmel if (db->db_blkid == DMU_BONUS_BLKID) { 12705602294fSDan Kimmel /* Note that the data bufs here are zio_bufs */ 127154811da5SToomas Soome dnode_t *dn = DB_DNODE(db); 127254811da5SToomas Soome int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 127354811da5SToomas Soome dr->dt.dl.dr_data = zio_buf_alloc(bonuslen); 127454811da5SToomas Soome arc_space_consume(bonuslen, ARC_SPACE_BONUS); 127554811da5SToomas Soome bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen); 1276e914ace2STim Schumacher } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) { 1277eb633035STom Caputi dnode_t *dn = DB_DNODE(db); 12785602294fSDan Kimmel int size = arc_buf_size(db->db_buf); 12795602294fSDan Kimmel arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 12805602294fSDan Kimmel spa_t *spa = db->db_objset->os_spa; 12815602294fSDan Kimmel enum zio_compress compress_type = 12825602294fSDan Kimmel arc_get_compression(db->db_buf); 12835602294fSDan Kimmel 1284eb633035STom Caputi if (arc_is_encrypted(db->db_buf)) { 1285eb633035STom Caputi boolean_t byteorder; 1286eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 1287eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 1288eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN]; 1289eb633035STom Caputi 1290eb633035STom Caputi arc_get_raw_params(db->db_buf, &byteorder, salt, 1291eb633035STom Caputi iv, mac); 1292eb633035STom Caputi dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db, 1293eb633035STom Caputi dmu_objset_id(dn->dn_objset), byteorder, salt, iv, 1294eb633035STom Caputi mac, dn->dn_type, size, arc_buf_lsize(db->db_buf), 1295eb633035STom Caputi compress_type); 1296eb633035STom Caputi } else if (compress_type != ZIO_COMPRESS_OFF) { 12975602294fSDan Kimmel ASSERT3U(type, ==, ARC_BUFC_DATA); 12985602294fSDan Kimmel dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db, 12995602294fSDan Kimmel size, arc_buf_lsize(db->db_buf), compress_type); 1300eb633035STom Caputi } else { 1301eb633035STom Caputi dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size); 13025602294fSDan Kimmel } 13035602294fSDan Kimmel bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size); 13045602294fSDan Kimmel } else { 13055602294fSDan Kimmel db->db_buf = NULL; 13065602294fSDan Kimmel dbuf_clear_data(db); 13075602294fSDan Kimmel } 13085602294fSDan Kimmel } 13095602294fSDan Kimmel 
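/*
* Editor's sketch (an assumed, illustrative caller, not upstream
* code): the common way to drive the read path below is to take a
* hold, read, then release:
*
*	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
*	int err = dbuf_read(db, NULL,
*	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
*	... use db->db.db_data ...
*	dbuf_rele(db, FTAG);
*
* Passing a parent zio instead of NULL lets a caller batch many
* reads under one zio_root() and wait once, which is roughly what
* dmu_buf_hold_array_by_dnode() does together with DB_RF_CANFAIL.
*/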
1310ea8dc4b6Seschrock int 1311ea8dc4b6Seschrock dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 1312fa9e4066Sahrens { 1313ea8dc4b6Seschrock int err = 0; 131443466aaeSMax Grossman boolean_t prefetch; 1315744947dcSTom Erickson dnode_t *dn; 1316fa9e4066Sahrens 1317fa9e4066Sahrens /* 1318fa9e4066Sahrens * We don't have to hold the mutex to check db_state because it 1319fa9e4066Sahrens * can't be freed while we have a hold on the buffer. 1320fa9e4066Sahrens */ 1321e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1322fa9e4066Sahrens 132382c9918fSTim Haley if (db->db_state == DB_NOFILL) 1324be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 132582c9918fSTim Haley 1326744947dcSTom Erickson DB_DNODE_ENTER(db); 1327744947dcSTom Erickson dn = DB_DNODE(db); 1328ea8dc4b6Seschrock 13290a586ceaSMark Shellenbaum prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1330744947dcSTom Erickson (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL && 13313baa08fcSek DBUF_IS_CACHEABLE(db); 133213506d1eSmaybee 1333ea8dc4b6Seschrock mutex_enter(&db->db_mtx); 1334ea8dc4b6Seschrock if (db->db_state == DB_CACHED) { 1335eb633035STom Caputi spa_t *spa = dn->dn_objset->os_spa; 1336eb633035STom Caputi 13375602294fSDan Kimmel /* 1338eb633035STom Caputi * Ensure that this block's dnode has been decrypted if 1339eb633035STom Caputi * the caller has requested decrypted data. 13405602294fSDan Kimmel */ 1341eb633035STom Caputi err = dbuf_read_verify_dnode_crypt(db, flags); 1342eb633035STom Caputi 1343eb633035STom Caputi /* 1344eb633035STom Caputi * If the arc buf is compressed or encrypted and the caller 1345eb633035STom Caputi * requested uncompressed data, we need to untransform it 1346eb633035STom Caputi * before returning. We also call arc_untransform() on any 1347eb633035STom Caputi * unauthenticated blocks, which will verify their MAC if 1348eb633035STom Caputi * the key is now available. 
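*
* (Editor's example, assumed: a dbuf cached by a raw 'zfs receive'
* can still hold ciphertext in the ARC; the first normal reader to
* reach this point pays the one-time cost of untransforming it.)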
1349eb633035STom Caputi */ 1350eb633035STom Caputi if (err == 0 && db->db_buf != NULL && 1351eb633035STom Caputi (flags & DB_RF_NO_DECRYPT) == 0 && 1352eb633035STom Caputi (arc_is_encrypted(db->db_buf) || 1353eb633035STom Caputi arc_is_unauthenticated(db->db_buf) || 1354eb633035STom Caputi arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) { 1355eb633035STom Caputi zbookmark_phys_t zb; 1356eb633035STom Caputi 1357eb633035STom Caputi SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1358eb633035STom Caputi db->db.db_object, db->db_level, db->db_blkid); 1359eb633035STom Caputi dbuf_fix_old_data(db, spa_syncing_txg(spa)); 1360eb633035STom Caputi err = arc_untransform(db->db_buf, spa, &zb, B_FALSE); 13615602294fSDan Kimmel dbuf_set_data(db, db->db_buf); 13625602294fSDan Kimmel } 1363ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 1364*9704bf7fSPaul Dagnelie if (err == 0 && prefetch) { 1365*9704bf7fSPaul Dagnelie dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1366*9704bf7fSPaul Dagnelie flags & DB_RF_HAVESTRUCT); 1367*9704bf7fSPaul Dagnelie } 1368744947dcSTom Erickson DB_DNODE_EXIT(db); 1369ea8dc4b6Seschrock } else if (db->db_state == DB_UNCACHED) { 1370744947dcSTom Erickson spa_t *spa = dn->dn_objset->os_spa; 1371def4fac5SMatthew Ahrens boolean_t need_wait = B_FALSE; 1372744947dcSTom Erickson 1373*9704bf7fSPaul Dagnelie db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 1374*9704bf7fSPaul Dagnelie 1375def4fac5SMatthew Ahrens if (zio == NULL && 1376def4fac5SMatthew Ahrens db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1377744947dcSTom Erickson zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1378def4fac5SMatthew Ahrens need_wait = B_TRUE; 1379def4fac5SMatthew Ahrens } 1380*9704bf7fSPaul Dagnelie err = dbuf_read_impl(db, zio, flags, dblt, FTAG); 1381*9704bf7fSPaul Dagnelie /* 1382*9704bf7fSPaul Dagnelie * dbuf_read_impl has dropped db_mtx and our parent's rwlock 1383*9704bf7fSPaul Dagnelie * for us 1384*9704bf7fSPaul Dagnelie */ 1385*9704bf7fSPaul Dagnelie if (!err && prefetch) { 1386*9704bf7fSPaul Dagnelie dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1387*9704bf7fSPaul Dagnelie flags & DB_RF_HAVESTRUCT); 1388*9704bf7fSPaul Dagnelie } 1389ea8dc4b6Seschrock 1390744947dcSTom Erickson DB_DNODE_EXIT(db); 1391fa9e4066Sahrens 1392eb633035STom Caputi if (!err && need_wait) 1393ea8dc4b6Seschrock err = zio_wait(zio); 1394ea8dc4b6Seschrock } else { 13953e30c24aSWill Andrews /* 13963e30c24aSWill Andrews * Another reader came in while the dbuf was in flight 13973e30c24aSWill Andrews * between UNCACHED and CACHED. Either a writer will finish 13983e30c24aSWill Andrews * writing the buffer (sending the dbuf to CACHED) or the 13993e30c24aSWill Andrews * first reader's request will reach the read_done callback 14003e30c24aSWill Andrews * and send the dbuf to CACHED. Otherwise, a failure 14013e30c24aSWill Andrews * occurred and the dbuf went to UNCACHED. 14023e30c24aSWill Andrews */ 140313506d1eSmaybee mutex_exit(&db->db_mtx); 1404*9704bf7fSPaul Dagnelie if (prefetch) { 1405*9704bf7fSPaul Dagnelie dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1406*9704bf7fSPaul Dagnelie flags & DB_RF_HAVESTRUCT); 1407*9704bf7fSPaul Dagnelie } 1408744947dcSTom Erickson DB_DNODE_EXIT(db); 140913506d1eSmaybee 14103e30c24aSWill Andrews /* Skip the wait per the caller's request. 
*/ 141113506d1eSmaybee mutex_enter(&db->db_mtx); 1412ea8dc4b6Seschrock if ((flags & DB_RF_NEVERWAIT) == 0) { 1413ea8dc4b6Seschrock while (db->db_state == DB_READ || 1414ea8dc4b6Seschrock db->db_state == DB_FILL) { 1415ea8dc4b6Seschrock ASSERT(db->db_state == DB_READ || 1416ea8dc4b6Seschrock (flags & DB_RF_HAVESTRUCT) == 0); 1417f6164ad6SAdam H. Leventhal DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1418f6164ad6SAdam H. Leventhal db, zio_t *, zio); 1419ea8dc4b6Seschrock cv_wait(&db->db_changed, &db->db_mtx); 1420ea8dc4b6Seschrock } 1421ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED) 1422be6fd75aSMatthew Ahrens err = SET_ERROR(EIO); 1423ea8dc4b6Seschrock } 1424ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 1425fa9e4066Sahrens } 1426fa9e4066Sahrens 1427ea8dc4b6Seschrock return (err); 1428fa9e4066Sahrens } 1429fa9e4066Sahrens 1430fa9e4066Sahrens static void 1431fa9e4066Sahrens dbuf_noread(dmu_buf_impl_t *db) 1432fa9e4066Sahrens { 1433e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 14340a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1435fa9e4066Sahrens mutex_enter(&db->db_mtx); 1436fa9e4066Sahrens while (db->db_state == DB_READ || db->db_state == DB_FILL) 1437fa9e4066Sahrens cv_wait(&db->db_changed, &db->db_mtx); 1438fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 1439ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 144043466aaeSMax Grossman spa_t *spa = db->db_objset->os_spa; 1441ad23a2dbSjohansen 1442ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 1443fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 14445602294fSDan Kimmel dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size)); 1445fa9e4066Sahrens db->db_state = DB_FILL; 144682c9918fSTim Haley } else if (db->db_state == DB_NOFILL) { 1447bc9014e6SJustin Gibbs dbuf_clear_data(db); 1448fa9e4066Sahrens } else { 1449fa9e4066Sahrens ASSERT3U(db->db_state, ==, DB_CACHED); 1450fa9e4066Sahrens } 1451fa9e4066Sahrens mutex_exit(&db->db_mtx); 1452fa9e4066Sahrens } 1453fa9e4066Sahrens 1454c717a561Smaybee void 1455c717a561Smaybee dbuf_unoverride(dbuf_dirty_record_t *dr) 1456ea8dc4b6Seschrock { 1457c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 1458b24ab676SJeff Bonwick blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1459c717a561Smaybee uint64_t txg = dr->dr_txg; 1460ea8dc4b6Seschrock 1461ea8dc4b6Seschrock ASSERT(MUTEX_HELD(&db->db_mtx)); 146240713f2bSAlan Somers /* 146340713f2bSAlan Somers * This assert is valid because dmu_sync() expects to be called by 146440713f2bSAlan Somers * a zilog's get_data while holding a range lock. This call only 146540713f2bSAlan Somers * comes from dbuf_dirty() callers who must also hold a range lock. 
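*
* (Editor's gloss: since both paths serialize on the same range
* lock, a dirty record can never be observed here while dmu_sync()
* still has it in DR_IN_DMU_SYNC.)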
146640713f2bSAlan Somers */ 1467c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1468c717a561Smaybee ASSERT(db->db_level == 0); 1469ea8dc4b6Seschrock 14700a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 1471c717a561Smaybee dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1472c717a561Smaybee return; 1473ea8dc4b6Seschrock 1474b24ab676SJeff Bonwick ASSERT(db->db_data_pending != dr); 1475b24ab676SJeff Bonwick 1476c717a561Smaybee /* free this block */ 147743466aaeSMax Grossman if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 147843466aaeSMax Grossman zio_free(db->db_objset->os_spa, txg, bp); 1479b24ab676SJeff Bonwick 1480c717a561Smaybee dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 148180901aeaSGeorge Wilson dr->dt.dl.dr_nopwrite = B_FALSE; 1482eb633035STom Caputi dr->dt.dl.dr_has_raw_params = B_FALSE; 148380901aeaSGeorge Wilson 1484c717a561Smaybee /* 1485c717a561Smaybee * Release the already-written buffer, so we leave it in 1486c717a561Smaybee * a consistent dirty state. Note that all callers are 1487c717a561Smaybee * modifying the buffer, so they will immediately do 1488c717a561Smaybee * another (redundant) arc_release(). Therefore, leave 1489c717a561Smaybee * the buf thawed to save the effort of freezing & 1490c717a561Smaybee * immediately re-thawing it. 1491c717a561Smaybee */ 1492c717a561Smaybee arc_release(dr->dt.dl.dr_data, db); 1493fa9e4066Sahrens } 1494fa9e4066Sahrens 1495cdb0ab79Smaybee /* 1496cdb0ab79Smaybee * Evict (if it's unreferenced) or clear (if it's referenced) any level-0 1497cdb0ab79Smaybee * data blocks in the free range, so that any future readers will find 149843466aaeSMax Grossman * empty blocks. 1499cdb0ab79Smaybee */ 1500fa9e4066Sahrens void 15010f6d88adSAlex Reece dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 15020f6d88adSAlex Reece dmu_tx_t *tx) 1503fa9e4066Sahrens { 1504bc9014e6SJustin Gibbs dmu_buf_impl_t db_search; 1505bc9014e6SJustin Gibbs dmu_buf_impl_t *db, *db_next; 1506fa9e4066Sahrens uint64_t txg = tx->tx_txg; 15070f6d88adSAlex Reece avl_index_t where; 1508fa9e4066Sahrens 1509653af1b8SStephen Blinick if (end_blkid > dn->dn_maxblkid && 1510653af1b8SStephen Blinick !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 15110f6d88adSAlex Reece end_blkid = dn->dn_maxblkid; 15120f6d88adSAlex Reece dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); 15130f6d88adSAlex Reece 15140f6d88adSAlex Reece db_search.db_level = 0; 15150f6d88adSAlex Reece db_search.db_blkid = start_blkid; 151686bb58aeSAlex Reece db_search.db_state = DB_SEARCH; 15172f3d8780SMatthew Ahrens 1518713d6c20SMatthew Ahrens mutex_enter(&dn->dn_dbufs_mtx); 15190f6d88adSAlex Reece db = avl_find(&dn->dn_dbufs, &db_search, &where); 15200f6d88adSAlex Reece ASSERT3P(db, ==, NULL); 1521653af1b8SStephen Blinick 15220f6d88adSAlex Reece db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 15230f6d88adSAlex Reece 15240f6d88adSAlex Reece for (; db != NULL; db = db_next) { 15250f6d88adSAlex Reece db_next = AVL_NEXT(&dn->dn_dbufs, db); 15260a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1527cdb0ab79Smaybee 15280f6d88adSAlex Reece if (db->db_level != 0 || db->db_blkid > end_blkid) { 15290f6d88adSAlex Reece break; 15300f6d88adSAlex Reece } 15310f6d88adSAlex Reece ASSERT3U(db->db_blkid, >=, start_blkid); 1532fa9e4066Sahrens 1533fa9e4066Sahrens /* found a level 0 buffer in the range */ 15343b2aab18SMatthew Ahrens mutex_enter(&db->db_mtx); 15353b2aab18SMatthew Ahrens if (dbuf_undirty(db, tx)) { 
15363b2aab18SMatthew Ahrens /* mutex has been dropped and dbuf destroyed */ 1537fa9e4066Sahrens continue; 15383b2aab18SMatthew Ahrens } 1539fa9e4066Sahrens 1540ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED || 154182c9918fSTim Haley db->db_state == DB_NOFILL || 1542ea8dc4b6Seschrock db->db_state == DB_EVICTING) { 1543fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 1544fa9e4066Sahrens mutex_exit(&db->db_mtx); 1545fa9e4066Sahrens continue; 1546fa9e4066Sahrens } 1547c543ec06Sahrens if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1548c543ec06Sahrens /* will be handled in dbuf_read_done or dbuf_rele */ 1549c717a561Smaybee db->db_freed_in_flight = TRUE; 1550fa9e4066Sahrens mutex_exit(&db->db_mtx); 1551fa9e4066Sahrens continue; 1552fa9e4066Sahrens } 1553e914ace2STim Schumacher if (zfs_refcount_count(&db->db_holds) == 0) { 1554ea8dc4b6Seschrock ASSERT(db->db_buf); 1555dcbf3bd6SGeorge Wilson dbuf_destroy(db); 1556ea8dc4b6Seschrock continue; 1557ea8dc4b6Seschrock } 1558c717a561Smaybee /* The dbuf is referenced */ 1559fa9e4066Sahrens 1560c717a561Smaybee if (db->db_last_dirty != NULL) { 1561c717a561Smaybee dbuf_dirty_record_t *dr = db->db_last_dirty; 1562c717a561Smaybee 1563c717a561Smaybee if (dr->dr_txg == txg) { 156444eda4d7Smaybee /* 1565c717a561Smaybee * This buffer is "in-use"; re-adjust the file 1566c717a561Smaybee * size to reflect that this buffer may 1567c717a561Smaybee * contain new data when we sync. 156844eda4d7Smaybee */ 156906e0070dSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID && 157006e0070dSMark Shellenbaum db->db_blkid > dn->dn_maxblkid) 1571c717a561Smaybee dn->dn_maxblkid = db->db_blkid; 1572c717a561Smaybee dbuf_unoverride(dr); 1573c717a561Smaybee } else { 1574c717a561Smaybee /* 1575c717a561Smaybee * This dbuf is not dirty in the open context. 1576c717a561Smaybee * Either uncache it (if it's not referenced in 1577c717a561Smaybee * the open context) or reset its contents to 1578c717a561Smaybee * empty. 
1579c717a561Smaybee */ 1580c717a561Smaybee dbuf_fix_old_data(db, txg); 158144eda4d7Smaybee } 1582ea8dc4b6Seschrock } 1583c717a561Smaybee /* clear the contents if it's cached */ 1584ea8dc4b6Seschrock if (db->db_state == DB_CACHED) { 1585ea8dc4b6Seschrock ASSERT(db->db.db_data != NULL); 1586fa9e4066Sahrens arc_release(db->db_buf, db); 1587*9704bf7fSPaul Dagnelie rw_enter(&db->db_rwlock, RW_WRITER); 1588fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 1589*9704bf7fSPaul Dagnelie rw_exit(&db->db_rwlock); 15906b4acc8bSahrens arc_buf_freeze(db->db_buf); 1591fa9e4066Sahrens } 1592ea8dc4b6Seschrock 1593fa9e4066Sahrens mutex_exit(&db->db_mtx); 1594fa9e4066Sahrens } 1595fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 1596fa9e4066Sahrens } 1597fa9e4066Sahrens 1598fa9e4066Sahrens void 1599fa9e4066Sahrens dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 1600fa9e4066Sahrens { 1601fa9e4066Sahrens arc_buf_t *buf, *obuf; 1602fa9e4066Sahrens int osize = db->db.db_size; 1603ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1604744947dcSTom Erickson dnode_t *dn; 1605fa9e4066Sahrens 16060a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1607ea8dc4b6Seschrock 1608744947dcSTom Erickson DB_DNODE_ENTER(db); 1609744947dcSTom Erickson dn = DB_DNODE(db); 1610744947dcSTom Erickson 1611ea8dc4b6Seschrock /* 1612ea8dc4b6Seschrock * XXX we should be doing a dbuf_read, checking the return 1613ea8dc4b6Seschrock * value and returning that up to our callers 1614ea8dc4b6Seschrock */ 161543466aaeSMax Grossman dmu_buf_will_dirty(&db->db, tx); 1616fa9e4066Sahrens 1617fa9e4066Sahrens /* create the data buffer for the new block */ 16185602294fSDan Kimmel buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 1619fa9e4066Sahrens 1620fa9e4066Sahrens /* copy old block data to the new block */ 1621fa9e4066Sahrens obuf = db->db_buf; 1622f65e61c0Sahrens bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 1623fa9e4066Sahrens /* zero the remainder */ 1624f65e61c0Sahrens if (size > osize) 1625f65e61c0Sahrens bzero((uint8_t *)buf->b_data + osize, size - osize); 1626fa9e4066Sahrens 1627fa9e4066Sahrens mutex_enter(&db->db_mtx); 1628fa9e4066Sahrens dbuf_set_data(db, buf); 1629dcbf3bd6SGeorge Wilson arc_buf_destroy(obuf, db); 1630fa9e4066Sahrens db->db.db_size = size; 1631fa9e4066Sahrens 1632c717a561Smaybee if (db->db_level == 0) { 1633c717a561Smaybee ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1634c717a561Smaybee db->db_last_dirty->dt.dl.dr_data = buf; 1635c717a561Smaybee } 1636fa9e4066Sahrens mutex_exit(&db->db_mtx); 1637fa9e4066Sahrens 163861e255ceSMatthew Ahrens dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 1639744947dcSTom Erickson DB_DNODE_EXIT(db); 1640fa9e4066Sahrens } 1641fa9e4066Sahrens 16423f9d6ad7SLin Ling void 16433f9d6ad7SLin Ling dbuf_release_bp(dmu_buf_impl_t *db) 16443f9d6ad7SLin Ling { 164543466aaeSMax Grossman objset_t *os = db->db_objset; 16463f9d6ad7SLin Ling 16473f9d6ad7SLin Ling ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 16483f9d6ad7SLin Ling ASSERT(arc_released(os->os_phys_buf) || 16493f9d6ad7SLin Ling list_link_active(&os->os_dsl_dataset->ds_synced_link)); 16503f9d6ad7SLin Ling ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 16513f9d6ad7SLin Ling 16521b912ec7SGeorge Wilson (void) arc_release(db->db_buf, db); 16533f9d6ad7SLin Ling } 16543f9d6ad7SLin Ling 16550f2e7d03SMatthew Ahrens /* 16560f2e7d03SMatthew Ahrens * We already have a dirty record for this TXG, and we are being 16570f2e7d03SMatthew Ahrens * dirtied again. 
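*
* (Editor's example, assumed workload: two dmu_write()s to the same
* block within one open txg reach this path on the second write;
* only the override state and the ARC freeze need to be reset, not
* the dirty accounting that dbuf_dirty() already performed.)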
16580f2e7d03SMatthew Ahrens */ 16590f2e7d03SMatthew Ahrens static void 16600f2e7d03SMatthew Ahrens dbuf_redirty(dbuf_dirty_record_t *dr) 16610f2e7d03SMatthew Ahrens { 16620f2e7d03SMatthew Ahrens dmu_buf_impl_t *db = dr->dr_dbuf; 16630f2e7d03SMatthew Ahrens 16640f2e7d03SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 16650f2e7d03SMatthew Ahrens 16660f2e7d03SMatthew Ahrens if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 16670f2e7d03SMatthew Ahrens /* 16680f2e7d03SMatthew Ahrens * If this buffer has already been written out, 16690f2e7d03SMatthew Ahrens * we now need to reset its state. 16700f2e7d03SMatthew Ahrens */ 16710f2e7d03SMatthew Ahrens dbuf_unoverride(dr); 16720f2e7d03SMatthew Ahrens if (db->db.db_object != DMU_META_DNODE_OBJECT && 16730f2e7d03SMatthew Ahrens db->db_state != DB_NOFILL) { 16740f2e7d03SMatthew Ahrens /* Already released on initial dirty, so just thaw. */ 16750f2e7d03SMatthew Ahrens ASSERT(arc_released(db->db_buf)); 16760f2e7d03SMatthew Ahrens arc_buf_thaw(db->db_buf); 16770f2e7d03SMatthew Ahrens } 16780f2e7d03SMatthew Ahrens } 16790f2e7d03SMatthew Ahrens } 16800f2e7d03SMatthew Ahrens 1681c717a561Smaybee dbuf_dirty_record_t * 1682fa9e4066Sahrens dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1683fa9e4066Sahrens { 1684744947dcSTom Erickson dnode_t *dn; 1685744947dcSTom Erickson objset_t *os; 1686c717a561Smaybee dbuf_dirty_record_t **drp, *dr; 1687fa9e4066Sahrens int txgoff = tx->tx_txg & TXG_MASK; 1688*9704bf7fSPaul Dagnelie boolean_t drop_struct_rwlock = B_FALSE; 1689fa9e4066Sahrens 1690fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1691e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 16929c9dc39aSek DMU_TX_DIRTY_BUF(tx, db); 1693fa9e4066Sahrens 1694744947dcSTom Erickson DB_DNODE_ENTER(db); 1695744947dcSTom Erickson dn = DB_DNODE(db); 1696fa9e4066Sahrens /* 1697fa9e4066Sahrens * Shouldn't dirty a regular buffer in syncing context. Private 1698fa9e4066Sahrens * objects may be dirtied in syncing context, but only if they 1699fa9e4066Sahrens * were already pre-dirtied in open context. 1700fa9e4066Sahrens */ 1701c166b69dSPaul Dagnelie #ifdef DEBUG 1702c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1703c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1704c166b69dSPaul Dagnelie RW_READER, FTAG); 1705c166b69dSPaul Dagnelie } 1706c717a561Smaybee ASSERT(!dmu_tx_is_syncing(tx) || 1707c717a561Smaybee BP_IS_HOLE(dn->dn_objset->os_rootbp) || 170814843421SMatthew Ahrens DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 170914843421SMatthew Ahrens dn->dn_objset->os_dsl_dataset == NULL); 1710c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1711c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 1712c166b69dSPaul Dagnelie #endif 1713fa9e4066Sahrens /* 1714fa9e4066Sahrens * We make this assert for private objects as well, but after we 1715fa9e4066Sahrens * check if we're already dirty. They are allowed to re-dirty 1716fa9e4066Sahrens * in syncing context. 1717fa9e4066Sahrens */ 1718ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1719c717a561Smaybee dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1720fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1721fa9e4066Sahrens 1722fa9e4066Sahrens mutex_enter(&db->db_mtx); 1723fa9e4066Sahrens /* 1724c717a561Smaybee * XXX make this true for indirects too? 
The problem is that 1725c717a561Smaybee * transactions created with dmu_tx_create_assigned() from 1726c717a561Smaybee * syncing context don't bother holding ahead. 1727fa9e4066Sahrens */ 1728c717a561Smaybee ASSERT(db->db_level != 0 || 172982c9918fSTim Haley db->db_state == DB_CACHED || db->db_state == DB_FILL || 173082c9918fSTim Haley db->db_state == DB_NOFILL); 1731fa9e4066Sahrens 1732fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 1733fa9e4066Sahrens /* 1734fa9e4066Sahrens * Don't set dirtyctx to SYNC if we're just modifying this as we 1735fa9e4066Sahrens * initialize the objset. 1736fa9e4066Sahrens */ 1737c166b69dSPaul Dagnelie if (dn->dn_dirtyctx == DN_UNDIRTIED) { 1738c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1739c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1740c166b69dSPaul Dagnelie RW_READER, FTAG); 1741c166b69dSPaul Dagnelie } 1742c166b69dSPaul Dagnelie if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1743c166b69dSPaul Dagnelie dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? 1744c166b69dSPaul Dagnelie DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1745c166b69dSPaul Dagnelie ASSERT(dn->dn_dirtyctx_firstset == NULL); 1746c166b69dSPaul Dagnelie dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1747c166b69dSPaul Dagnelie } 1748c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1749c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1750c166b69dSPaul Dagnelie FTAG); 1751c166b69dSPaul Dagnelie } 1752fa9e4066Sahrens } 1753aa02ea01STom Caputi 1754aa02ea01STom Caputi if (tx->tx_txg > dn->dn_dirty_txg) 1755aa02ea01STom Caputi dn->dn_dirty_txg = tx->tx_txg; 1756fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1757fa9e4066Sahrens 17580a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) 17590a586ceaSMark Shellenbaum dn->dn_have_spill = B_TRUE; 17600a586ceaSMark Shellenbaum 1761fa9e4066Sahrens /* 1762fa9e4066Sahrens * If this buffer is already dirty, we're done. 1763fa9e4066Sahrens */ 1764c717a561Smaybee drp = &db->db_last_dirty; 1765c717a561Smaybee ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 1766c717a561Smaybee db->db.db_object == DMU_META_DNODE_OBJECT); 17677e2186e3Sbonwick while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 17687e2186e3Sbonwick drp = &dr->dr_next; 17697e2186e3Sbonwick if (dr && dr->dr_txg == tx->tx_txg) { 1770744947dcSTom Erickson DB_DNODE_EXIT(db); 1771744947dcSTom Erickson 17720f2e7d03SMatthew Ahrens dbuf_redirty(dr); 1773fa9e4066Sahrens mutex_exit(&db->db_mtx); 17747e2186e3Sbonwick return (dr); 1775fa9e4066Sahrens } 1776fa9e4066Sahrens 1777fa9e4066Sahrens /* 1778fa9e4066Sahrens * Only valid if not already dirty. 1779fa9e4066Sahrens */ 178014843421SMatthew Ahrens ASSERT(dn->dn_object == 0 || 178114843421SMatthew Ahrens dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1782fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1783fa9e4066Sahrens 1784fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, db->db_level); 1785fa9e4066Sahrens 1786fa9e4066Sahrens /* 1787fa9e4066Sahrens * We should only be dirtying in syncing context if it's the 178814843421SMatthew Ahrens * mos or we're initializing the os or it's a special object. 178914843421SMatthew Ahrens * However, we are allowed to dirty in syncing context provided 179014843421SMatthew Ahrens * we already dirtied it in open context. Hence we must make 179114843421SMatthew Ahrens * this assertion only if we're not already dirty. 
1792fa9e4066Sahrens */ 1793744947dcSTom Erickson os = dn->dn_objset; 17943991b535SGeorge Wilson VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 1795c166b69dSPaul Dagnelie #ifdef DEBUG 1796c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1797c166b69dSPaul Dagnelie rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 179814843421SMatthew Ahrens ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 179914843421SMatthew Ahrens os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 1800c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1801c166b69dSPaul Dagnelie rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1802c166b69dSPaul Dagnelie #endif 1803fa9e4066Sahrens ASSERT(db->db.db_size != 0); 1804fa9e4066Sahrens 1805fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1806fa9e4066Sahrens 18070a586ceaSMark Shellenbaum if (db->db_blkid != DMU_BONUS_BLKID) { 180861e255ceSMatthew Ahrens dmu_objset_willuse_space(os, db->db.db_size, tx); 18091934e92fSmaybee } 18101934e92fSmaybee 1811ea8dc4b6Seschrock /* 1812ea8dc4b6Seschrock * If this buffer is dirty in an old transaction group we need 1813ea8dc4b6Seschrock * to make a copy of it so that the changes we make in this 1814ea8dc4b6Seschrock * transaction group won't leak out when we sync the older txg. 1815ea8dc4b6Seschrock */ 1816c717a561Smaybee dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1817c717a561Smaybee if (db->db_level == 0) { 1818c717a561Smaybee void *data_old = db->db_buf; 1819c717a561Smaybee 182082c9918fSTim Haley if (db->db_state != DB_NOFILL) { 18210a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 182282c9918fSTim Haley dbuf_fix_old_data(db, tx->tx_txg); 182382c9918fSTim Haley data_old = db->db.db_data; 182482c9918fSTim Haley } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 182582c9918fSTim Haley /* 182682c9918fSTim Haley * Release the data buffer from the cache so 182782c9918fSTim Haley * that we can modify it without impacting 182882c9918fSTim Haley * possible other users of this cached data 182982c9918fSTim Haley * block. Note that indirect blocks and 183082c9918fSTim Haley * private objects are not released until the 183182c9918fSTim Haley * syncing state (since they are only modified 183282c9918fSTim Haley * then). 183382c9918fSTim Haley */ 183482c9918fSTim Haley arc_release(db->db_buf, db); 183582c9918fSTim Haley dbuf_fix_old_data(db, tx->tx_txg); 183682c9918fSTim Haley data_old = db->db_buf; 183782c9918fSTim Haley } 183882c9918fSTim Haley ASSERT(data_old != NULL); 1839fa9e4066Sahrens } 1840c717a561Smaybee dr->dt.dl.dr_data = data_old; 1841c717a561Smaybee } else { 1842c717a561Smaybee mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1843c717a561Smaybee list_create(&dr->dt.di.dr_children, 1844c717a561Smaybee sizeof (dbuf_dirty_record_t), 1845c717a561Smaybee offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1846fa9e4066Sahrens } 184769962b56SMatthew Ahrens if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) 184869962b56SMatthew Ahrens dr->dr_accounted = db->db.db_size; 1849c717a561Smaybee dr->dr_dbuf = db; 1850c717a561Smaybee dr->dr_txg = tx->tx_txg; 1851c717a561Smaybee dr->dr_next = *drp; 1852c717a561Smaybee *drp = dr; 1853fa9e4066Sahrens 1854fa9e4066Sahrens /* 1855fa9e4066Sahrens * We could have been freed_in_flight between the dbuf_noread 1856fa9e4066Sahrens * and dbuf_dirty. We win, as though the dbuf_noread() had 1857fa9e4066Sahrens * happened after the free. 
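*
* Editor's illustration (an assumed interleaving):
*
*	writer				freeing thread
*	------				--------------
*	dbuf_noread() fills db
*					dbuf_free_range() records the
*					block as freed in this txg
*	dbuf_dirty() (here) clears
*	the range-tree entry below,
*	so the new contents survive
*	the free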
1858fa9e4066Sahrens */ 18590a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 18600a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) { 1861c717a561Smaybee mutex_enter(&dn->dn_mtx); 1862bf16b11eSMatthew Ahrens if (dn->dn_free_ranges[txgoff] != NULL) { 1863bf16b11eSMatthew Ahrens range_tree_clear(dn->dn_free_ranges[txgoff], 1864bf16b11eSMatthew Ahrens db->db_blkid, 1); 1865bf16b11eSMatthew Ahrens } 1866c717a561Smaybee mutex_exit(&dn->dn_mtx); 1867c717a561Smaybee db->db_freed_in_flight = FALSE; 1868fa9e4066Sahrens } 1869fa9e4066Sahrens 1870fa9e4066Sahrens /* 1871fa9e4066Sahrens * This buffer is now part of this txg 1872fa9e4066Sahrens */ 1873fa9e4066Sahrens dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1874fa9e4066Sahrens db->db_dirtycnt += 1; 1875fa9e4066Sahrens ASSERT3U(db->db_dirtycnt, <=, 3); 1876fa9e4066Sahrens 1877fa9e4066Sahrens mutex_exit(&db->db_mtx); 1878fa9e4066Sahrens 18790a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 18800a586ceaSMark Shellenbaum db->db_blkid == DMU_SPILL_BLKID) { 1881c717a561Smaybee mutex_enter(&dn->dn_mtx); 1882c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1883c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1884c717a561Smaybee mutex_exit(&dn->dn_mtx); 1885fa9e4066Sahrens dnode_setdirty(dn, tx); 1886744947dcSTom Erickson DB_DNODE_EXIT(db); 1887c717a561Smaybee return (dr); 188892654925SMatthew Ahrens } 188992654925SMatthew Ahrens 189092654925SMatthew Ahrens if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 189192654925SMatthew Ahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 1892*9704bf7fSPaul Dagnelie drop_struct_rwlock = B_TRUE; 1893*9704bf7fSPaul Dagnelie } 1894*9704bf7fSPaul Dagnelie 1895*9704bf7fSPaul Dagnelie /* 1896*9704bf7fSPaul Dagnelie * If we are overwriting a dedup BP, then unless it is snapshotted, 1897*9704bf7fSPaul Dagnelie * when we get to syncing context we will need to decrement its 1898*9704bf7fSPaul Dagnelie * refcount in the DDT. Prefetch the relevant DDT block so that 1899*9704bf7fSPaul Dagnelie * syncing context won't have to wait for the i/o. 1900*9704bf7fSPaul Dagnelie */ 1901*9704bf7fSPaul Dagnelie if (db->db_blkptr != NULL) { 1902*9704bf7fSPaul Dagnelie db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 1903*9704bf7fSPaul Dagnelie ddt_prefetch(os->os_spa, db->db_blkptr); 1904*9704bf7fSPaul Dagnelie dmu_buf_unlock_parent(db, dblt, FTAG); 190592654925SMatthew Ahrens } 190692654925SMatthew Ahrens 1907dcb6872cSMatthew Ahrens /* 1908dcb6872cSMatthew Ahrens * We need to hold the dn_struct_rwlock to make this assertion, 1909dcb6872cSMatthew Ahrens * because it protects dn_phys / dn_next_nlevels from changing. 
1910dcb6872cSMatthew Ahrens */ 1911dcb6872cSMatthew Ahrens ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 1912dcb6872cSMatthew Ahrens dn->dn_phys->dn_nlevels > db->db_level || 1913dcb6872cSMatthew Ahrens dn->dn_next_nlevels[txgoff] > db->db_level || 1914dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 1915dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 1916dcb6872cSMatthew Ahrens 1917fa9e4066Sahrens 19188346f03fSJonathan W Adams if (db->db_level == 0) { 1919eb633035STom Caputi ASSERT(!db->db_objset->os_raw_receive || 1920eb633035STom Caputi dn->dn_maxblkid >= db->db_blkid); 1921eb633035STom Caputi dnode_new_blkid(dn, db->db_blkid, tx, 1922*9704bf7fSPaul Dagnelie drop_struct_rwlock, B_FALSE); 19238346f03fSJonathan W Adams ASSERT(dn->dn_maxblkid >= db->db_blkid); 19248346f03fSJonathan W Adams } 19258346f03fSJonathan W Adams 192644eda4d7Smaybee if (db->db_level+1 < dn->dn_nlevels) { 1927c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 1928c717a561Smaybee dbuf_dirty_record_t *di; 1929c717a561Smaybee int parent_held = FALSE; 1930c717a561Smaybee 1931c717a561Smaybee if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1932c717a561Smaybee int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1933*9704bf7fSPaul Dagnelie parent = dbuf_hold_level(dn, db->db_level + 1, 1934c717a561Smaybee db->db_blkid >> epbs, FTAG); 193501025c89SJohn Harres ASSERT(parent != NULL); 1936c717a561Smaybee parent_held = TRUE; 1937c717a561Smaybee } 1938*9704bf7fSPaul Dagnelie if (drop_struct_rwlock) 1939fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1940*9704bf7fSPaul Dagnelie ASSERT3U(db->db_level + 1, ==, parent->db_level); 1941c717a561Smaybee di = dbuf_dirty(parent, tx); 1942c717a561Smaybee if (parent_held) 1943c717a561Smaybee dbuf_rele(parent, FTAG); 1944c717a561Smaybee 1945c717a561Smaybee mutex_enter(&db->db_mtx); 194669962b56SMatthew Ahrens /* 194769962b56SMatthew Ahrens * Since we've dropped the mutex, it's possible that 194869962b56SMatthew Ahrens * dbuf_undirty() might have changed this out from under us. 
194969962b56SMatthew Ahrens */ 1950c717a561Smaybee if (db->db_last_dirty == dr || 1951c717a561Smaybee dn->dn_object == DMU_META_DNODE_OBJECT) { 1952c717a561Smaybee mutex_enter(&di->dt.di.dr_mtx); 1953c717a561Smaybee ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1954c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1955c717a561Smaybee list_insert_tail(&di->dt.di.dr_children, dr); 1956c717a561Smaybee mutex_exit(&di->dt.di.dr_mtx); 1957c717a561Smaybee dr->dr_parent = di; 1958c717a561Smaybee } 1959c717a561Smaybee mutex_exit(&db->db_mtx); 1960fa9e4066Sahrens } else { 1961*9704bf7fSPaul Dagnelie ASSERT(db->db_level + 1 == dn->dn_nlevels); 1962c717a561Smaybee ASSERT(db->db_blkid < dn->dn_nblkptr); 1963744947dcSTom Erickson ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 1964c717a561Smaybee mutex_enter(&dn->dn_mtx); 1965c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1966c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1967c717a561Smaybee mutex_exit(&dn->dn_mtx); 1968*9704bf7fSPaul Dagnelie if (drop_struct_rwlock) 1969fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1970fa9e4066Sahrens } 1971fa9e4066Sahrens 1972fa9e4066Sahrens dnode_setdirty(dn, tx); 1973744947dcSTom Erickson DB_DNODE_EXIT(db); 1974c717a561Smaybee return (dr); 1975fa9e4066Sahrens } 1976fa9e4066Sahrens 19773b2aab18SMatthew Ahrens /* 19783e30c24aSWill Andrews * Undirty a buffer in the transaction group referenced by the given 19793e30c24aSWill Andrews * transaction. Return whether this evicted the dbuf. 19803b2aab18SMatthew Ahrens */ 19813b2aab18SMatthew Ahrens static boolean_t 1982fa9e4066Sahrens dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1983fa9e4066Sahrens { 1984744947dcSTom Erickson dnode_t *dn; 1985c717a561Smaybee uint64_t txg = tx->tx_txg; 198617f17c2dSbonwick dbuf_dirty_record_t *dr, **drp; 1987fa9e4066Sahrens 1988c717a561Smaybee ASSERT(txg != 0); 198946e1baa6SMatthew Ahrens 199046e1baa6SMatthew Ahrens /* 199146e1baa6SMatthew Ahrens * Due to our use of dn_nlevels below, this can only be called 199246e1baa6SMatthew Ahrens * in open context, unless we are operating on the MOS. 199346e1baa6SMatthew Ahrens * From syncing context, dn_nlevels may be different from the 199446e1baa6SMatthew Ahrens * dn_nlevels used when dbuf was dirtied. 199546e1baa6SMatthew Ahrens */ 199646e1baa6SMatthew Ahrens ASSERT(db->db_objset == 199746e1baa6SMatthew Ahrens dmu_objset_pool(db->db_objset)->dp_meta_objset || 199846e1baa6SMatthew Ahrens txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 19990a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 20003b2aab18SMatthew Ahrens ASSERT0(db->db_level); 20013b2aab18SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 2002fa9e4066Sahrens 2003fa9e4066Sahrens /* 2004fa9e4066Sahrens * If this buffer is not dirty, we're done. 
2005fa9e4066Sahrens */ 200617f17c2dSbonwick for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 2007c717a561Smaybee if (dr->dr_txg <= txg) 2008c717a561Smaybee break; 20093b2aab18SMatthew Ahrens if (dr == NULL || dr->dr_txg < txg) 20103b2aab18SMatthew Ahrens return (B_FALSE); 2011c717a561Smaybee ASSERT(dr->dr_txg == txg); 2012b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 2013fa9e4066Sahrens 2014744947dcSTom Erickson DB_DNODE_ENTER(db); 2015744947dcSTom Erickson dn = DB_DNODE(db); 2016744947dcSTom Erickson 2017fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2018fa9e4066Sahrens 2019fa9e4066Sahrens ASSERT(db->db.db_size != 0); 2020fa9e4066Sahrens 202146e1baa6SMatthew Ahrens dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 202246e1baa6SMatthew Ahrens dr->dr_accounted, txg); 2023fa9e4066Sahrens 202417f17c2dSbonwick *drp = dr->dr_next; 2025c717a561Smaybee 20263f2366c2SGordon Ross /* 20273f2366c2SGordon Ross * Note that there are three places in dbuf_dirty() 20283f2366c2SGordon Ross * where this dirty record may be put on a list. 20293f2366c2SGordon Ross * Make sure to do a list_remove corresponding to 20303f2366c2SGordon Ross * every one of those list_insert calls. 20313f2366c2SGordon Ross */ 2032c717a561Smaybee if (dr->dr_parent) { 2033c717a561Smaybee mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 2034c717a561Smaybee list_remove(&dr->dr_parent->dt.di.dr_children, dr); 2035c717a561Smaybee mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 20363f2366c2SGordon Ross } else if (db->db_blkid == DMU_SPILL_BLKID || 203746e1baa6SMatthew Ahrens db->db_level + 1 == dn->dn_nlevels) { 2038cdb0ab79Smaybee ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 2039c717a561Smaybee mutex_enter(&dn->dn_mtx); 2040c717a561Smaybee list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 2041c717a561Smaybee mutex_exit(&dn->dn_mtx); 2042c717a561Smaybee } 2043744947dcSTom Erickson DB_DNODE_EXIT(db); 2044c717a561Smaybee 20453b2aab18SMatthew Ahrens if (db->db_state != DB_NOFILL) { 20463b2aab18SMatthew Ahrens dbuf_unoverride(dr); 2047c717a561Smaybee 2048c717a561Smaybee ASSERT(db->db_buf != NULL); 20493b2aab18SMatthew Ahrens ASSERT(dr->dt.dl.dr_data != NULL); 20503b2aab18SMatthew Ahrens if (dr->dt.dl.dr_data != db->db_buf) 2051dcbf3bd6SGeorge Wilson arc_buf_destroy(dr->dt.dl.dr_data, db); 2052c717a561Smaybee } 2053d2b3cbbdSJorgen Lundman 2054c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2055fa9e4066Sahrens 2056fa9e4066Sahrens ASSERT(db->db_dirtycnt > 0); 2057fa9e4066Sahrens db->db_dirtycnt -= 1; 2058fa9e4066Sahrens 2059e914ace2STim Schumacher if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 2060dcbf3bd6SGeorge Wilson ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 2061dcbf3bd6SGeorge Wilson dbuf_destroy(db); 20623b2aab18SMatthew Ahrens return (B_TRUE); 2063fa9e4066Sahrens } 2064fa9e4066Sahrens 20653b2aab18SMatthew Ahrens return (B_FALSE); 2066fa9e4066Sahrens } 2067fa9e4066Sahrens 2068eb633035STom Caputi static void 2069eb633035STom Caputi dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx) 2070fa9e4066Sahrens { 207143466aaeSMax Grossman dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2072fa9e4066Sahrens 2073fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 2074e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2075fa9e4066Sahrens 20760f2e7d03SMatthew Ahrens /* 20770f2e7d03SMatthew Ahrens * Quick check for dirtiness. 
For already dirty blocks, this 20780f2e7d03SMatthew Ahrens * reduces runtime of this function by >90%, and overall performance 20790f2e7d03SMatthew Ahrens * by 50% for some workloads (e.g. file deletion with indirect blocks 20800f2e7d03SMatthew Ahrens * cached). 20810f2e7d03SMatthew Ahrens */ 20820f2e7d03SMatthew Ahrens mutex_enter(&db->db_mtx); 20830f2e7d03SMatthew Ahrens dbuf_dirty_record_t *dr; 20840f2e7d03SMatthew Ahrens for (dr = db->db_last_dirty; 20850f2e7d03SMatthew Ahrens dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 20860f2e7d03SMatthew Ahrens /* 20870f2e7d03SMatthew Ahrens * It's possible that it is already dirty but not cached, 20880f2e7d03SMatthew Ahrens * because there are some calls to dbuf_dirty() that don't 20890f2e7d03SMatthew Ahrens * go through dmu_buf_will_dirty(). 20900f2e7d03SMatthew Ahrens */ 20910f2e7d03SMatthew Ahrens if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 20920f2e7d03SMatthew Ahrens /* This dbuf is already dirty and cached. */ 20930f2e7d03SMatthew Ahrens dbuf_redirty(dr); 20940f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 20950f2e7d03SMatthew Ahrens return; 20960f2e7d03SMatthew Ahrens } 20970f2e7d03SMatthew Ahrens } 20980f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 20990f2e7d03SMatthew Ahrens 2100744947dcSTom Erickson DB_DNODE_ENTER(db); 2101744947dcSTom Erickson if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 2102eb633035STom Caputi flags |= DB_RF_HAVESTRUCT; 2103744947dcSTom Erickson DB_DNODE_EXIT(db); 2104eb633035STom Caputi (void) dbuf_read(db, NULL, flags); 2105c717a561Smaybee (void) dbuf_dirty(db, tx); 2106fa9e4066Sahrens } 2107fa9e4066Sahrens 2108eb633035STom Caputi void 2109eb633035STom Caputi dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2110eb633035STom Caputi { 2111eb633035STom Caputi dmu_buf_will_dirty_impl(db_fake, 2112eb633035STom Caputi DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx); 2113eb633035STom Caputi } 2114eb633035STom Caputi 211582c9918fSTim Haley void 211682c9918fSTim Haley dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 211782c9918fSTim Haley { 211882c9918fSTim Haley dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 211982c9918fSTim Haley 212082c9918fSTim Haley db->db_state = DB_NOFILL; 212182c9918fSTim Haley 212282c9918fSTim Haley dmu_buf_will_fill(db_fake, tx); 212382c9918fSTim Haley } 212482c9918fSTim Haley 2125fa9e4066Sahrens void 2126ea8dc4b6Seschrock dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2127fa9e4066Sahrens { 2128ea8dc4b6Seschrock dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2129ea8dc4b6Seschrock 21300a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2131fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 2132fa9e4066Sahrens ASSERT(db->db_level == 0); 2133e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2134fa9e4066Sahrens 2135ea8dc4b6Seschrock ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 2136fa9e4066Sahrens dmu_tx_private_ok(tx)); 2137fa9e4066Sahrens 2138fa9e4066Sahrens dbuf_noread(db); 2139c717a561Smaybee (void) dbuf_dirty(db, tx); 2140fa9e4066Sahrens } 2141fa9e4066Sahrens 2142eb633035STom Caputi /* 2143eb633035STom Caputi * This function is effectively the same as dmu_buf_will_dirty(), but 2144eb633035STom Caputi * indicates the caller expects raw encrypted data in the db, and provides 2145eb633035STom Caputi * the crypt params (byteorder, salt, iv, mac) which should be stored in the 2146eb633035STom Caputi * blkptr_t when this dbuf is written. 
This is only used for blocks of 2147eb633035STom Caputi * dnodes during a raw receive. 2148eb633035STom Caputi */ 2149eb633035STom Caputi void 2150eb633035STom Caputi dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder, 2151eb633035STom Caputi const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx) 2152eb633035STom Caputi { 2153eb633035STom Caputi dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2154eb633035STom Caputi dbuf_dirty_record_t *dr; 2155eb633035STom Caputi 2156eb633035STom Caputi /* 2157eb633035STom Caputi * dr_has_raw_params is only processed for blocks of dnodes 2158eb633035STom Caputi * (see dbuf_sync_dnode_leaf_crypt()). 2159eb633035STom Caputi */ 2160eb633035STom Caputi ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 2161eb633035STom Caputi ASSERT3U(db->db_level, ==, 0); 2162eb633035STom Caputi 2163eb633035STom Caputi dmu_buf_will_dirty_impl(db_fake, 2164eb633035STom Caputi DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx); 2165eb633035STom Caputi 2166eb633035STom Caputi dr = db->db_last_dirty; 2167eb633035STom Caputi while (dr != NULL && dr->dr_txg > tx->tx_txg) 2168eb633035STom Caputi dr = dr->dr_next; 2169eb633035STom Caputi 2170eb633035STom Caputi ASSERT3P(dr, !=, NULL); 2171eb633035STom Caputi ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2172eb633035STom Caputi 2173eb633035STom Caputi dr->dt.dl.dr_has_raw_params = B_TRUE; 2174eb633035STom Caputi dr->dt.dl.dr_byteorder = byteorder; 2175eb633035STom Caputi bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN); 2176eb633035STom Caputi bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN); 2177eb633035STom Caputi bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN); 2178eb633035STom Caputi } 2179eb633035STom Caputi 2180fa9e4066Sahrens #pragma weak dmu_buf_fill_done = dbuf_fill_done 2181fa9e4066Sahrens /* ARGSUSED */ 2182fa9e4066Sahrens void 2183fa9e4066Sahrens dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 2184fa9e4066Sahrens { 2185fa9e4066Sahrens mutex_enter(&db->db_mtx); 21869c9dc39aSek DBUF_VERIFY(db); 2187fa9e4066Sahrens 2188fa9e4066Sahrens if (db->db_state == DB_FILL) { 2189c717a561Smaybee if (db->db_level == 0 && db->db_freed_in_flight) { 21900a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2191fa9e4066Sahrens /* we were freed while filling */ 2192fa9e4066Sahrens /* XXX dbuf_undirty? 
*/ 2193fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 2194c717a561Smaybee db->db_freed_in_flight = FALSE; 2195fa9e4066Sahrens } 2196fa9e4066Sahrens db->db_state = DB_CACHED; 2197fa9e4066Sahrens cv_broadcast(&db->db_changed); 2198fa9e4066Sahrens } 2199fa9e4066Sahrens mutex_exit(&db->db_mtx); 2200fa9e4066Sahrens } 2201fa9e4066Sahrens 22025d7b4d43SMatthew Ahrens void 22035d7b4d43SMatthew Ahrens dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 22045d7b4d43SMatthew Ahrens bp_embedded_type_t etype, enum zio_compress comp, 22055d7b4d43SMatthew Ahrens int uncompressed_size, int compressed_size, int byteorder, 22065d7b4d43SMatthew Ahrens dmu_tx_t *tx) 22075d7b4d43SMatthew Ahrens { 22085d7b4d43SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 22095d7b4d43SMatthew Ahrens struct dirty_leaf *dl; 22105d7b4d43SMatthew Ahrens dmu_object_type_t type; 22115d7b4d43SMatthew Ahrens 2212ca0cc391SMatthew Ahrens if (etype == BP_EMBEDDED_TYPE_DATA) { 2213ca0cc391SMatthew Ahrens ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 2214ca0cc391SMatthew Ahrens SPA_FEATURE_EMBEDDED_DATA)); 2215ca0cc391SMatthew Ahrens } 2216ca0cc391SMatthew Ahrens 22175d7b4d43SMatthew Ahrens DB_DNODE_ENTER(db); 22185d7b4d43SMatthew Ahrens type = DB_DNODE(db)->dn_type; 22195d7b4d43SMatthew Ahrens DB_DNODE_EXIT(db); 22205d7b4d43SMatthew Ahrens 22215d7b4d43SMatthew Ahrens ASSERT0(db->db_level); 22225d7b4d43SMatthew Ahrens ASSERT(db->db_blkid != DMU_BONUS_BLKID); 22235d7b4d43SMatthew Ahrens 22245d7b4d43SMatthew Ahrens dmu_buf_will_not_fill(dbuf, tx); 22255d7b4d43SMatthew Ahrens 22265d7b4d43SMatthew Ahrens ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 22275d7b4d43SMatthew Ahrens dl = &db->db_last_dirty->dt.dl; 22285d7b4d43SMatthew Ahrens encode_embedded_bp_compressed(&dl->dr_overridden_by, 22295d7b4d43SMatthew Ahrens data, comp, uncompressed_size, compressed_size); 22305d7b4d43SMatthew Ahrens BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 22315d7b4d43SMatthew Ahrens BP_SET_TYPE(&dl->dr_overridden_by, type); 22325d7b4d43SMatthew Ahrens BP_SET_LEVEL(&dl->dr_overridden_by, 0); 22335d7b4d43SMatthew Ahrens BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 22345d7b4d43SMatthew Ahrens 22355d7b4d43SMatthew Ahrens dl->dr_override_state = DR_OVERRIDDEN; 22365d7b4d43SMatthew Ahrens dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 22375d7b4d43SMatthew Ahrens } 22385d7b4d43SMatthew Ahrens 22392fdbea25SAleksandr Guzovskiy /* 22402fdbea25SAleksandr Guzovskiy * Directly assign a provided arc buf to a given dbuf if it's not referenced 22412fdbea25SAleksandr Guzovskiy * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 
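 *
 * A minimal usage sketch (illustrative only: "src" and its size are
 * hypothetical, but dmu_request_arcbuf() is the usual way a caller
 * obtains a loaned buf like this):
 *
 *	arc_buf_t *buf = dmu_request_arcbuf(&db->db, db->db.db_size);
 *	bcopy(src, buf->b_data, db->db.db_size);
 *	dbuf_assign_arcbuf(db, buf, tx);	(buf is consumed either way)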
22422fdbea25SAleksandr Guzovskiy */ 22432fdbea25SAleksandr Guzovskiy void 22442fdbea25SAleksandr Guzovskiy dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 22452fdbea25SAleksandr Guzovskiy { 2246e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 22470a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 22482fdbea25SAleksandr Guzovskiy ASSERT(db->db_level == 0); 22495602294fSDan Kimmel ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 22502fdbea25SAleksandr Guzovskiy ASSERT(buf != NULL); 22516ccda740Sloli ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size); 22522fdbea25SAleksandr Guzovskiy ASSERT(tx->tx_txg != 0); 22532fdbea25SAleksandr Guzovskiy 22542fdbea25SAleksandr Guzovskiy arc_return_buf(buf, db); 22552fdbea25SAleksandr Guzovskiy ASSERT(arc_released(buf)); 22562fdbea25SAleksandr Guzovskiy 22572fdbea25SAleksandr Guzovskiy mutex_enter(&db->db_mtx); 22582fdbea25SAleksandr Guzovskiy 22592fdbea25SAleksandr Guzovskiy while (db->db_state == DB_READ || db->db_state == DB_FILL) 22602fdbea25SAleksandr Guzovskiy cv_wait(&db->db_changed, &db->db_mtx); 22612fdbea25SAleksandr Guzovskiy 22622fdbea25SAleksandr Guzovskiy ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 22632fdbea25SAleksandr Guzovskiy 22642fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED && 2265e914ace2STim Schumacher zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 2266eb633035STom Caputi /* 2267eb633035STom Caputi * In practice, we will never have a case where we have an 2268eb633035STom Caputi * encrypted arc buffer while additional holds exist on the 2269eb633035STom Caputi * dbuf. We don't handle this here so we simply assert that 2270eb633035STom Caputi * fact instead. 2271eb633035STom Caputi */ 2272eb633035STom Caputi ASSERT(!arc_is_encrypted(buf)); 22732fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 22742fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 22752fdbea25SAleksandr Guzovskiy bcopy(buf->b_data, db->db.db_data, db->db.db_size); 2276dcbf3bd6SGeorge Wilson arc_buf_destroy(buf, db); 2277c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_copied(); 22782fdbea25SAleksandr Guzovskiy return; 22792fdbea25SAleksandr Guzovskiy } 22802fdbea25SAleksandr Guzovskiy 2281c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_nocopy(); 22822fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED) { 22832fdbea25SAleksandr Guzovskiy dbuf_dirty_record_t *dr = db->db_last_dirty; 22842fdbea25SAleksandr Guzovskiy 22852fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf != NULL); 22862fdbea25SAleksandr Guzovskiy if (dr != NULL && dr->dr_txg == tx->tx_txg) { 22872fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_data == db->db_buf); 2288eb633035STom Caputi 22892fdbea25SAleksandr Guzovskiy if (!arc_released(db->db_buf)) { 22902fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_override_state == 22912fdbea25SAleksandr Guzovskiy DR_OVERRIDDEN); 22922fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 22932fdbea25SAleksandr Guzovskiy } 22942fdbea25SAleksandr Guzovskiy dr->dt.dl.dr_data = buf; 2295dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 22962fdbea25SAleksandr Guzovskiy } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 22972fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 2298dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 22992fdbea25SAleksandr Guzovskiy } 23002fdbea25SAleksandr Guzovskiy db->db_buf = NULL; 23012fdbea25SAleksandr Guzovskiy } 
23022fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf == NULL); 23032fdbea25SAleksandr Guzovskiy dbuf_set_data(db, buf); 23042fdbea25SAleksandr Guzovskiy db->db_state = DB_FILL; 23052fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 23062fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 230743466aaeSMax Grossman dmu_buf_fill_done(&db->db, tx); 23082fdbea25SAleksandr Guzovskiy } 23092fdbea25SAleksandr Guzovskiy 2310ea8dc4b6Seschrock void 2311dcbf3bd6SGeorge Wilson dbuf_destroy(dmu_buf_impl_t *db) 2312fa9e4066Sahrens { 2313744947dcSTom Erickson dnode_t *dn; 2314ea8dc4b6Seschrock dmu_buf_impl_t *parent = db->db_parent; 2315744947dcSTom Erickson dmu_buf_impl_t *dndb; 2316fa9e4066Sahrens 2317fa9e4066Sahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 2318e914ace2STim Schumacher ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2319fa9e4066Sahrens 2320dcbf3bd6SGeorge Wilson if (db->db_buf != NULL) { 2321dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 2322dcbf3bd6SGeorge Wilson db->db_buf = NULL; 2323dcbf3bd6SGeorge Wilson } 2324ea8dc4b6Seschrock 2325dcbf3bd6SGeorge Wilson if (db->db_blkid == DMU_BONUS_BLKID) { 232654811da5SToomas Soome int slots = DB_DNODE(db)->dn_num_slots; 232754811da5SToomas Soome int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 232854811da5SToomas Soome if (db->db.db_data != NULL) { 232954811da5SToomas Soome zio_buf_free(db->db.db_data, bonuslen); 233054811da5SToomas Soome arc_space_return(bonuslen, ARC_SPACE_BONUS); 233154811da5SToomas Soome db->db_state = DB_UNCACHED; 233254811da5SToomas Soome } 2333fa9e4066Sahrens } 2334fa9e4066Sahrens 2335dcbf3bd6SGeorge Wilson dbuf_clear_data(db); 2336dcbf3bd6SGeorge Wilson 2337dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) { 2338adb52d92SMatthew Ahrens ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2339adb52d92SMatthew Ahrens db->db_caching_status == DB_DBUF_METADATA_CACHE); 2340adb52d92SMatthew Ahrens 2341adb52d92SMatthew Ahrens multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2342e914ace2STim Schumacher (void) zfs_refcount_remove_many( 2343adb52d92SMatthew Ahrens &dbuf_caches[db->db_caching_status].size, 2344dcbf3bd6SGeorge Wilson db->db.db_size, db); 2345adb52d92SMatthew Ahrens 2346adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2347dcbf3bd6SGeorge Wilson } 2348dcbf3bd6SGeorge Wilson 234982c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2350fa9e4066Sahrens ASSERT(db->db_data_pending == NULL); 2351fa9e4066Sahrens 2352ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2353ea8dc4b6Seschrock db->db_blkptr = NULL; 2354ea8dc4b6Seschrock 2355dcbf3bd6SGeorge Wilson /* 2356dcbf3bd6SGeorge Wilson * Now that db_state is DB_EVICTING, nobody else can find this via 2357dcbf3bd6SGeorge Wilson * the hash table. We can now drop db_mtx, which allows us to 2358dcbf3bd6SGeorge Wilson * acquire the dn_dbufs_mtx. 
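	 *
	 * A sketch of the inversion this avoids (illustrative; elsewhere,
	 * e.g. in dnode_evict_dbufs(), dn_dbufs_mtx is taken before
	 * db_mtx):
	 *
	 *	this thread:			another thread:
	 *	holds db_mtx			holds dn_dbufs_mtx
	 *	wants dn_dbufs_mtx		wants db_mtx	=> deadlock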
2359dcbf3bd6SGeorge Wilson */ 2360dcbf3bd6SGeorge Wilson mutex_exit(&db->db_mtx); 2361dcbf3bd6SGeorge Wilson 2362744947dcSTom Erickson DB_DNODE_ENTER(db); 2363744947dcSTom Erickson dn = DB_DNODE(db); 2364744947dcSTom Erickson dndb = dn->dn_dbuf; 2365dcbf3bd6SGeorge Wilson if (db->db_blkid != DMU_BONUS_BLKID) { 2366dcbf3bd6SGeorge Wilson boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2367dcbf3bd6SGeorge Wilson if (needlock) 2368dcbf3bd6SGeorge Wilson mutex_enter(&dn->dn_dbufs_mtx); 23690f6d88adSAlex Reece avl_remove(&dn->dn_dbufs, db); 2370640c1670SJosef 'Jeff' Sipek atomic_dec_32(&dn->dn_dbufs_count); 2371744947dcSTom Erickson membar_producer(); 2372744947dcSTom Erickson DB_DNODE_EXIT(db); 2373dcbf3bd6SGeorge Wilson if (needlock) 2374dcbf3bd6SGeorge Wilson mutex_exit(&dn->dn_dbufs_mtx); 2375744947dcSTom Erickson /* 2376744947dcSTom Erickson * Decrementing the dbuf count means that the hold corresponding 2377744947dcSTom Erickson * to the removed dbuf is no longer discounted in dnode_move(), 2378744947dcSTom Erickson * so the dnode cannot be moved until after we release the hold. 2379744947dcSTom Erickson * The membar_producer() ensures visibility of the decremented 2380744947dcSTom Erickson * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2381744947dcSTom Erickson * release any lock. 2382744947dcSTom Erickson */ 2383c2919acbSMatthew Ahrens mutex_enter(&dn->dn_mtx); 2384c2919acbSMatthew Ahrens dnode_rele_and_unlock(dn, db, B_TRUE); 2385744947dcSTom Erickson db->db_dnode_handle = NULL; 2386dcbf3bd6SGeorge Wilson 2387dcbf3bd6SGeorge Wilson dbuf_hash_remove(db); 2388744947dcSTom Erickson } else { 2389744947dcSTom Erickson DB_DNODE_EXIT(db); 2390ea8dc4b6Seschrock } 2391ea8dc4b6Seschrock 2392e914ace2STim Schumacher ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2393ea8dc4b6Seschrock 2394dcbf3bd6SGeorge Wilson db->db_parent = NULL; 2395dcbf3bd6SGeorge Wilson 2396dcbf3bd6SGeorge Wilson ASSERT(db->db_buf == NULL); 2397dcbf3bd6SGeorge Wilson ASSERT(db->db.db_data == NULL); 2398dcbf3bd6SGeorge Wilson ASSERT(db->db_hash_next == NULL); 2399dcbf3bd6SGeorge Wilson ASSERT(db->db_blkptr == NULL); 2400dcbf3bd6SGeorge Wilson ASSERT(db->db_data_pending == NULL); 2401adb52d92SMatthew Ahrens ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 2402dcbf3bd6SGeorge Wilson ASSERT(!multilist_link_active(&db->db_cache_link)); 2403dcbf3bd6SGeorge Wilson 2404dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2405dcbf3bd6SGeorge Wilson arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2406fa9e4066Sahrens 2407fa9e4066Sahrens /* 2408744947dcSTom Erickson * If this dbuf is referenced from an indirect dbuf, 2409fa9e4066Sahrens * decrement the ref count on the indirect dbuf. 2410fa9e4066Sahrens */ 2411c2919acbSMatthew Ahrens if (parent && parent != dndb) { 2412c2919acbSMatthew Ahrens mutex_enter(&parent->db_mtx); 2413c2919acbSMatthew Ahrens dbuf_rele_and_unlock(parent, db, B_TRUE); 2414c2919acbSMatthew Ahrens } 2415fa9e4066Sahrens } 2416fa9e4066Sahrens 2417a2cdcdd2SPaul Dagnelie /* 2418a2cdcdd2SPaul Dagnelie * Note: While bpp will always be updated if the function returns success, 2419a2cdcdd2SPaul Dagnelie * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2420f67950b2SNasf-Fan * this happens when the dnode is the meta-dnode, or {user|group|project}used 2421a2cdcdd2SPaul Dagnelie * object. 
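 *
 * A sketch of the caller contract (the caller shown is hypothetical;
 * dbuf_hold_impl() below is the real one):
 *
 *	blkptr_t *bp = NULL;
 *	dmu_buf_impl_t *parent = NULL;
 *	int err = dbuf_findbp(dn, level, blkid, FALSE, &parent, &bp);
 *	if (err == 0) {
 *		... *bp is valid; if parent is non-NULL it carries a
 *		hold that must be dropped with dbuf_rele(parent, NULL) ...
 *	}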
2422a2cdcdd2SPaul Dagnelie */ 2423fa9e4066Sahrens static int 2424fa9e4066Sahrens dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2425fa9e4066Sahrens dmu_buf_impl_t **parentp, blkptr_t **bpp) 2426fa9e4066Sahrens { 24270b69c2f0Sahrens *parentp = NULL; 24280b69c2f0Sahrens *bpp = NULL; 24290b69c2f0Sahrens 24300a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 24310a586ceaSMark Shellenbaum 24320a586ceaSMark Shellenbaum if (blkid == DMU_SPILL_BLKID) { 24330a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 243406e0070dSMark Shellenbaum if (dn->dn_have_spill && 243506e0070dSMark Shellenbaum (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 243654811da5SToomas Soome *bpp = DN_SPILL_BLKPTR(dn->dn_phys); 24370a586ceaSMark Shellenbaum else 24380a586ceaSMark Shellenbaum *bpp = NULL; 24390a586ceaSMark Shellenbaum dbuf_add_ref(dn->dn_dbuf, NULL); 24400a586ceaSMark Shellenbaum *parentp = dn->dn_dbuf; 24410a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 24420a586ceaSMark Shellenbaum return (0); 24430a586ceaSMark Shellenbaum } 2444ea8dc4b6Seschrock 24457de35a3eSPaul Dagnelie int nlevels = 24467de35a3eSPaul Dagnelie (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 24477de35a3eSPaul Dagnelie int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2448fa9e4066Sahrens 2449fa9e4066Sahrens ASSERT3U(level * epbs, <, 64); 2450fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 24517de35a3eSPaul Dagnelie /* 24527de35a3eSPaul Dagnelie * This assertion shouldn't trip as long as the max indirect block size 24537de35a3eSPaul Dagnelie * is less than 1M. The reason for this is that up to that point, 24547de35a3eSPaul Dagnelie * the number of levels required to address an entire object with blocks 24557de35a3eSPaul Dagnelie * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 24567de35a3eSPaul Dagnelie * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 24577de35a3eSPaul Dagnelie * (i.e. we can address the entire object), objects will all use at most 24587de35a3eSPaul Dagnelie * N-1 levels and the assertion won't overflow. However, once epbs is 24597de35a3eSPaul Dagnelie * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 24607de35a3eSPaul Dagnelie * enough to address an entire object, so objects will have 5 levels, 24617de35a3eSPaul Dagnelie * but then this assertion will overflow. 24627de35a3eSPaul Dagnelie * 24637de35a3eSPaul Dagnelie * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 24647de35a3eSPaul Dagnelie * need to redo this logic to handle overflows. 
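	 *
	 * Worked numbers (illustrative): SPA_MINBLOCKSIZE is 512 bytes, so
	 * a maximally sized object spans 2^64 / 2^9 = 2^55 blocks, i.e.
	 * 55 bits of blkid.  With 128K indirect blocks and 128-byte
	 * blkptrs, epbs = 17 - 7 = 10; the smallest N with N * 10 + 1 > 55
	 * is 6, and 6 * 10 + 1 = 61 <= 64, so the sum asserted below stays
	 * in bounds.  With 1M indirect blocks, epbs = 13: 4 * 13 + 1 = 53
	 * is too small, 5 levels are needed, and 5 * 13 + 1 = 66 > 64.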
24657de35a3eSPaul Dagnelie */ 24667de35a3eSPaul Dagnelie ASSERT(level >= nlevels || 24677de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs) + 24687de35a3eSPaul Dagnelie highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2469ea8dc4b6Seschrock if (level >= nlevels || 24707de35a3eSPaul Dagnelie blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 24717de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs)) || 24727de35a3eSPaul Dagnelie (fail_sparse && 24737de35a3eSPaul Dagnelie blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2474fa9e4066Sahrens /* the buffer has no parent yet */ 2475be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 2476fa9e4066Sahrens } else if (level < nlevels-1) { 2477fa9e4066Sahrens /* this block is referenced from an indirect block */ 2478fa9e4066Sahrens int err = dbuf_hold_impl(dn, level+1, 2479a2cdcdd2SPaul Dagnelie blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2480fa9e4066Sahrens if (err) 2481fa9e4066Sahrens return (err); 2482ea8dc4b6Seschrock err = dbuf_read(*parentp, NULL, 2483ea8dc4b6Seschrock (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2484c543ec06Sahrens if (err) { 2485c543ec06Sahrens dbuf_rele(*parentp, NULL); 2486c543ec06Sahrens *parentp = NULL; 2487c543ec06Sahrens return (err); 2488ea8dc4b6Seschrock } 2489*9704bf7fSPaul Dagnelie rw_enter(&(*parentp)->db_rwlock, RW_READER); 2490c543ec06Sahrens *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2491c543ec06Sahrens (blkid & ((1ULL << epbs) - 1)); 24927de35a3eSPaul Dagnelie if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 24937de35a3eSPaul Dagnelie ASSERT(BP_IS_HOLE(*bpp)); 2494*9704bf7fSPaul Dagnelie rw_exit(&(*parentp)->db_rwlock); 2495c543ec06Sahrens return (0); 2496fa9e4066Sahrens } else { 2497fa9e4066Sahrens /* the block is referenced from the dnode */ 2498fa9e4066Sahrens ASSERT3U(level, ==, nlevels-1); 2499fa9e4066Sahrens ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2500fa9e4066Sahrens blkid < dn->dn_phys->dn_nblkptr); 2501c543ec06Sahrens if (dn->dn_dbuf) { 2502c543ec06Sahrens dbuf_add_ref(dn->dn_dbuf, NULL); 2503c543ec06Sahrens *parentp = dn->dn_dbuf; 2504c543ec06Sahrens } 2505fa9e4066Sahrens *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2506fa9e4066Sahrens return (0); 2507fa9e4066Sahrens } 2508fa9e4066Sahrens } 2509fa9e4066Sahrens 2510fa9e4066Sahrens static dmu_buf_impl_t * 2511fa9e4066Sahrens dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2512fa9e4066Sahrens dmu_buf_impl_t *parent, blkptr_t *blkptr) 2513fa9e4066Sahrens { 2514503ad85cSMatthew Ahrens objset_t *os = dn->dn_objset; 2515fa9e4066Sahrens dmu_buf_impl_t *db, *odb; 2516fa9e4066Sahrens 2517fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2518fa9e4066Sahrens ASSERT(dn->dn_type != DMU_OT_NONE); 2519fa9e4066Sahrens 2520dcbf3bd6SGeorge Wilson db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2521fa9e4066Sahrens 2522fa9e4066Sahrens db->db_objset = os; 2523fa9e4066Sahrens db->db.db_object = dn->dn_object; 2524fa9e4066Sahrens db->db_level = level; 2525fa9e4066Sahrens db->db_blkid = blkid; 2526c717a561Smaybee db->db_last_dirty = NULL; 2527ea8dc4b6Seschrock db->db_dirtycnt = 0; 2528744947dcSTom Erickson db->db_dnode_handle = dn->dn_handle; 2529ea8dc4b6Seschrock db->db_parent = parent; 2530ea8dc4b6Seschrock db->db_blkptr = blkptr; 2531fa9e4066Sahrens 2532bc9014e6SJustin Gibbs db->db_user = NULL; 2533d2058105SJustin T. Gibbs db->db_user_immediate_evict = FALSE; 2534d2058105SJustin T. Gibbs db->db_freed_in_flight = FALSE; 2535d2058105SJustin T. 
Gibbs db->db_pending_evict = FALSE; 2536ea8dc4b6Seschrock 25370a586ceaSMark Shellenbaum if (blkid == DMU_BONUS_BLKID) { 2538ea8dc4b6Seschrock ASSERT3P(parent, ==, dn->dn_dbuf); 253954811da5SToomas Soome db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - 25401934e92fSmaybee (dn->dn_nblkptr-1) * sizeof (blkptr_t); 25411934e92fSmaybee ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 25420a586ceaSMark Shellenbaum db->db.db_offset = DMU_BONUS_BLKID; 2543ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2544adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2545ea8dc4b6Seschrock /* the bonus dbuf is not placed in the hash table */ 25465a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2547ea8dc4b6Seschrock return (db); 25480a586ceaSMark Shellenbaum } else if (blkid == DMU_SPILL_BLKID) { 25490a586ceaSMark Shellenbaum db->db.db_size = (blkptr != NULL) ? 25500a586ceaSMark Shellenbaum BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 25510a586ceaSMark Shellenbaum db->db.db_offset = 0; 2552fa9e4066Sahrens } else { 2553fa9e4066Sahrens int blocksize = 255469962b56SMatthew Ahrens db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2555fa9e4066Sahrens db->db.db_size = blocksize; 2556fa9e4066Sahrens db->db.db_offset = db->db_blkid * blocksize; 2557fa9e4066Sahrens } 2558fa9e4066Sahrens 2559fa9e4066Sahrens /* 2560fa9e4066Sahrens * Hold the dn_dbufs_mtx while we get the new dbuf 2561fa9e4066Sahrens * in the hash table *and* added to the dbufs list. 2562fa9e4066Sahrens * This prevents a possible deadlock with someone 2563fa9e4066Sahrens * trying to look up this dbuf before its added to the 2564fa9e4066Sahrens * dn_dbufs list. 2565fa9e4066Sahrens */ 2566fa9e4066Sahrens mutex_enter(&dn->dn_dbufs_mtx); 2567ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2568fa9e4066Sahrens if ((odb = dbuf_hash_insert(db)) != NULL) { 2569fa9e4066Sahrens /* someone else inserted it first */ 2570dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2571fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 2572fa9e4066Sahrens return (odb); 2573fa9e4066Sahrens } 25740f6d88adSAlex Reece avl_add(&dn->dn_dbufs, db); 2575653af1b8SStephen Blinick 2576ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2577adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2578fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 25795a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2580fa9e4066Sahrens 2581fa9e4066Sahrens if (parent && parent != dn->dn_dbuf) 2582fa9e4066Sahrens dbuf_add_ref(parent, db); 2583fa9e4066Sahrens 2584ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2585e914ace2STim Schumacher zfs_refcount_count(&dn->dn_holds) > 0); 2586e914ace2STim Schumacher (void) zfs_refcount_add(&dn->dn_holds, db); 2587640c1670SJosef 'Jeff' Sipek atomic_inc_32(&dn->dn_dbufs_count); 2588fa9e4066Sahrens 2589fa9e4066Sahrens dprintf_dbuf(db, "db=%p\n", db); 2590fa9e4066Sahrens 2591fa9e4066Sahrens return (db); 2592fa9e4066Sahrens } 2593fa9e4066Sahrens 2594a2cdcdd2SPaul Dagnelie typedef struct dbuf_prefetch_arg { 2595a2cdcdd2SPaul Dagnelie spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2596a2cdcdd2SPaul Dagnelie zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2597a2cdcdd2SPaul Dagnelie int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. 
*/ 2598a2cdcdd2SPaul Dagnelie int dpa_curlevel; /* The current level that we're reading */ 2599dcbf3bd6SGeorge Wilson dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2600a2cdcdd2SPaul Dagnelie zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2601a2cdcdd2SPaul Dagnelie zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2602a2cdcdd2SPaul Dagnelie arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2603a2cdcdd2SPaul Dagnelie } dbuf_prefetch_arg_t; 2604a2cdcdd2SPaul Dagnelie 2605a2cdcdd2SPaul Dagnelie /* 2606a2cdcdd2SPaul Dagnelie * Actually issue the prefetch read for the block given. 2607a2cdcdd2SPaul Dagnelie */ 2608a2cdcdd2SPaul Dagnelie static void 2609a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2610a2cdcdd2SPaul Dagnelie { 2611a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2612a2cdcdd2SPaul Dagnelie return; 2613a2cdcdd2SPaul Dagnelie 2614eb633035STom Caputi int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2615a2cdcdd2SPaul Dagnelie arc_flags_t aflags = 2616a2cdcdd2SPaul Dagnelie dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2617a2cdcdd2SPaul Dagnelie 2618eb633035STom Caputi /* dnodes are always read as raw and then converted later */ 2619eb633035STom Caputi if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) && 2620eb633035STom Caputi dpa->dpa_curlevel == 0) 2621eb633035STom Caputi zio_flags |= ZIO_FLAG_RAW; 2622eb633035STom Caputi 2623a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2624a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2625a2cdcdd2SPaul Dagnelie ASSERT(dpa->dpa_zio != NULL); 2626a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2627eb633035STom Caputi dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb); 2628a2cdcdd2SPaul Dagnelie } 2629a2cdcdd2SPaul Dagnelie 2630a2cdcdd2SPaul Dagnelie /* 2631a2cdcdd2SPaul Dagnelie * Called when an indirect block above our prefetch target is read in. This 2632a2cdcdd2SPaul Dagnelie * will either read in the next indirect block down the tree or issue the actual 2633a2cdcdd2SPaul Dagnelie * prefetch if the next block down is our target. 2634a2cdcdd2SPaul Dagnelie */ 2635eb633035STom Caputi /* ARGSUSED */ 2636a2cdcdd2SPaul Dagnelie static void 2637a3874b8bSToomas Soome dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, 2638a3874b8bSToomas Soome const blkptr_t *iobp, arc_buf_t *abuf, void *private) 2639a2cdcdd2SPaul Dagnelie { 2640a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = private; 2641a2cdcdd2SPaul Dagnelie 2642a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2643a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_curlevel, >, 0); 2644dcbf3bd6SGeorge Wilson 2645fa98e487SMatthew Ahrens if (abuf == NULL) { 2646fa98e487SMatthew Ahrens ASSERT(zio == NULL || zio->io_error != 0); 2647fa98e487SMatthew Ahrens kmem_free(dpa, sizeof (*dpa)); 2648fa98e487SMatthew Ahrens return; 2649fa98e487SMatthew Ahrens } 2650fa98e487SMatthew Ahrens ASSERT(zio == NULL || zio->io_error == 0); 2651fa98e487SMatthew Ahrens 2652dcbf3bd6SGeorge Wilson /* 2653dcbf3bd6SGeorge Wilson * The dpa_dnode is only valid if we are called with a NULL 2654dcbf3bd6SGeorge Wilson * zio. This indicates that the arc_read() returned without 2655dcbf3bd6SGeorge Wilson * first calling zio_read() to issue a physical read. 
Once 2656dcbf3bd6SGeorge Wilson * a physical read is made the dpa_dnode must be invalidated 2657dcbf3bd6SGeorge Wilson * as the locks guarding it may have been dropped. If the 2658dcbf3bd6SGeorge Wilson * dpa_dnode is still valid, then we want to add it to the dbuf 2659dcbf3bd6SGeorge Wilson * cache. To do so, we must hold the dbuf associated with the block 2660dcbf3bd6SGeorge Wilson * we just prefetched, read its contents so that we associate it 2661dcbf3bd6SGeorge Wilson * with an arc_buf_t, and then release it. 2662dcbf3bd6SGeorge Wilson */ 2663a2cdcdd2SPaul Dagnelie if (zio != NULL) { 2664a2cdcdd2SPaul Dagnelie ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2665eb633035STom Caputi if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) { 2666dcbf3bd6SGeorge Wilson ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2667dcbf3bd6SGeorge Wilson } else { 2668dcbf3bd6SGeorge Wilson ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2669dcbf3bd6SGeorge Wilson } 2670a2cdcdd2SPaul Dagnelie ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2671dcbf3bd6SGeorge Wilson 2672dcbf3bd6SGeorge Wilson dpa->dpa_dnode = NULL; 2673dcbf3bd6SGeorge Wilson } else if (dpa->dpa_dnode != NULL) { 2674dcbf3bd6SGeorge Wilson uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2675dcbf3bd6SGeorge Wilson (dpa->dpa_epbs * (dpa->dpa_curlevel - 2676dcbf3bd6SGeorge Wilson dpa->dpa_zb.zb_level)); 2677dcbf3bd6SGeorge Wilson dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2678dcbf3bd6SGeorge Wilson dpa->dpa_curlevel, curblkid, FTAG); 2679dcbf3bd6SGeorge Wilson (void) dbuf_read(db, NULL, 2680dcbf3bd6SGeorge Wilson DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2681dcbf3bd6SGeorge Wilson dbuf_rele(db, FTAG); 2682a2cdcdd2SPaul Dagnelie } 2683a2cdcdd2SPaul Dagnelie 2684a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel--; 2685a2cdcdd2SPaul Dagnelie uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2686a2cdcdd2SPaul Dagnelie (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2687a2cdcdd2SPaul Dagnelie blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2688a2cdcdd2SPaul Dagnelie P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2689a3874b8bSToomas Soome 2690fa98e487SMatthew Ahrens if (BP_IS_HOLE(bp)) { 2691a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2692a2cdcdd2SPaul Dagnelie } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2693a2cdcdd2SPaul Dagnelie ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2694a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dpa, bp); 2695a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2696a2cdcdd2SPaul Dagnelie } else { 2697a2cdcdd2SPaul Dagnelie arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2698a2cdcdd2SPaul Dagnelie zbookmark_phys_t zb; 2699a2cdcdd2SPaul Dagnelie 270027295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 270127295216Sbenrubson if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 270227295216Sbenrubson iter_aflags |= ARC_FLAG_L2CACHE; 270327295216Sbenrubson 2704a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2705a2cdcdd2SPaul Dagnelie 2706a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2707a2cdcdd2SPaul Dagnelie dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2708a2cdcdd2SPaul Dagnelie 2709a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2710a2cdcdd2SPaul Dagnelie bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2711a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2712a2cdcdd2SPaul Dagnelie &iter_aflags, &zb); 2713a2cdcdd2SPaul Dagnelie } 2714dcbf3bd6SGeorge Wilson 2715dcbf3bd6SGeorge 
Wilson arc_buf_destroy(abuf, private); 2716a2cdcdd2SPaul Dagnelie } 2717a2cdcdd2SPaul Dagnelie 2718a2cdcdd2SPaul Dagnelie /* 2719a2cdcdd2SPaul Dagnelie * Issue prefetch reads for the given block on the given level. If the indirect 2720a2cdcdd2SPaul Dagnelie * blocks above that block are not in memory, we will read them in 2721a2cdcdd2SPaul Dagnelie * asynchronously. As a result, this call never blocks waiting for a read to 2722eb633035STom Caputi * complete. Note that the prefetch might fail if the dataset is encrypted and 2723eb633035STom Caputi * the encryption key is unmapped before the IO completes. 2724a2cdcdd2SPaul Dagnelie */ 2725fa9e4066Sahrens void 2726a2cdcdd2SPaul Dagnelie dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2727a2cdcdd2SPaul Dagnelie arc_flags_t aflags) 2728fa9e4066Sahrens { 2729a2cdcdd2SPaul Dagnelie blkptr_t bp; 2730a2cdcdd2SPaul Dagnelie int epbs, nlevels, curlevel; 2731a2cdcdd2SPaul Dagnelie uint64_t curblkid; 2732fa9e4066Sahrens 27330a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2734fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2735fa9e4066Sahrens 2736cf6106c8SMatthew Ahrens if (blkid > dn->dn_maxblkid) 2737cf6106c8SMatthew Ahrens return; 2738cf6106c8SMatthew Ahrens 2739*9704bf7fSPaul Dagnelie if (level == 0 && dnode_block_freed(dn, blkid)) 2740fa9e4066Sahrens return; 2741fa9e4066Sahrens 2742a2cdcdd2SPaul Dagnelie /* 2743a2cdcdd2SPaul Dagnelie * This dnode hasn't been written to disk yet, so there's nothing to 2744a2cdcdd2SPaul Dagnelie * prefetch. 2745a2cdcdd2SPaul Dagnelie */ 2746a2cdcdd2SPaul Dagnelie nlevels = dn->dn_phys->dn_nlevels; 2747a2cdcdd2SPaul Dagnelie if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2748a2cdcdd2SPaul Dagnelie return; 2749a2cdcdd2SPaul Dagnelie 2750a2cdcdd2SPaul Dagnelie epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2751a2cdcdd2SPaul Dagnelie if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2752a2cdcdd2SPaul Dagnelie return; 2753a2cdcdd2SPaul Dagnelie 2754a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2755a2cdcdd2SPaul Dagnelie level, blkid); 2756a2cdcdd2SPaul Dagnelie if (db != NULL) { 2757a2cdcdd2SPaul Dagnelie mutex_exit(&db->db_mtx); 27589e9c486fSGeorge Wilson /* 2759a2cdcdd2SPaul Dagnelie * This dbuf already exists. It is either CACHED, or 2760a2cdcdd2SPaul Dagnelie * (we assume) about to be read or filled. 27619e9c486fSGeorge Wilson */ 27629e9c486fSGeorge Wilson return; 2763fa9e4066Sahrens } 2764fa9e4066Sahrens 2765a2cdcdd2SPaul Dagnelie /* 2766a2cdcdd2SPaul Dagnelie * Find the closest ancestor (indirect block) of the target block 2767a2cdcdd2SPaul Dagnelie * that is present in the cache. In this indirect block, we will 2768a2cdcdd2SPaul Dagnelie * find the bp that is at curlevel, curblkid. 
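	 *
	 * A worked example (hypothetical numbers): with epbs = 10, a
	 * request for level = 0, blkid = 5000 first probes the L1
	 * indirect at blkid 5000 >> 10 = 4; if that is not cached, the
	 * L2 at 4 >> 10 = 0; and the walk always terminates at
	 * nlevels - 1, whose bp can be read directly from
	 * dn_phys->dn_blkptr[].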
2769a2cdcdd2SPaul Dagnelie */ 2770a2cdcdd2SPaul Dagnelie curlevel = level; 2771a2cdcdd2SPaul Dagnelie curblkid = blkid; 2772a2cdcdd2SPaul Dagnelie while (curlevel < nlevels - 1) { 2773a2cdcdd2SPaul Dagnelie int parent_level = curlevel + 1; 2774a2cdcdd2SPaul Dagnelie uint64_t parent_blkid = curblkid >> epbs; 2775a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db; 2776a2cdcdd2SPaul Dagnelie 2777a2cdcdd2SPaul Dagnelie if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2778a2cdcdd2SPaul Dagnelie FALSE, TRUE, FTAG, &db) == 0) { 2779a2cdcdd2SPaul Dagnelie blkptr_t *bpp = db->db_buf->b_data; 2780a2cdcdd2SPaul Dagnelie bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2781a2cdcdd2SPaul Dagnelie dbuf_rele(db, FTAG); 2782a2cdcdd2SPaul Dagnelie break; 2783a2cdcdd2SPaul Dagnelie } 2784b24ab676SJeff Bonwick 2785a2cdcdd2SPaul Dagnelie curlevel = parent_level; 2786a2cdcdd2SPaul Dagnelie curblkid = parent_blkid; 2787a2cdcdd2SPaul Dagnelie } 2788ea8dc4b6Seschrock 2789a2cdcdd2SPaul Dagnelie if (curlevel == nlevels - 1) { 2790a2cdcdd2SPaul Dagnelie /* No cached indirect blocks found. */ 2791a2cdcdd2SPaul Dagnelie ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2792a2cdcdd2SPaul Dagnelie bp = dn->dn_phys->dn_blkptr[curblkid]; 2793fa9e4066Sahrens } 2794a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(&bp)) 2795a2cdcdd2SPaul Dagnelie return; 2796a2cdcdd2SPaul Dagnelie 2797a2cdcdd2SPaul Dagnelie ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2798a2cdcdd2SPaul Dagnelie 2799a2cdcdd2SPaul Dagnelie zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2800a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL); 2801a2cdcdd2SPaul Dagnelie 2802a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2803a2cdcdd2SPaul Dagnelie dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2804a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2805a2cdcdd2SPaul Dagnelie dn->dn_object, level, blkid); 2806a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel = curlevel; 2807a2cdcdd2SPaul Dagnelie dpa->dpa_prio = prio; 2808a2cdcdd2SPaul Dagnelie dpa->dpa_aflags = aflags; 2809a2cdcdd2SPaul Dagnelie dpa->dpa_spa = dn->dn_objset->os_spa; 2810dcbf3bd6SGeorge Wilson dpa->dpa_dnode = dn; 2811a2cdcdd2SPaul Dagnelie dpa->dpa_epbs = epbs; 2812a2cdcdd2SPaul Dagnelie dpa->dpa_zio = pio; 2813a2cdcdd2SPaul Dagnelie 281427295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 281527295216Sbenrubson if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 281627295216Sbenrubson dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 281727295216Sbenrubson 2818a2cdcdd2SPaul Dagnelie /* 2819a2cdcdd2SPaul Dagnelie * If we have the indirect just above us, no need to do the asynchronous 2820a2cdcdd2SPaul Dagnelie * prefetch chain; we'll just run the last step ourselves. If we're at 2821a2cdcdd2SPaul Dagnelie * a higher level, though, we want to issue the prefetches for all the 2822a2cdcdd2SPaul Dagnelie * indirect blocks asynchronously, so we can go on with whatever we were 2823a2cdcdd2SPaul Dagnelie * doing. 
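	 *
	 * For example (illustrative): with level = 0 and the closest
	 * cached ancestor at curlevel = 2, the chain is
	 *
	 *	arc_read(L2 bp) -> dbuf_prefetch_indirect_done() issues
	 *	arc_read(L1 bp) -> dbuf_prefetch_indirect_done() reaches
	 *	dpa_curlevel == level and calls dbuf_issue_final_prefetch()
	 *	on the L0 bp,
	 *
	 * all without the original caller ever blocking.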
2824a2cdcdd2SPaul Dagnelie 	 */
2825a2cdcdd2SPaul Dagnelie 	if (curlevel == level) {
2826a2cdcdd2SPaul Dagnelie 		ASSERT3U(curblkid, ==, blkid);
2827a2cdcdd2SPaul Dagnelie 		dbuf_issue_final_prefetch(dpa, &bp);
2828a2cdcdd2SPaul Dagnelie 		kmem_free(dpa, sizeof (*dpa));
2829a2cdcdd2SPaul Dagnelie 	} else {
2830a2cdcdd2SPaul Dagnelie 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2831a2cdcdd2SPaul Dagnelie 		zbookmark_phys_t zb;
2832a2cdcdd2SPaul Dagnelie 
283327295216Sbenrubson 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
283427295216Sbenrubson 		if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
283527295216Sbenrubson 			iter_aflags |= ARC_FLAG_L2CACHE;
283627295216Sbenrubson 
2837a2cdcdd2SPaul Dagnelie 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2838a2cdcdd2SPaul Dagnelie 		    dn->dn_object, curlevel, curblkid);
2839a2cdcdd2SPaul Dagnelie 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2840a2cdcdd2SPaul Dagnelie 		    &bp, dbuf_prefetch_indirect_done, dpa, prio,
2841a2cdcdd2SPaul Dagnelie 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2842a2cdcdd2SPaul Dagnelie 		    &iter_aflags, &zb);
2843a2cdcdd2SPaul Dagnelie 	}
2844a2cdcdd2SPaul Dagnelie 	/*
2845a2cdcdd2SPaul Dagnelie 	 * We use pio here instead of dpa_zio since it's possible that
2846a2cdcdd2SPaul Dagnelie 	 * dpa may have already been freed.
2847a2cdcdd2SPaul Dagnelie 	 */
2848a2cdcdd2SPaul Dagnelie 	zio_nowait(pio);
2849fa9e4066Sahrens }
2850fa9e4066Sahrens 
2851eb633035STom Caputi /*
2852eb633035STom Caputi  * Helper function for dbuf_hold_impl() to copy a buffer. Handles
2853eb633035STom Caputi  * the case of encrypted, compressed and uncompressed buffers by
2854eb633035STom Caputi  * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
2855eb633035STom Caputi  * arc_alloc_compressed_buf() or arc_alloc_buf().
2856eb633035STom Caputi  *
2857eb633035STom Caputi  * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl(). 
2858eb633035STom Caputi */ 2859eb633035STom Caputi static void 2860eb633035STom Caputi dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db, dbuf_dirty_record_t *dr) 2861eb633035STom Caputi { 2862eb633035STom Caputi arc_buf_t *data = dr->dt.dl.dr_data; 2863eb633035STom Caputi enum zio_compress compress_type = arc_get_compression(data); 2864eb633035STom Caputi 2865eb633035STom Caputi if (arc_is_encrypted(data)) { 2866eb633035STom Caputi boolean_t byteorder; 2867eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 2868eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 2869eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN]; 2870eb633035STom Caputi 2871eb633035STom Caputi arc_get_raw_params(data, &byteorder, salt, iv, mac); 2872eb633035STom Caputi dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db, 2873eb633035STom Caputi dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac, 2874eb633035STom Caputi dn->dn_type, arc_buf_size(data), arc_buf_lsize(data), 2875eb633035STom Caputi compress_type)); 2876eb633035STom Caputi } else if (compress_type != ZIO_COMPRESS_OFF) { 2877eb633035STom Caputi dbuf_set_data(db, arc_alloc_compressed_buf( 2878eb633035STom Caputi dn->dn_objset->os_spa, db, arc_buf_size(data), 2879eb633035STom Caputi arc_buf_lsize(data), compress_type)); 2880eb633035STom Caputi } else { 2881eb633035STom Caputi dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db, 2882eb633035STom Caputi DBUF_GET_BUFC_TYPE(db), db->db.db_size)); 2883eb633035STom Caputi } 2884eb633035STom Caputi 2885*9704bf7fSPaul Dagnelie rw_enter(&db->db_rwlock, RW_WRITER); 2886eb633035STom Caputi bcopy(data->b_data, db->db.db_data, arc_buf_size(data)); 2887*9704bf7fSPaul Dagnelie rw_exit(&db->db_rwlock); 2888eb633035STom Caputi } 2889eb633035STom Caputi 2890fa9e4066Sahrens /* 2891fa9e4066Sahrens * Returns with db_holds incremented, and db_mtx not held. 2892fa9e4066Sahrens * Note: dn_struct_rwlock must be held. 2893fa9e4066Sahrens */ 2894fa9e4066Sahrens int 2895a2cdcdd2SPaul Dagnelie dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2896a2cdcdd2SPaul Dagnelie boolean_t fail_sparse, boolean_t fail_uncached, 2897fa9e4066Sahrens void *tag, dmu_buf_impl_t **dbp) 2898fa9e4066Sahrens { 2899fa9e4066Sahrens dmu_buf_impl_t *db, *parent = NULL; 2900fa9e4066Sahrens 29010a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2902fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2903fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, level); 2904fa9e4066Sahrens 2905fa9e4066Sahrens *dbp = NULL; 2906ea8dc4b6Seschrock top: 2907fa9e4066Sahrens /* dbuf_find() returns with db_mtx held */ 2908e57a022bSJustin T. 
Gibbs db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
2909fa9e4066Sahrens 
2910fa9e4066Sahrens 	if (db == NULL) {
2911fa9e4066Sahrens 		blkptr_t *bp = NULL;
2912fa9e4066Sahrens 		int err;
2913fa9e4066Sahrens 
2914a2cdcdd2SPaul Dagnelie 		if (fail_uncached)
2915a2cdcdd2SPaul Dagnelie 			return (SET_ERROR(ENOENT));
2916a2cdcdd2SPaul Dagnelie 
2917c543ec06Sahrens 		ASSERT3P(parent, ==, NULL);
2918fa9e4066Sahrens 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
2919fa9e4066Sahrens 		if (fail_sparse) {
2920fa9e4066Sahrens 			if (err == 0 && bp && BP_IS_HOLE(bp))
2921be6fd75aSMatthew Ahrens 				err = SET_ERROR(ENOENT);
2922fa9e4066Sahrens 			if (err) {
2923c543ec06Sahrens 				if (parent)
2924ea8dc4b6Seschrock 					dbuf_rele(parent, NULL);
2925fa9e4066Sahrens 				return (err);
2926fa9e4066Sahrens 			}
2927fa9e4066Sahrens 		}
2928ea8dc4b6Seschrock 		if (err && err != ENOENT)
2929ea8dc4b6Seschrock 			return (err);
2930fa9e4066Sahrens 		db = dbuf_create(dn, level, blkid, parent, bp);
2931fa9e4066Sahrens 	}
2932fa9e4066Sahrens 
2933a2cdcdd2SPaul Dagnelie 	if (fail_uncached && db->db_state != DB_CACHED) {
2934a2cdcdd2SPaul Dagnelie 		mutex_exit(&db->db_mtx);
2935a2cdcdd2SPaul Dagnelie 		return (SET_ERROR(ENOENT));
2936a2cdcdd2SPaul Dagnelie 	}
2937a2cdcdd2SPaul Dagnelie 
29387b38fab6SAlexander Motin 	if (db->db_buf != NULL) {
29397b38fab6SAlexander Motin 		arc_buf_access(db->db_buf);
2940ea8dc4b6Seschrock 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
29417b38fab6SAlexander Motin 	}
2942ea8dc4b6Seschrock 
2943ea8dc4b6Seschrock 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
2944ea8dc4b6Seschrock 
2945fa9e4066Sahrens 	/*
2946c717a561Smaybee 	 * If this buffer is currently syncing out, and we are
2947c717a561Smaybee 	 * still referencing it from db_data, we need to make a copy
2948c717a561Smaybee 	 * of it in case we decide we want to dirty it again in this txg. 
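	 *
	 * For example (an assumed timeline): the dirty record for the
	 * syncing txg still shares db_buf with us; if this hold then
	 * modified db_data in place, the contents (and checksum) of the
	 * block being written out could change mid-write.  dbuf_hold_copy()
	 * below gives this hold its own copy instead.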
2949fa9e4066Sahrens */ 29500a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2951ea8dc4b6Seschrock dn->dn_object != DMU_META_DNODE_OBJECT && 2952c717a561Smaybee db->db_state == DB_CACHED && db->db_data_pending) { 2953c717a561Smaybee dbuf_dirty_record_t *dr = db->db_data_pending; 2954eb633035STom Caputi if (dr->dt.dl.dr_data == db->db_buf) 2955eb633035STom Caputi dbuf_hold_copy(dn, db, dr); 2956fa9e4066Sahrens } 2957fa9e4066Sahrens 2958dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) { 2959e914ace2STim Schumacher ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2960adb52d92SMatthew Ahrens ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2961adb52d92SMatthew Ahrens db->db_caching_status == DB_DBUF_METADATA_CACHE); 2962adb52d92SMatthew Ahrens 2963adb52d92SMatthew Ahrens multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2964e914ace2STim Schumacher (void) zfs_refcount_remove_many( 2965adb52d92SMatthew Ahrens &dbuf_caches[db->db_caching_status].size, 2966dcbf3bd6SGeorge Wilson db->db.db_size, db); 2967adb52d92SMatthew Ahrens 2968adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2969dcbf3bd6SGeorge Wilson } 2970e914ace2STim Schumacher (void) zfs_refcount_add(&db->db_holds, tag); 29719c9dc39aSek DBUF_VERIFY(db); 2972fa9e4066Sahrens mutex_exit(&db->db_mtx); 2973fa9e4066Sahrens 2974fa9e4066Sahrens /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2975c543ec06Sahrens if (parent) 2976ea8dc4b6Seschrock dbuf_rele(parent, NULL); 2977fa9e4066Sahrens 2978744947dcSTom Erickson ASSERT3P(DB_DNODE(db), ==, dn); 2979fa9e4066Sahrens ASSERT3U(db->db_blkid, ==, blkid); 2980fa9e4066Sahrens ASSERT3U(db->db_level, ==, level); 2981fa9e4066Sahrens *dbp = db; 2982fa9e4066Sahrens 2983fa9e4066Sahrens return (0); 2984fa9e4066Sahrens } 2985fa9e4066Sahrens 2986fa9e4066Sahrens dmu_buf_impl_t * 2987ea8dc4b6Seschrock dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2988fa9e4066Sahrens { 2989a2cdcdd2SPaul Dagnelie return (dbuf_hold_level(dn, 0, blkid, tag)); 2990fa9e4066Sahrens } 2991fa9e4066Sahrens 2992fa9e4066Sahrens dmu_buf_impl_t * 2993fa9e4066Sahrens dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2994fa9e4066Sahrens { 2995fa9e4066Sahrens dmu_buf_impl_t *db; 2996a2cdcdd2SPaul Dagnelie int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2997ea8dc4b6Seschrock return (err ? 
NULL : db); 2998fa9e4066Sahrens } 2999fa9e4066Sahrens 30001934e92fSmaybee void 3001ea8dc4b6Seschrock dbuf_create_bonus(dnode_t *dn) 3002fa9e4066Sahrens { 3003ea8dc4b6Seschrock ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 3004ea8dc4b6Seschrock 3005ea8dc4b6Seschrock ASSERT(dn->dn_bonus == NULL); 30060a586ceaSMark Shellenbaum dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 30070a586ceaSMark Shellenbaum } 30080a586ceaSMark Shellenbaum 30090a586ceaSMark Shellenbaum int 30100a586ceaSMark Shellenbaum dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 30110a586ceaSMark Shellenbaum { 30120a586ceaSMark Shellenbaum dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3013744947dcSTom Erickson 30140a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 3015be6fd75aSMatthew Ahrens return (SET_ERROR(ENOTSUP)); 30160a586ceaSMark Shellenbaum if (blksz == 0) 30170a586ceaSMark Shellenbaum blksz = SPA_MINBLOCKSIZE; 3018b5152584SMatthew Ahrens ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 3019b5152584SMatthew Ahrens blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 30200a586ceaSMark Shellenbaum 30210a586ceaSMark Shellenbaum dbuf_new_size(db, blksz, tx); 30220a586ceaSMark Shellenbaum 30230a586ceaSMark Shellenbaum return (0); 30240a586ceaSMark Shellenbaum } 30250a586ceaSMark Shellenbaum 30260a586ceaSMark Shellenbaum void 30270a586ceaSMark Shellenbaum dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 30280a586ceaSMark Shellenbaum { 30290a586ceaSMark Shellenbaum dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 3030fa9e4066Sahrens } 3031fa9e4066Sahrens 3032ea8dc4b6Seschrock #pragma weak dmu_buf_add_ref = dbuf_add_ref 3033fa9e4066Sahrens void 3034fa9e4066Sahrens dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 3035fa9e4066Sahrens { 3036e914ace2STim Schumacher int64_t holds = zfs_refcount_add(&db->db_holds, tag); 3037dcbf3bd6SGeorge Wilson ASSERT3S(holds, >, 1); 3038fa9e4066Sahrens } 3039fa9e4066Sahrens 3040e57a022bSJustin T. Gibbs #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 3041e57a022bSJustin T. Gibbs boolean_t 3042e57a022bSJustin T. Gibbs dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 3043e57a022bSJustin T. Gibbs void *tag) 3044e57a022bSJustin T. Gibbs { 3045e57a022bSJustin T. Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3046e57a022bSJustin T. Gibbs dmu_buf_impl_t *found_db; 3047e57a022bSJustin T. Gibbs boolean_t result = B_FALSE; 3048e57a022bSJustin T. Gibbs 3049e57a022bSJustin T. Gibbs if (db->db_blkid == DMU_BONUS_BLKID) 3050e57a022bSJustin T. Gibbs found_db = dbuf_find_bonus(os, obj); 3051e57a022bSJustin T. Gibbs else 3052e57a022bSJustin T. Gibbs found_db = dbuf_find(os, obj, 0, blkid); 3053e57a022bSJustin T. Gibbs 3054e57a022bSJustin T. Gibbs if (found_db != NULL) { 3055e57a022bSJustin T. Gibbs if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 3056e914ace2STim Schumacher (void) zfs_refcount_add(&db->db_holds, tag); 3057e57a022bSJustin T. Gibbs result = B_TRUE; 3058e57a022bSJustin T. Gibbs } 3059e57a022bSJustin T. Gibbs mutex_exit(&db->db_mtx); 3060e57a022bSJustin T. Gibbs } 3061e57a022bSJustin T. Gibbs return (result); 3062e57a022bSJustin T. Gibbs } 3063e57a022bSJustin T. Gibbs 3064744947dcSTom Erickson /* 3065744947dcSTom Erickson * If you call dbuf_rele() you had better not be referencing the dnode handle 3066744947dcSTom Erickson * unless you have some other direct or indirect hold on the dnode. 
(An indirect 3067744947dcSTom Erickson * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 3068744947dcSTom Erickson * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 3069744947dcSTom Erickson * dnode's parent dbuf evicting its dnode handles. 3070744947dcSTom Erickson */ 3071fa9e4066Sahrens void 3072ea8dc4b6Seschrock dbuf_rele(dmu_buf_impl_t *db, void *tag) 3073b24ab676SJeff Bonwick { 3074b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 3075c2919acbSMatthew Ahrens dbuf_rele_and_unlock(db, tag, B_FALSE); 3076b24ab676SJeff Bonwick } 3077b24ab676SJeff Bonwick 307843466aaeSMax Grossman void 307943466aaeSMax Grossman dmu_buf_rele(dmu_buf_t *db, void *tag) 308043466aaeSMax Grossman { 308143466aaeSMax Grossman dbuf_rele((dmu_buf_impl_t *)db, tag); 308243466aaeSMax Grossman } 308343466aaeSMax Grossman 3084b24ab676SJeff Bonwick /* 3085b24ab676SJeff Bonwick * dbuf_rele() for an already-locked dbuf. This is necessary to allow 3086c2919acbSMatthew Ahrens * db_dirtycnt and db_holds to be updated atomically. The 'evicting' 3087c2919acbSMatthew Ahrens * argument should be set if we are already in the dbuf-evicting code 3088c2919acbSMatthew Ahrens * path, in which case we don't want to recursively evict. This allows us to 3089c2919acbSMatthew Ahrens * avoid deeply nested stacks that would have a call flow similar to this: 3090c2919acbSMatthew Ahrens * 3091c2919acbSMatthew Ahrens * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() 3092c2919acbSMatthew Ahrens * ^ | 3093c2919acbSMatthew Ahrens * | | 3094c2919acbSMatthew Ahrens * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ 3095c2919acbSMatthew Ahrens * 3096b24ab676SJeff Bonwick */ 3097b24ab676SJeff Bonwick void 3098c2919acbSMatthew Ahrens dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting) 3099fa9e4066Sahrens { 3100fa9e4066Sahrens int64_t holds; 3101fa9e4066Sahrens 3102b24ab676SJeff Bonwick ASSERT(MUTEX_HELD(&db->db_mtx)); 31039c9dc39aSek DBUF_VERIFY(db); 3104fa9e4066Sahrens 3105744947dcSTom Erickson /* 3106744947dcSTom Erickson * Remove the reference to the dbuf before removing its hold on the 3107744947dcSTom Erickson * dnode so we can guarantee in dnode_move() that a referenced bonus 3108744947dcSTom Erickson * buffer has a corresponding dnode hold. 3109744947dcSTom Erickson */ 3110e914ace2STim Schumacher holds = zfs_refcount_remove(&db->db_holds, tag); 3111ea8dc4b6Seschrock ASSERT(holds >= 0); 3112ea8dc4b6Seschrock 3113c717a561Smaybee /* 3114c717a561Smaybee * We can't freeze indirects if there is a possibility that they 3115c717a561Smaybee * may be modified in the current syncing context. 3116c717a561Smaybee */ 3117dcbf3bd6SGeorge Wilson if (db->db_buf != NULL && 3118dcbf3bd6SGeorge Wilson holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 31196b4acc8bSahrens arc_buf_freeze(db->db_buf); 3120dcbf3bd6SGeorge Wilson } 31216b4acc8bSahrens 3122ea8dc4b6Seschrock if (holds == db->db_dirtycnt && 3123d2058105SJustin T. Gibbs db->db_level == 0 && db->db_user_immediate_evict) 3124ea8dc4b6Seschrock dbuf_evict_user(db); 3125fa9e4066Sahrens 3126fa9e4066Sahrens if (holds == 0) { 31270a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 3128cd485b49SJustin T. Gibbs dnode_t *dn; 3129d2058105SJustin T. Gibbs boolean_t evict_dbuf = db->db_pending_evict; 3130744947dcSTom Erickson 3131744947dcSTom Erickson /* 3132cd485b49SJustin T. Gibbs * If the dnode moves here, we cannot cross this 3133cd485b49SJustin T. Gibbs * barrier until the move completes. 
3134744947dcSTom Erickson */ 3135744947dcSTom Erickson DB_DNODE_ENTER(db); 3136cd485b49SJustin T. Gibbs 3137cd485b49SJustin T. Gibbs dn = DB_DNODE(db); 3138cd485b49SJustin T. Gibbs atomic_dec_32(&dn->dn_dbufs_count); 3139cd485b49SJustin T. Gibbs 3140cd485b49SJustin T. Gibbs /* 3141cd485b49SJustin T. Gibbs * Decrementing the dbuf count means that the bonus 3142cd485b49SJustin T. Gibbs * buffer's dnode hold is no longer discounted in 3143cd485b49SJustin T. Gibbs * dnode_move(). The dnode cannot move until after 3144d2058105SJustin T. Gibbs * the dnode_rele() below. 3145cd485b49SJustin T. Gibbs */ 3146744947dcSTom Erickson DB_DNODE_EXIT(db); 3147cd485b49SJustin T. Gibbs 3148cd485b49SJustin T. Gibbs /* 3149cd485b49SJustin T. Gibbs * Do not reference db after its lock is dropped. 3150cd485b49SJustin T. Gibbs * Another thread may evict it. 3151cd485b49SJustin T. Gibbs */ 3152cd485b49SJustin T. Gibbs mutex_exit(&db->db_mtx); 3153cd485b49SJustin T. Gibbs 3154d2058105SJustin T. Gibbs if (evict_dbuf) 3155cd485b49SJustin T. Gibbs dnode_evict_bonus(dn); 3156d2058105SJustin T. Gibbs 3157d2058105SJustin T. Gibbs dnode_rele(dn, db); 3158ea8dc4b6Seschrock } else if (db->db_buf == NULL) { 3159ea8dc4b6Seschrock /* 3160ea8dc4b6Seschrock * This is a special case: we never associated this 3161ea8dc4b6Seschrock * dbuf with any data allocated from the ARC. 3162ea8dc4b6Seschrock */ 316382c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || 316482c9918fSTim Haley db->db_state == DB_NOFILL); 3165dcbf3bd6SGeorge Wilson dbuf_destroy(db); 31666b4acc8bSahrens } else if (arc_released(db->db_buf)) { 3167ea8dc4b6Seschrock /* 3168ea8dc4b6Seschrock * This dbuf has anonymous data associated with it. 3169ea8dc4b6Seschrock */ 3170dcbf3bd6SGeorge Wilson dbuf_destroy(db); 3171ea8dc4b6Seschrock } else { 3172dcbf3bd6SGeorge Wilson boolean_t do_arc_evict = B_FALSE; 3173dcbf3bd6SGeorge Wilson blkptr_t bp; 3174dcbf3bd6SGeorge Wilson spa_t *spa = dmu_objset_spa(db->db_objset); 3175dcbf3bd6SGeorge Wilson 3176dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) && 3177dcbf3bd6SGeorge Wilson db->db_blkptr != NULL && 3178dcbf3bd6SGeorge Wilson !BP_IS_HOLE(db->db_blkptr) && 3179dcbf3bd6SGeorge Wilson !BP_IS_EMBEDDED(db->db_blkptr)) { 3180dcbf3bd6SGeorge Wilson do_arc_evict = B_TRUE; 3181dcbf3bd6SGeorge Wilson bp = *db->db_blkptr; 3182dcbf3bd6SGeorge Wilson } 31839253d63dSGeorge Wilson 3184dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) || 3185dcbf3bd6SGeorge Wilson db->db_pending_evict) { 3186dcbf3bd6SGeorge Wilson dbuf_destroy(db); 3187dcbf3bd6SGeorge Wilson } else if (!multilist_link_active(&db->db_cache_link)) { 3188adb52d92SMatthew Ahrens ASSERT3U(db->db_caching_status, ==, 3189adb52d92SMatthew Ahrens DB_NO_CACHE); 3190adb52d92SMatthew Ahrens 3191adb52d92SMatthew Ahrens dbuf_cached_state_t dcs = 3192adb52d92SMatthew Ahrens dbuf_include_in_metadata_cache(db) ? 
3193adb52d92SMatthew Ahrens DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; 3194adb52d92SMatthew Ahrens db->db_caching_status = dcs; 3195adb52d92SMatthew Ahrens 3196adb52d92SMatthew Ahrens multilist_insert(dbuf_caches[dcs].cache, db); 3197e914ace2STim Schumacher (void) zfs_refcount_add_many( 3198e914ace2STim Schumacher &dbuf_caches[dcs].size, db->db.db_size, db); 31993baa08fcSek mutex_exit(&db->db_mtx); 3200dcbf3bd6SGeorge Wilson 3201c2919acbSMatthew Ahrens if (db->db_caching_status == DB_DBUF_CACHE && 3202c2919acbSMatthew Ahrens !evicting) { 3203adb52d92SMatthew Ahrens dbuf_evict_notify(); 3204adb52d92SMatthew Ahrens } 3205bbfa8ea8SMatthew Ahrens } 3206dcbf3bd6SGeorge Wilson 3207dcbf3bd6SGeorge Wilson if (do_arc_evict) 3208dcbf3bd6SGeorge Wilson arc_freed(spa, &bp); 3209ea8dc4b6Seschrock } 3210fa9e4066Sahrens } else { 3211fa9e4066Sahrens mutex_exit(&db->db_mtx); 3212fa9e4066Sahrens } 3213dcbf3bd6SGeorge Wilson 3214fa9e4066Sahrens } 3215fa9e4066Sahrens 3216fa9e4066Sahrens #pragma weak dmu_buf_refcount = dbuf_refcount 3217fa9e4066Sahrens uint64_t 3218fa9e4066Sahrens dbuf_refcount(dmu_buf_impl_t *db) 3219fa9e4066Sahrens { 3220e914ace2STim Schumacher return (zfs_refcount_count(&db->db_holds)); 3221fa9e4066Sahrens } 3222fa9e4066Sahrens 3223eb633035STom Caputi uint64_t 3224eb633035STom Caputi dmu_buf_user_refcount(dmu_buf_t *db_fake) 3225eb633035STom Caputi { 3226eb633035STom Caputi uint64_t holds; 3227eb633035STom Caputi dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3228eb633035STom Caputi 3229eb633035STom Caputi mutex_enter(&db->db_mtx); 3230eb633035STom Caputi ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt); 3231eb633035STom Caputi holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt; 3232eb633035STom Caputi mutex_exit(&db->db_mtx); 3233eb633035STom Caputi 3234eb633035STom Caputi return (holds); 3235eb633035STom Caputi } 3236eb633035STom Caputi 3237fa9e4066Sahrens void * 3238bc9014e6SJustin Gibbs dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 3239bc9014e6SJustin Gibbs dmu_buf_user_t *new_user) 3240fa9e4066Sahrens { 3241bc9014e6SJustin Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3242bc9014e6SJustin Gibbs 3243bc9014e6SJustin Gibbs mutex_enter(&db->db_mtx); 3244bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 3245bc9014e6SJustin Gibbs if (db->db_user == old_user) 3246bc9014e6SJustin Gibbs db->db_user = new_user; 3247bc9014e6SJustin Gibbs else 3248bc9014e6SJustin Gibbs old_user = db->db_user; 3249bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 3250bc9014e6SJustin Gibbs mutex_exit(&db->db_mtx); 3251bc9014e6SJustin Gibbs 3252bc9014e6SJustin Gibbs return (old_user); 3253fa9e4066Sahrens } 3254fa9e4066Sahrens 3255fa9e4066Sahrens void * 3256bc9014e6SJustin Gibbs dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3257fa9e4066Sahrens { 3258bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, NULL, user)); 3259fa9e4066Sahrens } 3260fa9e4066Sahrens 3261fa9e4066Sahrens void * 3262bc9014e6SJustin Gibbs dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3263fa9e4066Sahrens { 3264fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3265fa9e4066Sahrens 3266d2058105SJustin T. 
Gibbs db->db_user_immediate_evict = TRUE; 3267bc9014e6SJustin Gibbs return (dmu_buf_set_user(db_fake, user)); 3268bc9014e6SJustin Gibbs } 3269fa9e4066Sahrens 3270bc9014e6SJustin Gibbs void * 3271bc9014e6SJustin Gibbs dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3272bc9014e6SJustin Gibbs { 3273bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, user, NULL)); 3274fa9e4066Sahrens } 3275fa9e4066Sahrens 3276fa9e4066Sahrens void * 3277fa9e4066Sahrens dmu_buf_get_user(dmu_buf_t *db_fake) 3278fa9e4066Sahrens { 3279fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3280fa9e4066Sahrens 3281bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 3282bc9014e6SJustin Gibbs return (db->db_user); 3283bc9014e6SJustin Gibbs } 3284bc9014e6SJustin Gibbs 3285bc9014e6SJustin Gibbs void 3286bc9014e6SJustin Gibbs dmu_buf_user_evict_wait() 3287bc9014e6SJustin Gibbs { 3288bc9014e6SJustin Gibbs taskq_wait(dbu_evict_taskq); 3289fa9e4066Sahrens } 3290fa9e4066Sahrens 329180901aeaSGeorge Wilson blkptr_t * 329280901aeaSGeorge Wilson dmu_buf_get_blkptr(dmu_buf_t *db) 329380901aeaSGeorge Wilson { 329480901aeaSGeorge Wilson dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 329580901aeaSGeorge Wilson return (dbi->db_blkptr); 329680901aeaSGeorge Wilson } 329780901aeaSGeorge Wilson 3298ae972795SMatthew Ahrens objset_t * 3299ae972795SMatthew Ahrens dmu_buf_get_objset(dmu_buf_t *db) 3300ae972795SMatthew Ahrens { 3301ae972795SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3302ae972795SMatthew Ahrens return (dbi->db_objset); 3303ae972795SMatthew Ahrens } 3304ae972795SMatthew Ahrens 330579d72832SMatthew Ahrens dnode_t * 330679d72832SMatthew Ahrens dmu_buf_dnode_enter(dmu_buf_t *db) 330779d72832SMatthew Ahrens { 330879d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 330979d72832SMatthew Ahrens DB_DNODE_ENTER(dbi); 331079d72832SMatthew Ahrens return (DB_DNODE(dbi)); 331179d72832SMatthew Ahrens } 331279d72832SMatthew Ahrens 331379d72832SMatthew Ahrens void 331479d72832SMatthew Ahrens dmu_buf_dnode_exit(dmu_buf_t *db) 331579d72832SMatthew Ahrens { 331679d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 331779d72832SMatthew Ahrens DB_DNODE_EXIT(dbi); 331879d72832SMatthew Ahrens } 331979d72832SMatthew Ahrens 3320c717a561Smaybee static void 3321c717a561Smaybee dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 3322fa9e4066Sahrens { 3323c717a561Smaybee /* ASSERT(dmu_tx_is_syncing(tx) */ 3324c717a561Smaybee ASSERT(MUTEX_HELD(&db->db_mtx)); 3325c717a561Smaybee 3326c717a561Smaybee if (db->db_blkptr != NULL) 3327c717a561Smaybee return; 3328c717a561Smaybee 33290a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 333054811da5SToomas Soome db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); 33310a586ceaSMark Shellenbaum BP_ZERO(db->db_blkptr); 33320a586ceaSMark Shellenbaum return; 33330a586ceaSMark Shellenbaum } 3334c717a561Smaybee if (db->db_level == dn->dn_phys->dn_nlevels-1) { 3335c717a561Smaybee /* 3336c717a561Smaybee * This buffer was allocated at a time when there was 3337c717a561Smaybee * no available blkptrs from the dnode, or it was 3338c717a561Smaybee * inappropriate to hook it in (i.e., nlevels mis-match). 
3339c717a561Smaybee */ 3340c717a561Smaybee ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 3341c717a561Smaybee ASSERT(db->db_parent == NULL); 3342c717a561Smaybee db->db_parent = dn->dn_dbuf; 3343c717a561Smaybee db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 3344c717a561Smaybee DBUF_VERIFY(db); 3345c717a561Smaybee } else { 3346c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 3347c717a561Smaybee int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3348c717a561Smaybee 3349c717a561Smaybee ASSERT(dn->dn_phys->dn_nlevels > 1); 3350c717a561Smaybee if (parent == NULL) { 3351c717a561Smaybee mutex_exit(&db->db_mtx); 3352c717a561Smaybee rw_enter(&dn->dn_struct_rwlock, RW_READER); 3353a2cdcdd2SPaul Dagnelie parent = dbuf_hold_level(dn, db->db_level + 1, 3354a2cdcdd2SPaul Dagnelie db->db_blkid >> epbs, db); 3355c717a561Smaybee rw_exit(&dn->dn_struct_rwlock); 3356c717a561Smaybee mutex_enter(&db->db_mtx); 3357c717a561Smaybee db->db_parent = parent; 3358c717a561Smaybee } 3359c717a561Smaybee db->db_blkptr = (blkptr_t *)parent->db.db_data + 3360c717a561Smaybee (db->db_blkid & ((1ULL << epbs) - 1)); 3361c717a561Smaybee DBUF_VERIFY(db); 3362c717a561Smaybee } 3363c717a561Smaybee } 3364c717a561Smaybee 3365eb633035STom Caputi /* 3366eb633035STom Caputi * When syncing out blocks of dnodes, adjust the block to deal with 3367eb633035STom Caputi * encryption. Normally, we make sure the block is decrypted before writing 3368eb633035STom Caputi * it. If we have crypt params, then we are writing a raw (encrypted) block, 3369eb633035STom Caputi * from a raw receive. In this case, set the ARC buf's crypt params so 3370eb633035STom Caputi * that the BP will be filled with the correct byteorder, salt, iv, and mac. 3371eb633035STom Caputi * 3372eb633035STom Caputi * XXX we should handle decrypting the dnode block in dbuf_dirty(). 3373eb633035STom Caputi */ 3374eb633035STom Caputi static void 3375eb633035STom Caputi dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr) 3376eb633035STom Caputi { 3377eb633035STom Caputi int err; 3378eb633035STom Caputi dmu_buf_impl_t *db = dr->dr_dbuf; 3379eb633035STom Caputi 3380eb633035STom Caputi ASSERT(MUTEX_HELD(&db->db_mtx)); 3381eb633035STom Caputi ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 3382eb633035STom Caputi ASSERT3U(db->db_level, ==, 0); 3383eb633035STom Caputi 3384eb633035STom Caputi if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) { 3385eb633035STom Caputi zbookmark_phys_t zb; 3386eb633035STom Caputi 3387eb633035STom Caputi /* 3388eb633035STom Caputi * Unfortunately, there is currently no mechanism for 3389eb633035STom Caputi * syncing context to handle decryption errors. An error 3390eb633035STom Caputi * here is only possible if an attacker maliciously 3391eb633035STom Caputi * changed a dnode block and updated the associated 3392eb633035STom Caputi * checksums going up the block tree. 
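 * Hence a failure of arc_untransform() below is treated as fatal:
 * we panic() rather than attempt recovery in syncing context.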
3393eb633035STom Caputi */ 3394eb633035STom Caputi SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 3395eb633035STom Caputi db->db.db_object, db->db_level, db->db_blkid); 3396eb633035STom Caputi err = arc_untransform(db->db_buf, db->db_objset->os_spa, 3397eb633035STom Caputi &zb, B_TRUE); 3398eb633035STom Caputi if (err) 3399eb633035STom Caputi panic("Invalid dnode block MAC"); 3400eb633035STom Caputi } else if (dr->dt.dl.dr_has_raw_params) { 3401eb633035STom Caputi (void) arc_release(dr->dt.dl.dr_data, db); 3402eb633035STom Caputi arc_convert_to_raw(dr->dt.dl.dr_data, 3403eb633035STom Caputi dmu_objset_id(db->db_objset), 3404eb633035STom Caputi dr->dt.dl.dr_byteorder, DMU_OT_DNODE, 3405eb633035STom Caputi dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac); 3406eb633035STom Caputi } 3407eb633035STom Caputi } 3408eb633035STom Caputi 3409c717a561Smaybee static void 3410c717a561Smaybee dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3411c717a561Smaybee { 3412c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 3413744947dcSTom Erickson dnode_t *dn; 3414c717a561Smaybee zio_t *zio; 3415c717a561Smaybee 3416c717a561Smaybee ASSERT(dmu_tx_is_syncing(tx)); 3417c717a561Smaybee 3418c717a561Smaybee dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3419c717a561Smaybee 3420c717a561Smaybee mutex_enter(&db->db_mtx); 3421c717a561Smaybee 3422c717a561Smaybee ASSERT(db->db_level > 0); 3423c717a561Smaybee DBUF_VERIFY(db); 3424c717a561Smaybee 34253e30c24aSWill Andrews /* Read the block if it hasn't been read yet. */ 3426c717a561Smaybee if (db->db_buf == NULL) { 3427c717a561Smaybee mutex_exit(&db->db_mtx); 3428c717a561Smaybee (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 3429c717a561Smaybee mutex_enter(&db->db_mtx); 3430c717a561Smaybee } 3431c717a561Smaybee ASSERT3U(db->db_state, ==, DB_CACHED); 3432c717a561Smaybee ASSERT(db->db_buf != NULL); 3433c717a561Smaybee 3434744947dcSTom Erickson DB_DNODE_ENTER(db); 3435744947dcSTom Erickson dn = DB_DNODE(db); 34363e30c24aSWill Andrews /* Indirect block size must match what the dnode thinks it is. 
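 * (E.g., a dn_indblkshift of 17 means 128K indirect blocks, so
 * db.db_size must be 1 << 17 == 131072 here.)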
*/ 3437744947dcSTom Erickson ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3438c717a561Smaybee dbuf_check_blkptr(dn, db); 3439744947dcSTom Erickson DB_DNODE_EXIT(db); 3440c717a561Smaybee 34413e30c24aSWill Andrews /* Provide the pending dirty record to child dbufs */ 3442c717a561Smaybee db->db_data_pending = dr; 3443c717a561Smaybee 3444af2c4821Smaybee mutex_exit(&db->db_mtx); 34455cabbc6bSPrashanth Sreenivasa 3446088f3894Sahrens dbuf_write(dr, db->db_buf, tx); 3447c717a561Smaybee 3448c717a561Smaybee zio = dr->dr_zio; 3449c717a561Smaybee mutex_enter(&dr->dt.di.dr_mtx); 345046e1baa6SMatthew Ahrens dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3451c717a561Smaybee ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3452c717a561Smaybee mutex_exit(&dr->dt.di.dr_mtx); 3453c717a561Smaybee zio_nowait(zio); 3454c717a561Smaybee } 3455c717a561Smaybee 3456c717a561Smaybee static void 3457c717a561Smaybee dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3458c717a561Smaybee { 3459c717a561Smaybee arc_buf_t **datap = &dr->dt.dl.dr_data; 3460c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 3461744947dcSTom Erickson dnode_t *dn; 3462744947dcSTom Erickson objset_t *os; 3463c717a561Smaybee uint64_t txg = tx->tx_txg; 3464fa9e4066Sahrens 3465fa9e4066Sahrens ASSERT(dmu_tx_is_syncing(tx)); 3466fa9e4066Sahrens 3467fa9e4066Sahrens dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3468fa9e4066Sahrens 3469fa9e4066Sahrens mutex_enter(&db->db_mtx); 3470fa9e4066Sahrens /* 3471fa9e4066Sahrens * To be synced, we must be dirtied. But we 3472fa9e4066Sahrens * might have been freed after the dirty. 3473fa9e4066Sahrens */ 3474fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 3475fa9e4066Sahrens /* This buffer has been freed since it was dirtied */ 3476fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 3477fa9e4066Sahrens } else if (db->db_state == DB_FILL) { 3478fa9e4066Sahrens /* This buffer was freed and is now being re-filled */ 3479c717a561Smaybee ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3480fa9e4066Sahrens } else { 348182c9918fSTim Haley ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3482fa9e4066Sahrens } 34839c9dc39aSek DBUF_VERIFY(db); 3484fa9e4066Sahrens 3485744947dcSTom Erickson DB_DNODE_ENTER(db); 3486744947dcSTom Erickson dn = DB_DNODE(db); 3487744947dcSTom Erickson 34880a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 34890a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 34900a586ceaSMark Shellenbaum dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 34910a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 34920a586ceaSMark Shellenbaum } 34930a586ceaSMark Shellenbaum 3494fa9e4066Sahrens /* 3495c717a561Smaybee * If this is a bonus buffer, simply copy the bonus data into the 3496c717a561Smaybee * dnode. It will be written out when the dnode is synced (and it 3497c717a561Smaybee * will be synced, since it must have been dirty for dbuf_sync to 3498c717a561Smaybee * be called). 
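 * The copy below is bounded by DN_MAX_BONUS_LEN(); e.g. a classic
 * single-slot (512-byte) dnode can carry at most 320 bytes of bonus
 * data.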
3499fa9e4066Sahrens */ 35000a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 3501c717a561Smaybee dbuf_dirty_record_t **drp; 35021934e92fSmaybee 3503ea8dc4b6Seschrock ASSERT(*datap != NULL); 3504fb09f5aaSMadhav Suresh ASSERT0(db->db_level); 350554811da5SToomas Soome ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=, 350654811da5SToomas Soome DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); 350754811da5SToomas Soome bcopy(*datap, DN_BONUS(dn->dn_phys), 350854811da5SToomas Soome DN_MAX_BONUS_LEN(dn->dn_phys)); 3509744947dcSTom Erickson DB_DNODE_EXIT(db); 3510744947dcSTom Erickson 35110e8c6158Smaybee if (*datap != db->db.db_data) { 351254811da5SToomas Soome int slots = DB_DNODE(db)->dn_num_slots; 351354811da5SToomas Soome int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 351454811da5SToomas Soome zio_buf_free(*datap, bonuslen); 351554811da5SToomas Soome arc_space_return(bonuslen, ARC_SPACE_BONUS); 35160e8c6158Smaybee } 3517ea8dc4b6Seschrock db->db_data_pending = NULL; 3518c717a561Smaybee drp = &db->db_last_dirty; 3519c717a561Smaybee while (*drp != dr) 3520c717a561Smaybee drp = &(*drp)->dr_next; 352117f17c2dSbonwick ASSERT(dr->dr_next == NULL); 3522b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 352317f17c2dSbonwick *drp = dr->dr_next; 3524c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3525ea8dc4b6Seschrock ASSERT(db->db_dirtycnt > 0); 3526ea8dc4b6Seschrock db->db_dirtycnt -= 1; 3527c2919acbSMatthew Ahrens dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE); 3528ea8dc4b6Seschrock return; 3529ea8dc4b6Seschrock } 3530ea8dc4b6Seschrock 3531744947dcSTom Erickson os = dn->dn_objset; 3532744947dcSTom Erickson 3533f82bfe17Sgw /* 3534f82bfe17Sgw * This function may have dropped the db_mtx lock allowing a dmu_sync 3535f82bfe17Sgw * operation to sneak in. As a result, we need to ensure that we 3536f82bfe17Sgw * don't check the dr_override_state until we have returned from 3537f82bfe17Sgw * dbuf_check_blkptr. 3538f82bfe17Sgw */ 3539f82bfe17Sgw dbuf_check_blkptr(dn, db); 3540f82bfe17Sgw 3541c717a561Smaybee /* 3542744947dcSTom Erickson * If this buffer is in the middle of an immediate write, 3543c717a561Smaybee * wait for the synchronous IO to complete. 3544c717a561Smaybee */ 3545c717a561Smaybee while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 3546c717a561Smaybee ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 3547c717a561Smaybee cv_wait(&db->db_changed, &db->db_mtx); 3548c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 3549c717a561Smaybee } 3550c5c6ffa0Smaybee 3551eb633035STom Caputi /* 3552eb633035STom Caputi * If this is a dnode block, ensure it is appropriately encrypted 3553eb633035STom Caputi * or decrypted, depending on what we are writing to it this txg. 
3554eb633035STom Caputi */
3555eb633035STom Caputi if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
3556eb633035STom Caputi dbuf_prepare_encrypted_dnode_leaf(dr);
3557eb633035STom Caputi
3558ab69d62fSMatthew Ahrens if (db->db_state != DB_NOFILL &&
3559ab69d62fSMatthew Ahrens dn->dn_object != DMU_META_DNODE_OBJECT &&
3560e914ace2STim Schumacher zfs_refcount_count(&db->db_holds) > 1 &&
3561b24ab676SJeff Bonwick dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
3562ab69d62fSMatthew Ahrens *datap == db->db_buf) {
3563ab69d62fSMatthew Ahrens /*
3564ab69d62fSMatthew Ahrens * If this buffer is currently "in use" (i.e., there
3565ab69d62fSMatthew Ahrens * are active holds and db_data still references it),
3566ab69d62fSMatthew Ahrens * then make a copy before we start the write so that
3567ab69d62fSMatthew Ahrens * any modifications from the open txg will not leak
3568ab69d62fSMatthew Ahrens * into this write.
3569ab69d62fSMatthew Ahrens *
3570ab69d62fSMatthew Ahrens * NOTE: this copy does not need to be made for
3571ab69d62fSMatthew Ahrens * objects only modified in the syncing context (e.g.
3572ab69d62fSMatthew Ahrens * DMU_OT_DNODE blocks).
3573ab69d62fSMatthew Ahrens */
35745602294fSDan Kimmel int psize = arc_buf_size(*datap);
3575eb633035STom Caputi int lsize = arc_buf_lsize(*datap);
3576ab69d62fSMatthew Ahrens arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
35775602294fSDan Kimmel enum zio_compress compress_type = arc_get_compression(*datap);
35785602294fSDan Kimmel
3579eb633035STom Caputi if (arc_is_encrypted(*datap)) {
3580eb633035STom Caputi boolean_t byteorder;
3581eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN];
3582eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN];
3583eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN];
3584eb633035STom Caputi
3585eb633035STom Caputi arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
3586eb633035STom Caputi *datap = arc_alloc_raw_buf(os->os_spa, db,
3587eb633035STom Caputi dmu_objset_id(os), byteorder, salt, iv, mac,
3588eb633035STom Caputi dn->dn_type, psize, lsize, compress_type);
3589eb633035STom Caputi } else if (compress_type != ZIO_COMPRESS_OFF) {
35905602294fSDan Kimmel ASSERT3U(type, ==, ARC_BUFC_DATA);
35915602294fSDan Kimmel *datap = arc_alloc_compressed_buf(os->os_spa, db,
35925602294fSDan Kimmel psize, lsize, compress_type);
3593eb633035STom Caputi } else {
3594eb633035STom Caputi *datap = arc_alloc_buf(os->os_spa, db, type, psize);
35955602294fSDan Kimmel }
35965602294fSDan Kimmel bcopy(db->db.db_data, (*datap)->b_data, psize);
359782c9918fSTim Haley }
3598c717a561Smaybee db->db_data_pending = dr;
3599fa9e4066Sahrens
3600c717a561Smaybee mutex_exit(&db->db_mtx);
3601fa9e4066Sahrens
3602088f3894Sahrens dbuf_write(dr, *datap, tx);
3603fa9e4066Sahrens
3604c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node));
3605744947dcSTom Erickson if (dn->dn_object == DMU_META_DNODE_OBJECT) {
3606c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
3607744947dcSTom Erickson DB_DNODE_EXIT(db);
3608744947dcSTom Erickson } else {
3609744947dcSTom Erickson /*
3610744947dcSTom Erickson * Although zio_nowait() does not "wait for an IO", it does
3611744947dcSTom Erickson * initiate the IO. If this is an empty write it seems plausible
3612744947dcSTom Erickson * that the IO could actually be completed before the nowait
3613744947dcSTom Erickson * returns. We need to DB_DNODE_EXIT() first in case
3614744947dcSTom Erickson * zio_nowait() invalidates the dbuf.
3615744947dcSTom Erickson */ 3616744947dcSTom Erickson DB_DNODE_EXIT(db); 3617c717a561Smaybee zio_nowait(dr->dr_zio); 3618744947dcSTom Erickson } 3619c717a561Smaybee } 362023b11526Smaybee 3621c717a561Smaybee void 362246e1baa6SMatthew Ahrens dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3623c717a561Smaybee { 3624c717a561Smaybee dbuf_dirty_record_t *dr; 3625c717a561Smaybee 3626c717a561Smaybee while (dr = list_head(list)) { 3627c717a561Smaybee if (dr->dr_zio != NULL) { 3628c717a561Smaybee /* 3629c717a561Smaybee * If we find an already initialized zio then we 3630c717a561Smaybee * are processing the meta-dnode, and we have finished. 3631c717a561Smaybee * The dbufs for all dnodes are put back on the list 3632c717a561Smaybee * during processing, so that we can zio_wait() 3633c717a561Smaybee * these IOs after initiating all child IOs. 3634c717a561Smaybee */ 3635c717a561Smaybee ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3636c717a561Smaybee DMU_META_DNODE_OBJECT); 3637c717a561Smaybee break; 363823b11526Smaybee } 363946e1baa6SMatthew Ahrens if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 364046e1baa6SMatthew Ahrens dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 364146e1baa6SMatthew Ahrens VERIFY3U(dr->dr_dbuf->db_level, ==, level); 364246e1baa6SMatthew Ahrens } 3643c717a561Smaybee list_remove(list, dr); 3644c717a561Smaybee if (dr->dr_dbuf->db_level > 0) 3645c717a561Smaybee dbuf_sync_indirect(dr, tx); 3646c717a561Smaybee else 3647c717a561Smaybee dbuf_sync_leaf(dr, tx); 364823b11526Smaybee } 3649c717a561Smaybee } 365023b11526Smaybee 3651fa9e4066Sahrens /* ARGSUSED */ 3652fa9e4066Sahrens static void 3653c717a561Smaybee dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3654fa9e4066Sahrens { 3655fa9e4066Sahrens dmu_buf_impl_t *db = vdb; 3656744947dcSTom Erickson dnode_t *dn; 3657e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3658c717a561Smaybee blkptr_t *bp_orig = &zio->io_bp_orig; 3659b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 3660b24ab676SJeff Bonwick int64_t delta; 3661fa9e4066Sahrens uint64_t fill = 0; 3662b24ab676SJeff Bonwick int i; 3663fa9e4066Sahrens 366411ceac77SAlex Reece ASSERT3P(db->db_blkptr, !=, NULL); 366511ceac77SAlex Reece ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3666e14bb325SJeff Bonwick 3667744947dcSTom Erickson DB_DNODE_ENTER(db); 3668744947dcSTom Erickson dn = DB_DNODE(db); 3669b24ab676SJeff Bonwick delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3670b24ab676SJeff Bonwick dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3671b24ab676SJeff Bonwick zio->io_prev_space_delta = delta; 3672fa9e4066Sahrens 367343466aaeSMax Grossman if (bp->blk_birth != 0) { 367443466aaeSMax Grossman ASSERT((db->db_blkid != DMU_SPILL_BLKID && 367543466aaeSMax Grossman BP_GET_TYPE(bp) == dn->dn_type) || 367643466aaeSMax Grossman (db->db_blkid == DMU_SPILL_BLKID && 36775d7b4d43SMatthew Ahrens BP_GET_TYPE(bp) == dn->dn_bonustype) || 36785d7b4d43SMatthew Ahrens BP_IS_EMBEDDED(bp)); 367943466aaeSMax Grossman ASSERT(BP_GET_LEVEL(bp) == db->db_level); 3680c717a561Smaybee } 3681c5c6ffa0Smaybee 3682c717a561Smaybee mutex_enter(&db->db_mtx); 3683fa9e4066Sahrens 36840a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG 36850a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 36860a586ceaSMark Shellenbaum ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 368711ceac77SAlex Reece ASSERT(!(BP_IS_HOLE(bp)) && 368854811da5SToomas Soome db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 36890a586ceaSMark Shellenbaum } 36900a586ceaSMark Shellenbaum #endif 
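/*
 * Compute the fill count that will be stored in the BP below: for a
 * dnode block it is the number of allocated dnode slots (e.g. 20 for
 * a 16K block with 20 of its 32 classic 512-byte dnodes in use), for
 * any other level-0 block it is 0 or 1, and for an indirect block it
 * is the sum of the children's fill counts.
 */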
36910a586ceaSMark Shellenbaum
3692fa9e4066Sahrens if (db->db_level == 0) {
3693fa9e4066Sahrens mutex_enter(&dn->dn_mtx);
36940a586ceaSMark Shellenbaum if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
3695eb633035STom Caputi db->db_blkid != DMU_SPILL_BLKID) {
3696eb633035STom Caputi ASSERT0(db->db_objset->os_raw_receive);
3697fa9e4066Sahrens dn->dn_phys->dn_maxblkid = db->db_blkid;
3698eb633035STom Caputi }
3699fa9e4066Sahrens mutex_exit(&dn->dn_mtx);
3700fa9e4066Sahrens
3701fa9e4066Sahrens if (dn->dn_type == DMU_OT_DNODE) {
370254811da5SToomas Soome i = 0;
370354811da5SToomas Soome while (i < db->db.db_size) {
370454811da5SToomas Soome dnode_phys_t *dnp =
370554811da5SToomas Soome (void *)(((char *)db->db.db_data) + i);
370654811da5SToomas Soome
370754811da5SToomas Soome i += DNODE_MIN_SIZE;
370854811da5SToomas Soome if (dnp->dn_type != DMU_OT_NONE) {
3709fa9e4066Sahrens fill++;
371054811da5SToomas Soome i += dnp->dn_extra_slots *
371154811da5SToomas Soome DNODE_MIN_SIZE;
371254811da5SToomas Soome }
3713fa9e4066Sahrens }
3714fa9e4066Sahrens } else {
371543466aaeSMax Grossman if (BP_IS_HOLE(bp)) {
371643466aaeSMax Grossman fill = 0;
371743466aaeSMax Grossman } else {
371843466aaeSMax Grossman fill = 1;
371943466aaeSMax Grossman }
3720fa9e4066Sahrens }
3721fa9e4066Sahrens } else {
3722e14bb325SJeff Bonwick blkptr_t *ibp = db->db.db_data;
3723fa9e4066Sahrens ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
3724e14bb325SJeff Bonwick for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
3725e14bb325SJeff Bonwick if (BP_IS_HOLE(ibp))
3726fa9e4066Sahrens continue;
37275d7b4d43SMatthew Ahrens fill += BP_GET_FILL(ibp);
3728fa9e4066Sahrens }
3729fa9e4066Sahrens }
3730744947dcSTom Erickson DB_DNODE_EXIT(db);
3731fa9e4066Sahrens
37325d7b4d43SMatthew Ahrens if (!BP_IS_EMBEDDED(bp))
3733eb633035STom Caputi BP_SET_FILL(bp, fill);
3734c717a561Smaybee
3735c717a561Smaybee mutex_exit(&db->db_mtx);
373611ceac77SAlex Reece
3737*9704bf7fSPaul Dagnelie db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
373811ceac77SAlex Reece *db->db_blkptr = *bp;
3739*9704bf7fSPaul Dagnelie dmu_buf_unlock_parent(db, dblt, FTAG);
3740c717a561Smaybee }
3741fa9e4066Sahrens
37428df0bcf0SPaul Dagnelie /* ARGSUSED */
37438df0bcf0SPaul Dagnelie /*
37448df0bcf0SPaul Dagnelie * This function gets called just prior to running through the compression
37458df0bcf0SPaul Dagnelie * stage of the zio pipeline. If we're an indirect block comprised of only
37468df0bcf0SPaul Dagnelie * holes, then we want this indirect to be compressed away to a hole. In
37478df0bcf0SPaul Dagnelie * order to do that we must zero out any information about the holes that
37488df0bcf0SPaul Dagnelie * this indirect points to before we try to compress it.
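 * For example, after a large file free, a 128K indirect whose 1024
 * child blkptrs are all holes can itself be compressed away to a
 * hole, but only once the hole metadata (e.g. birth txgs) carried in
 * those child blkptrs has been zeroed below.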
37498df0bcf0SPaul Dagnelie */ 37508df0bcf0SPaul Dagnelie static void 37518df0bcf0SPaul Dagnelie dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 37528df0bcf0SPaul Dagnelie { 37538df0bcf0SPaul Dagnelie dmu_buf_impl_t *db = vdb; 37548df0bcf0SPaul Dagnelie dnode_t *dn; 37558df0bcf0SPaul Dagnelie blkptr_t *bp; 37561a01181fSGeorge Wilson unsigned int epbs, i; 37578df0bcf0SPaul Dagnelie 37588df0bcf0SPaul Dagnelie ASSERT3U(db->db_level, >, 0); 37598df0bcf0SPaul Dagnelie DB_DNODE_ENTER(db); 37608df0bcf0SPaul Dagnelie dn = DB_DNODE(db); 37618df0bcf0SPaul Dagnelie epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 37621a01181fSGeorge Wilson ASSERT3U(epbs, <, 31); 37638df0bcf0SPaul Dagnelie 37648df0bcf0SPaul Dagnelie /* Determine if all our children are holes */ 37658df0bcf0SPaul Dagnelie for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) { 37668df0bcf0SPaul Dagnelie if (!BP_IS_HOLE(bp)) 37678df0bcf0SPaul Dagnelie break; 37688df0bcf0SPaul Dagnelie } 37698df0bcf0SPaul Dagnelie 37708df0bcf0SPaul Dagnelie /* 37718df0bcf0SPaul Dagnelie * If all the children are holes, then zero them all out so that 37728df0bcf0SPaul Dagnelie * we may get compressed away. 37738df0bcf0SPaul Dagnelie */ 37748df0bcf0SPaul Dagnelie if (i == 1 << epbs) { 37751a01181fSGeorge Wilson /* 37761a01181fSGeorge Wilson * We only found holes. Grab the rwlock to prevent 37771a01181fSGeorge Wilson * anybody from reading the blocks we're about to 37781a01181fSGeorge Wilson * zero out. 37791a01181fSGeorge Wilson */ 3780*9704bf7fSPaul Dagnelie rw_enter(&db->db_rwlock, RW_WRITER); 37818df0bcf0SPaul Dagnelie bzero(db->db.db_data, db->db.db_size); 3782*9704bf7fSPaul Dagnelie rw_exit(&db->db_rwlock); 37838df0bcf0SPaul Dagnelie } 37848df0bcf0SPaul Dagnelie DB_DNODE_EXIT(db); 37858df0bcf0SPaul Dagnelie } 37868df0bcf0SPaul Dagnelie 378769962b56SMatthew Ahrens /* 378869962b56SMatthew Ahrens * The SPA will call this callback several times for each zio - once 378969962b56SMatthew Ahrens * for every physical child i/o (zio->io_phys_children times). This 379069962b56SMatthew Ahrens * allows the DMU to monitor the progress of each logical i/o. For example, 379169962b56SMatthew Ahrens * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 379269962b56SMatthew Ahrens * block. There may be a long delay before all copies/fragments are completed, 379369962b56SMatthew Ahrens * so this callback allows us to retire dirty space gradually, as the physical 379469962b56SMatthew Ahrens * i/os complete. 379569962b56SMatthew Ahrens */ 379669962b56SMatthew Ahrens /* ARGSUSED */ 379769962b56SMatthew Ahrens static void 379869962b56SMatthew Ahrens dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 379969962b56SMatthew Ahrens { 380069962b56SMatthew Ahrens dmu_buf_impl_t *db = arg; 380169962b56SMatthew Ahrens objset_t *os = db->db_objset; 380269962b56SMatthew Ahrens dsl_pool_t *dp = dmu_objset_pool(os); 380369962b56SMatthew Ahrens dbuf_dirty_record_t *dr; 380469962b56SMatthew Ahrens int delta = 0; 380569962b56SMatthew Ahrens 380669962b56SMatthew Ahrens dr = db->db_data_pending; 380769962b56SMatthew Ahrens ASSERT3U(dr->dr_txg, ==, zio->io_txg); 380869962b56SMatthew Ahrens 380969962b56SMatthew Ahrens /* 381069962b56SMatthew Ahrens * The callback will be called io_phys_children times. Retire one 381169962b56SMatthew Ahrens * portion of our dirty space each time we are called. 
Any rounding 381269962b56SMatthew Ahrens * error will be cleaned up by dsl_pool_sync()'s call to 381369962b56SMatthew Ahrens * dsl_pool_undirty_space(). 381469962b56SMatthew Ahrens */ 381569962b56SMatthew Ahrens delta = dr->dr_accounted / zio->io_phys_children; 381669962b56SMatthew Ahrens dsl_pool_undirty_space(dp, delta, zio->io_txg); 381769962b56SMatthew Ahrens } 381869962b56SMatthew Ahrens 3819c717a561Smaybee /* ARGSUSED */ 3820c717a561Smaybee static void 3821c717a561Smaybee dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 3822c717a561Smaybee { 3823c717a561Smaybee dmu_buf_impl_t *db = vdb; 3824b24ab676SJeff Bonwick blkptr_t *bp_orig = &zio->io_bp_orig; 382543466aaeSMax Grossman blkptr_t *bp = db->db_blkptr; 382643466aaeSMax Grossman objset_t *os = db->db_objset; 382743466aaeSMax Grossman dmu_tx_t *tx = os->os_synctx; 3828c717a561Smaybee dbuf_dirty_record_t **drp, *dr; 3829c717a561Smaybee 3830fb09f5aaSMadhav Suresh ASSERT0(zio->io_error); 3831b24ab676SJeff Bonwick ASSERT(db->db_blkptr == bp); 3832b24ab676SJeff Bonwick 383380901aeaSGeorge Wilson /* 383480901aeaSGeorge Wilson * For nopwrites and rewrites we ensure that the bp matches our 383580901aeaSGeorge Wilson * original and bypass all the accounting. 383680901aeaSGeorge Wilson */ 383780901aeaSGeorge Wilson if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 3838b24ab676SJeff Bonwick ASSERT(BP_EQUAL(bp, bp_orig)); 3839b24ab676SJeff Bonwick } else { 384043466aaeSMax Grossman dsl_dataset_t *ds = os->os_dsl_dataset; 3841b24ab676SJeff Bonwick (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3842b24ab676SJeff Bonwick dsl_dataset_block_born(ds, bp, tx); 3843b24ab676SJeff Bonwick } 3844c717a561Smaybee 3845c717a561Smaybee mutex_enter(&db->db_mtx); 3846c717a561Smaybee 3847b24ab676SJeff Bonwick DBUF_VERIFY(db); 3848b24ab676SJeff Bonwick 3849c717a561Smaybee drp = &db->db_last_dirty; 385017f17c2dSbonwick while ((dr = *drp) != db->db_data_pending) 385117f17c2dSbonwick drp = &dr->dr_next; 385217f17c2dSbonwick ASSERT(!list_link_active(&dr->dr_dirty_node)); 3853b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 385417f17c2dSbonwick ASSERT(dr->dr_next == NULL); 385517f17c2dSbonwick *drp = dr->dr_next; 3856c717a561Smaybee 38570a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG 38580a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 3859744947dcSTom Erickson dnode_t *dn; 3860744947dcSTom Erickson 3861744947dcSTom Erickson DB_DNODE_ENTER(db); 3862744947dcSTom Erickson dn = DB_DNODE(db); 38630a586ceaSMark Shellenbaum ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 38640a586ceaSMark Shellenbaum ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 386554811da5SToomas Soome db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 3866744947dcSTom Erickson DB_DNODE_EXIT(db); 38670a586ceaSMark Shellenbaum } 38680a586ceaSMark Shellenbaum #endif 38690a586ceaSMark Shellenbaum 3870c717a561Smaybee if (db->db_level == 0) { 38710a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3872c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 387382c9918fSTim Haley if (db->db_state != DB_NOFILL) { 387482c9918fSTim Haley if (dr->dt.dl.dr_data != db->db_buf) 3875dcbf3bd6SGeorge Wilson arc_buf_destroy(dr->dt.dl.dr_data, db); 387682c9918fSTim Haley } 3877c717a561Smaybee } else { 3878744947dcSTom Erickson dnode_t *dn; 3879744947dcSTom Erickson 3880744947dcSTom Erickson DB_DNODE_ENTER(db); 3881744947dcSTom Erickson dn = DB_DNODE(db); 3882c717a561Smaybee ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 388343466aaeSMax Grossman 
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3884c717a561Smaybee if (!BP_IS_HOLE(db->db_blkptr)) { 3885c717a561Smaybee int epbs = 3886c717a561Smaybee dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 388743466aaeSMax Grossman ASSERT3U(db->db_blkid, <=, 388843466aaeSMax Grossman dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3889c717a561Smaybee ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3890c717a561Smaybee db->db.db_size); 3891c717a561Smaybee } 3892744947dcSTom Erickson DB_DNODE_EXIT(db); 3893c25056deSgw mutex_destroy(&dr->dt.di.dr_mtx); 3894c25056deSgw list_destroy(&dr->dt.di.dr_children); 3895c717a561Smaybee } 3896c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3897fa9e4066Sahrens 3898fa9e4066Sahrens cv_broadcast(&db->db_changed); 3899fa9e4066Sahrens ASSERT(db->db_dirtycnt > 0); 3900fa9e4066Sahrens db->db_dirtycnt -= 1; 3901c717a561Smaybee db->db_data_pending = NULL; 3902c2919acbSMatthew Ahrens dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 3903b24ab676SJeff Bonwick } 3904b24ab676SJeff Bonwick 3905b24ab676SJeff Bonwick static void 3906b24ab676SJeff Bonwick dbuf_write_nofill_ready(zio_t *zio) 3907b24ab676SJeff Bonwick { 3908b24ab676SJeff Bonwick dbuf_write_ready(zio, NULL, zio->io_private); 3909b24ab676SJeff Bonwick } 3910b24ab676SJeff Bonwick 3911b24ab676SJeff Bonwick static void 3912b24ab676SJeff Bonwick dbuf_write_nofill_done(zio_t *zio) 3913b24ab676SJeff Bonwick { 3914b24ab676SJeff Bonwick dbuf_write_done(zio, NULL, zio->io_private); 3915b24ab676SJeff Bonwick } 3916b24ab676SJeff Bonwick 3917b24ab676SJeff Bonwick static void 3918b24ab676SJeff Bonwick dbuf_write_override_ready(zio_t *zio) 3919b24ab676SJeff Bonwick { 3920b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = zio->io_private; 3921b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3922b24ab676SJeff Bonwick 3923b24ab676SJeff Bonwick dbuf_write_ready(zio, NULL, db); 3924b24ab676SJeff Bonwick } 3925b24ab676SJeff Bonwick 3926b24ab676SJeff Bonwick static void 3927b24ab676SJeff Bonwick dbuf_write_override_done(zio_t *zio) 3928b24ab676SJeff Bonwick { 3929b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = zio->io_private; 3930b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3931b24ab676SJeff Bonwick blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3932b24ab676SJeff Bonwick 3933b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 3934b24ab676SJeff Bonwick if (!BP_EQUAL(zio->io_bp, obp)) { 3935b24ab676SJeff Bonwick if (!BP_IS_HOLE(obp)) 3936b24ab676SJeff Bonwick dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3937b24ab676SJeff Bonwick arc_release(dr->dt.dl.dr_data, db); 3938b24ab676SJeff Bonwick } 3939fa9e4066Sahrens mutex_exit(&db->db_mtx); 39404ee0199eSRobert Mustacchi dbuf_write_done(zio, NULL, db); 3941770499e1SDan Kimmel 3942770499e1SDan Kimmel if (zio->io_abd != NULL) 3943770499e1SDan Kimmel abd_put(zio->io_abd); 3944b24ab676SJeff Bonwick } 3945b24ab676SJeff Bonwick 39465cabbc6bSPrashanth Sreenivasa typedef struct dbuf_remap_impl_callback_arg { 39475cabbc6bSPrashanth Sreenivasa objset_t *drica_os; 39485cabbc6bSPrashanth Sreenivasa uint64_t drica_blk_birth; 39495cabbc6bSPrashanth Sreenivasa dmu_tx_t *drica_tx; 39505cabbc6bSPrashanth Sreenivasa } dbuf_remap_impl_callback_arg_t; 39515cabbc6bSPrashanth Sreenivasa 39525cabbc6bSPrashanth Sreenivasa static void 39535cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 39545cabbc6bSPrashanth Sreenivasa void *arg) 39555cabbc6bSPrashanth Sreenivasa { 39565cabbc6bSPrashanth Sreenivasa 
dbuf_remap_impl_callback_arg_t *drica = arg; 39575cabbc6bSPrashanth Sreenivasa objset_t *os = drica->drica_os; 39585cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(os); 39595cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx = drica->drica_tx; 39605cabbc6bSPrashanth Sreenivasa 39615cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 39625cabbc6bSPrashanth Sreenivasa 39635cabbc6bSPrashanth Sreenivasa if (os == spa_meta_objset(spa)) { 39645cabbc6bSPrashanth Sreenivasa spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 39655cabbc6bSPrashanth Sreenivasa } else { 39665cabbc6bSPrashanth Sreenivasa dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 39675cabbc6bSPrashanth Sreenivasa size, drica->drica_blk_birth, tx); 39685cabbc6bSPrashanth Sreenivasa } 39695cabbc6bSPrashanth Sreenivasa } 39705cabbc6bSPrashanth Sreenivasa 39715cabbc6bSPrashanth Sreenivasa static void 3972*9704bf7fSPaul Dagnelie dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx) 39735cabbc6bSPrashanth Sreenivasa { 39745cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = *bp; 39755cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(dn->dn_objset); 39765cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback_arg_t drica; 39775cabbc6bSPrashanth Sreenivasa 39785cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 39795cabbc6bSPrashanth Sreenivasa 39805cabbc6bSPrashanth Sreenivasa drica.drica_os = dn->dn_objset; 39815cabbc6bSPrashanth Sreenivasa drica.drica_blk_birth = bp->blk_birth; 39825cabbc6bSPrashanth Sreenivasa drica.drica_tx = tx; 39835cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 39845cabbc6bSPrashanth Sreenivasa &drica)) { 39855cabbc6bSPrashanth Sreenivasa /* 3986*9704bf7fSPaul Dagnelie * The db_rwlock prevents dbuf_read_impl() from 39875cabbc6bSPrashanth Sreenivasa * dereferencing the BP while we are changing it. To 39885cabbc6bSPrashanth Sreenivasa * avoid lock contention, only grab it when we are actually 39895cabbc6bSPrashanth Sreenivasa * changing the BP. 39905cabbc6bSPrashanth Sreenivasa */ 3991*9704bf7fSPaul Dagnelie if (rw != NULL) 3992*9704bf7fSPaul Dagnelie rw_enter(rw, RW_WRITER); 39935cabbc6bSPrashanth Sreenivasa *bp = bp_copy; 3994*9704bf7fSPaul Dagnelie if (rw != NULL) 3995*9704bf7fSPaul Dagnelie rw_exit(rw); 39965cabbc6bSPrashanth Sreenivasa } 39975cabbc6bSPrashanth Sreenivasa } 39985cabbc6bSPrashanth Sreenivasa 39995cabbc6bSPrashanth Sreenivasa /* 40005cabbc6bSPrashanth Sreenivasa * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting 40015cabbc6bSPrashanth Sreenivasa * to remap a copy of every bp in the dbuf. 
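 * Passing a NULL callback to spa_remap_blkptr() below makes this a
 * dry run: only the local bp_copy is updated, so the dbuf itself is
 * never modified here.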
40025cabbc6bSPrashanth Sreenivasa */ 40035cabbc6bSPrashanth Sreenivasa boolean_t 40045cabbc6bSPrashanth Sreenivasa dbuf_can_remap(const dmu_buf_impl_t *db) 40055cabbc6bSPrashanth Sreenivasa { 40065cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(db->db_objset); 40075cabbc6bSPrashanth Sreenivasa blkptr_t *bp = db->db.db_data; 40085cabbc6bSPrashanth Sreenivasa boolean_t ret = B_FALSE; 40095cabbc6bSPrashanth Sreenivasa 40105cabbc6bSPrashanth Sreenivasa ASSERT3U(db->db_level, >, 0); 40115cabbc6bSPrashanth Sreenivasa ASSERT3S(db->db_state, ==, DB_CACHED); 40125cabbc6bSPrashanth Sreenivasa 40135cabbc6bSPrashanth Sreenivasa ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 40145cabbc6bSPrashanth Sreenivasa 40155cabbc6bSPrashanth Sreenivasa spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 40165cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 40175cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = bp[i]; 40185cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 40195cabbc6bSPrashanth Sreenivasa ret = B_TRUE; 40205cabbc6bSPrashanth Sreenivasa break; 40215cabbc6bSPrashanth Sreenivasa } 40225cabbc6bSPrashanth Sreenivasa } 40235cabbc6bSPrashanth Sreenivasa spa_config_exit(spa, SCL_VDEV, FTAG); 40245cabbc6bSPrashanth Sreenivasa 40255cabbc6bSPrashanth Sreenivasa return (ret); 40265cabbc6bSPrashanth Sreenivasa } 40275cabbc6bSPrashanth Sreenivasa 40285cabbc6bSPrashanth Sreenivasa boolean_t 40295cabbc6bSPrashanth Sreenivasa dnode_needs_remap(const dnode_t *dn) 40305cabbc6bSPrashanth Sreenivasa { 40315cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(dn->dn_objset); 40325cabbc6bSPrashanth Sreenivasa boolean_t ret = B_FALSE; 40335cabbc6bSPrashanth Sreenivasa 40345cabbc6bSPrashanth Sreenivasa if (dn->dn_phys->dn_nlevels == 0) { 40355cabbc6bSPrashanth Sreenivasa return (B_FALSE); 40365cabbc6bSPrashanth Sreenivasa } 40375cabbc6bSPrashanth Sreenivasa 40385cabbc6bSPrashanth Sreenivasa ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 40395cabbc6bSPrashanth Sreenivasa 40405cabbc6bSPrashanth Sreenivasa spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 40415cabbc6bSPrashanth Sreenivasa for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) { 40425cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j]; 40435cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 40445cabbc6bSPrashanth Sreenivasa ret = B_TRUE; 40455cabbc6bSPrashanth Sreenivasa break; 40465cabbc6bSPrashanth Sreenivasa } 40475cabbc6bSPrashanth Sreenivasa } 40485cabbc6bSPrashanth Sreenivasa spa_config_exit(spa, SCL_VDEV, FTAG); 40495cabbc6bSPrashanth Sreenivasa 40505cabbc6bSPrashanth Sreenivasa return (ret); 40515cabbc6bSPrashanth Sreenivasa } 40525cabbc6bSPrashanth Sreenivasa 40535cabbc6bSPrashanth Sreenivasa /* 40545cabbc6bSPrashanth Sreenivasa * Remap any existing BP's to concrete vdevs, if possible. 
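 * This is done in syncing context after a device removal, so that
 * block pointers still referencing the removed (indirect) vdev are
 * rewritten in place to their new concrete locations; db_rwlock
 * keeps readers from seeing a half-updated blkptr.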
40555cabbc6bSPrashanth Sreenivasa */ 40565cabbc6bSPrashanth Sreenivasa static void 40575cabbc6bSPrashanth Sreenivasa dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 40585cabbc6bSPrashanth Sreenivasa { 40595cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(db->db_objset); 40605cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 40615cabbc6bSPrashanth Sreenivasa 40625cabbc6bSPrashanth Sreenivasa if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 40635cabbc6bSPrashanth Sreenivasa return; 40645cabbc6bSPrashanth Sreenivasa 40655cabbc6bSPrashanth Sreenivasa if (db->db_level > 0) { 40665cabbc6bSPrashanth Sreenivasa blkptr_t *bp = db->db.db_data; 40675cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 4068*9704bf7fSPaul Dagnelie dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx); 40695cabbc6bSPrashanth Sreenivasa } 40705cabbc6bSPrashanth Sreenivasa } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 40715cabbc6bSPrashanth Sreenivasa dnode_phys_t *dnp = db->db.db_data; 40725cabbc6bSPrashanth Sreenivasa ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 40735cabbc6bSPrashanth Sreenivasa DMU_OT_DNODE); 40745cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) { 40755cabbc6bSPrashanth Sreenivasa for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 4076*9704bf7fSPaul Dagnelie krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL : 4077*9704bf7fSPaul Dagnelie &dn->dn_dbuf->db_rwlock); 4078*9704bf7fSPaul Dagnelie dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock, 4079*9704bf7fSPaul Dagnelie tx); 40805cabbc6bSPrashanth Sreenivasa } 40815cabbc6bSPrashanth Sreenivasa } 40825cabbc6bSPrashanth Sreenivasa } 40835cabbc6bSPrashanth Sreenivasa } 40845cabbc6bSPrashanth Sreenivasa 40855cabbc6bSPrashanth Sreenivasa 40863e30c24aSWill Andrews /* Issue I/O to commit a dirty buffer to disk. */ 4087b24ab676SJeff Bonwick static void 4088b24ab676SJeff Bonwick dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 4089b24ab676SJeff Bonwick { 4090b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 4091744947dcSTom Erickson dnode_t *dn; 4092744947dcSTom Erickson objset_t *os; 4093b24ab676SJeff Bonwick dmu_buf_impl_t *parent = db->db_parent; 4094b24ab676SJeff Bonwick uint64_t txg = tx->tx_txg; 40957802d7bfSMatthew Ahrens zbookmark_phys_t zb; 4096b24ab676SJeff Bonwick zio_prop_t zp; 4097b24ab676SJeff Bonwick zio_t *zio; 40980a586ceaSMark Shellenbaum int wp_flag = 0; 4099b24ab676SJeff Bonwick 410011ceac77SAlex Reece ASSERT(dmu_tx_is_syncing(tx)); 410111ceac77SAlex Reece 4102744947dcSTom Erickson DB_DNODE_ENTER(db); 4103744947dcSTom Erickson dn = DB_DNODE(db); 4104744947dcSTom Erickson os = dn->dn_objset; 4105744947dcSTom Erickson 4106b24ab676SJeff Bonwick if (db->db_state != DB_NOFILL) { 4107b24ab676SJeff Bonwick if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 4108b24ab676SJeff Bonwick /* 4109b24ab676SJeff Bonwick * Private object buffers are released here rather 4110b24ab676SJeff Bonwick * than in dbuf_dirty() since they are only modified 4111b24ab676SJeff Bonwick * in the syncing context and we don't want the 4112b24ab676SJeff Bonwick * overhead of making multiple copies of the data. 
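 * A buffer that has never been written (hole BP) only needs an
 * arc_buf_thaw(); one with an existing BP goes through
 * dbuf_release_bp() instead, and its BPs are then remapped.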
4113b24ab676SJeff Bonwick */ 4114b24ab676SJeff Bonwick if (BP_IS_HOLE(db->db_blkptr)) { 4115b24ab676SJeff Bonwick arc_buf_thaw(data); 4116b24ab676SJeff Bonwick } else { 41173f9d6ad7SLin Ling dbuf_release_bp(db); 4118b24ab676SJeff Bonwick } 41195cabbc6bSPrashanth Sreenivasa dbuf_remap(dn, db, tx); 4120b24ab676SJeff Bonwick } 4121b24ab676SJeff Bonwick } 4122b24ab676SJeff Bonwick 4123b24ab676SJeff Bonwick if (parent != dn->dn_dbuf) { 41243e30c24aSWill Andrews /* Our parent is an indirect block. */ 41253e30c24aSWill Andrews /* We have a dirty parent that has been scheduled for write. */ 4126b24ab676SJeff Bonwick ASSERT(parent && parent->db_data_pending); 41273e30c24aSWill Andrews /* Our parent's buffer is one level closer to the dnode. */ 4128b24ab676SJeff Bonwick ASSERT(db->db_level == parent->db_level-1); 41293e30c24aSWill Andrews /* 41303e30c24aSWill Andrews * We're about to modify our parent's db_data by modifying 41313e30c24aSWill Andrews * our block pointer, so the parent must be released. 41323e30c24aSWill Andrews */ 4133b24ab676SJeff Bonwick ASSERT(arc_released(parent->db_buf)); 4134b24ab676SJeff Bonwick zio = parent->db_data_pending->dr_zio; 4135b24ab676SJeff Bonwick } else { 41363e30c24aSWill Andrews /* Our parent is the dnode itself. */ 41370a586ceaSMark Shellenbaum ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 41380a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) || 41390a586ceaSMark Shellenbaum (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 41400a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 41410a586ceaSMark Shellenbaum ASSERT3P(db->db_blkptr, ==, 41420a586ceaSMark Shellenbaum &dn->dn_phys->dn_blkptr[db->db_blkid]); 4143b24ab676SJeff Bonwick zio = dn->dn_zio; 4144b24ab676SJeff Bonwick } 4145b24ab676SJeff Bonwick 4146b24ab676SJeff Bonwick ASSERT(db->db_level == 0 || data == db->db_buf); 4147b24ab676SJeff Bonwick ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 4148b24ab676SJeff Bonwick ASSERT(zio); 4149fa9e4066Sahrens 4150b24ab676SJeff Bonwick SET_BOOKMARK(&zb, os->os_dsl_dataset ? 4151b24ab676SJeff Bonwick os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 4152b24ab676SJeff Bonwick db->db.db_object, db->db_level, db->db_blkid); 4153b24ab676SJeff Bonwick 41540a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) 41550a586ceaSMark Shellenbaum wp_flag = WP_SPILL; 41560a586ceaSMark Shellenbaum wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 41570a586ceaSMark Shellenbaum 4158adaec86aSMatthew Ahrens dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 4159eb633035STom Caputi 4160744947dcSTom Erickson DB_DNODE_EXIT(db); 4161b24ab676SJeff Bonwick 416211ceac77SAlex Reece /* 416311ceac77SAlex Reece * We copy the blkptr now (rather than when we instantiate the dirty 416411ceac77SAlex Reece * record), because its value can change between open context and 416511ceac77SAlex Reece * syncing context. We do not need to hold dn_struct_rwlock to read 416611ceac77SAlex Reece * db_blkptr because we are in syncing context. 416711ceac77SAlex Reece */ 416811ceac77SAlex Reece dr->dr_bp_copy = *db->db_blkptr; 416911ceac77SAlex Reece 41705d7b4d43SMatthew Ahrens if (db->db_level == 0 && 41715d7b4d43SMatthew Ahrens dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 41725d7b4d43SMatthew Ahrens /* 41735d7b4d43SMatthew Ahrens * The BP for this block has been provided by open context 41745d7b4d43SMatthew Ahrens * (by dmu_sync() or dmu_buf_write_embedded()). 41755d7b4d43SMatthew Ahrens */ 4176770499e1SDan Kimmel abd_t *contents = (data != NULL) ? 
4177770499e1SDan Kimmel abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 41785d7b4d43SMatthew Ahrens 41795602294fSDan Kimmel dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy, 41805602294fSDan Kimmel contents, db->db.db_size, db->db.db_size, &zp, 41818df0bcf0SPaul Dagnelie dbuf_write_override_ready, NULL, NULL, 41828df0bcf0SPaul Dagnelie dbuf_write_override_done, 418369962b56SMatthew Ahrens dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 4184b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 4185b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 4186b24ab676SJeff Bonwick zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 418780901aeaSGeorge Wilson dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 4188b24ab676SJeff Bonwick mutex_exit(&db->db_mtx); 4189b24ab676SJeff Bonwick } else if (db->db_state == DB_NOFILL) { 4190810e43b2SBill Pijewski ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 4191810e43b2SBill Pijewski zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 4192b24ab676SJeff Bonwick dr->dr_zio = zio_write(zio, os->os_spa, txg, 41935602294fSDan Kimmel &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 41948df0bcf0SPaul Dagnelie dbuf_write_nofill_ready, NULL, NULL, 41958df0bcf0SPaul Dagnelie dbuf_write_nofill_done, db, 4196b24ab676SJeff Bonwick ZIO_PRIORITY_ASYNC_WRITE, 4197b24ab676SJeff Bonwick ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 4198b24ab676SJeff Bonwick } else { 4199b24ab676SJeff Bonwick ASSERT(arc_released(data)); 42008df0bcf0SPaul Dagnelie 42018df0bcf0SPaul Dagnelie /* 42028df0bcf0SPaul Dagnelie * For indirect blocks, we want to setup the children 42038df0bcf0SPaul Dagnelie * ready callback so that we can properly handle an indirect 42048df0bcf0SPaul Dagnelie * block that only contains holes. 42058df0bcf0SPaul Dagnelie */ 4206a3874b8bSToomas Soome arc_write_done_func_t *children_ready_cb = NULL; 42078df0bcf0SPaul Dagnelie if (db->db_level != 0) 42088df0bcf0SPaul Dagnelie children_ready_cb = dbuf_write_children_ready; 42098df0bcf0SPaul Dagnelie 4210b24ab676SJeff Bonwick dr->dr_zio = arc_write(zio, os->os_spa, txg, 421111ceac77SAlex Reece &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), 4212dcbf3bd6SGeorge Wilson &zp, dbuf_write_ready, children_ready_cb, 421369962b56SMatthew Ahrens dbuf_write_physdone, dbuf_write_done, db, 421469962b56SMatthew Ahrens ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 4215b24ab676SJeff Bonwick } 4216fa9e4066Sahrens } 4217
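/*
 * A sketch of the callback sequence for an ordinary level-0 buffer
 * written via arc_write() above (no override, not NOFILL), assuming
 * the zio ends up with two physical children (e.g. copies=2):
 *
 *	dbuf_write()                   issues arc_write(), sets dr->dr_zio
 *	  dbuf_write_ready()           fill count computed, BP copied out
 *	  dbuf_write_physdone() x 2    each call retires dr_accounted / 2
 *	  dbuf_write_done()            dirty record freed, hold released
 *
 * dbuf_write_children_ready() only appears for level > 0 buffers,
 * where it runs before compression to zero out all-hole bodies.
 */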