/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
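		 * Each pass of the loop below charges one existing level-0
		 * block; dmu_tx_count_twig() walks up from it, adding each
		 * block to tooverwrite if it is freeable and to towrite
		 * otherwise, and adding its on-disk size to tounref.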
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
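			 * (A write past the current end of file can force
			 * nlevels to grow; the existing block tree is then
			 * re-parented under a new blkid=0 indirect at each
			 * added level, hence one extra indirect per level.)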
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
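	 * We hold it across the dnode_next_offset()/dbuf_hold_impl()
	 * loop below (hence DNODE_FIND_HAVELOCK), and drop it before
	 * estimating the higher-level indirect overhead.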
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;

	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
Therefore this function should only 591*4bb73804SMatthew Ahrens * be called for transactions that we expect will not cause a net increase 592*4bb73804SMatthew Ahrens * in the amount of space used (but it's OK if that is occasionally not true). 593*4bb73804SMatthew Ahrens */ 594*4bb73804SMatthew Ahrens void 595*4bb73804SMatthew Ahrens dmu_tx_mark_netfree(dmu_tx_t *tx) 596*4bb73804SMatthew Ahrens { 597*4bb73804SMatthew Ahrens dmu_tx_hold_t *txh; 598*4bb73804SMatthew Ahrens 599*4bb73804SMatthew Ahrens txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, 600*4bb73804SMatthew Ahrens DMU_NEW_OBJECT, THT_FREE, 0, 0); 601*4bb73804SMatthew Ahrens 602*4bb73804SMatthew Ahrens /* 603*4bb73804SMatthew Ahrens * Pretend that this operation will free 1GB of space. This 604*4bb73804SMatthew Ahrens * should be large enough to cancel out the largest write. 605*4bb73804SMatthew Ahrens * We don't want to use something like UINT64_MAX, because that would 606*4bb73804SMatthew Ahrens * cause overflows when doing math with these values (e.g. in 607*4bb73804SMatthew Ahrens * dmu_tx_try_assign()). 608*4bb73804SMatthew Ahrens */ 609*4bb73804SMatthew Ahrens txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024; 610*4bb73804SMatthew Ahrens } 611*4bb73804SMatthew Ahrens 6128a2f1b91Sahrens void 6138a2f1b91Sahrens dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len) 614fa9e4066Sahrens { 6158a2f1b91Sahrens dmu_tx_hold_t *txh; 6168a2f1b91Sahrens dnode_t *dn; 6172f3d8780SMatthew Ahrens int err; 618ea8dc4b6Seschrock zio_t *zio; 619fa9e4066Sahrens 6208a2f1b91Sahrens ASSERT(tx->tx_txg == 0); 6218a2f1b91Sahrens 6228a2f1b91Sahrens txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, 6238a2f1b91Sahrens object, THT_FREE, off, len); 6248a2f1b91Sahrens if (txh == NULL) 6258a2f1b91Sahrens return; 6268a2f1b91Sahrens dn = txh->txh_dnode; 62769962b56SMatthew Ahrens dmu_tx_count_dnode(txh); 6288a2f1b91Sahrens 629fa9e4066Sahrens if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz) 630fa9e4066Sahrens return; 631fa9e4066Sahrens if (len == DMU_OBJECT_END) 632fa9e4066Sahrens len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off; 633fa9e4066Sahrens 634ea8dc4b6Seschrock /* 6352f3d8780SMatthew Ahrens * For i/o error checking, we read the first and last level-0 6362f3d8780SMatthew Ahrens * blocks if they are not aligned, and all the level-1 blocks. 6372f3d8780SMatthew Ahrens * 6382f3d8780SMatthew Ahrens * Note: dbuf_free_range() assumes that we have not instantiated 6392f3d8780SMatthew Ahrens * any level-0 dbufs that will be completely freed. Therefore we must 6402f3d8780SMatthew Ahrens * exercise care to not read or count the first and last blocks 6412f3d8780SMatthew Ahrens * if they are blocksize-aligned. 
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
block. So there will be at most 2 blocks total, 725fa9e4066Sahrens * including the header block. 726fa9e4066Sahrens */ 7278a2f1b91Sahrens dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift); 728fa9e4066Sahrens return; 729fa9e4066Sahrens } 730fa9e4066Sahrens 731ad135b5dSChristopher Siden ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP); 732fa9e4066Sahrens 733ea8dc4b6Seschrock if (dn->dn_maxblkid == 0 && !add) { 7349dccfd2aSAlbert Lee blkptr_t *bp; 7359dccfd2aSAlbert Lee 736fa9e4066Sahrens /* 737fa9e4066Sahrens * If there is only one block (i.e. this is a micro-zap) 738ea8dc4b6Seschrock * and we are not adding anything, the accounting is simple. 739fa9e4066Sahrens */ 740ea8dc4b6Seschrock err = dmu_tx_check_ioerr(NULL, dn, 0, 0); 741ea8dc4b6Seschrock if (err) { 742ea8dc4b6Seschrock tx->tx_err = err; 743ea8dc4b6Seschrock return; 744ea8dc4b6Seschrock } 745ea8dc4b6Seschrock 746b6130eadSmaybee /* 747b6130eadSmaybee * Use max block size here, since we don't know how much 748b6130eadSmaybee * the size will change between now and the dbuf dirty call. 749b6130eadSmaybee */ 7509dccfd2aSAlbert Lee bp = &dn->dn_phys->dn_blkptr[0]; 751fa9e4066Sahrens if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset, 7529dccfd2aSAlbert Lee bp, bp->blk_birth)) 753b6130eadSmaybee txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE; 7549dccfd2aSAlbert Lee else 755b6130eadSmaybee txh->txh_space_towrite += SPA_MAXBLOCKSIZE; 7569dccfd2aSAlbert Lee if (!BP_IS_HOLE(bp)) 757f878aa38SChris Kirby txh->txh_space_tounref += SPA_MAXBLOCKSIZE; 758fa9e4066Sahrens return; 759fa9e4066Sahrens } 760fa9e4066Sahrens 761ea8dc4b6Seschrock if (dn->dn_maxblkid > 0 && name) { 762ea8dc4b6Seschrock /* 763ea8dc4b6Seschrock * access the name in this fat-zap so that we'll check 764ea8dc4b6Seschrock * for i/o errors to the leaf blocks, etc. 765ea8dc4b6Seschrock */ 766503ad85cSMatthew Ahrens err = zap_lookup(dn->dn_objset, dn->dn_object, name, 767ea8dc4b6Seschrock 8, 0, NULL); 768ea8dc4b6Seschrock if (err == EIO) { 769ea8dc4b6Seschrock tx->tx_err = err; 770ea8dc4b6Seschrock return; 771ea8dc4b6Seschrock } 772ea8dc4b6Seschrock } 773ea8dc4b6Seschrock 774503ad85cSMatthew Ahrens err = zap_count_write(dn->dn_objset, dn->dn_object, name, add, 775720d1aa1SSanjeev Bagewadi &txh->txh_space_towrite, &txh->txh_space_tooverwrite); 776fa9e4066Sahrens 777fa9e4066Sahrens /* 778fa9e4066Sahrens * If the modified blocks are scattered to the four winds, 779fa9e4066Sahrens * we'll have to modify an indirect twig for each. 
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
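	 * (The tx holds counted here are taken in dmu_tx_try_assign()
	 * when the tx is assigned, and released by dmu_tx_unassign() or
	 * dmu_tx_commit().)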
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
	/*	return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
Or the 901fa9e4066Sahrens * might have to change the block size, 902fa9e4066Sahrens * thus dirying the new lvl=0 blk=0. 903fa9e4066Sahrens */ 904fa9e4066Sahrens if (blkid == 0) 905fa9e4066Sahrens match_offset = TRUE; 906fa9e4066Sahrens break; 907fa9e4066Sahrens case THT_FREE: 908cdb0ab79Smaybee /* 909cdb0ab79Smaybee * We will dirty all the level 1 blocks in 910cdb0ab79Smaybee * the free range and perhaps the first and 911cdb0ab79Smaybee * last level 0 block. 912cdb0ab79Smaybee */ 913cdb0ab79Smaybee if (blkid >= beginblk && (blkid <= endblk || 914cdb0ab79Smaybee txh->txh_arg2 == DMU_OBJECT_END)) 915fa9e4066Sahrens match_offset = TRUE; 916fa9e4066Sahrens break; 9170a586ceaSMark Shellenbaum case THT_SPILL: 9180a586ceaSMark Shellenbaum if (blkid == DMU_SPILL_BLKID) 9190a586ceaSMark Shellenbaum match_offset = TRUE; 9200a586ceaSMark Shellenbaum break; 921fa9e4066Sahrens case THT_BONUS: 9220a586ceaSMark Shellenbaum if (blkid == DMU_BONUS_BLKID) 923fa9e4066Sahrens match_offset = TRUE; 924fa9e4066Sahrens break; 925fa9e4066Sahrens case THT_ZAP: 926fa9e4066Sahrens match_offset = TRUE; 927fa9e4066Sahrens break; 928fa9e4066Sahrens case THT_NEWOBJECT: 929fa9e4066Sahrens match_object = TRUE; 930fa9e4066Sahrens break; 931fa9e4066Sahrens default: 9328a2f1b91Sahrens ASSERT(!"bad txh_type"); 933fa9e4066Sahrens } 934fa9e4066Sahrens } 935744947dcSTom Erickson if (match_object && match_offset) { 936744947dcSTom Erickson DB_DNODE_EXIT(db); 937fa9e4066Sahrens return; 938744947dcSTom Erickson } 939fa9e4066Sahrens } 940744947dcSTom Erickson DB_DNODE_EXIT(db); 941fa9e4066Sahrens panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n", 942fa9e4066Sahrens (u_longlong_t)db->db.db_object, db->db_level, 943fa9e4066Sahrens (u_longlong_t)db->db_blkid); 944fa9e4066Sahrens } 9459c9dc39aSek #endif 946fa9e4066Sahrens 94769962b56SMatthew Ahrens /* 94869962b56SMatthew Ahrens * If we can't do 10 iops, something is wrong. Let us go ahead 94969962b56SMatthew Ahrens * and hit zfs_dirty_data_max. 95069962b56SMatthew Ahrens */ 95169962b56SMatthew Ahrens hrtime_t zfs_delay_max_ns = MSEC2NSEC(100); 95269962b56SMatthew Ahrens int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */ 95369962b56SMatthew Ahrens 95469962b56SMatthew Ahrens /* 95569962b56SMatthew Ahrens * We delay transactions when we've determined that the backend storage 95669962b56SMatthew Ahrens * isn't able to accommodate the rate of incoming writes. 95769962b56SMatthew Ahrens * 95869962b56SMatthew Ahrens * If there is already a transaction waiting, we delay relative to when 95969962b56SMatthew Ahrens * that transaction finishes waiting. This way the calculated min_time 96069962b56SMatthew Ahrens * is independent of the number of threads concurrently executing 96169962b56SMatthew Ahrens * transactions. 96269962b56SMatthew Ahrens * 96369962b56SMatthew Ahrens * If we are the only waiter, wait relative to when the transaction 96469962b56SMatthew Ahrens * started, rather than the current time. This credits the transaction for 96569962b56SMatthew Ahrens * "time already served", e.g. reading indirect blocks. 96669962b56SMatthew Ahrens * 96769962b56SMatthew Ahrens * The minimum time for a transaction to take is calculated as: 96869962b56SMatthew Ahrens * min_time = scale * (dirty - min) / (max - dirty) 96969962b56SMatthew Ahrens * min_time is then capped at zfs_delay_max_ns. 97069962b56SMatthew Ahrens * 97169962b56SMatthew Ahrens * The delay has two degrees of freedom that can be adjusted via tunables. 
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
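 *
 * (At the midpoint the amount of dirty data sits halfway between the
 * zfs_delay_min_dirty_percent threshold and zfs_dirty_data_max, so the
 * formula above reduces to min_time == zfs_delay_scale.  As an
 * illustrative example, assuming zfs_delay_min_dirty_percent is 60 (its
 * default), zfs_delay_scale is 500us, and zfs_dirty_data_max is 4GB:
 * at 80% dirty, min_time = 500us * (3.2G - 2.4G) / (4G - 3.2G) = 500us;
 * at 97.5% dirty, min_time = 500us * 1.5G / 0.1G = 7.5ms.)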
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
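	 * (dmu_tx_wait() samples dp_dirty_total under dp_lock only after
	 * waiting for it to drop below zfs_dirty_data_max.)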
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
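		 * That is, with failmode=continue a TXG_WAIT caller still
		 * gets ERESTART (and blocks in dmu_tx_wait()); only
		 * non-waiting callers see EIO.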
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
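	 * Blocks written before the new snapshot are now referenced by
	 * it, so overwriting them allocates new space rather than
	 * reclaiming the old.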
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unref'd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory.
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
txg_how can be one of: 1248fa9e4066Sahrens * 1249fa9e4066Sahrens * (1) TXG_WAIT. If the current open txg is full, waits until there's 1250fa9e4066Sahrens * a new one. This should be used when you're not holding locks. 12513b2aab18SMatthew Ahrens * It will only fail if we're truly out of space (or over quota). 1252fa9e4066Sahrens * 1253fa9e4066Sahrens * (2) TXG_NOWAIT. If we can't assign into the current open txg without 1254fa9e4066Sahrens * blocking, returns immediately with ERESTART. This should be used 1255fa9e4066Sahrens * whenever you're holding locks. On an ERESTART error, the caller 12568a2f1b91Sahrens * should drop locks, do a dmu_tx_wait(tx), and try again, as in the illustrative sketch following this function. 125769962b56SMatthew Ahrens * 125869962b56SMatthew Ahrens * (3) TXG_WAITED. Like TXG_NOWAIT, but indicates that dmu_tx_wait() 125969962b56SMatthew Ahrens * has already been called on behalf of this operation (though 126069962b56SMatthew Ahrens * most likely on a different tx). 1261fa9e4066Sahrens */ 1262fa9e4066Sahrens int 12633b2aab18SMatthew Ahrens dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how) 1264fa9e4066Sahrens { 1265fa9e4066Sahrens int err; 1266fa9e4066Sahrens 1267fa9e4066Sahrens ASSERT(tx->tx_txg == 0); 126869962b56SMatthew Ahrens ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT || 126969962b56SMatthew Ahrens txg_how == TXG_WAITED); 1270fa9e4066Sahrens ASSERT(!dsl_pool_sync_context(tx->tx_pool)); 1271fa9e4066Sahrens 12723b2aab18SMatthew Ahrens /* If we might wait, we must not hold the config lock. */ 12733b2aab18SMatthew Ahrens ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool)); 12743b2aab18SMatthew Ahrens 127569962b56SMatthew Ahrens if (txg_how == TXG_WAITED) 127669962b56SMatthew Ahrens tx->tx_waited = B_TRUE; 127769962b56SMatthew Ahrens 12788a2f1b91Sahrens while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) { 12798a2f1b91Sahrens dmu_tx_unassign(tx); 1280fa9e4066Sahrens 1281fa9e4066Sahrens if (err != ERESTART || txg_how != TXG_WAIT) 1282fa9e4066Sahrens return (err); 1283fa9e4066Sahrens 12848a2f1b91Sahrens dmu_tx_wait(tx); 1285fa9e4066Sahrens } 1286fa9e4066Sahrens 1287fa9e4066Sahrens txg_rele_to_quiesce(&tx->tx_txgh); 1288fa9e4066Sahrens 1289fa9e4066Sahrens return (0); 1290fa9e4066Sahrens } 1291fa9e4066Sahrens
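/*
 * An illustrative sketch (not part of the original file) of the retry
 * pattern implied by the TXG_NOWAIT/TXG_WAITED contract above.  The
 * object number, offset, and length are hypothetical placeholders, and
 * a real caller would drop its own locks before waiting.
 */
static int
dmu_tx_assign_retry_example(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	boolean_t waited = B_FALSE;
	int err;

top:
	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, 0, 4096);

	err = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
	if (err != 0) {
		if (err == ERESTART) {
			/*
			 * Drop locks here, wait for the pool to make
			 * progress, then rebuild the tx and retry with
			 * TXG_WAITED so that dmu_tx_try_assign() does
			 * not apply the dirty-data delay a second time.
			 */
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);	/* e.g. ENOSPC or EDQUOT */
		return (err);
	}

	/* ... modify data covered by the holds, e.g. via dmu_write() ... */

	dmu_tx_commit(tx);
	return (0);
}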
12928a2f1b91Sahrens void 12938a2f1b91Sahrens dmu_tx_wait(dmu_tx_t *tx) 12948a2f1b91Sahrens { 12950a4e9518Sgw spa_t *spa = tx->tx_pool->dp_spa; 129669962b56SMatthew Ahrens dsl_pool_t *dp = tx->tx_pool; 12970a4e9518Sgw 12988a2f1b91Sahrens ASSERT(tx->tx_txg == 0); 12993b2aab18SMatthew Ahrens ASSERT(!dsl_pool_config_held(tx->tx_pool)); 13008a2f1b91Sahrens 130169962b56SMatthew Ahrens if (tx->tx_wait_dirty) { 130269962b56SMatthew Ahrens /* 130369962b56SMatthew Ahrens * dmu_tx_try_assign() has determined that we need to wait 130469962b56SMatthew Ahrens * because we've consumed much or all of the dirty buffer 130569962b56SMatthew Ahrens * space. 130669962b56SMatthew Ahrens */ 130769962b56SMatthew Ahrens mutex_enter(&dp->dp_lock); 130869962b56SMatthew Ahrens while (dp->dp_dirty_total >= zfs_dirty_data_max) 130969962b56SMatthew Ahrens cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock); 131069962b56SMatthew Ahrens uint64_t dirty = dp->dp_dirty_total; 131169962b56SMatthew Ahrens mutex_exit(&dp->dp_lock); 131269962b56SMatthew Ahrens 131369962b56SMatthew Ahrens dmu_tx_delay(tx, dirty); 131469962b56SMatthew Ahrens 131569962b56SMatthew Ahrens tx->tx_wait_dirty = B_FALSE; 131669962b56SMatthew Ahrens 131769962b56SMatthew Ahrens /* 131869962b56SMatthew Ahrens * Note: setting tx_waited only has effect if the caller 131969962b56SMatthew Ahrens * used TXG_WAIT. Otherwise they are going to destroy 132069962b56SMatthew Ahrens * this tx and try again. The common case, zfs_write(), 132169962b56SMatthew Ahrens * uses TXG_WAIT. 132269962b56SMatthew Ahrens */ 132369962b56SMatthew Ahrens tx->tx_waited = B_TRUE; 132469962b56SMatthew Ahrens } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) { 132569962b56SMatthew Ahrens /* 132669962b56SMatthew Ahrens * If the pool is suspended we need to wait until it 132769962b56SMatthew Ahrens * is resumed. Note that it's possible that the pool 132869962b56SMatthew Ahrens * has become active after this thread has tried to 132969962b56SMatthew Ahrens * obtain a tx. If that's the case then tx_lasttried_txg 133069962b56SMatthew Ahrens * would not have been set. 133169962b56SMatthew Ahrens */ 133269962b56SMatthew Ahrens txg_wait_synced(dp, spa_last_synced_txg(spa) + 1); 13330a4e9518Sgw } else if (tx->tx_needassign_txh) { 133469962b56SMatthew Ahrens /* 133569962b56SMatthew Ahrens * A dnode is assigned to the quiescing txg. Wait for its 133669962b56SMatthew Ahrens * transaction to complete. 133769962b56SMatthew Ahrens */ 13388a2f1b91Sahrens dnode_t *dn = tx->tx_needassign_txh->txh_dnode; 13398a2f1b91Sahrens 13408a2f1b91Sahrens mutex_enter(&dn->dn_mtx); 13418a2f1b91Sahrens while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1) 13428a2f1b91Sahrens cv_wait(&dn->dn_notxholds, &dn->dn_mtx); 13438a2f1b91Sahrens mutex_exit(&dn->dn_mtx); 13448a2f1b91Sahrens tx->tx_needassign_txh = NULL; 13458a2f1b91Sahrens } else { 13468a2f1b91Sahrens txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1); 13478a2f1b91Sahrens } 13488a2f1b91Sahrens } 13498a2f1b91Sahrens 1350fa9e4066Sahrens void 1351fa9e4066Sahrens dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta) 1352fa9e4066Sahrens { 13538a2f1b91Sahrens #ifdef ZFS_DEBUG 1354fa9e4066Sahrens if (tx->tx_dir == NULL || delta == 0) 1355fa9e4066Sahrens return; 1356fa9e4066Sahrens 1357fa9e4066Sahrens if (delta > 0) { 1358fa9e4066Sahrens ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=, 1359fa9e4066Sahrens tx->tx_space_towrite); 1360fa9e4066Sahrens (void) refcount_add_many(&tx->tx_space_written, delta, NULL); 1361fa9e4066Sahrens } else { 1362fa9e4066Sahrens (void) refcount_add_many(&tx->tx_space_freed, -delta, NULL); 1363fa9e4066Sahrens } 13648a2f1b91Sahrens #endif 1365fa9e4066Sahrens } 1366fa9e4066Sahrens 1367fa9e4066Sahrens void 1368fa9e4066Sahrens dmu_tx_commit(dmu_tx_t *tx) 1369fa9e4066Sahrens { 13708a2f1b91Sahrens dmu_tx_hold_t *txh; 1371fa9e4066Sahrens 1372fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1373fa9e4066Sahrens 13743e30c24aSWill Andrews /* 13753e30c24aSWill Andrews * Go through the transaction's hold list and remove holds on 13763e30c24aSWill Andrews * associated dnodes, notifying waiters if no holds remain.
13773e30c24aSWill Andrews */ 13788a2f1b91Sahrens while ((txh = list_head(&tx->tx_holds)) != NULL) { 13798a2f1b91Sahrens dnode_t *dn = txh->txh_dnode; 1380fa9e4066Sahrens 13818a2f1b91Sahrens list_remove(&tx->tx_holds, txh); 13828a2f1b91Sahrens kmem_free(txh, sizeof (dmu_tx_hold_t)); 1383fa9e4066Sahrens if (dn == NULL) 1384fa9e4066Sahrens continue; 1385fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 1386fa9e4066Sahrens ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); 1387fa9e4066Sahrens 1388fa9e4066Sahrens if (refcount_remove(&dn->dn_tx_holds, tx) == 0) { 1389fa9e4066Sahrens dn->dn_assigned_txg = 0; 1390fa9e4066Sahrens cv_broadcast(&dn->dn_notxholds); 1391fa9e4066Sahrens } 1392fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1393fa9e4066Sahrens dnode_rele(dn, tx); 1394fa9e4066Sahrens } 1395fa9e4066Sahrens 13968a2f1b91Sahrens if (tx->tx_tempreserve_cookie) 1397fa9e4066Sahrens dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx); 1398fa9e4066Sahrens 1399d20e665cSRicardo M. Correia if (!list_is_empty(&tx->tx_callbacks)) 1400d20e665cSRicardo M. Correia txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks); 1401d20e665cSRicardo M. Correia 1402fa9e4066Sahrens if (tx->tx_anyobj == FALSE) 1403fa9e4066Sahrens txg_rele_to_sync(&tx->tx_txgh); 1404d20e665cSRicardo M. Correia 1405d20e665cSRicardo M. Correia list_destroy(&tx->tx_callbacks); 14068f38d419Sek list_destroy(&tx->tx_holds); 14078a2f1b91Sahrens #ifdef ZFS_DEBUG 1408fa9e4066Sahrens dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n", 1409fa9e4066Sahrens tx->tx_space_towrite, refcount_count(&tx->tx_space_written), 1410fa9e4066Sahrens tx->tx_space_tofree, refcount_count(&tx->tx_space_freed)); 1411fa9e4066Sahrens refcount_destroy_many(&tx->tx_space_written, 1412fa9e4066Sahrens refcount_count(&tx->tx_space_written)); 1413fa9e4066Sahrens refcount_destroy_many(&tx->tx_space_freed, 1414fa9e4066Sahrens refcount_count(&tx->tx_space_freed)); 1415fa9e4066Sahrens #endif 1416fa9e4066Sahrens kmem_free(tx, sizeof (dmu_tx_t)); 1417fa9e4066Sahrens } 1418fa9e4066Sahrens 1419fa9e4066Sahrens void 1420fa9e4066Sahrens dmu_tx_abort(dmu_tx_t *tx) 1421fa9e4066Sahrens { 14228a2f1b91Sahrens dmu_tx_hold_t *txh; 1423fa9e4066Sahrens 1424fa9e4066Sahrens ASSERT(tx->tx_txg == 0); 1425fa9e4066Sahrens 14268a2f1b91Sahrens while ((txh = list_head(&tx->tx_holds)) != NULL) { 14278a2f1b91Sahrens dnode_t *dn = txh->txh_dnode; 1428fa9e4066Sahrens 14298a2f1b91Sahrens list_remove(&tx->tx_holds, txh); 14308a2f1b91Sahrens kmem_free(txh, sizeof (dmu_tx_hold_t)); 1431fa9e4066Sahrens if (dn != NULL) 1432fa9e4066Sahrens dnode_rele(dn, tx); 1433fa9e4066Sahrens } 1434d20e665cSRicardo M. Correia 1435d20e665cSRicardo M. Correia /* 1436d20e665cSRicardo M. Correia * Call any registered callbacks with an error code. 1437d20e665cSRicardo M. Correia */ 1438d20e665cSRicardo M. Correia if (!list_is_empty(&tx->tx_callbacks)) 1439d20e665cSRicardo M. Correia dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED); 1440d20e665cSRicardo M. Correia 1441d20e665cSRicardo M. Correia list_destroy(&tx->tx_callbacks); 14428f38d419Sek list_destroy(&tx->tx_holds); 14438a2f1b91Sahrens #ifdef ZFS_DEBUG 1444fa9e4066Sahrens refcount_destroy_many(&tx->tx_space_written, 1445fa9e4066Sahrens refcount_count(&tx->tx_space_written)); 1446fa9e4066Sahrens refcount_destroy_many(&tx->tx_space_freed, 1447fa9e4066Sahrens refcount_count(&tx->tx_space_freed)); 1448fa9e4066Sahrens #endif 1449fa9e4066Sahrens kmem_free(tx, sizeof (dmu_tx_t)); 1450fa9e4066Sahrens } 1451fa9e4066Sahrens 1452fa9e4066Sahrens uint64_t 1453fa9e4066Sahrens dmu_tx_get_txg(dmu_tx_t *tx) 1454fa9e4066Sahrens { 1455fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1456fa9e4066Sahrens return (tx->tx_txg); 1457fa9e4066Sahrens } 1458d20e665cSRicardo M. Correia 14593b2aab18SMatthew Ahrens dsl_pool_t * 14603b2aab18SMatthew Ahrens dmu_tx_pool(dmu_tx_t *tx) 14613b2aab18SMatthew Ahrens { 14623b2aab18SMatthew Ahrens ASSERT(tx->tx_pool != NULL); 14633b2aab18SMatthew Ahrens return (tx->tx_pool); 14643b2aab18SMatthew Ahrens } 14653b2aab18SMatthew Ahrens 14663b2aab18SMatthew Ahrens 1467d20e665cSRicardo M. Correia void 1468d20e665cSRicardo M. Correia dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data) 1469d20e665cSRicardo M. Correia { 1470d20e665cSRicardo M. Correia dmu_tx_callback_t *dcb; 1471d20e665cSRicardo M. Correia 1472d20e665cSRicardo M. Correia dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP); 1473d20e665cSRicardo M. Correia 1474d20e665cSRicardo M. Correia dcb->dcb_func = func; 1475d20e665cSRicardo M. Correia dcb->dcb_data = data; 1476d20e665cSRicardo M. Correia 1477d20e665cSRicardo M. Correia list_insert_tail(&tx->tx_callbacks, dcb); 1478d20e665cSRicardo M. Correia } 1479d20e665cSRicardo M. Correia 1480d20e665cSRicardo M. Correia /* 1481d20e665cSRicardo M. Correia * Call all the commit callbacks on a list, with a given error code. 1482d20e665cSRicardo M. Correia */ 1483d20e665cSRicardo M. Correia void 1484d20e665cSRicardo M. Correia dmu_tx_do_callbacks(list_t *cb_list, int error) 1485d20e665cSRicardo M. Correia { 1486d20e665cSRicardo M. Correia dmu_tx_callback_t *dcb; 1487d20e665cSRicardo M. Correia 1488d20e665cSRicardo M. Correia while ((dcb = list_head(cb_list)) != NULL) { 1489d20e665cSRicardo M. Correia list_remove(cb_list, dcb); 1490d20e665cSRicardo M. Correia dcb->dcb_func(dcb->dcb_data, error); 1491d20e665cSRicardo M. Correia kmem_free(dcb, sizeof (dmu_tx_callback_t)); 1492d20e665cSRicardo M. Correia } 1493d20e665cSRicardo M. Correia }
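/*
 * An illustrative sketch (not part of the original file) of how the
 * callback interface above is consumed; example_cb_state_t,
 * example_commit_cb() and example_register_cb() are hypothetical names,
 * not part of the DMU API.  The callback runs with error == 0 once the
 * assigned txg reaches stable storage, or with an error such as
 * ECANCELED (see dmu_tx_abort() above) if it never will.
 */
typedef struct example_cb_state {
	uint64_t ecs_txg;
} example_cb_state_t;

static void
example_commit_cb(void *arg, int error)
{
	example_cb_state_t *ecs = arg;

	/* error == 0 means the txg containing this tx has synced */
	dprintf("txg %llu callback fired, error %d\n",
	    (u_longlong_t)ecs->ecs_txg, error);
	kmem_free(ecs, sizeof (example_cb_state_t));
}

static void
example_register_cb(dmu_tx_t *tx)
{
	example_cb_state_t *ecs;

	ecs = kmem_alloc(sizeof (example_cb_state_t), KM_SLEEP);
	ecs->ecs_txg = dmu_tx_get_txg(tx);	/* tx must already be assigned */
	dmu_tx_callback_register(tx, example_commit_cb, ecs);
}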
14940a586ceaSMark Shellenbaum 14950a586ceaSMark Shellenbaum /* 14960a586ceaSMark Shellenbaum * Interface to hold a bunch of attributes, 14970a586ceaSMark Shellenbaum * used when creating new files. 14980a586ceaSMark Shellenbaum * attrsize is the total size of all attributes 14990a586ceaSMark Shellenbaum * to be added during object creation. 15000a586ceaSMark Shellenbaum * 15010a586ceaSMark Shellenbaum * For updating/adding a single attribute, dmu_tx_hold_sa() should be used. 15020a586ceaSMark Shellenbaum */ 15030a586ceaSMark Shellenbaum 15040a586ceaSMark Shellenbaum /* 15050a586ceaSMark Shellenbaum * Hold the necessary attribute name for attribute registration. 15060a586ceaSMark Shellenbaum * This should be needed only very rarely; if it does 15070a586ceaSMark Shellenbaum * happen, it will only happen on the first write to the file system.
15080a586ceaSMark Shellenbaum */ 15090a586ceaSMark Shellenbaum static void 15100a586ceaSMark Shellenbaum dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx) 15110a586ceaSMark Shellenbaum { 15120a586ceaSMark Shellenbaum int i; 15130a586ceaSMark Shellenbaum 15140a586ceaSMark Shellenbaum if (!sa->sa_need_attr_registration) 15150a586ceaSMark Shellenbaum return; 15160a586ceaSMark Shellenbaum 15170a586ceaSMark Shellenbaum for (i = 0; i != sa->sa_num_attrs; i++) { 15180a586ceaSMark Shellenbaum if (!sa->sa_attr_table[i].sa_registered) { 15190a586ceaSMark Shellenbaum if (sa->sa_reg_attr_obj) 15200a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj, 15210a586ceaSMark Shellenbaum B_TRUE, sa->sa_attr_table[i].sa_name); 15220a586ceaSMark Shellenbaum else 15230a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, 15240a586ceaSMark Shellenbaum B_TRUE, sa->sa_attr_table[i].sa_name); 15250a586ceaSMark Shellenbaum } 15260a586ceaSMark Shellenbaum } 15270a586ceaSMark Shellenbaum } 15280a586ceaSMark Shellenbaum 15290a586ceaSMark Shellenbaum 15300a586ceaSMark Shellenbaum void 15310a586ceaSMark Shellenbaum dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object) 15320a586ceaSMark Shellenbaum { 15330a586ceaSMark Shellenbaum dnode_t *dn; 15340a586ceaSMark Shellenbaum dmu_tx_hold_t *txh; 15350a586ceaSMark Shellenbaum 15360a586ceaSMark Shellenbaum txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object, 15370a586ceaSMark Shellenbaum THT_SPILL, 0, 0); 15380a586ceaSMark Shellenbaum 15390a586ceaSMark Shellenbaum dn = txh->txh_dnode; 15400a586ceaSMark Shellenbaum 15410a586ceaSMark Shellenbaum if (dn == NULL) 15420a586ceaSMark Shellenbaum return; 15430a586ceaSMark Shellenbaum 15440a586ceaSMark Shellenbaum /* If blkptr doesn't exist then add space to towrite */ 15459dccfd2aSAlbert Lee if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) { 15460a586ceaSMark Shellenbaum txh->txh_space_towrite += SPA_MAXBLOCKSIZE; 15470a586ceaSMark Shellenbaum } else { 15489dccfd2aSAlbert Lee blkptr_t *bp; 15499dccfd2aSAlbert Lee 15509dccfd2aSAlbert Lee bp = &dn->dn_phys->dn_spill; 15510a586ceaSMark Shellenbaum if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset, 1552c7cd2421SGeorge Wilson bp, bp->blk_birth)) 15530a586ceaSMark Shellenbaum txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE; 15540a586ceaSMark Shellenbaum else 15550a586ceaSMark Shellenbaum txh->txh_space_towrite += SPA_MAXBLOCKSIZE; 15569dccfd2aSAlbert Lee if (!BP_IS_HOLE(bp)) 15570a586ceaSMark Shellenbaum txh->txh_space_tounref += SPA_MAXBLOCKSIZE; 15580a586ceaSMark Shellenbaum } 15590a586ceaSMark Shellenbaum } 15600a586ceaSMark Shellenbaum 15610a586ceaSMark Shellenbaum void 15620a586ceaSMark Shellenbaum dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize) 15630a586ceaSMark Shellenbaum { 15640a586ceaSMark Shellenbaum sa_os_t *sa = tx->tx_objset->os_sa; 15650a586ceaSMark Shellenbaum 15660a586ceaSMark Shellenbaum dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 15670a586ceaSMark Shellenbaum 15680a586ceaSMark Shellenbaum if (tx->tx_objset->os_sa->sa_master_obj == 0) 15690a586ceaSMark Shellenbaum return; 15700a586ceaSMark Shellenbaum 15710a586ceaSMark Shellenbaum if (tx->tx_objset->os_sa->sa_layout_attr_obj) 15720a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); 15730a586ceaSMark Shellenbaum else { 15740a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); 15750a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); 15760a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, 
DMU_NEW_OBJECT, B_TRUE, NULL); 15770a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 15780a586ceaSMark Shellenbaum } 15790a586ceaSMark Shellenbaum 15800a586ceaSMark Shellenbaum dmu_tx_sa_registration_hold(sa, tx); 15810a586ceaSMark Shellenbaum 15820a586ceaSMark Shellenbaum if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill) 15830a586ceaSMark Shellenbaum return; 15840a586ceaSMark Shellenbaum 15850a586ceaSMark Shellenbaum (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT, 15860a586ceaSMark Shellenbaum THT_SPILL, 0, 0); 15870a586ceaSMark Shellenbaum } 15880a586ceaSMark Shellenbaum 15890a586ceaSMark Shellenbaum /* 15900a586ceaSMark Shellenbaum * Hold SA attribute 15910a586ceaSMark Shellenbaum * 15920a586ceaSMark Shellenbaum * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow) 15930a586ceaSMark Shellenbaum * 15940a586ceaSMark Shellenbaum * may_grow indicates that the update may add attributes or grow existing 15950a586ceaSMark Shellenbaum * ones, in which case the layout ZAP and (potentially) the spill block 15960a586ceaSMark Shellenbaum * must also be held. An illustrative usage sketch follows at the end of this file. 15970a586ceaSMark Shellenbaum */ 15980a586ceaSMark Shellenbaum void 15990a586ceaSMark Shellenbaum dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow) 16000a586ceaSMark Shellenbaum { 16010a586ceaSMark Shellenbaum uint64_t object; 16020a586ceaSMark Shellenbaum sa_os_t *sa = tx->tx_objset->os_sa; 16030a586ceaSMark Shellenbaum 16040a586ceaSMark Shellenbaum ASSERT(hdl != NULL); 16050a586ceaSMark Shellenbaum 16060a586ceaSMark Shellenbaum object = sa_handle_object(hdl); 16070a586ceaSMark Shellenbaum 16080a586ceaSMark Shellenbaum dmu_tx_hold_bonus(tx, object); 16090a586ceaSMark Shellenbaum 16100a586ceaSMark Shellenbaum if (tx->tx_objset->os_sa->sa_master_obj == 0) 16110a586ceaSMark Shellenbaum return; 16120a586ceaSMark Shellenbaum 16130a586ceaSMark Shellenbaum if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 || 16140a586ceaSMark Shellenbaum tx->tx_objset->os_sa->sa_layout_attr_obj == 0) { 16150a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); 16160a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); 16170a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 16180a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 16190a586ceaSMark Shellenbaum } 16200a586ceaSMark Shellenbaum 16210a586ceaSMark Shellenbaum dmu_tx_sa_registration_hold(sa, tx); 16220a586ceaSMark Shellenbaum 16230a586ceaSMark Shellenbaum if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj) 16240a586ceaSMark Shellenbaum dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); 16250a586ceaSMark Shellenbaum 1626744947dcSTom Erickson if (sa->sa_force_spill || may_grow || hdl->sa_spill) { 16270a586ceaSMark Shellenbaum ASSERT(tx->tx_txg == 0); 16280a586ceaSMark Shellenbaum dmu_tx_hold_spill(tx, object); 1629744947dcSTom Erickson } else { 1630744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus; 1631744947dcSTom Erickson dnode_t *dn; 1632744947dcSTom Erickson 1633744947dcSTom Erickson DB_DNODE_ENTER(db); 1634744947dcSTom Erickson dn = DB_DNODE(db); 1635744947dcSTom Erickson if (dn->dn_have_spill) { 1636744947dcSTom Erickson ASSERT(tx->tx_txg == 0); 1637744947dcSTom Erickson dmu_tx_hold_spill(tx, object); 1638744947dcSTom Erickson } 1639744947dcSTom Erickson DB_DNODE_EXIT(db); 16400a586ceaSMark Shellenbaum } 16410a586ceaSMark Shellenbaum } 1642
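/*
 * An illustrative sketch (not part of the original file) of a caller
 * honoring the SA hold contract above: hold the attribute, assign the
 * tx, then update under it.  example_sa_set_size() and its parameters
 * are hypothetical; real consumers (e.g. zfs_setattr()) look up
 * attribute ids in their own attribute table and typically use the
 * TXG_NOWAIT retry loop sketched after dmu_tx_assign().
 */
static int
example_sa_set_size(objset_t *os, sa_handle_t *hdl,
    sa_attr_type_t size_attr, uint64_t size)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	/* B_FALSE: a fixed-size attribute, so the layout cannot grow */
	dmu_tx_hold_sa(tx, hdl, B_FALSE);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = sa_update(hdl, size_attr, &size, sizeof (size), tx);
	dmu_tx_commit(tx);
	return (err);
}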