/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		(void) refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	refcount_create(&txh->txh_space_towrite);
	refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dnode_t *dn = NULL;
	dmu_tx_hold_t *txh;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx))
		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}

/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* level-1 blocks */
		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
				if (err != 0) {
					txh->txh_tx->tx_err = err;
				}
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	(void) refcount_add_many(&txh->txh_space_towrite, DNODE_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	tx->tx_netfree = B_TRUE;
}
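
/*
 * Illustrative sketch (not part of the original source): a caller that
 * frees a range, and therefore expects no net increase in space usage,
 * would typically combine dmu_tx_hold_free() with dmu_tx_mark_netfree()
 * before assigning the transaction.  "os" and "object" are hypothetical.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	(void) dmu_free_range(os, object, 0, DMU_OBJECT_END, tx);
 *	dmu_tx_commit(tx);
 */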

static void
dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dmu_tx_t *tx;
	dnode_t *dn;
	int err;

	tx = txh->txh_tx;
	ASSERT(tx->tx_txg == 0);

	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off + len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}

			(void) refcount_add_many(&txh->txh_memory_tohold,
			    1 << dn->dn_indblkshift, FTAG);

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}
		}
		err = zio_wait(zio);
		if (err != 0) {
			tx->tx_err = err;
			return;
		}
	}
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh != NULL)
		dmu_tx_hold_free_impl(txh, off, len);
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
	if (txh != NULL)
		dmu_tx_hold_free_impl(txh, off, len);
}

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, int add, const char *name)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn;
	int err;

	ASSERT(tx->tx_txg == 0);

	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	/*
	 * Modifying an almost-full microzap is around the worst case (128KB)
	 *
	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * - 4 new blocks written if adding:
	 *    - 2 blocks for possibly split leaves,
	 *    - 2 grown ptrtbl blocks
	 */
	(void) refcount_add_many(&txh->txh_space_towrite,
	    MZAP_MAX_BLKSZ, FTAG);

	if (dn == NULL)
		return;

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 || name == NULL) {
		/*
		 * This is a microzap (only one block), or we don't know
		 * the name.  Check the first block for i/o errors.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err != 0) {
			tx->tx_err = err;
		}
	} else {
		/*
		 * Access the name so that we'll check for i/o errors to
		 * the leaf blocks, etc.  We ignore ENOENT, as this name
		 * may not yet exist.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO || err == ECKSUM || err == ENXIO) {
			tx->tx_err = err;
		}
	}
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, add, name);
}

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(dn != NULL);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, add, name);
}
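
/*
 * Illustrative sketch (not part of the original source): adding a
 * directory entry holds the parent directory's ZAP object by name, so
 * that the relevant leaf blocks can be pre-read for i/o errors before
 * the transaction is assigned.  "dzp", "name" and "value" below are
 * hypothetical.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	...
 *	error = zap_add(os, dzp->z_id, name, 8, 1, &value, tx);
 *	dmu_tx_commit(tx);
 */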

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	(void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	boolean_t match_object = B_FALSE;
	boolean_t match_offset = B_FALSE;

	DB_DNODE_ENTER(db);
	dnode_t *dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                   |     **   +
 *   1ms +                                                   v ****     +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
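
/*
 * Worked example (a sketch, not part of the original source): assuming the
 * default zfs_delay_min_dirty_percent of 60, the default zfs_delay_scale of
 * 500,000ns, and a zfs_dirty_data_max of 4GB, delays begin once more than
 * 2.4GB is dirty.  At the midpoint of the curve, i.e. 3.2GB dirty:
 *
 *	min_tx_time = zfs_delay_scale *
 *	    (3.2GB - 2.4GB) / (4GB - 3.2GB) = 500,000ns = 500us
 *
 * which matches the note above that the 500us midpoint corresponds to
 * roughly 2000 IOPS.
 */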

static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is comprised of the sum of:
 *
 *  - this transaction's holds' txh_space_towrite
 *
 *  - dd_tempreserved[], which is the sum of in-flight transactions'
 *    holds' txh_space_towrite (i.e. those transactions that have called
 *    dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 *  - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	uint64_t towrite = 0;
	uint64_t tohold = 0;
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += refcount_count(&txh->txh_space_towrite);
		tohold += refcount_count(&txh->txh_memory_tohold);
	}

	/* needed allocation: worst-case estimate of write space */
	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
	/* calculate memory footprint estimate */
	uint64_t memory = towrite + tohold;

	if (tx->tx_dir != NULL && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
		if (err != 0)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
	    txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
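
/*
 * Typical non-syncing-context usage (a sketch, not part of the original
 * source; "os", "object", "off", "len" and "buf" are hypothetical):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		... drop locks and retry from the top ...
 *	} else if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */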

int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		refcount_destroy_many(&txh->txh_space_towrite,
		    refcount_count(&txh->txh_space_towrite));
		refcount_destroy_many(&txh->txh_memory_tohold,
		    refcount_count(&txh->txh_memory_tohold));
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg == 0);

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
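
/*
 * Illustrative sketch (not part of the original source): a caller that
 * needs notification once a transaction either reaches stable storage or
 * is aborted registers a callback on the assigned tx.  "my_commit_cb" and
 * "arg" are hypothetical; the callback receives 0 on success or an error
 * code (e.g. ECANCELED for an aborted tx).
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		... release or log "arg" depending on "error" ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);
 *	dmu_tx_commit(tx);
 */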

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	if (!sa->sa_need_attr_registration)
		return;

	for (int i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh = dmu_tx_hold_object_impl(tx,
	    tx->tx_objset, object, THT_SPILL, 0, 0);

	(void) refcount_add_many(&txh->txh_space_towrite,
	    SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the update may add attributes or grow
 * variable-sized attributes; in that case the layout ZAP (if present)
 * and the spill block must also be held.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
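
/*
 * Illustrative sketch (not part of the original source): an in-place SA
 * attribute update holds the object's SA before assigning the tx and then
 * updates the attribute under that tx.  "zp", "zfsvfs" and "value" are
 * hypothetical ZPL names.
 *
 *	dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
 *	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	...
 *	error = sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
 *	    &value, sizeof (value), tx);
 *	dmu_tx_commit(tx);
 */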