xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_tx.c (revision b515258426fed6c7311fd3f1dea697cfbd4085c6)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;

	}
	l0span = nblks;    /* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVELS, but calculate it with the
		 * given datablkshift and indblkshift.  This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}
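
/*
 * Hedged usage sketch (illustrative, not taken from a specific caller):
 * a delete path that frees an object's blocks in the same transaction
 * might combine the free hold with the net-free marking, e.g.
 *
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *
 * so that refquota enforcement does not prevent removal on a full dataset.
 */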

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
		else
			txh->txh_space_towrite += MZAP_MAX_BLKSZ;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += MZAP_MAX_BLKSZ;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly. The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
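/*
 * Worked example (a sketch assuming the illumos defaults of
 * zfs_delay_min_dirty_percent = 60 and zfs_delay_scale = 500000):
 * with dirty data at 80% of zfs_dirty_data_max,
 *
 *     min_time = 500000 * (80% - 60%) / (100% - 80%) = 500000ns = 500us,
 *
 * which is the 2000-IOPS midpoint shown in the graphs above.
 */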
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
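/*
 * A minimal caller sketch (hedged; "retry", the object, and the hold shown
 * are illustrative placeholders, not taken from a specific consumer):
 *
 *	retry:
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_write(tx, object, off, len);
 *		err = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (err == ERESTART) {
 *			... drop locks ...
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			... re-acquire locks ...
 *			goto retry;
 *		} else if (err != 0) {
 *			dmu_tx_abort(tx);
 *			return (err);
 *		}
 *		... dirty the held buffers in txg dmu_tx_get_txg(tx) ...
 *		dmu_tx_commit(tx);
 */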
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func,
    void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
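
/*
 * Hedged usage sketch: a caller that wants notification once its changes
 * reach stable storage might register a callback before committing.
 * my_cb_done() and my_cb_state are hypothetical names.
 *
 *	static void
 *	my_cb_done(void *arg, int error)
 *	{
 *		... error is 0 when the txg syncs, ECANCELED if the tx
 *		    was aborted ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_cb_done, my_cb_state);
 *	dmu_tx_commit(tx);
 */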

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes; used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute, dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where it is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
1518 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
1519 {
1520 	int i;
1521 
1522 	if (!sa->sa_need_attr_registration)
1523 		return;
1524 
1525 	for (i = 0; i != sa->sa_num_attrs; i++) {
1526 		if (!sa->sa_attr_table[i].sa_registered) {
1527 			if (sa->sa_reg_attr_obj)
1528 				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
1529 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1530 			else
1531 				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
1532 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1533 		}
1534 	}
1535 }
1536 
1537 
1538 void
1539 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
1540 {
1541 	dnode_t *dn;
1542 	dmu_tx_hold_t *txh;
1543 
1544 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
1545 	    THT_SPILL, 0, 0);
1546 
1547 	dn = txh->txh_dnode;
1548 
1549 	if (dn == NULL)
1550 		return;
1551 
1552 	/* If blkptr doesn't exist then add space to towrite */
1553 	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
1554 		txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
1555 	} else {
1556 		blkptr_t *bp;
1557 
1558 		bp = &dn->dn_phys->dn_spill;
1559 		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
1560 		    bp, bp->blk_birth))
1561 			txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
1562 		else
1563 			txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
1564 		if (!BP_IS_HOLE(bp))
1565 			txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
1566 	}
1567 }
1568 
1569 void
1570 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
1571 {
1572 	sa_os_t *sa = tx->tx_objset->os_sa;
1573 
1574 	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1575 
1576 	if (tx->tx_objset->os_sa->sa_master_obj == 0)
1577 		return;
1578 
1579 	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
1580 		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1581 	else {
1582 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1583 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1584 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1585 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1586 	}
1587 
1588 	dmu_tx_sa_registration_hold(sa, tx);
1589 
1590 	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
1591 		return;
1592 
1593 	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
1594 	    THT_SPILL, 0, 0);
1595 }
1596 
/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the attribute data may grow, in which case
 * the layout ZAP must be held and the object's spill block (existing
 * or newly created) must be accounted for as well.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}