/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

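/*
 * Raise the dnode's indirection level in this txg: create the new
 * top-level indirect dbuf, copy the block pointers embedded in the
 * dnode into it, and reparent any cached child dbufs onto the new
 * indirect buffer.
 */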
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
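		/*
		 * This child already has a parent other than the
		 * dnode's own dbuf, which means it has already been
		 * pointed at the new indirect block; skip it.
		 */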
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

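/*
 * Free the blocks referenced by the "num" block pointers at "bp",
 * charging the reclaimed space back against the dnode.  When the
 * hole_birth feature is active, each cleared bp keeps its logical
 * size, type, level, and a current birth txg so that zfs send can
 * tell when the hole was punched.
 */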
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining the birth time makes it possible to
		 * detect when holes were punched, which reduces the
		 * number of free records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
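/*
 * Verify that the children of a level-1 indirect block in the range
 * [start, end] have really been freed: any dirty data for this txg
 * and any remaining cached db_data must be all zeros.
 */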
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

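/*
 * Free the part of the indirect block "db" that overlaps the range
 * [blkid, blkid + nblks), recursing into lower-level indirect blocks
 * as necessary.  If every block pointer in this indirect block ends
 * up as a hole, free the indirect block itself as well.
 */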
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourselves too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/*
		 * We only found holes. Grab the rwlock to prevent
		 * anybody from reading the blocks we're about to
		 * zero out.
		 */
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		bzero(db->db.db_data, db->db.db_size);
		rw_exit(&dn->dn_struct_rwlock);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, tx);
			dbuf_rele(db, FTAG);
		}
	}

	if (trunc) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;

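/*
 * Callback invoked by range_tree_vacate() for each range being freed.
 * The dnode's mutex is held on entry; drop it across the actual free,
 * which may need to perform I/O, and reacquire it before returning.
 */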
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t db_marker;
	dmu_buf_impl_t *db, *db_next;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef	DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    refcount_is_zero(&db->db_holds)) {
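			/*
			 * Leave a marker in dn_dbufs so we can find
			 * our place again after dbuf_destroy() removes
			 * db from the AVL tree.
			 */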
			db_marker.db_level = db->db_level;
			db_marker.db_blkid = db->db_blkid;
			db_marker.db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, &db_marker, db,
			    AVL_BEFORE);

			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
			avl_remove(&dn->dn_dbufs, &db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	dnode_evict_bonus(dn);
}

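/*
 * Evict the dnode's bonus buffer if it is not held; otherwise mark it
 * to be evicted when its last hold is released.
 */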
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}

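/*
 * Throw away the dirty records on the given list (recursing through
 * the children of indirect dbufs) and release the holds that were
 * taken when the dbufs were dirtied.
 */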
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}

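/*
 * Sync a dnode that is being freed in this txg: discard its dirty
 * state, evict its dbufs, zero the on-disk dnode, and reset the
 * in-core dnode so its object number can be reused.
 */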
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	if (dmu_objset_userused_enabled(dn->dn_objset) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it. */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}
	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

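	/*
	 * Apply any object property changes (type, block size, bonus
	 * length and type, indirect block shift) that were recorded
	 * in open context for this txg.
	 */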
	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}