/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

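/*
 * Called in syncing context when dn_next_nlevels records that this
 * dnode grew an indirection level this txg: hold a new top-level
 * indirect block, copy the dnode's block pointers into it, reparent
 * any cached child dbufs onto it, and zero out dn_blkptr.  Note that
 * txgoff (tx_txg & TXG_MASK) indexes the per-txg dirty-state arrays.
 */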
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

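/*
 * Free "num" block pointers starting at *bp, crediting the freed
 * bytes back to the dnode's space accounting.  When the hole_birth
 * feature is active, each freed BP is rewritten as a hole that keeps
 * the block's logical size, type, and level, plus a birth time of
 * this txg, so zfs send can tell when the hole was punched.
 */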
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
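/*
 * Debug-only sanity check: every level-0 block in the freed range
 * [start, end] under the level-1 indirect db must be zero-filled,
 * both in its dirty record for this txg and in any cached db_data
 * (unless the child is mid-fill or dirty in a future txg).
 */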
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

/*
 * We don't usually free the indirect blocks here.  If in one txg we have a
 * free_range and a write to the same indirect block, it's important that we
 * preserve the hole's birth times. Therefore, we don't free any indirect
 * blocks in free_children().  If an indirect block happens to turn into all
 * holes, it will be freed by dbuf_write_children_ready(), which happens at a
 * point in the syncing process where we know for certain the contents of the
 * indirect block.
 *
 * However, if we're freeing a dnode, its space accounting must go to zero
 * before we actually try to free the dnode, or we will trip an assertion. In
 * addition, we know the case described above cannot occur, because the dnode
 * is being freed.  Therefore, we free the indirect blocks immediately in that
 * case.
 */
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	/*
	 * If we modify this indirect block, and we are not freeing the
	 * dnode (!free_indirects), then this indirect block needs to get
	 * written to disk by dbuf_write().  If it is dirty, we know it will
	 * be written (otherwise, we would have incorrect on-disk state
	 * because the space would be freed but still referenced by the BP
	 * in this indirect block).  Therefore we VERIFY that it is
	 * dirty.
	 *
	 * Our VERIFY covers some cases that do not actually have to be
	 * dirty, but the open-context code happens to dirty.  E.g. if the
	 * blocks we are freeing are all holes, because in that case, we
	 * are only freeing part of this indirect block, so it is an
	 * ancestor of the first or last block to be freed.  The first and
	 * last L1 indirect blocks are always dirtied by dnode_free_range().
	 */
	VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

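	/*
	 * For example, with a 128K indirect block (dn_indblkshift = 17)
	 * and 128-byte block pointers (SPA_BLKPTRSHIFT = 7), epbs = 10,
	 * so each indirect block covers 1024 children.  At db_level = 2,
	 * shift = 10 and a level-0 blkid of 2048 maps to child index 2.
	 * start/end above are the freed range in this block's units,
	 * clamped to the range this block actually covers.
	 */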
	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, free_indirects, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	if (free_indirects) {
		for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
			ASSERT(BP_IS_HOLE(bp));
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, free_indirects, tx);
			dbuf_rele(db, FTAG);
		}
	}

	/*
	 * Do not truncate the maxblkid if we are performing a raw
	 * receive. The raw receive sets the maxblkid manually and
	 * must not be overridden. Usually, the last DRR_FREE record
	 * will be at the maxblkid, because the source system sets
	 * the maxblkid when truncating. However, if the last block
	 * was freed by overwriting with zeros and being compressed
	 * away to a hole, the source system will generate a DRR_FREE
	 * record while leaving the maxblkid after the end of that
	 * record. In this case we need to leave the maxblkid as
	 * indicated in the DRR_OBJECT record, so that it matches the
	 * source system, ensuring that the cryptographic hashes will
	 * match.
	 */
	if (trunc && !dn->dn_objset->os_raw_receive) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
	boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;

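/*
 * range_tree_vacate() callback.  The caller (dnode_sync) holds
 * dn_mtx across the vacate, but the free itself takes
 * dn_struct_rwlock and dbuf locks, so we drop dn_mtx around the
 * call and reacquire it before returning.
 */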
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks,
	    dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t db_marker;
	dmu_buf_impl_t *db, *db_next;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef	DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    zfs_refcount_is_zero(&db->db_holds)) {
			db_marker.db_level = db->db_level;
			db_marker.db_blkid = db->db_blkid;
			db_marker.db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, &db_marker, db,
			    AVL_BEFORE);

			/*
			 * We need to use the "marker" dbuf rather than
			 * simply getting the next dbuf, because
			 * dbuf_destroy() may actually remove multiple dbufs.
			 * It can call itself recursively on the parent dbuf,
			 * which may also be removed from dn_dbufs.  The code
			 * flow would look like:
			 *
			 * dbuf_destroy():
			 *   dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
			 *	if (!cacheable || pending_evict)
			 *	  dbuf_destroy()
			 */
			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
			avl_remove(&dn->dn_dbufs, &db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	dnode_evict_bonus(dn);
}

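/*
 * Evict the bonus buffer if nothing holds it; otherwise flag it so
 * the final dbuf_rele() destroys it.
 */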
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}

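/*
 * Discard the dirty records on the given list without writing them
 * out, recursing into any indirect records' children and dropping
 * the hold each dirty record had on its dbuf.  Used when the dnode
 * is being freed, so none of its dirty state needs to reach disk.
 */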
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
	}
}

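/*
 * Finish freeing the dnode in this syncing txg: discard its dirty
 * state, evict its dbufs, zero dn_phys, reset the in-core dnode to
 * its unallocated state, and drop the syncing hold, after which the
 * dnode may be evicted.
 */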
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;
	dn->dn_next_maxblkid[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
	dnode_free_interior_slots(dn);

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	dn->dn_num_slots = 1;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	/*
	 * Do user accounting if it is enabled and this is not
	 * an encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		if (dmu_objset_userobjused_enabled(dn->dn_objset))
			dn->dn_phys->dn_flags |=
			    DNODE_FLAG_USEROBJUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USEROBJUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	dnp->dn_extra_slots = dn->dn_num_slots - 1;

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <=
		    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		dsfra.dsfra_free_indirects = freeing_dnode;
		if (freeing_dnode) {
			ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
			    0, dn->dn_maxblkid + 1));
		}
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

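	/*
	 * A dnode spanning more than one slot implies the large_dnode
	 * feature, so flag it for activation on this dataset.
	 */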
	if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_DNODE] =
		    B_TRUE;
		mutex_exit(&ds->ds_lock);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	/*
	 * This must be done after dnode_sync_free_range()
	 * and dnode_increase_indirection(). See dnode_new_blkid()
	 * for an explanation of the high bit being set.
	 */
	if (dn->dn_next_maxblkid[txgoff]) {
		mutex_enter(&dn->dn_mtx);
		dnp->dn_maxblkid =
		    dn->dn_next_maxblkid[txgoff] & ~DMU_NEXT_MAXBLKID_SET;
		dn->dn_next_maxblkid[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}