/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */

#include <sys/dbuf.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/dsl_dataset.h>

/*
 * Each of the concurrent object allocators will grab
 * 2^dmu_object_alloc_chunk_shift dnode slots at a time.  The default is to
 * grab 128 slots, which is 4 blocks worth.  This was experimentally
 * determined to be the lowest value that eliminates the measurable effect
 * of lock contention from this code path.
 */
int dmu_object_alloc_chunk_shift = 7;

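/*
 * Allocate a new object in the given objset and return its object number.
 * Object numbers are handed out from per-CPU chunks of the dnode space so
 * that concurrent allocators rarely contend on os_obj_lock; the global
 * allocator under that lock is consulted only when a CPU's chunk has been
 * used up.
 */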
static uint64_t
dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	uint64_t object;
	uint64_t L1_dnode_count = DNODES_PER_BLOCK <<
	    (DMU_META_DNODE(os)->dn_indblkshift - SPA_BLKPTRSHIFT);
	dnode_t *dn = NULL;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	boolean_t restarted = B_FALSE;
	uint64_t *cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
	    os->os_obj_next_percpu_len];
	int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
	int error;

	if (dn_slots == 0) {
		dn_slots = DNODE_MIN_SLOTS;
	} else {
		ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
		ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
	}

	/*
	 * The "chunk" of dnodes that is assigned to a CPU-specific
	 * allocator needs to be at least one block's worth, to avoid
	 * lock contention on the dbuf.  It can be at most one L1 block's
	 * worth, so that the "rescan after polishing off a L1's worth"
	 * logic below will be sure to kick in.
	 */
	if (dnodes_per_chunk < DNODES_PER_BLOCK)
		dnodes_per_chunk = DNODES_PER_BLOCK;
	if (dnodes_per_chunk > L1_dnode_count)
		dnodes_per_chunk = L1_dnode_count;

	object = *cpuobj;

	for (;;) {
		/*
		 * If we finished a chunk of dnodes, get a new one from
		 * the global allocator.
		 */
		if ((P2PHASE(object, dnodes_per_chunk) == 0) ||
		    (P2PHASE(object + dn_slots - 1, dnodes_per_chunk) <
		    dn_slots)) {
			DNODE_STAT_BUMP(dnode_alloc_next_chunk);
			mutex_enter(&os->os_obj_lock);
			ASSERT0(P2PHASE(os->os_obj_next_chunk,
			    dnodes_per_chunk));
			object = os->os_obj_next_chunk;

			/*
			 * Each time we polish off a L1 bp worth of dnodes
			 * (2^12 objects), move to another L1 bp that's
			 * still reasonably sparse (at most 1/4 full). Look
			 * from the beginning at most once per txg. If we
			 * still can't allocate from that L1 block, search
			 * for an empty L0 block, which will quickly skip
			 * to the end of the metadnode if no nearby L0
			 * blocks are empty. This fallback avoids a
			 * pathology where full dnode blocks containing
			 * large dnodes appear sparse because they have a
			 * low blk_fill, leading to many failed allocation
			 * attempts. In the long term a better mechanism to
			 * search for sparse metadnode regions, such as
			 * spacemaps, could be implemented.
			 *
			 * os_scan_dnodes is set during txg sync if enough
			 * objects have been freed since the previous
			 * rescan to justify backfilling again.
			 *
			 * Note that dmu_traverse depends on the behavior
			 * that we use multiple blocks of the dnode object
			 * before going back to reuse objects. Any change
			 * to this algorithm should preserve that property
			 * or find another solution to the issues described
			 * in traverse_visitbp.
			 */
			if (P2PHASE(object, L1_dnode_count) == 0) {
				uint64_t offset;
				uint64_t blkfill;
				int minlvl;
				if (os->os_rescan_dnodes) {
					offset = 0;
					os->os_rescan_dnodes = B_FALSE;
				} else {
					offset = object << DNODE_SHIFT;
				}
				blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
				minlvl = restarted ? 1 : 2;
				restarted = B_TRUE;
				error = dnode_next_offset(DMU_META_DNODE(os),
				    DNODE_FIND_HOLE, &offset, minlvl,
				    blkfill, 0);
				if (error == 0) {
					object = offset >> DNODE_SHIFT;
				}
			}
			/*
			 * Note: if "restarted", we may find a L0 that
			 * is not suitably aligned.
			 */
			os->os_obj_next_chunk =
			    P2ALIGN(object, dnodes_per_chunk) +
			    dnodes_per_chunk;
			(void) atomic_swap_64(cpuobj, object);
			mutex_exit(&os->os_obj_lock);
		}

		/*
		 * The value of (*cpuobj) before adding dn_slots is the object
		 * ID assigned to us.  The value afterwards is the object ID
		 * assigned to whoever wants to do an allocation next.
		 */
		object = atomic_add_64_nv(cpuobj, dn_slots) - dn_slots;

		/*
		 * XXX We should check for an i/o error here and return
		 * up to our caller.  Actually we should pre-read it in
		 * dmu_tx_assign(), but there is currently no mechanism
		 * to do so.
		 */
		error = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
		    dn_slots, FTAG, &dn);
		if (error == 0) {
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
			/*
			 * Another thread could have allocated it; check
			 * again now that we have the struct lock.
			 */
			if (dn->dn_type == DMU_OT_NONE) {
				dnode_allocate(dn, ot, blocksize, 0,
				    bonustype, bonuslen, dn_slots, tx);
				rw_exit(&dn->dn_struct_rwlock);
				dmu_tx_add_new_object(tx, dn);
				dnode_rele(dn, FTAG);
				return (object);
			}
			rw_exit(&dn->dn_struct_rwlock);
			dnode_rele(dn, FTAG);
			DNODE_STAT_BUMP(dnode_alloc_race);
		}

		/*
		 * Skip to next known valid starting point on error. This
		 * is the start of the next block of dnodes.
		 */
		if (dmu_object_next(os, &object, B_TRUE, 0) != 0) {
			object = P2ROUNDUP(object + 1, DNODES_PER_BLOCK);
			DNODE_STAT_BUMP(dnode_alloc_next_block);
		}
		(void) atomic_swap_64(cpuobj, object);
	}
}

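/*
 * Public wrappers around dmu_object_alloc_impl().  They differ only in
 * whether the caller specifies an indirect block shift
 * (dmu_object_alloc_ibs) and/or an explicit dnode size
 * (dmu_object_alloc_dnsize); passing 0 selects the defaults.
 */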
uint64_t
dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, 0, tx));
}

uint64_t
dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
	    bonustype, bonuslen, 0, tx));
}

uint64_t
dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, dnodesize, tx));
}

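/*
 * Allocate an object with a specific, caller-chosen object number.  The
 * object number must not already be in use.  dmu_object_claim_dnsize()
 * additionally lets the caller specify the dnode size.
 */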
int
dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_claim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, 0, tx));
}

int
dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;
	ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
	ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);

	if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
	    FTAG, &dn);
	if (err)
		return (err);
	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
	dmu_tx_add_new_object(tx, dn);

	dnode_rele(dn, FTAG);

	return (0);
}

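/*
 * Reallocate an existing object in place, giving it the specified type,
 * block size, and bonus buffer while keeping the same object number.
 * dmu_object_reclaim_dnsize() additionally lets the caller specify the
 * dnode size and whether an existing spill block should be preserved.
 */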
int
dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_reclaim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, DNODE_MIN_SIZE, B_FALSE, tx));
}

int
dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, int dnodesize,
    boolean_t keep_spill, dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;

	if (object == DMU_META_DNODE_OBJECT)
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, dn_slots,
	    keep_spill, tx);

	dnode_rele(dn, FTAG);
	return (err);
}

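/*
 * Remove the spill block from the given object, if it has one.
 */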
int
dmu_object_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		dbuf_rm_spill(dn, tx);
		dnode_rm_spill(dn, tx);
	}
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
	return (err);
}

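/*
 * Free the given object: create a free range over all of its data (so
 * that its indirect blocks are not leaked) and then free the dnode itself.
 */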
int
dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	ASSERT(object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	ASSERT(dn->dn_type != DMU_OT_NONE);
	/*
	 * If we don't create this free range, we'll leak indirect blocks when
	 * we get to freeing the dnode in syncing context.
	 */
	dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
	dnode_free(dn, tx);
	dnode_rele(dn, FTAG);

	return (0);
}

/*
 * Return (in *objectp) the next object which is allocated (or a hole)
 * after *object, taking into account only objects that may have been modified
 * after the specified txg.
 */
int
dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
{
	uint64_t offset;
	uint64_t start_obj;
	struct dsl_dataset *ds = os->os_dsl_dataset;
	int error;

	if (*objectp == 0) {
		start_obj = 1;
	} else if (ds && ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE]) {
		uint64_t i = *objectp + 1;
		uint64_t last_obj = *objectp | (DNODES_PER_BLOCK - 1);
		dmu_object_info_t doi;

		/*
		 * Scan through the remaining meta dnode block. The contents
		 * of each slot in the block are known so it can be quickly
		 * checked. If the block is exhausted without a match then
		 * hand off to dnode_next_offset() for further scanning.
		 */
		while (i <= last_obj) {
			error = dmu_object_info(os, i, &doi);
			if (error == ENOENT) {
				if (hole) {
					*objectp = i;
					return (0);
				} else {
					i++;
				}
			} else if (error == EEXIST) {
				i++;
			} else if (error == 0) {
				if (hole) {
					i += doi.doi_dnodesize >> DNODE_SHIFT;
				} else {
					*objectp = i;
					return (0);
				}
			} else {
				return (error);
			}
		}

		start_obj = i;
	} else {
		start_obj = *objectp + 1;
	}

	offset = start_obj << DNODE_SHIFT;

	error = dnode_next_offset(DMU_META_DNODE(os),
	    (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);

	*objectp = offset >> DNODE_SHIFT;

	return (error);
}

/*
 * Turn this object from old_type into DMU_OTN_ZAP_METADATA, and bump the
 * refcount on SPA_FEATURE_EXTENSIBLE_DATASET.
 *
 * Only for use from syncing context, on MOS objects.
 */
void
dmu_object_zapify(objset_t *mos, uint64_t object, dmu_object_type_t old_type,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	if (dn->dn_type == DMU_OTN_ZAP_METADATA) {
		dnode_rele(dn, FTAG);
		return;
	}
	ASSERT3U(dn->dn_type, ==, old_type);
	ASSERT0(dn->dn_maxblkid);

	/*
	 * We must initialize the ZAP data before changing the type,
	 * so that concurrent calls to *_is_zapified() can determine if
	 * the object has been completely zapified by checking the type.
	 */
	mzap_create_impl(mos, object, 0, 0, tx);

	dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
	    DMU_OTN_ZAP_METADATA;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);

	spa_feature_incr(dmu_objset_spa(mos),
	    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
}

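/*
 * Free an object that may have been zapified with dmu_object_zapify().
 * If it was, also drop the SPA_FEATURE_EXTENSIBLE_DATASET refcount that
 * dmu_object_zapify() took.
 */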
void
dmu_object_free_zapified(objset_t *mos, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	dmu_object_type_t t;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	t = dn->dn_type;
	dnode_rele(dn, FTAG);

	if (t == DMU_OTN_ZAP_METADATA) {
		spa_feature_decr(dmu_objset_spa(mos),
		    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
	}
	VERIFY0(dmu_object_free(mos, object, tx));
}