1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
24  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
25  */
26 
27 /*
28  * This file contains the top half of the zfs directory structure
29  * implementation. The bottom half is in zap_leaf.c.
30  *
31  * The zdir is an extendable hash data structure. There is a table of
32  * pointers to buckets (zap_t->zd_data->zd_leafs). The buckets are
33  * each a constant size and hold a variable number of directory entries.
34  * The buckets (aka "leaf nodes") are implemented in zap_leaf.c.
35  *
36  * The pointer table holds a power-of-2 number of pointers
37  * (1<<zap_t->zd_data->zd_phys->zd_prefix_len).  The bucket pointed to
38  * by the pointer at index i in the table holds entries whose hash value
39  * has a zd_prefix_len-bit prefix equal to i.
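 *
 * For example, with zd_prefix_len == 2 the table holds 4 pointers, and
 * an entry whose 64-bit hash begins with the bits 10 is found through
 * the bucket pointed to by table entry 2 (binary 10).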
40  */
41 
42 #include <sys/spa.h>
43 #include <sys/dmu.h>
44 #include <sys/zfs_context.h>
45 #include <sys/zfs_znode.h>
46 #include <sys/fs/zfs.h>
47 #include <sys/zap.h>
48 #include <sys/refcount.h>
49 #include <sys/zap_impl.h>
50 #include <sys/zap_leaf.h>
51 
52 int fzap_default_block_shift = 14; /* 16k blocksize */
53 
54 extern inline zap_phys_t *zap_f_phys(zap_t *zap);
55 
56 static uint64_t zap_allocate_blocks(zap_t *zap, int nblocks);
57 
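/*
 * Byteswap one block of a fatzap object.  Leaf blocks need entry-aware
 * swapping (zap_leaf_byteswap()); the header block and pointer-table
 * blocks are plain arrays of uint64_t.
 */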
58 void
59 fzap_byteswap(void *vbuf, size_t size)
60 {
61 	uint64_t block_type = *(uint64_t *)vbuf;
62 
63 	if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF))
64 		zap_leaf_byteswap(vbuf, size);
65 	else {
66 		/* it's a ptrtbl block */
67 		byteswap_uint64_array(vbuf, size);
68 	}
69 }
70 
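/*
 * Initialize a fatzap in the object's header block (which may previously
 * have held a microzap): zero the block, fill in the zap_phys_t, point
 * every embedded pointer-table entry at block 1, and set up block 1 as
 * the first, empty leaf.
 */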
71 void
72 fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
73 {
74 	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
75 	zap->zap_ismicro = FALSE;
76 
77 	zap->zap_dbu.dbu_evict_func_sync = zap_evict_sync;
78 	zap->zap_dbu.dbu_evict_func_async = NULL;
79 
80 	mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
81 	zap->zap_f.zap_block_shift = highbit64(zap->zap_dbuf->db_size) - 1;
82 
83 	zap_phys_t *zp = zap_f_phys(zap);
84 	/*
85 	 * explicitly zero it since it might be coming from an
86 	 * initialized microzap
87 	 */
88 	bzero(zap->zap_dbuf->db_data, zap->zap_dbuf->db_size);
89 	zp->zap_block_type = ZBT_HEADER;
90 	zp->zap_magic = ZAP_MAGIC;
91 
92 	zp->zap_ptrtbl.zt_shift = ZAP_EMBEDDED_PTRTBL_SHIFT(zap);
93 
94 	zp->zap_freeblk = 2;		/* block 1 will be the first leaf */
95 	zp->zap_num_leafs = 1;
96 	zp->zap_num_entries = 0;
97 	zp->zap_salt = zap->zap_salt;
98 	zp->zap_normflags = zap->zap_normflags;
99 	zp->zap_flags = flags;
100 
101 	/* block 1 will be the first leaf */
102 	for (int i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++)
103 		ZAP_EMBEDDED_PTRTBL_ENT(zap, i) = 1;
104 
105 	/*
106 	 * set up block 1 - the first leaf
107 	 */
108 	dmu_buf_t *db;
109 	VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
110 	    1<<FZAP_BLOCK_SHIFT(zap), FTAG, &db, DMU_READ_NO_PREFETCH));
111 	dmu_buf_will_dirty(db, tx);
112 
113 	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
114 	l->l_dbuf = db;
115 
116 	zap_leaf_init(l, zp->zap_normflags != 0);
117 
118 	kmem_free(l, sizeof (zap_leaf_t));
119 	dmu_buf_rele(db, FTAG);
120 }
121 
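/*
 * Try to obtain the zap write lock without dropping the lock we already
 * hold.  Returns nonzero if we now hold it as writer (dirtying the
 * header dbuf if an upgrade was actually performed), or 0 if the
 * non-blocking upgrade failed.
 */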
122 static int
123 zap_tryupgradedir(zap_t *zap, dmu_tx_t *tx)
124 {
125 	if (RW_WRITE_HELD(&zap->zap_rwlock))
126 		return (1);
127 	if (rw_tryupgrade(&zap->zap_rwlock)) {
128 		dmu_buf_will_dirty(zap->zap_dbuf, tx);
129 		return (1);
130 	}
131 	return (0);
132 }
133 
134 /*
135  * Generic routines for dealing with the pointer & cookie tables.
136  */
137 
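/*
 * Double the size of a table.  The copy is incremental: each call moves
 * one block of the old table to the new location, expanding every entry
 * into two via transfer_func.  Once all blocks have been copied, the old
 * range is freed and the table header is switched to the new location.
 */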
138 static int
139 zap_table_grow(zap_t *zap, zap_table_phys_t *tbl,
140     void (*transfer_func)(const uint64_t *src, uint64_t *dst, int n),
141     dmu_tx_t *tx)
142 {
143 	uint64_t newblk;
144 	int bs = FZAP_BLOCK_SHIFT(zap);
145 	int hepb = 1<<(bs-4);
146 	/* hepb = half the number of entries in a block */
147 
148 	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
149 	ASSERT(tbl->zt_blk != 0);
150 	ASSERT(tbl->zt_numblks > 0);
151 
152 	if (tbl->zt_nextblk != 0) {
153 		newblk = tbl->zt_nextblk;
154 	} else {
155 		newblk = zap_allocate_blocks(zap, tbl->zt_numblks * 2);
156 		tbl->zt_nextblk = newblk;
157 		ASSERT0(tbl->zt_blks_copied);
158 		dmu_prefetch(zap->zap_objset, zap->zap_object, 0,
159 		    tbl->zt_blk << bs, tbl->zt_numblks << bs,
160 		    ZIO_PRIORITY_SYNC_READ);
161 	}
162 
163 	/*
164 	 * Copy the ptrtbl from the old to new location.
165 	 */
166 
167 	uint64_t b = tbl->zt_blks_copied;
168 	dmu_buf_t *db_old;
169 	int err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
170 	    (tbl->zt_blk + b) << bs, FTAG, &db_old, DMU_READ_NO_PREFETCH);
171 	if (err != 0)
172 		return (err);
173 
174 	/* first half of entries in old[b] go to new[2*b+0] */
175 	dmu_buf_t *db_new;
176 	VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
177 	    (newblk + 2*b+0) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
178 	dmu_buf_will_dirty(db_new, tx);
179 	transfer_func(db_old->db_data, db_new->db_data, hepb);
180 	dmu_buf_rele(db_new, FTAG);
181 
182 	/* second half of entries in old[b] go to new[2*b+1] */
183 	VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
184 	    (newblk + 2*b+1) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
185 	dmu_buf_will_dirty(db_new, tx);
186 	transfer_func((uint64_t *)db_old->db_data + hepb,
187 	    db_new->db_data, hepb);
188 	dmu_buf_rele(db_new, FTAG);
189 
190 	dmu_buf_rele(db_old, FTAG);
191 
192 	tbl->zt_blks_copied++;
193 
194 	dprintf("copied block %llu of %llu\n",
195 	    tbl->zt_blks_copied, tbl->zt_numblks);
196 
197 	if (tbl->zt_blks_copied == tbl->zt_numblks) {
198 		(void) dmu_free_range(zap->zap_objset, zap->zap_object,
199 		    tbl->zt_blk << bs, tbl->zt_numblks << bs, tx);
200 
201 		tbl->zt_blk = newblk;
202 		tbl->zt_numblks *= 2;
203 		tbl->zt_shift++;
204 		tbl->zt_nextblk = 0;
205 		tbl->zt_blks_copied = 0;
206 
207 		dprintf("finished; numblocks now %llu (%lluk entries)\n",
208 		    tbl->zt_numblks, 1ULL<<(tbl->zt_shift-10));
209 	}
210 
211 	return (0);
212 }
213 
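/*
 * Store val at index idx of the table.  If a grow is in progress
 * (zt_nextblk != 0), also store it into the two corresponding entries
 * of the new, doubled copy so that the copy stays up to date.
 */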
214 static int
215 zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val,
216     dmu_tx_t *tx)
217 {
218 	int bs = FZAP_BLOCK_SHIFT(zap);
219 
220 	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
221 	ASSERT(tbl->zt_blk != 0);
222 
223 	dprintf("storing %llx at index %llx\n", val, idx);
224 
225 	uint64_t blk = idx >> (bs-3);
226 	uint64_t off = idx & ((1<<(bs-3))-1);
227 
228 	dmu_buf_t *db;
229 	int err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
230 	    (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
231 	if (err != 0)
232 		return (err);
233 	dmu_buf_will_dirty(db, tx);
234 
235 	if (tbl->zt_nextblk != 0) {
236 		uint64_t idx2 = idx * 2;
237 		uint64_t blk2 = idx2 >> (bs-3);
238 		uint64_t off2 = idx2 & ((1<<(bs-3))-1);
239 		dmu_buf_t *db2;
240 
241 		err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
242 		    (tbl->zt_nextblk + blk2) << bs, FTAG, &db2,
243 		    DMU_READ_NO_PREFETCH);
244 		if (err != 0) {
245 			dmu_buf_rele(db, FTAG);
246 			return (err);
247 		}
248 		dmu_buf_will_dirty(db2, tx);
249 		((uint64_t *)db2->db_data)[off2] = val;
250 		((uint64_t *)db2->db_data)[off2+1] = val;
251 		dmu_buf_rele(db2, FTAG);
252 	}
253 
254 	((uint64_t *)db->db_data)[off] = val;
255 	dmu_buf_rele(db, FTAG);
256 
257 	return (0);
258 }
259 
260 static int
261 zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp)
262 {
263 	int bs = FZAP_BLOCK_SHIFT(zap);
264 
265 	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
266 
267 	uint64_t blk = idx >> (bs-3);
268 	uint64_t off = idx & ((1<<(bs-3))-1);
269 
270 	/*
271 	 * Note: this is equivalent to dmu_buf_hold(), but we use
272 	 * _dnode_enter / _by_dnode because it's faster: we don't have
273 	 * to take a hold on the dnode.
274 	 */
275 	dnode_t *dn = dmu_buf_dnode_enter(zap->zap_dbuf);
276 	dmu_buf_t *db;
277 	int err = dmu_buf_hold_by_dnode(dn,
278 	    (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
279 	dmu_buf_dnode_exit(zap->zap_dbuf);
280 	if (err != 0)
281 		return (err);
282 	*valp = ((uint64_t *)db->db_data)[off];
283 	dmu_buf_rele(db, FTAG);
284 
285 	if (tbl->zt_nextblk != 0) {
286 		/*
287 		 * Read the nextblk for the sake of i/o error checking,
288 		 * so that zap_table_load() will catch errors for
289 		 * zap_table_store().
290 		 */
291 		blk = (idx*2) >> (bs-3);
292 
293 		dn = dmu_buf_dnode_enter(zap->zap_dbuf);
294 		err = dmu_buf_hold_by_dnode(dn,
295 		    (tbl->zt_nextblk + blk) << bs, FTAG, &db,
296 		    DMU_READ_NO_PREFETCH);
297 		dmu_buf_dnode_exit(zap->zap_dbuf);
298 		if (err == 0)
299 			dmu_buf_rele(db, FTAG);
300 	}
301 	return (err);
302 }
303 
304 /*
305  * Routines for growing the ptrtbl.
306  */
307 
308 static void
309 zap_ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n)
310 {
311 	for (int i = 0; i < n; i++) {
312 		uint64_t lb = src[i];
313 		dst[2 * i + 0] = lb;
314 		dst[2 * i + 1] = lb;
315 	}
316 }
317 
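/*
 * Grow the pointer table so that leaves can use one more bit of hash
 * prefix.  The first growth moves the embedded ptrtbl out of the header
 * block into its own block; later growths go through zap_table_grow().
 */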
318 static int
319 zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx)
320 {
321 	/*
322 	 * The pointer table should never use more hash bits than we
323 	 * have (otherwise we'd be using useless zero bits to index it).
324 	 * If we are within 2 bits of running out, stop growing, since
325 	 * this is already an aberrant condition.
326 	 */
327 	if (zap_f_phys(zap)->zap_ptrtbl.zt_shift >= zap_hashbits(zap) - 2)
328 		return (SET_ERROR(ENOSPC));
329 
330 	if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
331 		/*
332 		 * We are outgrowing the "embedded" ptrtbl (the one
333 		 * stored in the header block).  Give it its own entire
334 		 * block, which will double the size of the ptrtbl.
335 		 */
336 		ASSERT3U(zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==,
337 		    ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
338 		ASSERT0(zap_f_phys(zap)->zap_ptrtbl.zt_blk);
339 
340 		uint64_t newblk = zap_allocate_blocks(zap, 1);
341 		dmu_buf_t *db_new;
342 		int err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
343 		    newblk << FZAP_BLOCK_SHIFT(zap), FTAG, &db_new,
344 		    DMU_READ_NO_PREFETCH);
345 		if (err != 0)
346 			return (err);
347 		dmu_buf_will_dirty(db_new, tx);
348 		zap_ptrtbl_transfer(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
349 		    db_new->db_data, 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
350 		dmu_buf_rele(db_new, FTAG);
351 
352 		zap_f_phys(zap)->zap_ptrtbl.zt_blk = newblk;
353 		zap_f_phys(zap)->zap_ptrtbl.zt_numblks = 1;
354 		zap_f_phys(zap)->zap_ptrtbl.zt_shift++;
355 
356 		ASSERT3U(1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==,
357 		    zap_f_phys(zap)->zap_ptrtbl.zt_numblks <<
358 		    (FZAP_BLOCK_SHIFT(zap)-3));
359 
360 		return (0);
361 	} else {
362 		return (zap_table_grow(zap, &zap_f_phys(zap)->zap_ptrtbl,
363 		    zap_ptrtbl_transfer, tx));
364 	}
365 }
366 
367 static void
368 zap_increment_num_entries(zap_t *zap, int delta, dmu_tx_t *tx)
369 {
370 	dmu_buf_will_dirty(zap->zap_dbuf, tx);
371 	mutex_enter(&zap->zap_f.zap_num_entries_mtx);
372 	ASSERT(delta > 0 || zap_f_phys(zap)->zap_num_entries >= -delta);
373 	zap_f_phys(zap)->zap_num_entries += delta;
374 	mutex_exit(&zap->zap_f.zap_num_entries_mtx);
375 }
376 
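/*
 * Allocate nblocks consecutive new blocks at the end of the object by
 * advancing zap_freeblk.  Block numbers are never reused, so no free
 * list is needed.
 */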
377 static uint64_t
378 zap_allocate_blocks(zap_t *zap, int nblocks)
379 {
380 	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
381 	uint64_t newblk = zap_f_phys(zap)->zap_freeblk;
382 	zap_f_phys(zap)->zap_freeblk += nblocks;
383 	return (newblk);
384 }
385 
386 static void
387 zap_leaf_evict_sync(void *dbu)
388 {
389 	zap_leaf_t *l = dbu;
390 
391 	rw_destroy(&l->l_rwlock);
392 	kmem_free(l, sizeof (zap_leaf_t));
393 }
394 
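/*
 * Allocate and initialize a new, empty leaf block.  The leaf is returned
 * write-locked, with a hold on its dbuf and registered as the dbuf's
 * user data.
 */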
395 static zap_leaf_t *
396 zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
397 {
398 	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
399 
400 	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
401 
402 	rw_init(&l->l_rwlock, 0, 0, 0);
403 	rw_enter(&l->l_rwlock, RW_WRITER);
404 	l->l_blkid = zap_allocate_blocks(zap, 1);
405 	l->l_dbuf = NULL;
406 
407 	VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
408 	    l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf,
409 	    DMU_READ_NO_PREFETCH));
410 	dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf);
411 	VERIFY3P(NULL, ==, dmu_buf_set_user(l->l_dbuf, &l->l_dbu));
412 	dmu_buf_will_dirty(l->l_dbuf, tx);
413 
414 	zap_leaf_init(l, zap->zap_normflags != 0);
415 
416 	zap_f_phys(zap)->zap_num_leafs++;
417 
418 	return (l);
419 }
420 
421 int
422 fzap_count(zap_t *zap, uint64_t *count)
423 {
424 	ASSERT(!zap->zap_ismicro);
425 	mutex_enter(&zap->zap_f.zap_num_entries_mtx); /* unnecessary */
426 	*count = zap_f_phys(zap)->zap_num_entries;
427 	mutex_exit(&zap->zap_f.zap_num_entries_mtx);
428 	return (0);
429 }
430 
431 /*
432  * Routines for obtaining zap_leaf_t's
433  */
434 
435 void
436 zap_put_leaf(zap_leaf_t *l)
437 {
438 	rw_exit(&l->l_rwlock);
439 	dmu_buf_rele(l->l_dbuf, NULL);
440 }
441 
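/*
 * Construct the in-memory zap_leaf_t for an existing leaf block and
 * attach it to the dbuf as user data.  If another thread attached one
 * first, discard ours and use the winner's.
 */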
442 static zap_leaf_t *
443 zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
444 {
445 	ASSERT(blkid != 0);
446 
447 	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
448 	rw_init(&l->l_rwlock, 0, 0, 0);
449 	rw_enter(&l->l_rwlock, RW_WRITER);
450 	l->l_blkid = blkid;
451 	l->l_bs = highbit64(db->db_size) - 1;
452 	l->l_dbuf = db;
453 
454 	dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf);
455 	zap_leaf_t *winner = dmu_buf_set_user(db, &l->l_dbu);
456 
457 	rw_exit(&l->l_rwlock);
458 	if (winner != NULL) {
459 		/* someone else set it first */
460 		zap_leaf_evict_sync(&l->l_dbu);
461 		l = winner;
462 	}
463 
464 	/*
465 	 * lh_pad1 was previously used for the next leaf in the leaf
466 	 * chain.  There should be no chained leaves (as we have removed
467 	 * support for them).
468 	 */
469 	ASSERT0(zap_leaf_phys(l)->l_hdr.lh_pad1);
470 
471 	/*
472 	 * There should be more hash entries than there can be
473 	 * chunks to put in the hash table
474 	 */
475 	ASSERT3U(ZAP_LEAF_HASH_NUMENTRIES(l), >, ZAP_LEAF_NUMCHUNKS(l) / 3);
476 
477 	/* The chunks should begin at the end of the hash table */
478 	ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==,
479 	    &zap_leaf_phys(l)->l_hash[ZAP_LEAF_HASH_NUMENTRIES(l)]);
480 
481 	/* The chunks should end at the end of the block */
482 	ASSERT3U((uintptr_t)&ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)) -
483 	    (uintptr_t)zap_leaf_phys(l), ==, l->l_dbuf->db_size);
484 
485 	return (l);
486 }
487 
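/*
 * Get the leaf at block blkid, locked as lt, creating the in-memory
 * zap_leaf_t if it isn't already attached to the dbuf.  Write access
 * dirties the block.
 */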
488 static int
489 zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
490     zap_leaf_t **lp)
491 {
492 	dmu_buf_t *db;
493 
494 	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
495 
496 	int bs = FZAP_BLOCK_SHIFT(zap);
497 	dnode_t *dn = dmu_buf_dnode_enter(zap->zap_dbuf);
498 	int err = dmu_buf_hold_by_dnode(dn,
499 	    blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH);
500 	dmu_buf_dnode_exit(zap->zap_dbuf);
501 	if (err != 0)
502 		return (err);
503 
504 	ASSERT3U(db->db_object, ==, zap->zap_object);
505 	ASSERT3U(db->db_offset, ==, blkid << bs);
506 	ASSERT3U(db->db_size, ==, 1 << bs);
507 	ASSERT(blkid != 0);
508 
509 	zap_leaf_t *l = dmu_buf_get_user(db);
510 
511 	if (l == NULL)
512 		l = zap_open_leaf(blkid, db);
513 
514 	rw_enter(&l->l_rwlock, lt);
515 	/*
516 	 * Must lock before dirtying, otherwise zap_leaf_phys(l) could change,
517 	 * causing ASSERT below to fail.
518 	 */
519 	if (lt == RW_WRITER)
520 		dmu_buf_will_dirty(db, tx);
521 	ASSERT3U(l->l_blkid, ==, blkid);
522 	ASSERT3P(l->l_dbuf, ==, db);
523 	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_block_type, ==, ZBT_LEAF);
524 	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);
525 
526 	*lp = l;
527 	return (0);
528 }
529 
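/*
 * Read the leaf block number stored at index idx of the pointer table,
 * using the embedded table if the ptrtbl has not yet been moved out of
 * the header block.
 */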
530 static int
531 zap_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t *valp)
532 {
533 	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
534 
535 	if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
536 		ASSERT3U(idx, <,
537 		    (1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift));
538 		*valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx);
539 		return (0);
540 	} else {
541 		return (zap_table_load(zap, &zap_f_phys(zap)->zap_ptrtbl,
542 		    idx, valp));
543 	}
544 }
545 
546 static int
547 zap_set_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t blk, dmu_tx_t *tx)
548 {
549 	ASSERT(tx != NULL);
550 	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
551 
552 	if (zap_f_phys(zap)->zap_ptrtbl.zt_blk == 0) {
553 		ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) = blk;
554 		return (0);
555 	} else {
556 		return (zap_table_store(zap, &zap_f_phys(zap)->zap_ptrtbl,
557 		    idx, blk, tx));
558 	}
559 }
560 
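/*
 * Find the leaf that covers hash value h and return it locked as lt.
 */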
561 static int
562 zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp)
563 {
564 	uint64_t blk;
565 
566 	ASSERT(zap->zap_dbuf == NULL ||
567 	    zap_f_phys(zap) == zap->zap_dbuf->db_data);
568 
569 	/* Reality check for corrupt zap objects (leaf or header). */
570 	if ((zap_f_phys(zap)->zap_block_type != ZBT_LEAF &&
571 	    zap_f_phys(zap)->zap_block_type != ZBT_HEADER) ||
572 	    zap_f_phys(zap)->zap_magic != ZAP_MAGIC) {
573 		return (SET_ERROR(EIO));
574 	}
575 
576 	uint64_t idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
577 	int err = zap_idx_to_blk(zap, idx, &blk);
578 	if (err != 0)
579 		return (err);
580 	err = zap_get_leaf_byblk(zap, blk, tx, lt, lp);
581 
582 	ASSERT(err ||
583 	    ZAP_HASH_IDX(h, zap_leaf_phys(*lp)->l_hdr.lh_prefix_len) ==
584 	    zap_leaf_phys(*lp)->l_hdr.lh_prefix);
585 	return (err);
586 }
587 
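/*
 * Split leaf l so that the bucket covering zn's hash can take another
 * entry.  This may require the zap write lock (and growing the pointer
 * table first); to get it we may drop and re-acquire the zap lock, so
 * zn->zn_zap may change.  On success, *lp is the write-locked leaf that
 * now covers the hash.
 */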
588 static int
589 zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l,
590     void *tag, dmu_tx_t *tx, zap_leaf_t **lp)
591 {
592 	zap_t *zap = zn->zn_zap;
593 	uint64_t hash = zn->zn_hash;
594 	int err;
595 	int old_prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len;
596 
597 	ASSERT3U(old_prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
598 	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
599 
600 	ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
601 	    zap_leaf_phys(l)->l_hdr.lh_prefix);
602 
603 	if (zap_tryupgradedir(zap, tx) == 0 ||
604 	    old_prefix_len == zap_f_phys(zap)->zap_ptrtbl.zt_shift) {
605 		/* We failed to upgrade, or need to grow the pointer table */
606 		objset_t *os = zap->zap_objset;
607 		uint64_t object = zap->zap_object;
608 
609 		zap_put_leaf(l);
610 		zap_unlockdir(zap, tag);
611 		err = zap_lockdir(os, object, tx, RW_WRITER,
612 		    FALSE, FALSE, tag, &zn->zn_zap);
613 		zap = zn->zn_zap;
614 		if (err != 0)
615 			return (err);
616 		ASSERT(!zap->zap_ismicro);
617 
618 		while (old_prefix_len ==
619 		    zap_f_phys(zap)->zap_ptrtbl.zt_shift) {
620 			err = zap_grow_ptrtbl(zap, tx);
621 			if (err != 0)
622 				return (err);
623 		}
624 
625 		err = zap_deref_leaf(zap, hash, tx, RW_WRITER, &l);
626 		if (err != 0)
627 			return (err);
628 
629 		if (zap_leaf_phys(l)->l_hdr.lh_prefix_len != old_prefix_len) {
630 			/* it split while our locks were down */
631 			*lp = l;
632 			return (0);
633 		}
634 	}
635 	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
636 	ASSERT3U(old_prefix_len, <, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
637 	ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
638 	    zap_leaf_phys(l)->l_hdr.lh_prefix);
639 
640 	int prefix_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
641 	    (old_prefix_len + 1);
642 	uint64_t sibling =
643 	    (ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff;
644 
645 	/* check for i/o errors before doing zap_leaf_split */
646 	for (int i = 0; i < (1ULL << prefix_diff); i++) {
647 		uint64_t blk;
648 		err = zap_idx_to_blk(zap, sibling + i, &blk);
649 		if (err != 0)
650 			return (err);
651 		ASSERT3U(blk, ==, l->l_blkid);
652 	}
653 
654 	zap_leaf_t *nl = zap_create_leaf(zap, tx);
655 	zap_leaf_split(l, nl, zap->zap_normflags != 0);
656 
657 	/* set sibling pointers */
658 	for (int i = 0; i < (1ULL << prefix_diff); i++) {
659 		err = zap_set_idx_to_blk(zap, sibling + i, nl->l_blkid, tx);
660 		ASSERT0(err); /* we checked for i/o errors above */
661 	}
662 
663 	if (hash & (1ULL << (64 - zap_leaf_phys(l)->l_hdr.lh_prefix_len))) {
664 		/* we want the sibling */
665 		zap_put_leaf(l);
666 		*lp = nl;
667 	} else {
668 		zap_put_leaf(nl);
669 		*lp = l;
670 	}
671 
672 	return (0);
673 }
674 
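/*
 * Release leaf l.  If the leaf was nearly full while already using every
 * pointer-table bit, or a ptrtbl grow is already in progress, try to
 * grow the pointer table now (upgrading to the zap write lock if
 * possible).
 */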
675 static void
676 zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_leaf_t *l,
677     void *tag, dmu_tx_t *tx)
678 {
679 	zap_t *zap = zn->zn_zap;
680 	int shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
681 	int leaffull = (zap_leaf_phys(l)->l_hdr.lh_prefix_len == shift &&
682 	    zap_leaf_phys(l)->l_hdr.lh_nfree < ZAP_LEAF_LOW_WATER);
683 
684 	zap_put_leaf(l);
685 
686 	if (leaffull || zap_f_phys(zap)->zap_ptrtbl.zt_nextblk) {
687 		/*
688 		 * We are in the middle of growing the pointer table, or
689 		 * this leaf will soon make us grow it.
690 		 */
691 		if (zap_tryupgradedir(zap, tx) == 0) {
692 			objset_t *os = zap->zap_objset;
693 			uint64_t zapobj = zap->zap_object;
694 
695 			zap_unlockdir(zap, tag);
696 			int err = zap_lockdir(os, zapobj, tx,
697 			    RW_WRITER, FALSE, FALSE, tag, &zn->zn_zap);
698 			zap = zn->zn_zap;
699 			if (err != 0)
700 				return;
701 		}
702 
703 		/* could have finished growing while our locks were down */
704 		if (zap_f_phys(zap)->zap_ptrtbl.zt_shift == shift)
705 			(void) zap_grow_ptrtbl(zap, tx);
706 	}
707 }
708 
709 static int
710 fzap_checkname(zap_name_t *zn)
711 {
712 	if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
713 		return (SET_ERROR(ENAMETOOLONG));
714 	return (0);
715 }
716 
717 static int
718 fzap_checksize(uint64_t integer_size, uint64_t num_integers)
719 {
720 	/* Only integer sizes supported by C */
721 	switch (integer_size) {
722 	case 1:
723 	case 2:
724 	case 4:
725 	case 8:
726 		break;
727 	default:
728 		return (SET_ERROR(EINVAL));
729 	}
730 
731 	if (integer_size * num_integers > ZAP_MAXVALUELEN)
732 		return (SET_ERROR(E2BIG));
733 
734 	return (0);
735 }
736 
737 static int
738 fzap_check(zap_name_t *zn, uint64_t integer_size, uint64_t num_integers)
739 {
740 	int err = fzap_checkname(zn);
741 	if (err != 0)
742 		return (err);
743 	return (fzap_checksize(integer_size, num_integers));
744 }
745 
746 /*
747  * Routines for manipulating attributes.
748  */
749 int
750 fzap_lookup(zap_name_t *zn,
751     uint64_t integer_size, uint64_t num_integers, void *buf,
752     char *realname, int rn_len, boolean_t *ncp)
753 {
754 	zap_leaf_t *l;
755 	zap_entry_handle_t zeh;
756 
757 	int err = fzap_checkname(zn);
758 	if (err != 0)
759 		return (err);
760 
761 	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
762 	if (err != 0)
763 		return (err);
764 	err = zap_leaf_lookup(l, zn, &zeh);
765 	if (err == 0) {
766 		if ((err = fzap_checksize(integer_size, num_integers)) != 0) {
767 			zap_put_leaf(l);
768 			return (err);
769 		}
770 
771 		err = zap_entry_read(&zeh, integer_size, num_integers, buf);
772 		(void) zap_entry_read_name(zn->zn_zap, &zeh, rn_len, realname);
773 		if (ncp) {
774 			*ncp = zap_entry_normalization_conflict(&zeh,
775 			    zn, NULL, zn->zn_zap);
776 		}
777 	}
778 
779 	zap_put_leaf(l);
780 	return (err);
781 }
782 
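/*
 * Add an entry with an explicit collision differentiator (cd);
 * ZAP_NEED_CD means allocate one.  zap_expand_leaf() may drop and
 * re-acquire the zap lock, so zn->zn_zap may change across this call.
 */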
783 int
784 fzap_add_cd(zap_name_t *zn,
785     uint64_t integer_size, uint64_t num_integers,
786     const void *val, uint32_t cd, void *tag, dmu_tx_t *tx)
787 {
788 	zap_leaf_t *l;
789 	int err;
790 	zap_entry_handle_t zeh;
791 	zap_t *zap = zn->zn_zap;
792 
793 	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
794 	ASSERT(!zap->zap_ismicro);
795 	ASSERT(fzap_check(zn, integer_size, num_integers) == 0);
796 
797 	err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
798 	if (err != 0)
799 		return (err);
800 retry:
801 	err = zap_leaf_lookup(l, zn, &zeh);
802 	if (err == 0) {
803 		err = SET_ERROR(EEXIST);
804 		goto out;
805 	}
806 	if (err != ENOENT)
807 		goto out;
808 
809 	err = zap_entry_create(l, zn, cd,
810 	    integer_size, num_integers, val, &zeh);
811 
812 	if (err == 0) {
813 		zap_increment_num_entries(zap, 1, tx);
814 	} else if (err == EAGAIN) {
815 		err = zap_expand_leaf(zn, l, tag, tx, &l);
816 		zap = zn->zn_zap;	/* zap_expand_leaf() may change zap */
817 		if (err == 0)
818 			goto retry;
819 	}
820 
821 out:
822 	if (zap != NULL)
823 		zap_put_leaf_maybe_grow_ptrtbl(zn, l, tag, tx);
824 	return (err);
825 }
826 
827 int
828 fzap_add(zap_name_t *zn,
829     uint64_t integer_size, uint64_t num_integers,
830     const void *val, void *tag, dmu_tx_t *tx)
831 {
832 	int err = fzap_check(zn, integer_size, num_integers);
833 	if (err != 0)
834 		return (err);
835 
836 	return (fzap_add_cd(zn, integer_size, num_integers,
837 	    val, ZAP_NEED_CD, tag, tx));
838 }
839 
840 int
841 fzap_update(zap_name_t *zn,
842     int integer_size, uint64_t num_integers, const void *val,
843     void *tag, dmu_tx_t *tx)
844 {
845 	zap_leaf_t *l;
846 	int err;
847 	boolean_t create;
848 	zap_entry_handle_t zeh;
849 	zap_t *zap = zn->zn_zap;
850 
851 	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
852 	err = fzap_check(zn, integer_size, num_integers);
853 	if (err != 0)
854 		return (err);
855 
856 	err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
857 	if (err != 0)
858 		return (err);
859 retry:
860 	err = zap_leaf_lookup(l, zn, &zeh);
861 	create = (err == ENOENT);
862 	ASSERT(err == 0 || err == ENOENT);
863 
864 	if (create) {
865 		err = zap_entry_create(l, zn, ZAP_NEED_CD,
866 		    integer_size, num_integers, val, &zeh);
867 		if (err == 0)
868 			zap_increment_num_entries(zap, 1, tx);
869 	} else {
870 		err = zap_entry_update(&zeh, integer_size, num_integers, val);
871 	}
872 
873 	if (err == EAGAIN) {
874 		err = zap_expand_leaf(zn, l, tag, tx, &l);
875 		zap = zn->zn_zap;	/* zap_expand_leaf() may change zap */
876 		if (err == 0)
877 			goto retry;
878 	}
879 
880 	if (zap != NULL)
881 		zap_put_leaf_maybe_grow_ptrtbl(zn, l, tag, tx);
882 	return (err);
883 }
884 
885 int
886 fzap_length(zap_name_t *zn,
887     uint64_t *integer_size, uint64_t *num_integers)
888 {
889 	zap_leaf_t *l;
890 	int err;
891 	zap_entry_handle_t zeh;
892 
893 	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
894 	if (err != 0)
895 		return (err);
896 	err = zap_leaf_lookup(l, zn, &zeh);
897 	if (err != 0)
898 		goto out;
899 
900 	if (integer_size != 0)
901 		*integer_size = zeh.zeh_integer_size;
902 	if (num_integers != 0)
903 		*num_integers = zeh.zeh_num_integers;
904 out:
905 	zap_put_leaf(l);
906 	return (err);
907 }
908 
909 int
910 fzap_remove(zap_name_t *zn, dmu_tx_t *tx)
911 {
912 	zap_leaf_t *l;
913 	int err;
914 	zap_entry_handle_t zeh;
915 
916 	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, tx, RW_WRITER, &l);
917 	if (err != 0)
918 		return (err);
919 	err = zap_leaf_lookup(l, zn, &zeh);
920 	if (err == 0) {
921 		zap_entry_remove(&zeh);
922 		zap_increment_num_entries(zn->zn_zap, -1, tx);
923 	}
924 	zap_put_leaf(l);
925 	return (err);
926 }
927 
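/*
 * Start an asynchronous prefetch of the leaf block covering zn's hash;
 * errors are ignored.
 */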
928 void
929 fzap_prefetch(zap_name_t *zn)
930 {
931 	uint64_t blk;
932 	zap_t *zap = zn->zn_zap;
933 
934 	uint64_t idx = ZAP_HASH_IDX(zn->zn_hash,
935 	    zap_f_phys(zap)->zap_ptrtbl.zt_shift);
936 	if (zap_idx_to_blk(zap, idx, &blk) != 0)
937 		return;
938 	int bs = FZAP_BLOCK_SHIFT(zap);
939 	dmu_prefetch(zap->zap_objset, zap->zap_object, 0, blk << bs, 1 << bs,
940 	    ZIO_PRIORITY_SYNC_READ);
941 }
942 
943 /*
944  * Helper functions for consumers.
945  */
946 
947 uint64_t
948 zap_create_link(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
949     const char *name, dmu_tx_t *tx)
950 {
951 	uint64_t new_obj = zap_create(os, ot, DMU_OT_NONE, 0, tx);
952 	VERIFY(new_obj != 0);
953 	VERIFY0(zap_add(os, parent_obj, name, sizeof (uint64_t), 1, &new_obj,
954 	    tx));
955 
956 	return (new_obj);
957 }
958 
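/*
 * Scan the zap for the first entry whose first integer matches value
 * under mask (mask == 0 means compare all 64 bits) and copy its name
 * into name.  Returns ENOENT if nothing matches.
 */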
959 int
960 zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask,
961     char *name)
962 {
963 	zap_cursor_t zc;
964 	int err;
965 
966 	if (mask == 0)
967 		mask = -1ULL;
968 
969 	zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
970 	for (zap_cursor_init(&zc, os, zapobj);
971 	    (err = zap_cursor_retrieve(&zc, za)) == 0;
972 	    zap_cursor_advance(&zc)) {
973 		if ((za->za_first_integer & mask) == (value & mask)) {
974 			(void) strcpy(name, za->za_name);
975 			break;
976 		}
977 	}
978 	zap_cursor_fini(&zc);
979 	kmem_free(za, sizeof (*za));
980 	return (err);
981 }
982 
983 int
984 zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx)
985 {
986 	zap_cursor_t zc;
987 	int err = 0;
988 
989 	zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
990 	for (zap_cursor_init(&zc, os, fromobj);
991 	    zap_cursor_retrieve(&zc, za) == 0;
992 	    (void) zap_cursor_advance(&zc)) {
993 		if (za->za_integer_length != 8 || za->za_num_integers != 1) {
994 			err = SET_ERROR(EINVAL);
995 			break;
996 		}
997 		err = zap_add(os, intoobj, za->za_name,
998 		    8, 1, &za->za_first_integer, tx);
999 		if (err != 0)
1000 			break;
1001 	}
1002 	zap_cursor_fini(&zc);
1003 	kmem_free(za, sizeof (*za));
1004 	return (err);
1005 }
1006 
1007 int
1008 zap_join_key(objset_t *os, uint64_t fromobj, uint64_t intoobj,
1009     uint64_t value, dmu_tx_t *tx)
1010 {
1011 	zap_cursor_t zc;
1012 	int err = 0;
1013 
1014 	zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
1015 	for (zap_cursor_init(&zc, os, fromobj);
1016 	    zap_cursor_retrieve(&zc, za) == 0;
1017 	    (void) zap_cursor_advance(&zc)) {
1018 		if (za->za_integer_length != 8 || za->za_num_integers != 1) {
1019 			err = SET_ERROR(EINVAL);
1020 			break;
1021 		}
1022 		err = zap_add(os, intoobj, za->za_name,
1023 		    8, 1, &value, tx);
1024 		if (err != 0)
1025 			break;
1026 	}
1027 	zap_cursor_fini(&zc);
1028 	kmem_free(za, sizeof (*za));
1029 	return (err);
1030 }
1031 
1032 int
1033 zap_join_increment(objset_t *os, uint64_t fromobj, uint64_t intoobj,
1034     dmu_tx_t *tx)
1035 {
1036 	zap_cursor_t zc;
1037 	int err = 0;
1038 
1039 	zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
1040 	for (zap_cursor_init(&zc, os, fromobj);
1041 	    zap_cursor_retrieve(&zc, za) == 0;
1042 	    (void) zap_cursor_advance(&zc)) {
1043 		uint64_t delta = 0;
1044 
1045 		if (za->za_integer_length != 8 || za->za_num_integers != 1) {
1046 			err = SET_ERROR(EINVAL);
1047 			break;
1048 		}
1049 
1050 		err = zap_lookup(os, intoobj, za->za_name, 8, 1, &delta);
1051 		if (err != 0 && err != ENOENT)
1052 			break;
1053 		delta += za->za_first_integer;
1054 		err = zap_update(os, intoobj, za->za_name, 8, 1, &delta, tx);
1055 		if (err != 0)
1056 			break;
1057 	}
1058 	zap_cursor_fini(&zc);
1059 	kmem_free(za, sizeof (*za));
1060 	return (err);
1061 }
1062 
1063 int
1064 zap_add_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
1065 {
1066 	char name[20];
1067 
1068 	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
1069 	return (zap_add(os, obj, name, 8, 1, &value, tx));
1070 }
1071 
1072 int
1073 zap_remove_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
1074 {
1075 	char name[20];
1076 
1077 	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
1078 	return (zap_remove(os, obj, name, tx));
1079 }
1080 
1081 int
1082 zap_lookup_int(objset_t *os, uint64_t obj, uint64_t value)
1083 {
1084 	char name[20];
1085 
1086 	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
1087 	return (zap_lookup(os, obj, name, 8, 1, &value));
1088 }
1089 
1090 int
1091 zap_add_int_key(objset_t *os, uint64_t obj,
1092     uint64_t key, uint64_t value, dmu_tx_t *tx)
1093 {
1094 	char name[20];
1095 
1096 	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
1097 	return (zap_add(os, obj, name, 8, 1, &value, tx));
1098 }
1099 
1100 int
1101 zap_update_int_key(objset_t *os, uint64_t obj,
1102     uint64_t key, uint64_t value, dmu_tx_t *tx)
1103 {
1104 	char name[20];
1105 
1106 	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
1107 	return (zap_update(os, obj, name, 8, 1, &value, tx));
1108 }
1109 
1110 int
1111 zap_lookup_int_key(objset_t *os, uint64_t obj, uint64_t key, uint64_t *valuep)
1112 {
1113 	char name[20];
1114 
1115 	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
1116 	return (zap_lookup(os, obj, name, 8, 1, valuep));
1117 }
1118 
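/*
 * Add delta to the named 8-byte value, treating a missing entry as zero;
 * the entry is removed if the result is zero.
 */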
1119 int
1120 zap_increment(objset_t *os, uint64_t obj, const char *name, int64_t delta,
1121     dmu_tx_t *tx)
1122 {
1123 	uint64_t value = 0;
1124 
1125 	if (delta == 0)
1126 		return (0);
1127 
1128 	int err = zap_lookup(os, obj, name, 8, 1, &value);
1129 	if (err != 0 && err != ENOENT)
1130 		return (err);
1131 	value += delta;
1132 	if (value == 0)
1133 		err = zap_remove(os, obj, name, tx);
1134 	else
1135 		err = zap_update(os, obj, name, 8, 1, &value, tx);
1136 	return (err);
1137 }
1138 
1139 int
1140 zap_increment_int(objset_t *os, uint64_t obj, uint64_t key, int64_t delta,
1141     dmu_tx_t *tx)
1142 {
1143 	char name[20];
1144 
1145 	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
1146 	return (zap_increment(os, obj, name, delta, tx));
1147 }
1148 
1149 /*
1150  * Routines for iterating over the attributes.
1151  */
1152 
1153 int
1154 fzap_cursor_retrieve(zap_t *zap, zap_cursor_t *zc, zap_attribute_t *za)
1155 {
1156 	int err = ENOENT;
1157 	zap_entry_handle_t zeh;
1158 	zap_leaf_t *l;
1159 
1160 	/* retrieve the next entry at or after zc_hash/zc_cd */
1161 	/* if no entry, return ENOENT */
1162 
1163 	if (zc->zc_leaf &&
1164 	    (ZAP_HASH_IDX(zc->zc_hash,
1165 	    zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_prefix_len) !=
1166 	    zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_prefix)) {
1167 		rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
1168 		zap_put_leaf(zc->zc_leaf);
1169 		zc->zc_leaf = NULL;
1170 	}
1171 
1172 again:
1173 	if (zc->zc_leaf == NULL) {
1174 		err = zap_deref_leaf(zap, zc->zc_hash, NULL, RW_READER,
1175 		    &zc->zc_leaf);
1176 		if (err != 0)
1177 			return (err);
1178 	} else {
1179 		rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
1180 	}
1181 	l = zc->zc_leaf;
1182 
1183 	err = zap_leaf_lookup_closest(l, zc->zc_hash, zc->zc_cd, &zeh);
1184 
1185 	if (err == ENOENT) {
1186 		uint64_t nocare =
1187 		    (1ULL << (64 - zap_leaf_phys(l)->l_hdr.lh_prefix_len)) - 1;
1188 		zc->zc_hash = (zc->zc_hash & ~nocare) + nocare + 1;
1189 		zc->zc_cd = 0;
1190 		if (zap_leaf_phys(l)->l_hdr.lh_prefix_len == 0 ||
1191 		    zc->zc_hash == 0) {
1192 			zc->zc_hash = -1ULL;
1193 		} else {
1194 			zap_put_leaf(zc->zc_leaf);
1195 			zc->zc_leaf = NULL;
1196 			goto again;
1197 		}
1198 	}
1199 
1200 	if (err == 0) {
1201 		zc->zc_hash = zeh.zeh_hash;
1202 		zc->zc_cd = zeh.zeh_cd;
1203 		za->za_integer_length = zeh.zeh_integer_size;
1204 		za->za_num_integers = zeh.zeh_num_integers;
1205 		if (zeh.zeh_num_integers == 0) {
1206 			za->za_first_integer = 0;
1207 		} else {
1208 			err = zap_entry_read(&zeh, 8, 1, &za->za_first_integer);
1209 			ASSERT(err == 0 || err == EOVERFLOW);
1210 		}
1211 		err = zap_entry_read_name(zap, &zeh,
1212 		    sizeof (za->za_name), za->za_name);
1213 		ASSERT(err == 0);
1214 
1215 		za->za_normalization_conflict =
1216 		    zap_entry_normalization_conflict(&zeh,
1217 		    NULL, za->za_name, zap);
1218 	}
1219 	rw_exit(&zc->zc_leaf->l_rwlock);
1220 	return (err);
1221 }
1222 
1223 static void
1224 zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, zap_stats_t *zs)
1225 {
1226 	uint64_t lastblk = 0;
1227 
1228 	/*
1229 	 * NB: if a leaf has more pointers than an entire ptrtbl block
1230 	 * can hold, then it'll be accounted for more than once, since
1231 	 * lastblk is reset for each ptrtbl block we examine.
1232 	 */
1233 	for (int i = 0; i < len; i++) {
1234 		zap_leaf_t *l;
1235 
1236 		if (tbl[i] == lastblk)
1237 			continue;
1238 		lastblk = tbl[i];
1239 
1240 		int err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l);
1241 		if (err == 0) {
1242 			zap_leaf_stats(zap, l, zs);
1243 			zap_put_leaf(l);
1244 		}
1245 	}
1246 }
1247 
1248 void
1249 fzap_get_stats(zap_t *zap, zap_stats_t *zs)
1250 {
1251 	int bs = FZAP_BLOCK_SHIFT(zap);
1252 	zs->zs_blocksize = 1ULL << bs;
1253 
1254 	/*
1255 	 * Set zap_phys_t fields
1256 	 */
1257 	zs->zs_num_leafs = zap_f_phys(zap)->zap_num_leafs;
1258 	zs->zs_num_entries = zap_f_phys(zap)->zap_num_entries;
1259 	zs->zs_num_blocks = zap_f_phys(zap)->zap_freeblk;
1260 	zs->zs_block_type = zap_f_phys(zap)->zap_block_type;
1261 	zs->zs_magic = zap_f_phys(zap)->zap_magic;
1262 	zs->zs_salt = zap_f_phys(zap)->zap_salt;
1263 
1264 	/*
1265 	 * Set zap_ptrtbl fields
1266 	 */
1267 	zs->zs_ptrtbl_len = 1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift;
1268 	zs->zs_ptrtbl_nextblk = zap_f_phys(zap)->zap_ptrtbl.zt_nextblk;
1269 	zs->zs_ptrtbl_blks_copied =
1270 	    zap_f_phys(zap)->zap_ptrtbl.zt_blks_copied;
1271 	zs->zs_ptrtbl_zt_blk = zap_f_phys(zap)->zap_ptrtbl.zt_blk;
1272 	zs->zs_ptrtbl_zt_numblks = zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
1273 	zs->zs_ptrtbl_zt_shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
1274 
1275 	if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
1276 		/* the ptrtbl is entirely in the header block. */
1277 		zap_stats_ptrtbl(zap, &ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
1278 		    1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap), zs);
1279 	} else {
1280 		dmu_prefetch(zap->zap_objset, zap->zap_object, 0,
1281 		    zap_f_phys(zap)->zap_ptrtbl.zt_blk << bs,
1282 		    zap_f_phys(zap)->zap_ptrtbl.zt_numblks << bs,
1283 		    ZIO_PRIORITY_SYNC_READ);
1284 
1285 		for (int b = 0; b < zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
1286 		    b++) {
1287 			dmu_buf_t *db;
1288 			int err;
1289 
1290 			err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
1291 			    (zap_f_phys(zap)->zap_ptrtbl.zt_blk + b) << bs,
1292 			    FTAG, &db, DMU_READ_NO_PREFETCH);
1293 			if (err == 0) {
1294 				zap_stats_ptrtbl(zap, db->db_data,
1295 				    1<<(bs-3), zs);
1296 				dmu_buf_rele(db, FTAG);
1297 			}
1298 		}
1299 	}
1300 }
1301