/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 */

#include <sys/zio.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zfs_context.h>
#include <sys/zap.h>
#include <sys/refcount.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
#include <sys/avl.h>
#include <sys/arc.h>
#include <sys/dmu_objset.h>

#ifdef _KERNEL
#include <sys/sunddi.h>
#endif

extern inline mzap_phys_t *zap_m_phys(zap_t *zap);

static int mzap_upgrade(zap_t **zapp,
    void *tag, dmu_tx_t *tx, zap_flags_t flags);

uint64_t
zap_getflags(zap_t *zap)
{
	if (zap->zap_ismicro)
		return (0);
	return (zap_f_phys(zap)->zap_flags);
}

int
zap_hashbits(zap_t *zap)
{
	if (zap_getflags(zap) & ZAP_FLAG_HASH64)
		return (48);
	else
		return (28);
}

uint32_t
zap_maxcd(zap_t *zap)
{
	if (zap_getflags(zap) & ZAP_FLAG_HASH64)
		return ((1<<16)-1);
	else
		return (-1U);
}

static uint64_t
zap_hash(zap_name_t *zn)
{
	zap_t *zap = zn->zn_zap;
	uint64_t h = 0;

	if (zap_getflags(zap) & ZAP_FLAG_PRE_HASHED_KEY) {
		ASSERT(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY);
		h = *(uint64_t *)zn->zn_key_orig;
	} else {
		h = zap->zap_salt;
		ASSERT(h != 0);
		ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

		if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
			const uint64_t *wp = zn->zn_key_norm;

			ASSERT(zn->zn_key_intlen == 8);
			for (int i = 0; i < zn->zn_key_norm_numints;
			    wp++, i++) {
				uint64_t word = *wp;

				for (int j = 0; j < zn->zn_key_intlen; j++) {
					h = (h >> 8) ^
					    zfs_crc64_table[(h ^ word) & 0xFF];
					word >>= NBBY;
				}
			}
		} else {
			const uint8_t *cp = zn->zn_key_norm;

			/*
			 * We previously stored the terminating null on
			 * disk, but didn't hash it, so we need to
			 * continue to not hash it.  (The
			 * zn_key_*_numints includes the terminating
			 * null for non-binary keys.)
			 */
			int len = zn->zn_key_norm_numints - 1;

			ASSERT(zn->zn_key_intlen == 1);
			for (int i = 0; i < len; cp++, i++) {
				h = (h >> 8) ^
				    zfs_crc64_table[(h ^ *cp) & 0xFF];
			}
		}
	}
	/*
	 * Don't use all 64 bits, since we need some in the cookie for
	 * the collision differentiator.  We MUST use the high bits,
	 * since those are the ones that we first pay attention to when
	 * choosing the bucket.
	 */
	h &= ~((1ULL << (64 - zap_hashbits(zap))) - 1);

	return (h);
}
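
/*
 * Illustrative note (not part of the original source): the 64-bit
 * "cookie" is split between the hash produced above and the collision
 * differentiator (cd).  zap_hash() keeps only the top zap_hashbits()
 * bits and zeroes the rest, so for a default ZAP the top 28 bits carry
 * the hash, while a ZAP_FLAG_HASH64 ZAP keeps 48 hash bits and
 * zap_maxcd() correspondingly limits the cd to 16 bits ((1<<16)-1).
 * zap_cursor_serialize() below relies on this same split when it packs
 * a hash/cd pair into a single resumable cursor value.
 */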

static int
zap_normalize(zap_t *zap, const char *name, char *namenorm, int normflags)
{
	ASSERT(!(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY));

	size_t inlen = strlen(name) + 1;
	size_t outlen = ZAP_MAXNAMELEN;

	int err = 0;
	(void) u8_textprep_str((char *)name, &inlen, namenorm, &outlen,
	    normflags | U8_TEXTPREP_IGNORE_NULL | U8_TEXTPREP_IGNORE_INVALID,
	    U8_UNICODE_LATEST, &err);

	return (err);
}

boolean_t
zap_match(zap_name_t *zn, const char *matchname)
{
	ASSERT(!(zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY));

	if (zn->zn_matchtype & MT_NORMALIZE) {
		char norm[ZAP_MAXNAMELEN];

		if (zap_normalize(zn->zn_zap, matchname, norm,
		    zn->zn_normflags) != 0)
			return (B_FALSE);

		return (strcmp(zn->zn_key_norm, norm) == 0);
	} else {
		return (strcmp(zn->zn_key_orig, matchname) == 0);
	}
}

void
zap_name_free(zap_name_t *zn)
{
	kmem_free(zn, sizeof (zap_name_t));
}

zap_name_t *
zap_name_alloc(zap_t *zap, const char *key, matchtype_t mt)
{
	zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);

	zn->zn_zap = zap;
	zn->zn_key_intlen = sizeof (*key);
	zn->zn_key_orig = key;
	zn->zn_key_orig_numints = strlen(zn->zn_key_orig) + 1;
	zn->zn_matchtype = mt;
	zn->zn_normflags = zap->zap_normflags;

	/*
	 * If we're dealing with a case sensitive lookup on a mixed or
	 * insensitive fs, remove U8_TEXTPREP_TOUPPER or the lookup
	 * will fold case to all caps overriding the lookup request.
	 */
	if (mt & MT_MATCH_CASE)
		zn->zn_normflags &= ~U8_TEXTPREP_TOUPPER;

	if (zap->zap_normflags) {
		/*
		 * We *must* use zap_normflags because this normalization is
		 * what the hash is computed from.
		 */
		if (zap_normalize(zap, key, zn->zn_normbuf,
		    zap->zap_normflags) != 0) {
			zap_name_free(zn);
			return (NULL);
		}
		zn->zn_key_norm = zn->zn_normbuf;
		zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
	} else {
		if (mt != 0) {
			zap_name_free(zn);
			return (NULL);
		}
		zn->zn_key_norm = zn->zn_key_orig;
		zn->zn_key_norm_numints = zn->zn_key_orig_numints;
	}

	zn->zn_hash = zap_hash(zn);

	if (zap->zap_normflags != zn->zn_normflags) {
		/*
		 * We *must* use zn_normflags because this normalization is
		 * what the matching is based on.  (Not the hash!)
		 */
		if (zap_normalize(zap, key, zn->zn_normbuf,
		    zn->zn_normflags) != 0) {
			zap_name_free(zn);
			return (NULL);
		}
		zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
	}

	return (zn);
}
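
/*
 * Illustrative sketch (not part of the original source): callers build
 * a zap_name_t around a string key and a matchtype, use it for lookup
 * or insertion, and then free it.  A NULL return means the key could
 * not be normalized or the requested matchtype cannot be honored on
 * this ZAP (it has no normalization flags):
 *
 *	zap_name_t *zn = zap_name_alloc(zap, "MyFile.txt", MT_NORMALIZE);
 *	if (zn == NULL)
 *		return (SET_ERROR(ENOTSUP));
 *	... use zn->zn_hash, or zap_match(zn, candidate_name) ...
 *	zap_name_free(zn);
 */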

zap_name_t *
zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
{
	zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);

	ASSERT(zap->zap_normflags == 0);
	zn->zn_zap = zap;
	zn->zn_key_intlen = sizeof (*key);
	zn->zn_key_orig = zn->zn_key_norm = key;
	zn->zn_key_orig_numints = zn->zn_key_norm_numints = numints;
	zn->zn_matchtype = 0;

	zn->zn_hash = zap_hash(zn);
	return (zn);
}

static void
mzap_byteswap(mzap_phys_t *buf, size_t size)
{
	buf->mz_block_type = BSWAP_64(buf->mz_block_type);
	buf->mz_salt = BSWAP_64(buf->mz_salt);
	buf->mz_normflags = BSWAP_64(buf->mz_normflags);
	int max = (size / MZAP_ENT_LEN) - 1;
	for (int i = 0; i < max; i++) {
		buf->mz_chunk[i].mze_value =
		    BSWAP_64(buf->mz_chunk[i].mze_value);
		buf->mz_chunk[i].mze_cd =
		    BSWAP_32(buf->mz_chunk[i].mze_cd);
	}
}

void
zap_byteswap(void *buf, size_t size)
{
	uint64_t block_type = *(uint64_t *)buf;

	if (block_type == ZBT_MICRO || block_type == BSWAP_64(ZBT_MICRO)) {
		/* ASSERT(magic == ZAP_LEAF_MAGIC); */
		mzap_byteswap(buf, size);
	} else {
		fzap_byteswap(buf, size);
	}
}

static int
mze_compare(const void *arg1, const void *arg2)
{
	const mzap_ent_t *mze1 = arg1;
	const mzap_ent_t *mze2 = arg2;

	int cmp = AVL_CMP(mze1->mze_hash, mze2->mze_hash);
	if (likely(cmp))
		return (cmp);

	return (AVL_CMP(mze1->mze_cd, mze2->mze_cd));
}

static void
mze_insert(zap_t *zap, int chunkid, uint64_t hash)
{
	ASSERT(zap->zap_ismicro);
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	mzap_ent_t *mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP);
	mze->mze_chunkid = chunkid;
	mze->mze_hash = hash;
	mze->mze_cd = MZE_PHYS(zap, mze)->mze_cd;
	ASSERT(MZE_PHYS(zap, mze)->mze_name[0] != 0);
	avl_add(&zap->zap_m.zap_avl, mze);
}

static mzap_ent_t *
mze_find(zap_name_t *zn)
{
	mzap_ent_t mze_tofind;
	mzap_ent_t *mze;
	avl_index_t idx;
	avl_tree_t *avl = &zn->zn_zap->zap_m.zap_avl;

	ASSERT(zn->zn_zap->zap_ismicro);
	ASSERT(RW_LOCK_HELD(&zn->zn_zap->zap_rwlock));

	mze_tofind.mze_hash = zn->zn_hash;
	mze_tofind.mze_cd = 0;

	mze = avl_find(avl, &mze_tofind, &idx);
	if (mze == NULL)
		mze = avl_nearest(avl, idx, AVL_AFTER);
	for (; mze && mze->mze_hash == zn->zn_hash; mze = AVL_NEXT(avl, mze)) {
		ASSERT3U(mze->mze_cd, ==, MZE_PHYS(zn->zn_zap, mze)->mze_cd);
		if (zap_match(zn, MZE_PHYS(zn->zn_zap, mze)->mze_name))
			return (mze);
	}

	return (NULL);
}

static uint32_t
mze_find_unused_cd(zap_t *zap, uint64_t hash)
{
	mzap_ent_t mze_tofind;
	avl_index_t idx;
	avl_tree_t *avl = &zap->zap_m.zap_avl;

	ASSERT(zap->zap_ismicro);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	mze_tofind.mze_hash = hash;
	mze_tofind.mze_cd = 0;

	uint32_t cd = 0;
	for (mzap_ent_t *mze = avl_find(avl, &mze_tofind, &idx);
	    mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
		if (mze->mze_cd != cd)
			break;
		cd++;
	}

	return (cd);
}

static void
mze_remove(zap_t *zap, mzap_ent_t *mze)
{
	ASSERT(zap->zap_ismicro);
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	avl_remove(&zap->zap_m.zap_avl, mze);
	kmem_free(mze, sizeof (mzap_ent_t));
}

static void
mze_destroy(zap_t *zap)
{
	mzap_ent_t *mze;
	void *avlcookie = NULL;

	while (mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie))
		kmem_free(mze, sizeof (mzap_ent_t));
	avl_destroy(&zap->zap_m.zap_avl);
}

static zap_t *
mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
{
	zap_t *winner;
	uint64_t *zap_hdr = (uint64_t *)db->db_data;
	uint64_t zap_block_type = zap_hdr[0];
	uint64_t zap_magic = zap_hdr[1];

	ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));

	zap_t *zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
	rw_init(&zap->zap_rwlock, 0, 0, 0);
	rw_enter(&zap->zap_rwlock, RW_WRITER);
	zap->zap_objset = os;
	zap->zap_object = obj;
	zap->zap_dbuf = db;

	if (zap_block_type != ZBT_MICRO) {
		mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
		zap->zap_f.zap_block_shift = highbit64(db->db_size) - 1;
		if (zap_block_type != ZBT_HEADER || zap_magic != ZAP_MAGIC) {
			winner = NULL;	/* No actual winner here... */
			goto handle_winner;
		}
	} else {
		zap->zap_ismicro = TRUE;
	}

	/*
	 * Make sure that zap_ismicro is set before we let others see
	 * it, because zap_lockdir() checks zap_ismicro without the lock
	 * held.
	 */
	dmu_buf_init_user(&zap->zap_dbu, zap_evict_sync, NULL, &zap->zap_dbuf);
	winner = dmu_buf_set_user(db, &zap->zap_dbu);

	if (winner != NULL)
		goto handle_winner;

	if (zap->zap_ismicro) {
		zap->zap_salt = zap_m_phys(zap)->mz_salt;
		zap->zap_normflags = zap_m_phys(zap)->mz_normflags;
		zap->zap_m.zap_num_chunks = db->db_size / MZAP_ENT_LEN - 1;
		avl_create(&zap->zap_m.zap_avl, mze_compare,
		    sizeof (mzap_ent_t), offsetof(mzap_ent_t, mze_node));

		for (int i = 0; i < zap->zap_m.zap_num_chunks; i++) {
			mzap_ent_phys_t *mze =
			    &zap_m_phys(zap)->mz_chunk[i];
			if (mze->mze_name[0]) {
				zap_name_t *zn;

				zap->zap_m.zap_num_entries++;
				zn = zap_name_alloc(zap, mze->mze_name, 0);
				mze_insert(zap, i, zn->zn_hash);
				zap_name_free(zn);
			}
		}
	} else {
		zap->zap_salt = zap_f_phys(zap)->zap_salt;
		zap->zap_normflags = zap_f_phys(zap)->zap_normflags;

		ASSERT3U(sizeof (struct zap_leaf_header), ==,
		    2*ZAP_LEAF_CHUNKSIZE);

		/*
		 * The embedded pointer table should not overlap the
		 * other members.
		 */
		ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), >,
		    &zap_f_phys(zap)->zap_salt);

		/*
		 * The embedded pointer table should end at the end of
		 * the block
		 */
		ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap,
		    1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)) -
		    (uintptr_t)zap_f_phys(zap), ==,
		    zap->zap_dbuf->db_size);
	}
	rw_exit(&zap->zap_rwlock);
	return (zap);

handle_winner:
	rw_exit(&zap->zap_rwlock);
	rw_destroy(&zap->zap_rwlock);
	if (!zap->zap_ismicro)
		mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
	kmem_free(zap, sizeof (zap_t));
	return (winner);
}

/*
 * This routine "consumes" the caller's hold on the dbuf, which must
 * have the specified tag.
 */
static int
zap_lockdir_impl(dmu_buf_t *db, void *tag, dmu_tx_t *tx,
    krw_t lti, boolean_t fatreader, boolean_t adding, zap_t **zapp)
{
	ASSERT0(db->db_offset);
	objset_t *os = dmu_buf_get_objset(db);
	uint64_t obj = db->db_object;

	*zapp = NULL;

	zap_t *zap = dmu_buf_get_user(db);
	if (zap == NULL) {
		zap = mzap_open(os, obj, db);
		if (zap == NULL) {
			/*
			 * mzap_open() didn't like what it saw on-disk.
			 * Check for corruption!
			 */
			return (SET_ERROR(EIO));
		}
	}

	/*
	 * We're checking zap_ismicro without the lock held, in order to
	 * tell what type of lock we want.  Once we have some sort of
	 * lock, see if it really is the right type.  In practice this
	 * can only be different if it was upgraded from micro to fat,
	 * and micro wanted WRITER but fat only needs READER.
	 */
	krw_t lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti;
	rw_enter(&zap->zap_rwlock, lt);
	if (lt != ((!zap->zap_ismicro && fatreader) ? RW_READER : lti)) {
		/* it was upgraded, now we only need reader */
		ASSERT(lt == RW_WRITER);
		ASSERT(RW_READER ==
		    ((!zap->zap_ismicro && fatreader) ? RW_READER : lti));
		rw_downgrade(&zap->zap_rwlock);
		lt = RW_READER;
	}

	zap->zap_objset = os;

	if (lt == RW_WRITER)
		dmu_buf_will_dirty(db, tx);

	ASSERT3P(zap->zap_dbuf, ==, db);

	ASSERT(!zap->zap_ismicro ||
	    zap->zap_m.zap_num_entries <= zap->zap_m.zap_num_chunks);
	if (zap->zap_ismicro && tx && adding &&
	    zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) {
		uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE;
		if (newsz > MZAP_MAX_BLKSZ) {
			dprintf("upgrading obj %llu: num_entries=%u\n",
			    obj, zap->zap_m.zap_num_entries);
			*zapp = zap;
			int err = mzap_upgrade(zapp, tag, tx, 0);
			if (err != 0)
				rw_exit(&zap->zap_rwlock);
			return (err);
		}
		VERIFY0(dmu_object_set_blocksize(os, obj, newsz, 0, tx));
		zap->zap_m.zap_num_chunks =
		    db->db_size / MZAP_ENT_LEN - 1;
	}

	*zapp = zap;
	return (0);
}

static int
zap_lockdir_by_dnode(dnode_t *dn, dmu_tx_t *tx,
    krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
{
	dmu_buf_t *db;

	int err = dmu_buf_hold_by_dnode(dn, 0, tag, &db, DMU_READ_NO_PREFETCH);
	if (err != 0) {
		return (err);
	}
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(db, &doi);
		ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
	}
#endif

	err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
	if (err != 0) {
		dmu_buf_rele(db, tag);
	}
	return (err);
}

int
zap_lockdir(objset_t *os, uint64_t obj, dmu_tx_t *tx,
    krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
{
	dmu_buf_t *db;

	int err = dmu_buf_hold(os, obj, 0, tag, &db, DMU_READ_NO_PREFETCH);
	if (err != 0)
		return (err);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(db, &doi);
		ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
	}
#endif
	err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
	if (err != 0)
		dmu_buf_rele(db, tag);
	return (err);
}

void
zap_unlockdir(zap_t *zap, void *tag)
{
	rw_exit(&zap->zap_rwlock);
	dmu_buf_rele(zap->zap_dbuf, tag);
}
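
/*
 * Illustrative sketch (not part of the original source): the exported
 * zap_* entry points below all follow the same pattern, taking a dbuf
 * hold plus lock with zap_lockdir() and dropping both with
 * zap_unlockdir() under the same tag:
 *
 *	zap_t *zap;
 *	int err = zap_lockdir(os, zapobj, NULL, RW_READER,
 *	    TRUE, FALSE, FTAG, &zap);
 *	if (err != 0)
 *		return (err);
 *	... operate on zap ...
 *	zap_unlockdir(zap, FTAG);
 */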

static int
mzap_upgrade(zap_t **zapp, void *tag, dmu_tx_t *tx, zap_flags_t flags)
{
	int err = 0;
	zap_t *zap = *zapp;

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	int sz = zap->zap_dbuf->db_size;
	mzap_phys_t *mzp = zio_buf_alloc(sz);
	bcopy(zap->zap_dbuf->db_data, mzp, sz);
	int nchunks = zap->zap_m.zap_num_chunks;

	if (!flags) {
		err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object,
		    1ULL << fzap_default_block_shift, 0, tx);
		if (err != 0) {
			zio_buf_free(mzp, sz);
			return (err);
		}
	}

	dprintf("upgrading obj=%llu with %u chunks\n",
	    zap->zap_object, nchunks);
	/* XXX destroy the avl later, so we can use the stored hash value */
	mze_destroy(zap);

	fzap_upgrade(zap, tx, flags);

	for (int i = 0; i < nchunks; i++) {
		mzap_ent_phys_t *mze = &mzp->mz_chunk[i];
		if (mze->mze_name[0] == 0)
			continue;
		dprintf("adding %s=%llu\n",
		    mze->mze_name, mze->mze_value);
		zap_name_t *zn = zap_name_alloc(zap, mze->mze_name, 0);
		err = fzap_add_cd(zn, 8, 1, &mze->mze_value, mze->mze_cd,
		    tag, tx);
		zap = zn->zn_zap;	/* fzap_add_cd() may change zap */
		zap_name_free(zn);
		if (err != 0)
			break;
	}
	zio_buf_free(mzp, sz);
	*zapp = zap;
	return (err);
}

/*
 * The "normflags" determine the behavior of the matchtype_t which is
 * passed to zap_lookup_norm().  Names which have the same normalized
 * version will be stored with the same hash value, and therefore we can
 * perform normalization-insensitive lookups.  We can be Unicode form-
 * insensitive and/or case-insensitive.  The following flags are valid for
 * "normflags":
 *
 * U8_TEXTPREP_NFC
 * U8_TEXTPREP_NFD
 * U8_TEXTPREP_NFKC
 * U8_TEXTPREP_NFKD
 * U8_TEXTPREP_TOUPPER
 *
 * The *_NF* (Normalization Form) flags are mutually exclusive; at most one
 * of them may be supplied.
 */
void
mzap_create_impl(objset_t *os, uint64_t obj, int normflags, zap_flags_t flags,
    dmu_tx_t *tx)
{
	dmu_buf_t *db;

	VERIFY0(dmu_buf_hold(os, obj, 0, FTAG, &db, DMU_READ_NO_PREFETCH));

	dmu_buf_will_dirty(db, tx);
	mzap_phys_t *zp = db->db_data;
	zp->mz_block_type = ZBT_MICRO;
	(void) random_get_pseudo_bytes((void *)&zp->mz_salt, sizeof (uint64_t));
	zp->mz_normflags = normflags;

	if (flags != 0) {
		zap_t *zap;
		/* Only fat zap supports flags; upgrade immediately. */
		VERIFY0(zap_lockdir_impl(db, FTAG, tx, RW_WRITER,
		    B_FALSE, B_FALSE, &zap));
		VERIFY0(mzap_upgrade(&zap, FTAG, tx, flags));
		zap_unlockdir(zap, FTAG);
	} else {
		dmu_buf_rele(db, FTAG);
	}
}
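
/*
 * Illustrative sketch (not part of the original source): a consumer
 * that wants case- and form-insensitive name matching would combine
 * exactly one normalization-form flag with the optional TOUPPER flag
 * when creating the object, for example:
 *
 *	uint64_t obj = zap_create_norm(os,
 *	    U8_TEXTPREP_TOUPPER | U8_TEXTPREP_NFC,
 *	    DMU_OT_DIRECTORY_CONTENTS, DMU_OT_NONE, 0, tx);
 *
 * Lookups against such an object can then pass MT_NORMALIZE (and
 * optionally MT_MATCH_CASE) to zap_lookup_norm() below.
 */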

int
zap_create_claim(objset_t *os, uint64_t obj, dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (zap_create_claim_dnsize(os, obj, ot, bonustype, bonuslen,
	    0, tx));
}

int
zap_create_claim_dnsize(objset_t *os, uint64_t obj, dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	return (zap_create_claim_norm_dnsize(os, obj,
	    0, ot, bonustype, bonuslen, dnodesize, tx));
}

int
zap_create_claim_norm(objset_t *os, uint64_t obj, int normflags,
    dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (zap_create_claim_norm_dnsize(os, obj, normflags, ot, bonustype,
	    bonuslen, 0, tx));
}

int
zap_create_claim_norm_dnsize(objset_t *os, uint64_t obj, int normflags,
    dmu_object_type_t ot, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	int err;

	err = dmu_object_claim_dnsize(os, obj, ot, 0, bonustype, bonuslen,
	    dnodesize, tx);
	if (err != 0)
		return (err);
	mzap_create_impl(os, obj, normflags, 0, tx);
	return (0);
}

uint64_t
zap_create(objset_t *os, dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (zap_create_norm(os, 0, ot, bonustype, bonuslen, tx));
}

uint64_t
zap_create_dnsize(objset_t *os, dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	return (zap_create_norm_dnsize(os, 0, ot, bonustype, bonuslen,
	    dnodesize, tx));
}

uint64_t
zap_create_norm(objset_t *os, int normflags, dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_ZAP);
	return (zap_create_norm_dnsize(os, normflags, ot, bonustype, bonuslen,
	    0, tx));
}

uint64_t
zap_create_norm_dnsize(objset_t *os, int normflags, dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	uint64_t obj = dmu_object_alloc_dnsize(os, ot, 0, bonustype, bonuslen,
	    dnodesize, tx);

	mzap_create_impl(os, obj, normflags, 0, tx);
	return (obj);
}

uint64_t
zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
    dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_ZAP);
	return (zap_create_flags_dnsize(os, normflags, flags, ot,
	    leaf_blockshift, indirect_blockshift, bonustype, bonuslen, 0, tx));
}

uint64_t
zap_create_flags_dnsize(objset_t *os, int normflags, zap_flags_t flags,
    dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	uint64_t obj = dmu_object_alloc_dnsize(os, ot, 0, bonustype, bonuslen,
	    dnodesize, tx);

	ASSERT(leaf_blockshift >= SPA_MINBLOCKSHIFT &&
	    leaf_blockshift <= SPA_OLD_MAXBLOCKSHIFT &&
	    indirect_blockshift >= SPA_MINBLOCKSHIFT &&
	    indirect_blockshift <= SPA_OLD_MAXBLOCKSHIFT);

	VERIFY(dmu_object_set_blocksize(os, obj,
	    1ULL << leaf_blockshift, indirect_blockshift, tx) == 0);

	mzap_create_impl(os, obj, normflags, flags, tx);
	return (obj);
}

int
zap_destroy(objset_t *os, uint64_t zapobj, dmu_tx_t *tx)
{
	/*
	 * dmu_object_free will free the object number and free the
	 * data.  Freeing the data will cause our pageout function to be
	 * called, which will destroy our data (zap_leaf_t's and zap_t).
	 */

	return (dmu_object_free(os, zapobj, tx));
}

void
zap_evict_sync(void *dbu)
{
	zap_t *zap = dbu;

	rw_destroy(&zap->zap_rwlock);

	if (zap->zap_ismicro)
		mze_destroy(zap);
	else
		mutex_destroy(&zap->zap_f.zap_num_entries_mtx);

	kmem_free(zap, sizeof (zap_t));
}

int
zap_count(objset_t *os, uint64_t zapobj, uint64_t *count)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
	if (err != 0)
		return (err);
	if (!zap->zap_ismicro) {
		err = fzap_count(zap, count);
	} else {
		*count = zap->zap_m.zap_num_entries;
	}
	zap_unlockdir(zap, FTAG);
	return (err);
}

/*
 * zn may be NULL; if not specified, it will be computed if needed.
 * See also the comment above zap_entry_normalization_conflict().
 */
static boolean_t
mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze)
{
	int direction = AVL_BEFORE;
	boolean_t allocdzn = B_FALSE;

	if (zap->zap_normflags == 0)
		return (B_FALSE);

again:
	for (mzap_ent_t *other = avl_walk(&zap->zap_m.zap_avl, mze, direction);
	    other && other->mze_hash == mze->mze_hash;
	    other = avl_walk(&zap->zap_m.zap_avl, other, direction)) {

		if (zn == NULL) {
			zn = zap_name_alloc(zap, MZE_PHYS(zap, mze)->mze_name,
			    MT_NORMALIZE);
			allocdzn = B_TRUE;
		}
		if (zap_match(zn, MZE_PHYS(zap, other)->mze_name)) {
			if (allocdzn)
				zap_name_free(zn);
			return (B_TRUE);
		}
	}

	if (direction == AVL_BEFORE) {
		direction = AVL_AFTER;
		goto again;
	}

	if (allocdzn)
		zap_name_free(zn);
	return (B_FALSE);
}

/*
 * Routines for manipulating attributes.
 */

int
zap_lookup(objset_t *os, uint64_t zapobj, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *buf)
{
	return (zap_lookup_norm(os, zapobj, name, integer_size,
	    num_integers, buf, 0, NULL, 0, NULL));
}

static int
zap_lookup_impl(zap_t *zap, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *buf,
    matchtype_t mt, char *realname, int rn_len,
    boolean_t *ncp)
{
	int err = 0;

	zap_name_t *zn = zap_name_alloc(zap, name, mt);
	if (zn == NULL)
		return (SET_ERROR(ENOTSUP));

	if (!zap->zap_ismicro) {
		err = fzap_lookup(zn, integer_size, num_integers, buf,
		    realname, rn_len, ncp);
	} else {
		mzap_ent_t *mze = mze_find(zn);
		if (mze == NULL) {
			err = SET_ERROR(ENOENT);
		} else {
			if (num_integers < 1) {
				err = SET_ERROR(EOVERFLOW);
			} else if (integer_size != 8) {
				err = SET_ERROR(EINVAL);
			} else {
				*(uint64_t *)buf =
				    MZE_PHYS(zap, mze)->mze_value;
				(void) strlcpy(realname,
				    MZE_PHYS(zap, mze)->mze_name, rn_len);
				if (ncp) {
					*ncp = mzap_normalization_conflict(zap,
					    zn, mze);
				}
			}
		}
	}
	zap_name_free(zn);
	return (err);
}

int
zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *buf,
    matchtype_t mt, char *realname, int rn_len,
    boolean_t *ncp)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
	if (err != 0)
		return (err);
	err = zap_lookup_impl(zap, name, integer_size,
	    num_integers, buf, mt, realname, rn_len, ncp);
	zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_lookup_by_dnode(dnode_t *dn, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *buf)
{
	return (zap_lookup_norm_by_dnode(dn, name, integer_size,
	    num_integers, buf, 0, NULL, 0, NULL));
}

int
zap_lookup_norm_by_dnode(dnode_t *dn, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *buf,
    matchtype_t mt, char *realname, int rn_len,
    boolean_t *ncp)
{
	zap_t *zap;

	int err = zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE,
	    FTAG, &zap);
	if (err != 0)
		return (err);
	err = zap_lookup_impl(zap, name, integer_size,
	    num_integers, buf, mt, realname, rn_len, ncp);
	zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
	if (err != 0)
		return (err);
	zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
	if (zn == NULL) {
		zap_unlockdir(zap, FTAG);
		return (SET_ERROR(ENOTSUP));
	}

	fzap_prefetch(zn);
	zap_name_free(zn);
	zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints, uint64_t integer_size, uint64_t num_integers, void *buf)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
	if (err != 0)
		return (err);
	zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
	if (zn == NULL) {
		zap_unlockdir(zap, FTAG);
		return (SET_ERROR(ENOTSUP));
	}

	err = fzap_lookup(zn, integer_size, num_integers, buf,
	    NULL, 0, NULL);
	zap_name_free(zn);
	zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_contains(objset_t *os, uint64_t zapobj, const char *name)
{
	int err = zap_lookup_norm(os, zapobj, name, 0,
	    0, NULL, 0, NULL, 0, NULL);
	if (err == EOVERFLOW || err == EINVAL)
		err = 0; /* found, but skipped reading the value */
	return (err);
}
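
/*
 * Illustrative sketch (not part of the original source): because
 * zap_contains() maps EOVERFLOW/EINVAL back to 0, callers can test for
 * existence without caring about the attribute's size or type:
 *
 *	if (zap_contains(os, zapobj, "some-attr") == 0)
 *		... the attribute exists ...
 *	(ENOENT means it does not; other errors are real failures)
 */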

int
zap_length(objset_t *os, uint64_t zapobj, const char *name,
    uint64_t *integer_size, uint64_t *num_integers)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
	if (err != 0)
		return (err);
	zap_name_t *zn = zap_name_alloc(zap, name, 0);
	if (zn == NULL) {
		zap_unlockdir(zap, FTAG);
		return (SET_ERROR(ENOTSUP));
	}
	if (!zap->zap_ismicro) {
		err = fzap_length(zn, integer_size, num_integers);
	} else {
		mzap_ent_t *mze = mze_find(zn);
		if (mze == NULL) {
			err = SET_ERROR(ENOENT);
		} else {
			if (integer_size)
				*integer_size = 8;
			if (num_integers)
				*num_integers = 1;
		}
	}
	zap_name_free(zn);
	zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints, uint64_t *integer_size, uint64_t *num_integers)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
	if (err != 0)
		return (err);
	zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
	if (zn == NULL) {
		zap_unlockdir(zap, FTAG);
		return (SET_ERROR(ENOTSUP));
	}
	err = fzap_length(zn, integer_size, num_integers);
	zap_name_free(zn);
	zap_unlockdir(zap, FTAG);
	return (err);
}

static void
mzap_addent(zap_name_t *zn, uint64_t value)
{
	zap_t *zap = zn->zn_zap;
	int start = zap->zap_m.zap_alloc_next;

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

#ifdef ZFS_DEBUG
	for (int i = 0; i < zap->zap_m.zap_num_chunks; i++) {
		mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
		ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0);
	}
#endif

	uint32_t cd = mze_find_unused_cd(zap, zn->zn_hash);
	/* given the limited size of the microzap, this can't happen */
	ASSERT(cd < zap_maxcd(zap));

again:
	for (int i = start; i < zap->zap_m.zap_num_chunks; i++) {
		mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
		if (mze->mze_name[0] == 0) {
			mze->mze_value = value;
			mze->mze_cd = cd;
			(void) strcpy(mze->mze_name, zn->zn_key_orig);
			zap->zap_m.zap_num_entries++;
			zap->zap_m.zap_alloc_next = i+1;
			if (zap->zap_m.zap_alloc_next ==
			    zap->zap_m.zap_num_chunks)
				zap->zap_m.zap_alloc_next = 0;
			mze_insert(zap, i, zn->zn_hash);
			return;
		}
	}
	if (start != 0) {
		start = 0;
		goto again;
	}
	ASSERT(!"out of entries!");
}

static int
zap_add_impl(zap_t *zap, const char *key,
    int integer_size, uint64_t num_integers,
    const void *val, dmu_tx_t *tx, void *tag)
{
	const uint64_t *intval = val;
	int err = 0;

	zap_name_t *zn = zap_name_alloc(zap, key, 0);
	if (zn == NULL) {
		zap_unlockdir(zap, tag);
		return (SET_ERROR(ENOTSUP));
	}
	if (!zap->zap_ismicro) {
		err = fzap_add(zn, integer_size, num_integers, val, tag, tx);
		zap = zn->zn_zap;	/* fzap_add() may change zap */
	} else if (integer_size != 8 || num_integers != 1 ||
	    strlen(key) >= MZAP_NAME_LEN) {
		err = mzap_upgrade(&zn->zn_zap, tag, tx, 0);
		if (err == 0) {
			err = fzap_add(zn, integer_size, num_integers, val,
			    tag, tx);
		}
		zap = zn->zn_zap;	/* fzap_add() may change zap */
	} else {
		if (mze_find(zn) != NULL) {
			err = SET_ERROR(EEXIST);
		} else {
			mzap_addent(zn, *intval);
		}
	}
	ASSERT(zap == zn->zn_zap);
	zap_name_free(zn);
	if (zap != NULL)	/* may be NULL if fzap_add() failed */
		zap_unlockdir(zap, tag);
	return (err);
}

int
zap_add(objset_t *os, uint64_t zapobj, const char *key,
    int integer_size, uint64_t num_integers,
    const void *val, dmu_tx_t *tx)
{
	zap_t *zap;
	int err;

	err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
	if (err != 0)
		return (err);
	err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
	/* zap_add_impl() calls zap_unlockdir() */
	return (err);
}
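
/*
 * Illustrative sketch (not part of the original source): adding a
 * single 8-byte value keeps a microzap micro, while a wider value, a
 * multi-integer array, or a name of MZAP_NAME_LEN or more characters
 * forces the mzap_upgrade() path in zap_add_impl() above:
 *
 *	uint64_t one = 1;
 *	err = zap_add(os, zapobj, "refcount", 8, 1, &one, tx);
 *	(single 8-byte integer: fits the microzap)
 *
 *	uint64_t pair[2] = { 1, 2 };
 *	err = zap_add(os, zapobj, "range", 8, 2, pair, tx);
 *	(array value: triggers an upgrade to a fat ZAP)
 */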

int
zap_add_by_dnode(dnode_t *dn, const char *key,
    int integer_size, uint64_t num_integers,
    const void *val, dmu_tx_t *tx)
{
	zap_t *zap;
	int err;

	err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
	if (err != 0)
		return (err);
	err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
	/* zap_add_impl() calls zap_unlockdir() */
	return (err);
}

int
zap_add_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints, int integer_size, uint64_t num_integers,
    const void *val, dmu_tx_t *tx)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
	if (err != 0)
		return (err);
	zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
	if (zn == NULL) {
		zap_unlockdir(zap, FTAG);
		return (SET_ERROR(ENOTSUP));
	}
	err = fzap_add(zn, integer_size, num_integers, val, FTAG, tx);
	zap = zn->zn_zap;	/* fzap_add() may change zap */
	zap_name_free(zn);
	if (zap != NULL)	/* may be NULL if fzap_add() failed */
		zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_update(objset_t *os, uint64_t zapobj, const char *name,
    int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
	zap_t *zap;
	uint64_t oldval;
	const uint64_t *intval = val;

#ifdef ZFS_DEBUG
	/*
	 * If there is an old value, it shouldn't change across the
	 * lockdir (e.g., due to bp rewrite's translation).
	 */
	if (integer_size == 8 && num_integers == 1)
		(void) zap_lookup(os, zapobj, name, 8, 1, &oldval);
#endif

	int err =
	    zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
	if (err != 0)
		return (err);
	zap_name_t *zn = zap_name_alloc(zap, name, 0);
	if (zn == NULL) {
		zap_unlockdir(zap, FTAG);
		return (SET_ERROR(ENOTSUP));
	}
	if (!zap->zap_ismicro) {
		err = fzap_update(zn, integer_size, num_integers, val,
		    FTAG, tx);
		zap = zn->zn_zap;	/* fzap_update() may change zap */
	} else if (integer_size != 8 || num_integers != 1 ||
	    strlen(name) >= MZAP_NAME_LEN) {
		dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n",
		    zapobj, integer_size, num_integers, name);
		err = mzap_upgrade(&zn->zn_zap, FTAG, tx, 0);
		if (err == 0) {
			err = fzap_update(zn, integer_size, num_integers,
			    val, FTAG, tx);
		}
		zap = zn->zn_zap;	/* fzap_update() may change zap */
	} else {
		mzap_ent_t *mze = mze_find(zn);
		if (mze != NULL) {
			ASSERT3U(MZE_PHYS(zap, mze)->mze_value, ==, oldval);
			MZE_PHYS(zap, mze)->mze_value = *intval;
		} else {
			mzap_addent(zn, *intval);
		}
	}
	ASSERT(zap == zn->zn_zap);
	zap_name_free(zn);
	if (zap != NULL)	/* may be NULL if fzap_upgrade() failed */
		zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_update_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints,
    int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
	if (err != 0)
		return (err);
	zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
	if (zn == NULL) {
		zap_unlockdir(zap, FTAG);
		return (SET_ERROR(ENOTSUP));
	}
	err = fzap_update(zn, integer_size, num_integers, val, FTAG, tx);
	zap = zn->zn_zap;	/* fzap_update() may change zap */
	zap_name_free(zn);
	if (zap != NULL)	/* may be NULL if fzap_upgrade() failed */
		zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_remove(objset_t *os, uint64_t zapobj, const char *name, dmu_tx_t *tx)
{
	return (zap_remove_norm(os, zapobj, name, 0, tx));
}

static int
zap_remove_impl(zap_t *zap, const char *name,
    matchtype_t mt, dmu_tx_t *tx)
{
	int err = 0;

	zap_name_t *zn = zap_name_alloc(zap, name, mt);
	if (zn == NULL)
		return (SET_ERROR(ENOTSUP));
	if (!zap->zap_ismicro) {
		err = fzap_remove(zn, tx);
	} else {
		mzap_ent_t *mze = mze_find(zn);
		if (mze == NULL) {
			err = SET_ERROR(ENOENT);
		} else {
			zap->zap_m.zap_num_entries--;
			bzero(&zap_m_phys(zap)->mz_chunk[mze->mze_chunkid],
			    sizeof (mzap_ent_phys_t));
			mze_remove(zap, mze);
		}
	}
	zap_name_free(zn);
	return (err);
}

int
zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
    matchtype_t mt, dmu_tx_t *tx)
{
	zap_t *zap;
	int err;

	err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
	if (err)
		return (err);
	err = zap_remove_impl(zap, name, mt, tx);
	zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_remove_by_dnode(dnode_t *dn, const char *name, dmu_tx_t *tx)
{
	zap_t *zap;
	int err;

	err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
	if (err)
		return (err);
	err = zap_remove_impl(zap, name, 0, tx);
	zap_unlockdir(zap, FTAG);
	return (err);
}

int
zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints, dmu_tx_t *tx)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
	if (err != 0)
		return (err);
	zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
	if (zn == NULL) {
		zap_unlockdir(zap, FTAG);
		return (SET_ERROR(ENOTSUP));
	}
	err = fzap_remove(zn, tx);
	zap_name_free(zn);
	zap_unlockdir(zap, FTAG);
	return (err);
}

/*
 * Routines for iterating over the attributes.
 */

static void
zap_cursor_init_impl(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
    uint64_t serialized, boolean_t prefetch)
{
	zc->zc_objset = os;
	zc->zc_zap = NULL;
	zc->zc_leaf = NULL;
	zc->zc_zapobj = zapobj;
	zc->zc_serialized = serialized;
	zc->zc_hash = 0;
	zc->zc_cd = 0;
	zc->zc_prefetch = prefetch;
}

void
zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
    uint64_t serialized)
{
	zap_cursor_init_impl(zc, os, zapobj, serialized, B_TRUE);
}

/*
 * Initialize a cursor at the beginning of the ZAP object.  The entire
 * ZAP object will be prefetched.
 */
void
zap_cursor_init(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
{
	zap_cursor_init_impl(zc, os, zapobj, 0, B_TRUE);
}

/*
 * Initialize a cursor at the beginning, but request that we not prefetch
 * the entire ZAP object.
 */
void
zap_cursor_init_noprefetch(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
{
	zap_cursor_init_impl(zc, os, zapobj, 0, B_FALSE);
}

void
zap_cursor_fini(zap_cursor_t *zc)
{
	if (zc->zc_zap) {
		rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
		zap_unlockdir(zc->zc_zap, NULL);
		zc->zc_zap = NULL;
	}
	if (zc->zc_leaf) {
		rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
		zap_put_leaf(zc->zc_leaf);
		zc->zc_leaf = NULL;
	}
	zc->zc_objset = NULL;
}

uint64_t
zap_cursor_serialize(zap_cursor_t *zc)
{
	if (zc->zc_hash == -1ULL)
		return (-1ULL);
	if (zc->zc_zap == NULL)
		return (zc->zc_serialized);
	ASSERT((zc->zc_hash & zap_maxcd(zc->zc_zap)) == 0);
	ASSERT(zc->zc_cd < zap_maxcd(zc->zc_zap));

	/*
	 * We want to keep the high 32 bits of the cursor zero if we can, so
	 * that 32-bit programs can access this.  So usually use a small
	 * (28-bit) hash value so we can fit 4 bits of cd into the low 32-bits
	 * of the cursor.
	 *
	 * [ collision differentiator | zap_hashbits()-bit hash value ]
	 */
	return ((zc->zc_hash >> (64 - zap_hashbits(zc->zc_zap))) |
	    ((uint64_t)zc->zc_cd << zap_hashbits(zc->zc_zap)));
}
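
/*
 * Illustrative sketch (not part of the original source): the serialized
 * cursor lets an iteration be suspended and resumed later, e.g. across
 * readdir(2) calls, without keeping the ZAP held in between:
 *
 *	uint64_t cookie = zap_cursor_serialize(&zc);
 *	zap_cursor_fini(&zc);
 *	...
 *	zap_cursor_init_serialized(&zc, os, zapobj, cookie);
 *	err = zap_cursor_retrieve(&zc, &za);
 *	(continues from where the previous walk left off)
 */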

int
zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
{
	int err;

	if (zc->zc_hash == -1ULL)
		return (SET_ERROR(ENOENT));

	if (zc->zc_zap == NULL) {
		int hb;
		err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL,
		    RW_READER, TRUE, FALSE, NULL, &zc->zc_zap);
		if (err != 0)
			return (err);

		/*
		 * To support zap_cursor_init_serialized, advance, retrieve,
		 * we must add to the existing zc_cd, which may already
		 * be 1 due to the zap_cursor_advance.
		 */
		ASSERT(zc->zc_hash == 0);
		hb = zap_hashbits(zc->zc_zap);
		zc->zc_hash = zc->zc_serialized << (64 - hb);
		zc->zc_cd += zc->zc_serialized >> hb;
		if (zc->zc_cd >= zap_maxcd(zc->zc_zap)) /* corrupt serialized */
			zc->zc_cd = 0;
	} else {
		rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
	}
	if (!zc->zc_zap->zap_ismicro) {
		err = fzap_cursor_retrieve(zc->zc_zap, zc, za);
	} else {
		avl_index_t idx;
		mzap_ent_t mze_tofind;

		mze_tofind.mze_hash = zc->zc_hash;
		mze_tofind.mze_cd = zc->zc_cd;

		mzap_ent_t *mze =
		    avl_find(&zc->zc_zap->zap_m.zap_avl, &mze_tofind, &idx);
		if (mze == NULL) {
			mze = avl_nearest(&zc->zc_zap->zap_m.zap_avl,
			    idx, AVL_AFTER);
		}
		if (mze) {
			mzap_ent_phys_t *mzep = MZE_PHYS(zc->zc_zap, mze);
			ASSERT3U(mze->mze_cd, ==, mzep->mze_cd);
			za->za_normalization_conflict =
			    mzap_normalization_conflict(zc->zc_zap, NULL, mze);
			za->za_integer_length = 8;
			za->za_num_integers = 1;
			za->za_first_integer = mzep->mze_value;
			(void) strcpy(za->za_name, mzep->mze_name);
			zc->zc_hash = mze->mze_hash;
			zc->zc_cd = mze->mze_cd;
			err = 0;
		} else {
			zc->zc_hash = -1ULL;
			err = SET_ERROR(ENOENT);
		}
	}
	rw_exit(&zc->zc_zap->zap_rwlock);
	return (err);
}

void
zap_cursor_advance(zap_cursor_t *zc)
{
	if (zc->zc_hash == -1ULL)
		return;
	zc->zc_cd++;
}
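
/*
 * Illustrative sketch (not part of the original source): the usual way
 * to walk every attribute of a ZAP object with the cursor interface:
 *
 *	zap_cursor_t zc;
 *	zap_attribute_t za;
 *
 *	for (zap_cursor_init(&zc, os, zapobj);
 *	    zap_cursor_retrieve(&zc, &za) == 0;
 *	    zap_cursor_advance(&zc)) {
 *		... use za.za_name / za.za_first_integer ...
 *	}
 *	zap_cursor_fini(&zc);
 */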

int
zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
{
	zap_t *zap;

	int err =
	    zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
	if (err != 0)
		return (err);

	bzero(zs, sizeof (zap_stats_t));

	if (zap->zap_ismicro) {
		zs->zs_blocksize = zap->zap_dbuf->db_size;
		zs->zs_num_entries = zap->zap_m.zap_num_entries;
		zs->zs_num_blocks = 1;
	} else {
		fzap_get_stats(zap, zs);
	}
	zap_unlockdir(zap, FTAG);
	return (0);
}