/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/spa_log_spacemap.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */

/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;

/*
 * Override the default indirect block size of 128K, instead using 16K for
 * spacemaps (2^14 bytes).  This dramatically reduces write inflation since
 * appending to a spacemap typically has to write one data block (4KB) and one
 * or two indirect blocks (16K-32K, rather than 128K).
 */
int space_map_ibs = 14;

boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}

/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry. Stop after reading 'end' bytes of the space map.
 */
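/*
 * Illustrative sketch (not used by this file): a minimal sm_cb_t callback
 * that tallies the bytes covered by ALLOC entries, in the style of
 * space_map_load_callback() below. The helper name is hypothetical.
 *
 *	static int
 *	count_alloc_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		uint64_t *bytes = arg;
 *		if (sme->sme_type == SM_ALLOC)
 *			*bytes += sme->sme_run;
 *		return (0);
 *	}
 *
 *	uint64_t bytes = 0;
 *	(void) space_map_iterate(sm, space_map_length(sm),
 *	    count_alloc_cb, &bytes);
 */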
int
space_map_iterate(space_map_t *sm, uint64_t end, sm_cb_t callback, void *arg)
{
	uint64_t blksz = sm->sm_blksz;

	ASSERT3U(blksz, !=, 0);
	ASSERT3U(end, <=, space_map_length(sm));
	ASSERT0(P2PHASE(end, sizeof (uint64_t)));

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, end,
	    ZIO_PRIORITY_SYNC_READ);

	int error = 0;
	for (uint64_t block_base = 0; block_base < end && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(end - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e)) /* Skip debug entries */
				continue;

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move on to the second word */
				block_cursor++;
				e = *block_cursor;
				VERIFY3P(block_cursor, <=, block_end);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
		}
		dmu_buf_rele(db, FTAG);
	}
	return (error);
}

/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to the block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
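/*
 * A short worked example (illustrative only): suppose the last block holds
 * four words [S1, D1a, D1b, S2], where S1 and S2 are single-word entries
 * and (D1a, D1b) is one double-word entry. The reversed buffer produced
 * below is [S2, D1a, D1b, S1]: entries come out in reverse order, but the
 * two words of a double-word entry keep their original order so they can
 * still be decoded front-to-back.
 */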
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_length - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}

/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us to where to start between each step. We could try zeroing
	 * out entries that we've destroyed, but this doesn't work either
	 * as an entry that is 0 is a valid one (ALLOC for range
	 * [0x0:0x200]).
	 *
	 * As a result, we destroy the space map's entries incrementally
	 * starting from the end, applying the callback to each of them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards, as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, etc.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry,
	 *    we decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
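	/*
	 * A small worked example of the loop below (illustrative only):
	 * if the last block holds the words [S1, D1a, D1b], where S1 is a
	 * single-word entry and (D1a, D1b) is a double-word entry, then
	 * the reversed buffer is [D1a, D1b, S1]. The double-word entry is
	 * destroyed first (smp_length shrinks by two words), then S1 (one
	 * more word). With that block drained, the outer loop re-reads
	 * whatever block now contains the end of the space map.
	 */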
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_length -= sizeof (uint64_t);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				e = buf[i];

				ASSERT3U(i, <, nwords);
				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_length -= words * sizeof (uint64_t);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(space_map_allocated(sm));
	}

	zio_buf_free(buf, bufsz);
	return (error);
}

typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
	}

	return (0);
}

/*
 * Load the space map into the range tree, as in space_map_load(), but
 * only read the first 'length' bytes of the space map.
 */
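/*
 * A hedged usage sketch (assuming the range_tree_create(ops, arg) signature
 * from this codebase's sys/range_tree.h; 'synced_length' is a stand-in for
 * a length the caller recorded earlier):
 *
 *	range_tree_t *rt = range_tree_create(NULL, NULL);
 *	int error = space_map_load_length(sm, rt, SM_ALLOC, synced_length);
 *	if (error == 0) {
 *		... use rt ...
 *	}
 *	range_tree_vacate(rt, NULL, NULL);
 *	range_tree_destroy(rt);
 */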
int
space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t length)
{
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));

	if (maptype == SM_FREE)
		range_tree_add(rt, sm->sm_start, sm->sm_size);

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	int err = space_map_iterate(sm, length,
	    space_map_load_callback, &smla);

	if (err != 0)
		range_tree_vacate(rt, NULL, NULL);

	return (err);
}

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree, other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	return (space_map_load_length(sm, rt, maptype, space_map_length(sm)));
}

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets
	 * ranging from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than
	 * sm_shift.
	 */
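	/*
	 * For instance (illustrative, assuming sm_shift == 9): range tree
	 * bucket i == 9 (512-byte segments) maps to smp_histogram[0],
	 * i == 10 maps to smp_histogram[1], and so on, i.e.
	 * idx == i - sm_shift until idx saturates at the last bucket.
	 */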
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. If the
		 * range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to
		 * 5 * 2^4 (16).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_length,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_length += sizeof (dentry);
}

/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
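/*
 * Illustrative note: a segment longer than the encodable run is split into
 * multiple entries by the loop below. For example (hypothetical numbers),
 * with words == 1 and a segment spanning 2.5 * SM_RUN_MAX units after
 * shifting, three one-word entries are written: two with run_len ==
 * SM_RUN_MAX and one with the 0.5 * SM_RUN_MAX remainder.
 */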
static void
space_map_write_seg(space_map_t *sm, range_seg_t *rs, maptype_t maptype,
    uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp, void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * if this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
	uint64_t start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rs->rs_start, >=, sm->sm_start);
	ASSERT3U(rs->rs_start, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rs->rs_end - rs->rs_start, <=, sm->sm_size);
	ASSERT3U(rs->rs_end, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_length;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_length += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}
		sm->sm_phys->smp_length += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
	ASSERT0(size);
}

/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_length;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_length;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	avl_tree_t *t = &rt->rt_root;
	for (range_seg_t *rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t offset = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
		uint64_t length = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *	or the vdev_id is set (meaning not equal to
		 *	SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    spa_get_random(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs, maptype, vdev_id, words,
		    &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef DEBUG
	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be equal or less
	 * than whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_length);
#endif
}

/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for
	 * backwards compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = avl_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset.  Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_length = 0;
	sm->sm_phys->smp_alloc = 0;
}

uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}

/*
 * Given a range tree, make a worst-case estimate of how much space the
 * tree's segments would take if they were written to the given space map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * The rt_histogram[i] bucket of a range tree represents the number
	 * of segments in [2^i, (2^(i+1))-1] of that range_tree. Given
	 * that, we want to divide the buckets into groups: buckets that
	 * can be represented using a single-word entry, ones that can
	 * be represented with a double-word entry, and ones that can
	 * only be represented with multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets. The information below assumes
	 * two-word entries are enabled, but it can easily be applied
	 * when the feature is not enabled]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent]. This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * bucket higher than that is represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry, 2^(SM_RUN_BITS + shift), is
	 * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries, and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
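	/*
	 * A worked example (assuming the values of SM_RUN_BITS and
	 * SM2_RUN_BITS from sys/space_map.h, 15 and 36 respectively, and
	 * shift == 9): single_entry_max_bucket is 15 + 9 - 1 = 23, so
	 * every segment in buckets 0 through 23 costs one word; with the
	 * V2 feature enabled, double_entry_max_bucket is 36 + 9 - 1 = 44,
	 * so buckets 24 through 44 cost two words per segment, bucket 45
	 * costs 2 * 2 words per segment, bucket 46 costs 4 * 2, and so on.
	 */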
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

int64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_alloc : 0);
}

uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_length : 0);
}

uint64_t
space_map_nblocks(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	return (DIV_ROUND_UP(space_map_length(sm), sm->sm_blksz));
}
1079