space_map.c revision 17f11284b49b98353b5119463254074fd9bc0a28
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core and to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */

/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;

boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}
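
/*
 * Illustrative sketch (not part of the original file): round-tripping
 * a single-word entry through the encode/decode macros that the
 * predicates above key on. The exact bit layouts live in
 * sys/space_map.h.
 *
 *	uint64_t e = SM_OFFSET_ENCODE(raw_offset) |
 *	    SM_TYPE_ENCODE(SM_ALLOC) |
 *	    SM_RUN_ENCODE(raw_run);		(raw_run must be >= 1)
 *
 *	ASSERT(sm_entry_is_single_word(e));
 *	ASSERT3U(SM_OFFSET_DECODE(e), ==, raw_offset);
 *	ASSERT3U(SM_RUN_DECODE(e), ==, raw_run);
 *
 * A two-word entry instead carries SM2_PREFIX in its first word,
 * which is what sm_entry_is_double_word() checks for.
 */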

/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry.
 */
int
space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
	uint64_t sm_len = space_map_length(sm);
	ASSERT3U(sm->sm_blksz, !=, 0);

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, sm_len,
	    ZIO_PRIORITY_SYNC_READ);

	uint64_t blksz = sm->sm_blksz;
	int error = 0;
	for (uint64_t block_base = 0; block_base < sm_len && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(sm_len - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e)) /* Skip debug entries */
				continue;

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move on to the second word */
				block_cursor++;
				VERIFY3P(block_cursor, <, block_end);
				e = *block_cursor;

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
		}
		dmu_buf_rele(db, FTAG);
	}
	return (error);
}
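
/*
 * Usage sketch (illustrative only; the callback below is hypothetical):
 * space_map_iterate() makes one callback invocation per decoded entry,
 * and a non-zero return value from the callback stops the iteration.
 *
 *	static int
 *	count_entries_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		uint64_t *count = arg;
 *		(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	int err = space_map_iterate(sm, count_entries_cb, &count);
 */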

/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to the block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_objsize - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}
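
/*
 * Worked example (illustrative): if the last block holds the words
 * [S, D1, D2], where S is a single-word entry and (D1, D2) form one
 * two-word entry, the reversed buffer becomes [D1, D2, S]. Entries
 * are reversed, but the two words of a double-word entry keep their
 * on-disk order so the entry can still be decoded first-word-first.
 */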

/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us where to start between each step. We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy its entries incrementally starting from
	 * the end after applying the callback to each of them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, etc.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry we
	 *    decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_objsize -= sizeof (uint64_t);
				space_map_update(sm);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				ASSERT3U(i, <, nwords);
				e = buf[i];

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_objsize -= words * sizeof (uint64_t);
			space_map_update(sm);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(sm->sm_phys->smp_objsize);
		ASSERT0(sm->sm_alloc);
	}

	zio_buf_free(buf, bufsz);
	return (error);
}
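
/*
 * Usage sketch (illustrative only; destroy_cb and its freeing logic
 * are hypothetical): a caller in syncing context typically drives the
 * destroy like this, doing the actual freeing work per entry in the
 * callback.
 *
 *	static int
 *	destroy_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		... free or account sme->sme_offset / sme->sme_run ...
 *		return (0);
 *	}
 *
 *	int err = space_map_incremental_destroy(sm, destroy_cb, arg, tx);
 */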

typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
	}

	return (0);
}

/*
 * Load the on-disk space map into the specified range tree. Segments of
 * maptype are added to the range tree; all other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t space;
	int err;
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));
	space = space_map_allocated(sm);

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	err = space_map_iterate(sm, space_map_load_callback, &smla);

	if (err == 0) {
		VERIFY3U(range_tree_space(rt), ==, space);
	} else {
		range_tree_vacate(rt, NULL, NULL);
	}

	return (err);
}
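
/*
 * Usage sketch (illustrative): to load the free space of a space map,
 * pass SM_FREE with an empty range tree rt created by the caller. The
 * tree starts out covering [sm_start, sm_start + sm_size) and each
 * ALLOC entry punches a hole in it:
 *
 *	int err = space_map_load(sm, rt, SM_FREE);
 *
 * On success, rt describes exactly the free segments; on failure it
 * is vacated back to empty.
 */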

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the contents of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree, however,
	 * can represent ranges from 2^0 to 2^63. Since the space map only
	 * cares about allocatable blocks (minimum of 2^sm_shift) we can
	 * safely ignore all ranges in the range tree smaller than 2^sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. If the
		 * range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to
		 * 5 * 2^4 (i.e., 80).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
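
/*
 * Worked example (illustrative): with sm_shift = 9, range tree bucket
 * i = 12 (4K-8K segments) lands in space map bucket idx = 3 with no
 * scaling, since i - idx - sm_shift == 0. Once idx saturates at the
 * last bucket (31), every larger range tree bucket i contributes
 * rt_histogram[i] << (i - 31 - 9) counts to that bucket.
 */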

static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_objsize,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_objsize += sizeof (dentry);
}

/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, range_seg_t *rs, maptype_t maptype,
    uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp, void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * if this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
	uint64_t start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rs->rs_start, >=, sm->sm_start);
	ASSERT3U(rs->rs_start, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rs->rs_end - rs->rs_start, <=, sm->sm_size);
	ASSERT3U(rs->rs_end, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_objsize;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_objsize += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}
		sm->sm_phys->smp_objsize += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
	ASSERT0(size);
}
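
/*
 * Worked example (illustrative): with words == 1, a segment whose
 * shifted size is 2 * SM_RUN_MAX + 100 is written as three
 * single-word entries with runs of SM_RUN_MAX, SM_RUN_MAX and 100,
 * each starting where the previous one ended. Two-word entries are
 * split the same way against SM2_RUN_MAX.
 */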

/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_objsize;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_objsize;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	avl_tree_t *t = &rt->rt_root;
	for (range_seg_t *rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t offset = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
		uint64_t length = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 * 	or the vdev_id is set (meaning not equal to
		 * 	SM_NO_VDEVID).
		 *
		 * Note that for testing purposes we occasionally write
		 * two-word entries even when they are not strictly needed,
		 * provided the feature is enabled and the
		 * zfs_force_some_double_word_sm_entries tunable is set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    spa_get_random(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs, maptype, vdev_id, words,
		    &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef DEBUG
	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see the comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be equal to or less
	 * than whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_objsize);
#endif
}

/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for
	 * backwards compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = avl_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}
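
/*
 * Usage sketch (illustrative; the surrounding condense logic is
 * hypothetical): a typical caller in syncing context truncates the
 * space map and then writes out the surviving ranges in one pass:
 *
 *	space_map_truncate(sm, blocksize, tx);
 *	space_map_write(sm, rt, SM_ALLOC, SM_NO_VDEVID, tx);
 */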

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset.  Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os, DMU_OT_SPACE_MAP, blocksize,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}

/*
 * Given a range tree, make a worst-case estimate of how much space the
 * tree's segments would take if they were written to the given space
 * map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * The rt_histogram[i] bucket of a range tree represents the
	 * number of segments in that range tree whose size falls in
	 * [2^i, 2^(i+1) - 1]. Given that, we want to divide the buckets
	 * into groups: buckets that can be represented using a
	 * single-word entry, ones that can be represented with a
	 * double-word entry, and ones that can only be represented with
	 * multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets. The information below assumes
	 * two-word entries are enabled, but it can easily be applied
	 * when the feature is not enabled]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such an
	 * entry can have, which is 2^(SM_RUN_BITS + sm_shift) [remember
	 * that the run of a space map entry is shifted by sm_shift,
	 * thus we add it to the exponent]. This way, excluding the
	 * value of the maximum run that can be represented by a
	 * single-word entry, all runs that are smaller exist in
	 * buckets 0 to SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * bucket higher than that is represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry, 2^(SM_RUN_BITS + shift),
	 * is not part of bucket SM_RUN_BITS + shift - 1. Thus, segments
	 * of that length fall into the next bucket (and bucket group)
	 * where we start counting two-word entries, and this is one more
	 * reason why the estimated size may end up being bigger than the
	 * actual size written.
	 */
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}
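
/*
 * Worked example (illustrative, assuming SM_RUN_BITS = 15 and
 * shift = 9): buckets 0 through 23 are single-word buckets, so a
 * histogram with 10 segments in bucket 20 contributes 10 words
 * (80 bytes) to the estimate when two-word entries are disabled;
 * with the feature enabled and a vdev_id set, the same segments
 * are counted as 10 two-word entries (160 bytes) instead.
 */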

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}
1088