/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zio.h>
#include <sys/range_tree.h>

/*
 * Range trees are tree-based data structures that can be used to
 * track free space or generally any space allocation information.
 * A range tree keeps track of individual segments and automatically
 * provides facilities such as adjacent extent merging and extent
 * splitting in response to range add/remove requests.
 *
 * A range tree starts out completely empty, with no segments in it.
 * Adding an allocation via range_tree_add() does one of the following:
 * 1) create a new extent
 * 2) extend an adjacent extent
 * 3) merge two adjacent extents
 * Conversely, removing an allocation via range_tree_remove() can:
 * 1) completely remove an extent
 * 2) shorten an extent (if the allocation was near one of its ends)
 * 3) split an extent into two extents, in effect punching a hole
 *
 * A range tree is also capable of 'bridging' gaps when adding
 * allocations. This is useful for cases when close proximity of
 * allocations is an important detail that needs to be represented
 * in the range tree. The maximum allowed gap size is specified at
 * tree creation time via range_tree_create_impl(); the default is
 * not to bridge gaps (i.e. a maximum allowed gap size of 0). Trees
 * with gap support are used by the dsl scanning code to group I/Os
 * into mostly sequential chunks to optimize disk performance, with
 * as little memory and computational overhead as possible. One
 * limitation of this implementation is that range trees with gaps
 * only support removing complete segments.
 *
 * In order to traverse a range tree, use either the range_tree_walk()
 * or range_tree_vacate() functions.
 *
 * To be notified of the individual segment operations that the range
 * tree performs "under the hood", specify a set of callbacks by
 * passing a range_tree_ops_t structure to range_tree_create(). Any
 * callbacks that are non-NULL are then called at the appropriate
 * times.
 */
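
/*
 * Illustrative usage (a sketch, not compiled as part of this file; the
 * offsets are arbitrary):
 *
 *	range_tree_t *rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
 *
 *	range_tree_add(rt, 0x1000, 0x2000);	-- segment [0x1000, 0x3000)
 *	range_tree_add(rt, 0x3000, 0x1000);	-- extended to [0x1000, 0x4000)
 *	range_tree_remove(rt, 0x2000, 0x800);	-- split into [0x1000, 0x2000)
 *						-- and [0x2800, 0x4000)
 *
 *	range_tree_walk(rt, func, arg);		-- func is called once per
 *						-- (start, size) segment
 *	range_tree_vacate(rt, NULL, NULL);	-- empty the tree
 *	range_tree_destroy(rt);
 */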

static inline void
rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
{
	ASSERT3U(rt->rt_type, <, RANGE_SEG_NUM_TYPES);
	size_t size = 0;
	switch (rt->rt_type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		break;
	case RANGE_SEG_GAP:
		size = sizeof (range_seg_gap_t);
		break;
	default:
		VERIFY(0);
	}
	bcopy(src, dest, size);
}

void
range_tree_stat_verify(range_tree_t *rt)
{
	range_seg_t *rs;
	zfs_btree_index_t where;
	uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
	int i;

	for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL;
	    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
		int idx = highbit64(size) - 1;

		hist[idx]++;
		ASSERT3U(hist[idx], !=, 0);
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		if (hist[i] != rt->rt_histogram[i]) {
			zfs_dbgmsg("i=%d, hist=%p, hist[i]=%llu, "
			    "rt_hist[i]=%llu",
			    i, hist, hist[i], rt->rt_histogram[i]);
		}
		VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
	}
}

static void
range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs)
{
	uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	rt->rt_histogram[idx]++;
	ASSERT3U(rt->rt_histogram[idx], !=, 0);
}

static void
range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs)
{
	uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	ASSERT3U(rt->rt_histogram[idx], !=, 0);
	rt->rt_histogram[idx]--;
}
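
/*
 * The histogram buckets segments by the power of two of their size. For
 * example, a segment of size 6144 (0x1800) has highbit64(6144) == 13, so
 * it is counted in rt_histogram[12], the bucket for sizes in
 * [2^12, 2^13).
 */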

static int
range_tree_seg32_compare(const void *x1, const void *x2)
{
	const range_seg32_t *r1 = x1;
	const range_seg32_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

static int
range_tree_seg64_compare(const void *x1, const void *x2)
{
	const range_seg64_t *r1 = x1;
	const range_seg64_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

static int
range_tree_seg_gap_compare(const void *x1, const void *x2)
{
	const range_seg_gap_t *r1 = x1;
	const range_seg_gap_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
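
/*
 * Note that the three comparators above treat overlapping segments as
 * equal: (r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start)
 * evaluates to -1 when r1 lies entirely below r2, to 1 when r1 lies
 * entirely above r2, and to 0 when the two segments overlap. This is
 * what allows zfs_btree_find() to locate a segment that overlaps an
 * arbitrary search range.
 */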

range_tree_t *
range_tree_create_impl(range_tree_ops_t *ops, range_seg_type_t type, void *arg,
    uint64_t start, uint64_t shift,
    int (*zfs_btree_compare) (const void *, const void *),
    uint64_t gap)
{
	range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP);

	ASSERT3U(shift, <, 64);
	ASSERT3U(type, <, RANGE_SEG_NUM_TYPES);
	size_t size;
	int (*compare) (const void *, const void *);
	switch (type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		compare = range_tree_seg32_compare;
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		compare = range_tree_seg64_compare;
		break;
	case RANGE_SEG_GAP:
		size = sizeof (range_seg_gap_t);
		compare = range_tree_seg_gap_compare;
		break;
	default:
		panic("Invalid range seg type %d", type);
	}
	zfs_btree_create(&rt->rt_root, compare, size);

	rt->rt_ops = ops;
	rt->rt_arg = arg;
	rt->rt_gap = gap;
	rt->rt_type = type;
	rt->rt_start = start;
	rt->rt_shift = shift;
	rt->rt_btree_compare = zfs_btree_compare;

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL)
		rt->rt_ops->rtop_create(rt, rt->rt_arg);

	return (rt);
}

range_tree_t *
range_tree_create(range_tree_ops_t *ops, range_seg_type_t type,
    void *arg, uint64_t start, uint64_t shift)
{
	return (range_tree_create_impl(ops, type, arg, start, shift, NULL, 0));
}

void
range_tree_destroy(range_tree_t *rt)
{
	VERIFY0(rt->rt_space);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
		rt->rt_ops->rtop_destroy(rt, rt->rt_arg);

	zfs_btree_destroy(&rt->rt_root);
	kmem_free(rt, sizeof (*rt));
}

void
range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta)
{
	ASSERT3U(rs_get_fill(rs, rt) + delta, !=, 0);
	ASSERT3U(rs_get_fill(rs, rt) + delta, <=, rs_get_end(rs, rt) -
	    rs_get_start(rs, rt));

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
	rs_set_fill(rs, rt, rs_get_fill(rs, rt) + delta);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}

static void
range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
{
	range_tree_t *rt = arg;
	zfs_btree_index_t where;
	range_seg_t *rs_before, *rs_after, *rs;
	range_seg_max_t tmp, rsearch;
	uint64_t end = start + size, gap = rt->rt_gap;
	uint64_t bridge_size = 0;
	boolean_t merge_before, merge_after;

	ASSERT3U(size, !=, 0);
	ASSERT3U(fill, <=, size);
	ASSERT3U(start + size, >, start);

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, end);
	rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);

	/*
	 * If this is a gap-supporting range tree, it is possible that we
	 * are inserting into an existing segment. In this case simply
	 * bump the fill count and call the remove / add callbacks. If the
	 * new range will extend an existing segment, we remove the
	 * existing one, apply the new extent to it and re-insert it using
	 * the normal code paths. (An illustrative example follows
	 * range_tree_add() below.)
	 */
	if (rs != NULL) {
		ASSERT3U(rt->rt_gap, !=, 0);
		uint64_t rstart = rs_get_start(rs, rt);
		uint64_t rend = rs_get_end(rs, rt);
		if (rstart <= start && rend >= end) {
			range_tree_adjust_fill(rt, rs, fill);
			return;
		}

		zfs_btree_remove(&rt->rt_root, rs);
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

		range_tree_stat_decr(rt, rs);
		rt->rt_space -= rend - rstart;

		fill += rs_get_fill(rs, rt);
		start = MIN(start, rstart);
		end = MAX(end, rend);
		size = end - start;

		range_tree_add_impl(rt, start, size, fill);
		return;
	}

	ASSERT3P(rs, ==, NULL);

	/*
	 * Determine whether or not we will have to merge with our neighbors.
	 * If gap != 0, we might need to merge with our neighbors even if we
	 * aren't directly touching.
	 */
	zfs_btree_index_t where_before, where_after;
	rs_before = zfs_btree_prev(&rt->rt_root, &where, &where_before);
	rs_after = zfs_btree_next(&rt->rt_root, &where, &where_after);

	merge_before = (rs_before != NULL && rs_get_end(rs_before, rt) >=
	    start - gap);
	merge_after = (rs_after != NULL && rs_get_start(rs_after, rt) <= end +
	    gap);

	if (merge_before && gap != 0)
		bridge_size += start - rs_get_end(rs_before, rt);
	if (merge_after && gap != 0)
		bridge_size += rs_get_start(rs_after, rt) - end;

	if (merge_before && merge_after) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
		}

		range_tree_stat_decr(rt, rs_before);
		range_tree_stat_decr(rt, rs_after);

		rs_copy(rs_after, &tmp, rt);
		uint64_t before_start = rs_get_start_raw(rs_before, rt);
		uint64_t before_fill = rs_get_fill(rs_before, rt);
		uint64_t after_fill = rs_get_fill(rs_after, rt);
		zfs_btree_remove_from(&rt->rt_root, &where_before);

		/*
		 * We have to re-find the node because our old reference is
		 * invalid as soon as we do any mutating btree operations.
		 */
		rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
		rs_set_start_raw(rs_after, rt, before_start);
		rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
		rs = rs_after;
	} else if (merge_before) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);

		range_tree_stat_decr(rt, rs_before);

		uint64_t before_fill = rs_get_fill(rs_before, rt);
		rs_set_end(rs_before, rt, end);
		rs_set_fill(rs_before, rt, before_fill + fill);
		rs = rs_before;
	} else if (merge_after) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);

		range_tree_stat_decr(rt, rs_after);

		uint64_t after_fill = rs_get_fill(rs_after, rt);
		rs_set_start(rs_after, rt, start);
		rs_set_fill(rs_after, rt, after_fill + fill);
		rs = rs_after;
	} else {
		rs = &tmp;

		rs_set_start(rs, rt, start);
		rs_set_end(rs, rt, end);
		rs_set_fill(rs, rt, fill);
		zfs_btree_insert(&rt->rt_root, rs, &where);
	}

	if (gap != 0) {
		ASSERT3U(rs_get_fill(rs, rt), <=, rs_get_end(rs, rt) -
		    rs_get_start(rs, rt));
	} else {
		ASSERT3U(rs_get_fill(rs, rt), ==, rs_get_end(rs, rt) -
		    rs_get_start(rs, rt));
	}

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	range_tree_stat_incr(rt, rs);
	rt->rt_space += size + bridge_size;
}

void
range_tree_add(void *arg, uint64_t start, uint64_t size)
{
	range_tree_add_impl(arg, start, size, size);
}
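
/*
 * Illustrative example of gap bridging (a sketch; the gap size of 64 is
 * arbitrary): with rt_gap == 64, segments that come within 64 bytes of
 * each other are merged, and the bridged bytes are counted in rt_space
 * but not in the segment's fill:
 *
 *	range_tree_t *rt = range_tree_create_impl(NULL, RANGE_SEG_GAP,
 *	    NULL, 0, 0, NULL, 64);
 *	range_tree_add(rt, 0, 100);	-- segment [0, 100), fill 100
 *	range_tree_add(rt, 150, 50);	-- bridged into [0, 200), fill 150;
 *					-- rt_space is now 200
 */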

static void
range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
    boolean_t do_fill)
{
	zfs_btree_index_t where;
	range_seg_t *rs;
	range_seg_max_t rsearch, rs_tmp;
	uint64_t end = start + size;
	boolean_t left_over, right_over;

	VERIFY3U(size, !=, 0);
	VERIFY3U(size, <=, rt->rt_space);
	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, end);
	rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);

	/* Make sure we completely overlap with someone */
	if (rs == NULL) {
		zfs_panic_recover("zfs: removing nonexistent segment from "
		    "range tree (offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/*
	 * Range trees with gap support must only remove complete segments
	 * from the tree. This allows us to maintain accurate fill accounting
	 * and to ensure that bridged sections are not leaked. If we need to
	 * remove less than the full segment, we can only adjust the fill
	 * count.
	 */
	if (rt->rt_gap != 0) {
		if (do_fill) {
			if (rs_get_fill(rs, rt) == size) {
				start = rs_get_start(rs, rt);
				end = rs_get_end(rs, rt);
				size = end - start;
			} else {
				range_tree_adjust_fill(rt, rs, -size);
				return;
			}
		} else if (rs_get_start(rs, rt) != start ||
		    rs_get_end(rs, rt) != end) {
			zfs_panic_recover("zfs: freeing partial segment of "
			    "gap tree (offset=%llu size=%llu) of "
			    "(offset=%llu size=%llu)",
			    (longlong_t)start, (longlong_t)size,
			    (longlong_t)rs_get_start(rs, rt),
			    (longlong_t)rs_get_end(rs, rt) - rs_get_start(rs,
			    rt));
			return;
		}
	}

	VERIFY3U(rs_get_start(rs, rt), <=, start);
	VERIFY3U(rs_get_end(rs, rt), >=, end);

	left_over = (rs_get_start(rs, rt) != start);
	right_over = (rs_get_end(rs, rt) != end);

	range_tree_stat_decr(rt, rs);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	if (left_over && right_over) {
		range_seg_max_t newseg;
		rs_set_start(&newseg, rt, end);
		rs_set_end_raw(&newseg, rt, rs_get_end_raw(rs, rt));
		rs_set_fill(&newseg, rt, rs_get_end(rs, rt) - end);
		range_tree_stat_incr(rt, &newseg);

		/* This modifies the buffer already inside the range tree */
		rs_set_end(rs, rt, start);

		rs_copy(rs, &rs_tmp, rt);
		if (zfs_btree_next(&rt->rt_root, &where, &where) != NULL)
			zfs_btree_insert(&rt->rt_root, &newseg, &where);
		else
			zfs_btree_add(&rt->rt_root, &newseg);

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, &newseg, rt->rt_arg);
	} else if (left_over) {
		/* This modifies the buffer already inside the range tree */
		rs_set_end(rs, rt, start);
		rs_copy(rs, &rs_tmp, rt);
	} else if (right_over) {
		/* This modifies the buffer already inside the range tree */
		rs_set_start(rs, rt, end);
		rs_copy(rs, &rs_tmp, rt);
	} else {
		zfs_btree_remove_from(&rt->rt_root, &where);
		rs = NULL;
	}

	if (rs != NULL) {
		/*
		 * The fill of the leftover segment will always be equal to
		 * the size, since we do not support removing partial segments
		 * of range trees with gaps.
		 */
		rs_set_fill_raw(rs, rt, rs_get_end_raw(rs, rt) -
		    rs_get_start_raw(rs, rt));
		range_tree_stat_incr(rt, &rs_tmp);

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, &rs_tmp, rt->rt_arg);
	}

	rt->rt_space -= size;
}

void
range_tree_remove(void *arg, uint64_t start, uint64_t size)
{
	range_tree_remove_impl(arg, start, size, B_FALSE);
}

void
range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_tree_remove_impl(rt, start, size, B_TRUE);
}
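
/*
 * Illustrative example (a sketch, continuing the gap-tree example after
 * range_tree_add() above): removing by fill either debits the fill count
 * or, when the requested size matches the fill exactly, removes the
 * whole segment including any bridged space:
 *
 *	range_tree_remove_fill(rt, 150, 50);	-- fill 150 -> 100; the
 *						-- segment [0, 200) remains
 *	range_tree_remove_fill(rt, 0, 100);	-- size == fill, so the
 *						-- whole of [0, 200) goes
 */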

void
range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
    uint64_t newstart, uint64_t newsize)
{
	int64_t delta = newsize - (rs_get_end(rs, rt) - rs_get_start(rs, rt));

	range_tree_stat_decr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	rs_set_start(rs, rt, newstart);
	rs_set_end(rs, rt, newstart + newsize);

	range_tree_stat_incr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	rt->rt_space += delta;
}

static range_seg_t *
range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_seg_max_t rsearch;
	uint64_t end = start + size;

	VERIFY(size != 0);

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, end);
	return (zfs_btree_find(&rt->rt_root, &rsearch, NULL));
}

range_seg_t *
range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size)
{
	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	range_seg_t *rs = range_tree_find_impl(rt, start, size);
	if (rs != NULL && rs_get_start(rs, rt) <= start &&
	    rs_get_end(rs, rt) >= start + size) {
		return (rs);
	}
	return (NULL);
}

void
range_tree_verify_not_present(range_tree_t *rt, uint64_t off, uint64_t size)
{
	range_seg_t *rs = range_tree_find(rt, off, size);
	if (rs != NULL)
		panic("segment already in tree; rs=%p", (void *)rs);
}

boolean_t
range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size)
{
	return (range_tree_find(rt, start, size) != NULL);
}

/*
 * Find the first subrange of [start, start + size) that overlaps the
 * range tree and store it in *ostart and *osize. Returns B_TRUE if such
 * an overlap exists, and B_FALSE otherwise.
 */
boolean_t
range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
    uint64_t *ostart, uint64_t *osize)
{
	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	range_seg_max_t rsearch;
	rs_set_start(&rsearch, rt, start);
	rs_set_end_raw(&rsearch, rt, rs_get_start_raw(&rsearch, rt) + 1);

	zfs_btree_index_t where;
	range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
	if (rs != NULL) {
		*ostart = start;
		*osize = MIN(size, rs_get_end(rs, rt) - start);
		return (B_TRUE);
	}

	rs = zfs_btree_next(&rt->rt_root, &where, &where);
	if (rs == NULL || rs_get_start(rs, rt) > start + size)
		return (B_FALSE);

	*ostart = rs_get_start(rs, rt);
	*osize = MIN(start + size, rs_get_end(rs, rt)) -
	    rs_get_start(rs, rt);
	return (B_TRUE);
}
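
/*
 * Illustrative example (a sketch): if the tree holds the single segment
 * [0x2000, 0x3000), then range_tree_find_in(rt, 0x1000, 0x1800, &o, &s)
 * returns B_TRUE with o == 0x2000 and s == 0x800, while
 * range_tree_find_in(rt, 0x3000, 0x1000, &o, &s) returns B_FALSE.
 */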

/*
 * Remove any portion of the given range that is present in the tree.
 * It is fine if none, some, or all of the range is currently in the
 * tree; afterwards, none of it will be.
 */
void
range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_seg_t *rs;

	if (size == 0)
		return;

	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	while ((rs = range_tree_find_impl(rt, start, size)) != NULL) {
		uint64_t free_start = MAX(rs_get_start(rs, rt), start);
		uint64_t free_end = MIN(rs_get_end(rs, rt), start + size);
		range_tree_remove(rt, free_start, free_end - free_start);
	}
}
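
/*
 * Illustrative example (a sketch): if the tree holds [0x800, 0x1800) and
 * [0x2000, 0x3000), then range_tree_clear(rt, 0x1000, 0x1800) leaves
 * only [0x800, 0x1000) and [0x2800, 0x3000) behind.
 */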

void
range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
{
	range_tree_t *rt;

	ASSERT0(range_tree_space(*rtdst));
	ASSERT0(zfs_btree_numnodes(&(*rtdst)->rt_root));

	rt = *rtsrc;
	*rtsrc = *rtdst;
	*rtdst = rt;
}

void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
		rt->rt_ops->rtop_vacate(rt, rt->rt_arg);

	if (func != NULL) {
		range_seg_t *rs;
		zfs_btree_index_t *cookie = NULL;

		while ((rs = zfs_btree_destroy_nodes(&rt->rt_root, &cookie)) !=
		    NULL) {
			func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
			    rs_get_start(rs, rt));
		}
	} else {
		zfs_btree_clear(&rt->rt_root);
	}

	bzero(rt->rt_histogram, sizeof (rt->rt_histogram));
	rt->rt_space = 0;
}

void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	zfs_btree_index_t where;
	for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where);
	    rs != NULL; rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
		    rs_get_start(rs, rt));
	}
}

range_seg_t *
range_tree_first(range_tree_t *rt)
{
	return (zfs_btree_first(&rt->rt_root, NULL));
}

uint64_t
range_tree_space(range_tree_t *rt)
{
	return (rt->rt_space);
}

uint64_t
range_tree_numsegs(range_tree_t *rt)
{
	return ((rt == NULL) ? 0 : zfs_btree_numnodes(&rt->rt_root));
}

boolean_t
range_tree_is_empty(range_tree_t *rt)
{
	ASSERT(rt != NULL);
	return (range_tree_space(rt) == 0);
}

/* ARGSUSED */
void
rt_btree_create(range_tree_t *rt, void *arg)
{
	zfs_btree_t *size_tree = arg;

	size_t size;
	switch (rt->rt_type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		break;
	case RANGE_SEG_GAP:
		size = sizeof (range_seg_gap_t);
		break;
	default:
		panic("Invalid range seg type %d", rt->rt_type);
	}
	zfs_btree_create(size_tree, rt->rt_btree_compare, size);
}

/* ARGSUSED */
void
rt_btree_destroy(range_tree_t *rt, void *arg)
{
	zfs_btree_t *size_tree = arg;
	ASSERT0(zfs_btree_numnodes(size_tree));

	zfs_btree_destroy(size_tree);
}

/* ARGSUSED */
void
rt_btree_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	zfs_btree_t *size_tree = arg;

	zfs_btree_add(size_tree, rs);
}

/* ARGSUSED */
void
rt_btree_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	zfs_btree_t *size_tree = arg;

	zfs_btree_remove(size_tree, rs);
}

/* ARGSUSED */
void
rt_btree_vacate(range_tree_t *rt, void *arg)
{
	zfs_btree_t *size_tree = arg;
	zfs_btree_clear(size_tree);
	zfs_btree_destroy(size_tree);

	rt_btree_create(rt, arg);
}

range_tree_ops_t rt_btree_ops = {
	.rtop_create = rt_btree_create,
	.rtop_destroy = rt_btree_destroy,
	.rtop_add = rt_btree_add,
	.rtop_remove = rt_btree_remove,
	.rtop_vacate = rt_btree_vacate
};
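
/*
 * Example use of rt_btree_ops (a sketch; the size-ordered comparator
 * below is hypothetical, not part of this file): the callbacks mirror
 * every segment into a second btree sorted by a different key, here by
 * segment size, so a caller can cheaply find the best-fitting segment.
 *
 *	static int
 *	size_compare(const void *x1, const void *x2)
 *	{
 *		const range_seg64_t *r1 = x1, *r2 = x2;
 *		uint64_t s1 = r1->rs_end - r1->rs_start;
 *		uint64_t s2 = r2->rs_end - r2->rs_start;
 *
 *		if (s1 != s2)
 *			return (s1 < s2 ? -1 : 1);
 *		return ((r1->rs_start > r2->rs_start) -
 *		    (r1->rs_start < r2->rs_start));
 *	}
 *
 *	zfs_btree_t size_tree;
 *	range_tree_t *rt = range_tree_create_impl(&rt_btree_ops,
 *	    RANGE_SEG64, &size_tree, 0, 0, size_compare, 0);
 */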

/*
 * Remove any part of the segment [start, end) that overlaps with
 * removefrom from removefrom, and add the non-overlapping leftovers to
 * addto. (A worked example follows the function.)
 */
void
range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
    range_tree_t *removefrom, range_tree_t *addto)
{
	zfs_btree_index_t where;
	range_seg_max_t starting_rs;
	rs_set_start(&starting_rs, removefrom, start);
	rs_set_end_raw(&starting_rs, removefrom, rs_get_start_raw(&starting_rs,
	    removefrom) + 1);

	range_seg_t *curr = zfs_btree_find(&removefrom->rt_root,
	    &starting_rs, &where);

	if (curr == NULL)
		curr = zfs_btree_next(&removefrom->rt_root, &where, &where);

	range_seg_t *next;
	for (; curr != NULL; curr = next) {
		if (start == end)
			return;
		VERIFY3U(start, <, end);

		/* there is no overlap */
		if (end <= rs_get_start(curr, removefrom)) {
			range_tree_add(addto, start, end - start);
			return;
		}

		uint64_t overlap_start = MAX(rs_get_start(curr, removefrom),
		    start);
		uint64_t overlap_end = MIN(rs_get_end(curr, removefrom),
		    end);
		uint64_t overlap_size = overlap_end - overlap_start;
		ASSERT3S(overlap_size, >, 0);
		range_seg_max_t rs;
		rs_copy(curr, &rs, removefrom);

		range_tree_remove(removefrom, overlap_start, overlap_size);

		if (start < overlap_start)
			range_tree_add(addto, start, overlap_start - start);

		start = overlap_end;
		next = zfs_btree_find(&removefrom->rt_root, &rs, &where);
		/*
		 * If we find something here, we only removed part of the
		 * curr segment. Either there's some left at the end
		 * because we've reached the end of the range we're removing,
		 * or there's some left at the start because we started
		 * partway through the range.  Either way, we continue with
		 * the loop. If it's the former, we'll return at the start of
		 * the loop, and if it's the latter we'll see if there is more
		 * area to process.
		 */
		if (next != NULL) {
			ASSERT(start == end || start == rs_get_end(&rs,
			    removefrom));
		}

		next = zfs_btree_next(&removefrom->rt_root, &where, &where);
	}
	VERIFY3P(curr, ==, NULL);

	if (start != end) {
		VERIFY3U(start, <, end);
		range_tree_add(addto, start, end - start);
	} else {
		VERIFY3U(start, ==, end);
	}
}
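
/*
 * Worked example (a sketch): if removefrom holds [10, 20) and [30, 40),
 * then
 *
 *	range_tree_remove_xor_add_segment(15, 35, removefrom, addto);
 *
 * removes [15, 20) and [30, 35) from removefrom (leaving [10, 15) and
 * [35, 40)) and adds the non-overlapping middle [20, 30) to addto.
 */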

/*
 * For each segment in rt, remove any part of it that overlaps with
 * removefrom from removefrom, and add any non-overlapping leftovers
 * to addto.
 */
void
range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
    range_tree_t *addto)
{
	zfs_btree_index_t where;
	for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
	    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		range_tree_remove_xor_add_segment(rs_get_start(rs, rt),
		    rs_get_end(rs, rt), removefrom, addto);
	}
}

uint64_t
range_tree_min(range_tree_t *rt)
{
	range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL);
	return (rs != NULL ? rs_get_start(rs, rt) : 0);
}

uint64_t
range_tree_max(range_tree_t *rt)
{
	range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL);
	return (rs != NULL ? rs_get_end(rs, rt) : 0);
}

uint64_t
range_tree_span(range_tree_t *rt)
{
	return (range_tree_max(rt) - range_tree_min(rt));
}