xref: /illumos-gate/usr/src/uts/common/fs/zfs/refcount.c (revision 9a8c5287)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/refcount.h>

#ifdef	ZFS_DEBUG

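/*
 * Tunables controlling reference tracking.  reference_tracking_enable
 * determines whether newly created refcounts record the individual holders
 * of each reference; it defaults to off in the kernel because the per-hold
 * allocations consume too much memory.  reference_history bounds how many
 * recently released references are kept on each refcount's rc_removed list
 * for debugging.
 */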
#ifdef _KERNEL
int reference_tracking_enable = FALSE; /* runs out of memory too easily */
#else
int reference_tracking_enable = TRUE;
#endif
int reference_history = 3; /* tunable */

static kmem_cache_t *reference_cache;
static kmem_cache_t *reference_history_cache;

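/*
 * Create the kmem caches that back reference_t nodes and the per-reference
 * removal history.  zfs_refcount_init() and zfs_refcount_fini() are meant
 * to bracket all other refcount use.
 */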
void
zfs_refcount_init(void)
{
	reference_cache = kmem_cache_create("reference_cache",
	    sizeof (reference_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	reference_history_cache = kmem_cache_create("reference_history_cache",
	    sizeof (uint64_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zfs_refcount_fini(void)
{
	kmem_cache_destroy(reference_cache);
	kmem_cache_destroy(reference_history_cache);
}

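/*
 * AVL comparator for tracked references.  Nodes are ordered by holder, then
 * by reference count contribution, and finally by node address so that
 * multiple holds with the same (holder, number) pair can coexist in the
 * tree.  Search keys set ref_search, which makes any node with a matching
 * holder and number compare equal.
 */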
static int
zfs_refcount_compare(const void *x1, const void *x2)
{
	const reference_t *r1 = (const reference_t *)x1;
	const reference_t *r2 = (const reference_t *)x2;

	int cmp1 = TREE_CMP(r1->ref_holder, r2->ref_holder);
	int cmp2 = TREE_CMP(r1->ref_number, r2->ref_number);
	int cmp = cmp1 ? cmp1 : cmp2;
	return ((cmp || r1->ref_search) ? cmp : TREE_PCMP(r1, r2));
}

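/*
 * Initialize a refcount: its mutex, the AVL tree of live references, and
 * the list of recently removed references.  Whether individual holders are
 * tracked defaults to the reference_tracking_enable tunable; the _tracked
 * and _untracked variants below override that default for a single
 * refcount.
 */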
void
zfs_refcount_create(zfs_refcount_t *rc)
{
	mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&rc->rc_tree, zfs_refcount_compare, sizeof (reference_t),
	    offsetof(reference_t, ref_link.a));
	list_create(&rc->rc_removed, sizeof (reference_t),
	    offsetof(reference_t, ref_link.l));
	rc->rc_count = 0;
	rc->rc_removed_count = 0;
	rc->rc_tracked = reference_tracking_enable;
}

void
zfs_refcount_create_tracked(zfs_refcount_t *rc)
{
	zfs_refcount_create(rc);
	rc->rc_tracked = B_TRUE;
}

void
zfs_refcount_create_untracked(zfs_refcount_t *rc)
{
	zfs_refcount_create(rc);
	rc->rc_tracked = B_FALSE;
}

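/*
 * Tear down a refcount, asserting that exactly "number" references remain
 * (normally zero) and freeing any tracked references and removal history.
 */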
void
zfs_refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
{
	reference_t *ref;
	void *cookie = NULL;

	ASSERT3U(rc->rc_count, ==, number);
	while ((ref = avl_destroy_nodes(&rc->rc_tree, &cookie)) != NULL)
		kmem_cache_free(reference_cache, ref);
	avl_destroy(&rc->rc_tree);

	while ((ref = list_remove_head(&rc->rc_removed))) {
		kmem_cache_free(reference_history_cache, ref->ref_removed);
		kmem_cache_free(reference_cache, ref);
	}
	list_destroy(&rc->rc_removed);
	mutex_destroy(&rc->rc_mtx);
}

void
zfs_refcount_destroy(zfs_refcount_t *rc)
{
	zfs_refcount_destroy_many(rc, 0);
}

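/*
 * rc_count is read with a single atomic load, so the current count can be
 * sampled without taking rc_mtx.
 */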
int
zfs_refcount_is_zero(zfs_refcount_t *rc)
{
	return (zfs_refcount_count(rc) == 0);
}

int64_t
zfs_refcount_count(zfs_refcount_t *rc)
{
	return (atomic_load_64(&rc->rc_count));
}

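/*
 * Add "number" references on behalf of "holder" and return the new count.
 * For untracked refcounts this is a single atomic add; for tracked
 * refcounts a reference_t recording the holder is inserted into the AVL
 * tree under rc_mtx.  Callers conventionally pass the address of the
 * holding object (or an FTAG-style tag) as the holder, e.g.
 * (illustrative only):
 *
 *	(void) zfs_refcount_add(&db->db_holds, tag);
 *	...
 *	(void) zfs_refcount_remove(&db->db_holds, tag);
 */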
int64_t
zfs_refcount_add_many(zfs_refcount_t *rc, uint64_t number, const void *holder)
{
	reference_t *ref;
	int64_t count;

	if (likely(!rc->rc_tracked)) {
		count = atomic_add_64_nv(&(rc)->rc_count, number);
		ASSERT3U(count, >=, number);
		return (count);
	}

	ref = kmem_cache_alloc(reference_cache, KM_SLEEP);
	ref->ref_holder = holder;
	ref->ref_number = number;
	ref->ref_search = B_FALSE;
	mutex_enter(&rc->rc_mtx);
	avl_add(&rc->rc_tree, ref);
	rc->rc_count += number;
	count = rc->rc_count;
	mutex_exit(&rc->rc_mtx);

	return (count);
}

int64_t
zfs_refcount_add(zfs_refcount_t *rc, const void *holder)
{
	return (zfs_refcount_add_many(rc, 1, holder));
}

void
zfs_refcount_add_few(zfs_refcount_t *rc, uint64_t number, const void *holder)
{
	if (likely(!rc->rc_tracked)) {
		(void) zfs_refcount_add_many(rc, number, holder);
	} else {
		for (; number > 0; number--)
			(void) zfs_refcount_add(rc, holder);
	}
}

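/*
 * Release "number" references held by "holder" and return the new count.
 * For untracked refcounts this is an atomic subtraction.  For tracked
 * refcounts the matching reference must exist in the AVL tree; if it does
 * not, the system panics.  When reference_history is positive, the released
 * reference is kept on the rc_removed list (trimmed to the configured
 * depth) to aid debugging of reference leaks.
 */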
int64_t
zfs_refcount_remove_many(zfs_refcount_t *rc, uint64_t number,
    const void *holder)
{
	reference_t *ref, s;
	int64_t count;

	if (likely(!rc->rc_tracked)) {
		count = atomic_add_64_nv(&(rc)->rc_count, -number);
		ASSERT3S(count, >=, 0);
		return (count);
	}

	s.ref_holder = holder;
	s.ref_number = number;
	s.ref_search = B_TRUE;
	mutex_enter(&rc->rc_mtx);
	ASSERT3U(rc->rc_count, >=, number);
	ref = avl_find(&rc->rc_tree, &s, NULL);
	if (unlikely(ref == NULL)) {
		panic("No such hold %p with count %" PRIu64 " on refcount %llx",
		    holder, number, (u_longlong_t)(uintptr_t)rc);
	}
	avl_remove(&rc->rc_tree, ref);
	if (reference_history > 0) {
		ref->ref_removed = kmem_cache_alloc(reference_history_cache,
		    KM_SLEEP);
		list_insert_head(&rc->rc_removed, ref);
		if (rc->rc_removed_count >= reference_history) {
			ref = list_remove_tail(&rc->rc_removed);
			kmem_cache_free(reference_history_cache,
			    ref->ref_removed);
			kmem_cache_free(reference_cache, ref);
		} else {
			rc->rc_removed_count++;
		}
	} else {
		kmem_cache_free(reference_cache, ref);
	}
	rc->rc_count -= number;
	count = rc->rc_count;
	mutex_exit(&rc->rc_mtx);
	return (count);
}

int64_t
zfs_refcount_remove(zfs_refcount_t *rc, const void *holder)
{
	return (zfs_refcount_remove_many(rc, 1, holder));
}

void
zfs_refcount_remove_few(zfs_refcount_t *rc, uint64_t number, const void *holder)
{
	if (likely(!rc->rc_tracked)) {
		(void) zfs_refcount_remove_many(rc, number, holder);
	} else {
		for (; number > 0; number--)
			(void) zfs_refcount_remove(rc, holder);
	}
}

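/*
 * Move every reference (and the removal history) from "src" to "dst",
 * leaving "src" empty.  The two locks are never held at the same time:
 * src's contents are first swapped into local structures under src's lock,
 * then merged into dst under dst's lock.
 */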
void
zfs_refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
{
	avl_tree_t tree;
	list_t removed;
	reference_t *ref;
	void *cookie = NULL;
	uint64_t count;
	uint_t removed_count;

	avl_create(&tree, zfs_refcount_compare, sizeof (reference_t),
	    offsetof(reference_t, ref_link.a));
	list_create(&removed, sizeof (reference_t),
	    offsetof(reference_t, ref_link.l));

	mutex_enter(&src->rc_mtx);
	count = src->rc_count;
	removed_count = src->rc_removed_count;
	src->rc_count = 0;
	src->rc_removed_count = 0;
	avl_swap(&tree, &src->rc_tree);
	list_move_tail(&removed, &src->rc_removed);
	mutex_exit(&src->rc_mtx);

	mutex_enter(&dst->rc_mtx);
	dst->rc_count += count;
	dst->rc_removed_count += removed_count;
	if (avl_is_empty(&dst->rc_tree))
		avl_swap(&dst->rc_tree, &tree);
	else while ((ref = avl_destroy_nodes(&tree, &cookie)) != NULL)
		avl_add(&dst->rc_tree, ref);
	list_move_tail(&dst->rc_removed, &removed);
	mutex_exit(&dst->rc_mtx);

	avl_destroy(&tree);
	list_destroy(&removed);
}

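/*
 * Re-tag an existing reference with a new holder without changing the
 * count.  This is a no-op for untracked refcounts; for tracked refcounts
 * the reference must already exist under "current_holder".
 */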
void
zfs_refcount_transfer_ownership_many(zfs_refcount_t *rc, uint64_t number,
    const void *current_holder, const void *new_holder)
{
	reference_t *ref, s;

	if (likely(!rc->rc_tracked))
		return;

	s.ref_holder = current_holder;
	s.ref_number = number;
	s.ref_search = B_TRUE;
	mutex_enter(&rc->rc_mtx);
	ref = avl_find(&rc->rc_tree, &s, NULL);
	ASSERT3P(ref, !=, NULL);
	ref->ref_holder = new_holder;
	avl_update(&rc->rc_tree, ref);
	mutex_exit(&rc->rc_mtx);
}

void
zfs_refcount_transfer_ownership(zfs_refcount_t *rc, const void *current_holder,
    const void *new_holder)
{
	zfs_refcount_transfer_ownership_many(rc, 1, current_holder,
	    new_holder);
}

/*
 * If tracking is enabled, return true if a reference exists that matches
 * the "holder" tag. If tracking is disabled, then return true if a reference
 * might be held.
 */
boolean_t
zfs_refcount_held(zfs_refcount_t *rc, const void *holder)
{
	reference_t *ref, s;
	avl_index_t idx;
	boolean_t res;

	if (likely(!rc->rc_tracked))
		return (zfs_refcount_count(rc) > 0);

	s.ref_holder = holder;
	s.ref_number = 0;
	s.ref_search = B_TRUE;
	mutex_enter(&rc->rc_mtx);
	ref = avl_find(&rc->rc_tree, &s, &idx);
	if (likely(ref == NULL))
		ref = avl_nearest(&rc->rc_tree, idx, AVL_AFTER);
	res = ref && ref->ref_holder == holder;
	mutex_exit(&rc->rc_mtx);
	return (res);
}

/*
 * If tracking is enabled, return true if a reference does not exist that
 * matches the "holder" tag. If tracking is disabled, always return true
 * since the reference might not be held.
 */
boolean_t
zfs_refcount_not_held(zfs_refcount_t *rc, const void *holder)
{
	reference_t *ref, s;
	avl_index_t idx;
	boolean_t res;

	if (likely(!rc->rc_tracked))
		return (B_TRUE);

	mutex_enter(&rc->rc_mtx);
	s.ref_holder = holder;
	s.ref_number = 0;
	s.ref_search = B_TRUE;
	ref = avl_find(&rc->rc_tree, &s, &idx);
	if (likely(ref == NULL))
		ref = avl_nearest(&rc->rc_tree, idx, AVL_AFTER);
	res = ref == NULL || ref->ref_holder != holder;
	mutex_exit(&rc->rc_mtx);
	return (res);
}
#endif	/* ZFS_DEBUG */