/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
/*
 * ARC buffer data (ABD).
 *
 * ABDs are an abstract data structure for the ARC which can use two
 * different ways of storing the underlying data:
 *
 * (a) Linear buffer. In this case, all the data in the ABD is stored in one
 *     contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
 *
 *         +-------------------+
 *         | ABD (linear)      |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |     +--------------------------------+
 *         |   abd_buf ------------->| raw buffer of size abd_size    |
 *         +-------------------+     +--------------------------------+
 *              no abd_chunks
 *
 * (b) Scattered buffer. In this case, the data in the ABD is split into
 *     equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
 *     to the chunks recorded in an array at the end of the ABD structure.
 *
 *         +-------------------+
 *         | ABD (scattered)   |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |
 *         |   abd_offset = 0  |                           +-----------+
 *         |   abd_chunks[0] ----------------------------->| chunk 0   |
 *         |   abd_chunks[1] ---------------------+        +-----------+
 *         |   ...             |                  |        +-----------+
 *         |   abd_chunks[N-1] ---------+         +------->| chunk 1   |
 *         +-------------------+        |                  +-----------+
 *                                      |                      ...
 *                                      |                  +-----------+
 *                                      +----------------->| chunk N-1 |
 *                                                         +-----------+
 *
 * Using a large proportion of scattered ABDs decreases ARC fragmentation:
 * when we are at the limit of allocatable space, equal-size chunks allow us
 * to quickly reclaim enough space for a new large allocation (assuming it is
 * also scattered).
 *
 * In addition to directly allocating a linear or scattered ABD, it is also
 * possible to create an ABD by requesting the "sub-ABD" starting at an offset
 * within an existing ABD. In linear buffers this is simple (set abd_buf of
 * the new ABD to the starting point within the original raw buffer), but
 * scattered ABDs are a little more complex. The new ABD makes a copy of the
 * relevant abd_chunks pointers (but not the underlying data). However, to
 * provide arbitrary rather than only chunk-aligned starting offsets, it also
 * tracks an abd_offset field which represents the starting point of the data
 * within the first chunk in abd_chunks. For both linear and scattered ABDs,
 * creating an offset ABD marks the original ABD as the offset's parent, and the
 * original ABD's abd_children refcount is incremented. This data allows us to
 * ensure the root ABD isn't deleted before its children.
 *
 * Most consumers should never need to know what type of ABD they're using --
 * the ABD public API ensures that it's possible to transparently switch from
 * using a linear ABD to a scattered one when doing so would be beneficial.
 *
 * If you need to use the data within an ABD directly, and you know the ABD is
 * linear (because you allocated it), you can use abd_to_buf() to access the
 * underlying raw buffer. Otherwise, use one of the abd_borrow_buf* functions,
 * which will allocate a raw buffer if necessary. Use the abd_return_buf*
 * functions to return those raw buffers once you're done with them.
 *
 * There are a variety of ABD APIs that implement basic buffer operations:
 * compare, copy, read, write, and fill with zeroes. If you need a custom
 * function which progressively accesses the whole ABD, use the abd_iterate_*
 * functions.
 */
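
/*
 * A minimal usage sketch (hypothetical consumer code, shown for
 * illustration only; fill_buffer() and size are assumed, not part of
 * this file):
 *
 *	abd_t *abd = abd_alloc(size, B_FALSE);
 *	void *buf = abd_borrow_buf(abd, size);
 *	fill_buffer(buf, size);
 *	abd_return_buf_copy(abd, buf, size);
 *	...
 *	abd_free(abd);
 *
 * abd_borrow_buf() does not copy the existing contents into the borrowed
 * buffer, which is the right choice when the caller intends to overwrite
 * them; abd_return_buf_copy() then copies the new contents back into the
 * (possibly scattered) ABD.
 */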

#include <sys/abd.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
};

#define	ABDSTAT(stat)		(abd_stats.stat.value.ui64)
#define	ABDSTAT_INCR(stat, val) \
	atomic_add_64(&abd_stats.stat.value.ui64, (val))
#define	ABDSTAT_BUMP(stat)	ABDSTAT_INCR(stat, 1)
#define	ABDSTAT_BUMPDOWN(stat)	ABDSTAT_INCR(stat, -1)

/*
 * It is possible to make all future ABDs be linear by setting this to B_FALSE.
 * Otherwise, ABDs are allocated scattered by default unless the caller uses
 * abd_alloc_linear().
 */
boolean_t zfs_abd_scatter_enabled = B_TRUE;

/*
 * The size of the chunks ABD allocates. Because the sizes allocated from the
 * kmem_cache can't change, this tunable can only be modified at boot. Changing
 * it at runtime would cause ABD iteration to work incorrectly for ABDs which
 * were allocated with the old size, so a safeguard has been put in place which
 * will cause the machine to panic if you change it and try to access the data
 * within a scattered ABD.
 */
size_t zfs_abd_chunk_size = 4096;

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;

extern inline boolean_t abd_is_linear(abd_t *abd);
extern inline void abd_copy(abd_t *dabd, abd_t *sabd, size_t size);
extern inline void abd_copy_from_buf(abd_t *abd, const void *buf, size_t size);
extern inline void abd_copy_to_buf(void *buf, abd_t *abd, size_t size);
extern inline int abd_cmp_buf(abd_t *abd, const void *buf, size_t size);
extern inline void abd_zero(abd_t *abd, size_t size);

static void *
abd_alloc_chunk(void)
{
	void *c = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
	ASSERT3P(c, !=, NULL);
	return (c);
}

static void
abd_free_chunk(void *c)
{
	kmem_cache_free(abd_chunk_cache, c);
}

void
abd_init(void)
{
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif

	/*
	 * Since ABD chunks do not appear in crash dumps, we pass KMC_NOTOUCH
	 * so that no allocator metadata is stored with the buffers.
	 */
	abd_chunk_cache = kmem_cache_create("abd_chunk", zfs_abd_chunk_size, 0,
	    NULL, NULL, NULL, NULL, data_alloc_arena, KMC_NOTOUCH);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		abd_ksp->ks_data = &abd_stats;
		kstat_install(abd_ksp);
	}
}

void
abd_fini(void)
{
	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	kmem_cache_destroy(abd_chunk_cache);
	abd_chunk_cache = NULL;
}

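/*
 * Convert a size in bytes into the number of chunks needed to hold it.
 * For example, with the default zfs_abd_chunk_size of 4096, a 4096-byte
 * request needs one chunk and a 4097-byte request needs two, since
 * P2ROUNDUP() rounds the size up to the next multiple of the chunk size.
 */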
static inline size_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, zfs_abd_chunk_size) / zfs_abd_chunk_size);
}

static inline size_t
abd_scatter_chunkcnt(abd_t *abd)
{
	ASSERT(!abd_is_linear(abd));
	return (abd_chunkcnt_for_bytes(
	    abd->abd_u.abd_scatter.abd_offset + abd->abd_size));
}

static inline void
abd_verify(abd_t *abd)
{
	ASSERT3U(abd->abd_size, >, 0);
	ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
	    ABD_FLAG_OWNER | ABD_FLAG_META));
	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd)) {
		ASSERT3P(abd->abd_u.abd_linear.abd_buf, !=, NULL);
	} else {
		ASSERT3U(abd->abd_u.abd_scatter.abd_offset, <,
		    zfs_abd_chunk_size);
		size_t n = abd_scatter_chunkcnt(abd);
		for (int i = 0; i < n; i++) {
			ASSERT3P(
			    abd->abd_u.abd_scatter.abd_chunks[i], !=, NULL);
		}
	}
}

static inline abd_t *
abd_alloc_struct(size_t chunkcnt)
{
	size_t size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]);
	abd_t *abd = kmem_alloc(size, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, size);

	return (abd);
}

static inline void
abd_free_struct(abd_t *abd)
{
	size_t chunkcnt = abd_is_linear(abd) ? 0 : abd_scatter_chunkcnt(abd);
	int size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]);
	kmem_free(abd, size);
	ABDSTAT_INCR(abdstat_struct_size, -size);
}

/*
 * Allocate an ABD, along with its own underlying data buffers. Use this if you
 * don't care whether the ABD is linear or not.
 */
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
	if (!zfs_abd_scatter_enabled)
		return (abd_alloc_linear(size, is_metadata));

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	size_t n = abd_chunkcnt_for_bytes(size);
	abd_t *abd = abd_alloc_struct(n);

	abd->abd_flags = ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	zfs_refcount_create(&abd->abd_children);

	abd->abd_u.abd_scatter.abd_offset = 0;
	abd->abd_u.abd_scatter.abd_chunk_size = zfs_abd_chunk_size;

	for (int i = 0; i < n; i++) {
		void *c = abd_alloc_chunk();
		ASSERT3P(c, !=, NULL);
		abd->abd_u.abd_scatter.abd_chunks[i] = c;
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    n * zfs_abd_chunk_size - size);

	return (abd);
}

static void
abd_free_scatter(abd_t *abd)
{
	size_t n = abd_scatter_chunkcnt(abd);
	for (int i = 0; i < n; i++) {
		abd_free_chunk(abd->abd_u.abd_scatter.abd_chunks[i]);
	}

	zfs_refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    abd->abd_size - n * zfs_abd_chunk_size);

	abd_free_struct(abd);
}

/*
 * Allocate an ABD that must be linear, along with its own underlying data
 * buffer. Only use this when it would be very annoying to write your ABD
 * consumer with a scattered ABD.
 */
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
	abd_t *abd = abd_alloc_struct(0);

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd->abd_flags = ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	zfs_refcount_create(&abd->abd_children);

	if (is_metadata) {
		abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
	} else {
		abd->abd_u.abd_linear.abd_buf = zio_data_buf_alloc(size);
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, size);

	return (abd);
}

static void
abd_free_linear(abd_t *abd)
{
	if (abd->abd_flags & ABD_FLAG_META) {
		zio_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	} else {
		zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	}

	zfs_refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);

	abd_free_struct(abd);
}

/*
 * Free an ABD. Only use this on ABDs allocated with abd_alloc() or
 * abd_alloc_linear().
 */
void
abd_free(abd_t *abd)
{
	abd_verify(abd);
	ASSERT3P(abd->abd_parent, ==, NULL);
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd))
		abd_free_linear(abd);
	else
		abd_free_scatter(abd);
}

/*
 * Allocate an ABD of the same format (same metadata flag, same scatterize
 * setting) as another ABD.
 */
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
	boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
	if (abd_is_linear(sabd)) {
		return (abd_alloc_linear(size, is_metadata));
	} else {
		return (abd_alloc(size, is_metadata));
	}
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * Currently this is linear ABDs, however if ldi_strategy() can ever issue I/Os
 * using a scatter/gather list we should switch to that and replace this call
 * with vanilla abd_alloc().
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc_linear(size, is_metadata));
}

/*
 * Allocate a new ABD to point to offset off of sabd. It shares the underlying
 * buffer data with sabd. Use abd_put() to free. sabd must not be freed while
 * any derived ABDs exist.
 */
/* ARGSUSED */
static inline abd_t *
abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)
{
	abd_t *abd;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	if (abd_is_linear(sabd)) {
		abd = abd_alloc_struct(0);

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = ABD_FLAG_LINEAR;

		abd->abd_u.abd_linear.abd_buf =
		    (char *)sabd->abd_u.abd_linear.abd_buf + off;
	} else {
		size_t new_offset = sabd->abd_u.abd_scatter.abd_offset + off;
		size_t chunkcnt = abd_scatter_chunkcnt(sabd) -
		    (new_offset / zfs_abd_chunk_size);

		abd = abd_alloc_struct(chunkcnt);

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = 0;

		abd->abd_u.abd_scatter.abd_offset =
		    new_offset % zfs_abd_chunk_size;
		abd->abd_u.abd_scatter.abd_chunk_size = zfs_abd_chunk_size;

		/* Copy the scatterlist starting at the correct offset */
		(void) memcpy(&abd->abd_u.abd_scatter.abd_chunks,
		    &sabd->abd_u.abd_scatter.abd_chunks[new_offset /
		    zfs_abd_chunk_size],
		    chunkcnt * sizeof (void *));
	}

	abd->abd_size = sabd->abd_size - off;
	abd->abd_parent = sabd;
	zfs_refcount_create(&abd->abd_children);
	(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);

	return (abd);
}

abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
	size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;

	VERIFY3U(size, >, 0);

	return (abd_get_offset_impl(sabd, off, size));
}

abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
	ASSERT3U(off + size, <=, sabd->abd_size);

	return (abd_get_offset_impl(sabd, off, size));
}
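
/*
 * A sketch of sub-ABD usage (an illustrative fragment, not taken from a
 * real caller; abd and off are assumed):
 *
 *	abd_t *sub = abd_get_offset(abd, off);
 *	... read or write through sub; the data is shared with abd ...
 *	abd_put(sub);
 *
 * The parent must outlive its children: freeing abd while sub still
 * exists would trip the abd_children refcount checks.
 */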
/*
 * Allocate a linear ABD structure for buf. You must free this with abd_put()
 * since the resulting ABD doesn't own its own buffer.
 */
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
	abd_t *abd = abd_alloc_struct(0);

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	/*
	 * Even if this buf is filesystem metadata, we only track that if we
	 * own the underlying data buffer, which is not true in this case.
	 * Therefore, we don't ever use ABD_FLAG_META here.
	 */
	abd->abd_flags = ABD_FLAG_LINEAR;
	abd->abd_size = size;
	abd->abd_parent = NULL;
	zfs_refcount_create(&abd->abd_children);

	abd->abd_u.abd_linear.abd_buf = buf;

	return (abd);
}

/*
 * Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not
 * free the underlying scatterlist or buffer.
 */
void
abd_put(abd_t *abd)
{
	abd_verify(abd);
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));

	if (abd->abd_parent != NULL) {
		(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
		    abd->abd_size, abd);
	}

	zfs_refcount_destroy(&abd->abd_children);
	abd_free_struct(abd);
}

/*
 * Get the raw buffer associated with a linear ABD.
 */
void *
abd_to_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	abd_verify(abd);
	return (abd->abd_u.abd_linear.abd_buf);
}

/*
 * Borrow a raw buffer from an ABD without copying the contents of the ABD
 * into the buffer. If the ABD is scattered, this will allocate a raw buffer
 * whose contents are undefined. To copy over the existing data in the ABD, use
 * abd_borrow_buf_copy() instead.
 */
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
	void *buf;
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		buf = abd_to_buf(abd);
	} else {
		buf = zio_buf_alloc(n);
	}
	(void) zfs_refcount_add_many(&abd->abd_children, n, buf);

	return (buf);
}

void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
	void *buf = abd_borrow_buf(abd, n);
	if (!abd_is_linear(abd)) {
		abd_copy_to_buf(buf, abd, n);
	}
	return (buf);
}

/*
 * Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
 * not change the contents of the ABD and will ASSERT that you didn't modify
 * the buffer since it was borrowed. If you want any changes you made to buf to
 * be copied back to abd, use abd_return_buf_copy() instead.
 */
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		ASSERT3P(buf, ==, abd_to_buf(abd));
	} else {
		ASSERT0(abd_cmp_buf(abd, buf, n));
		zio_buf_free(buf, n);
	}
	(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
}

void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
	if (!abd_is_linear(abd)) {
		abd_copy_from_buf(abd, buf, n);
	}
	abd_return_buf(abd, buf, n);
}
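
/*
 * A read-only borrow follows the same pattern; a sketch (illustrative
 * only; checksum() is an assumed helper):
 *
 *	void *buf = abd_borrow_buf_copy(abd, n);
 *	checksum(buf, n);
 *	abd_return_buf(abd, buf, n);
 *
 * Since the borrowed buffer was not modified, plain abd_return_buf() is
 * enough; for a scattered ABD it will ASSERT that the contents are
 * unchanged instead of copying them back.
 */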

/*
 * Give this ABD ownership of the buffer that it's storing. Can only be used on
 * linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
 * with abd_alloc_linear() which subsequently released ownership of their buf
 * with abd_release_ownership_of_buf().
 */
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
	abd_verify(abd);

	abd->abd_flags |= ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
}

void
abd_release_ownership_of_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	abd_verify(abd);

	abd->abd_flags &= ~ABD_FLAG_OWNER;
	/* Disable this flag since we no longer own the data buffer */
	abd->abd_flags &= ~ABD_FLAG_META;

	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
}
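
/*
 * A sketch of the ownership handoff (illustrative only; size is assumed):
 *
 *	void *buf = zio_buf_alloc(size);
 *	abd_t *abd = abd_get_from_buf(buf, size);
 *	abd_take_ownership_of_buf(abd, B_TRUE);
 *	...
 *	abd_free(abd);
 *
 * Once ownership is taken the ABD is accounted for in linear_cnt, and
 * abd_free() will free buf on the ABD's behalf.
 */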

struct abd_iter {
	abd_t		*iter_abd;	/* ABD being iterated through */
	size_t		iter_pos;	/* position (relative to abd_offset) */
	void		*iter_mapaddr;	/* addr corresponding to iter_pos */
	size_t		iter_mapsize;	/* length of data valid at mapaddr */
};

static inline size_t
abd_iter_scatter_chunk_offset(struct abd_iter *aiter)
{
	ASSERT(!abd_is_linear(aiter->iter_abd));
	return ((aiter->iter_abd->abd_u.abd_scatter.abd_offset +
	    aiter->iter_pos) % zfs_abd_chunk_size);
}

static inline size_t
abd_iter_scatter_chunk_index(struct abd_iter *aiter)
{
	ASSERT(!abd_is_linear(aiter->iter_abd));
	return ((aiter->iter_abd->abd_u.abd_scatter.abd_offset +
	    aiter->iter_pos) / zfs_abd_chunk_size);
}

/*
 * Initialize the abd_iter.
 */
static void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_pos = 0;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the iterator has already been
 * exhausted, in which case it does nothing.
 */
static void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	aiter->iter_pos += amount;
}

/*
 * Map the current chunk into aiter. This can be safely called when the
 * iterator has already been exhausted, in which case it does nothing.
 */
static void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* Panic if someone has changed zfs_abd_chunk_size */
	IMPLY(!abd_is_linear(aiter->iter_abd), zfs_abd_chunk_size ==
	    aiter->iter_abd->abd_u.abd_scatter.abd_chunk_size);

	/* There's nothing left to iterate over, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		offset = aiter->iter_pos;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = aiter->iter_abd->abd_u.abd_linear.abd_buf;
	} else {
		size_t index = abd_iter_scatter_chunk_index(aiter);
		offset = abd_iter_scatter_chunk_offset(aiter);
		aiter->iter_mapsize = zfs_abd_chunk_size - offset;
		paddr = aiter->iter_abd->abd_u.abd_scatter.abd_chunks[index];
	}
	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the
 * iterator has already been exhausted, in which case it does nothing.
 */
static void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
    abd_iter_func_t *func, void *private)
{
	int ret = 0;
	struct abd_iter aiter;

	abd_verify(abd);
	ASSERT3U(off + size, <=, abd->abd_size);

	abd_iter_init(&aiter, abd);
	abd_iter_advance(&aiter, off);

	while (size > 0) {
		abd_iter_map(&aiter);

		size_t len = MIN(aiter.iter_mapsize, size);
		ASSERT3U(len, >, 0);

		ret = func(aiter.iter_mapaddr, len, private);

		abd_iter_unmap(&aiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (ret);
}
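
/*
 * A sketch of a custom iterator callback (a hypothetical example, not
 * part of this file): count the zero bytes in an ABD.
 *
 *	static int
 *	count_zeroes_cb(void *buf, size_t size, void *private)
 *	{
 *		uint64_t *count = private;
 *		char *c = buf;
 *		for (size_t i = 0; i < size; i++) {
 *			if (c[i] == 0)
 *				(*count)++;
 *		}
 *		return (0);
 *	}
 *
 *	uint64_t zeroes = 0;
 *	(void) abd_iterate_func(abd, 0, abd->abd_size, count_zeroes_cb,
 *	    &zeroes);
 *
 * A nonzero return from the callback aborts the iteration early;
 * abd_iterate_func() propagates that value back to its caller.
 */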

struct buf_arg {
	void *arg_buf;
};

static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(ba_ptr->arg_buf, buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy abd to buf. (off is the offset in abd.)
 */
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
	    &ba_ptr);
}

static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
	int ret;
	struct buf_arg *ba_ptr = private;

	ret = memcmp(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (ret);
}

/*
 * Compare the contents of abd to buf. (off is the offset in abd.)
 */
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}

static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy from buf to abd. (off is the offset in abd.)
 */
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
	    &ba_ptr);
}

/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
	(void) memset(buf, 0, size);
	return (0);
}

/*
 * Zero out the abd from a particular offset to the end.
 */
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
	(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}

/*
 * Iterate over two ABDs and call func incrementally on the two ABDs' data in
 * equal-sized chunks (passed to func as raw buffers). func may be called many
 * times during this iteration.
 */
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
    size_t size, abd_iter_func2_t *func, void *private)
{
	int ret = 0;
	struct abd_iter daiter, saiter;

	abd_verify(dabd);
	abd_verify(sabd);

	ASSERT3U(doff + size, <=, dabd->abd_size);
	ASSERT3U(soff + size, <=, sabd->abd_size);

	abd_iter_init(&daiter, dabd);
	abd_iter_init(&saiter, sabd);
	abd_iter_advance(&daiter, doff);
	abd_iter_advance(&saiter, soff);

	while (size > 0) {
		abd_iter_map(&daiter);
		abd_iter_map(&saiter);

		size_t dlen = MIN(daiter.iter_mapsize, size);
		size_t slen = MIN(saiter.iter_mapsize, size);
		size_t len = MIN(dlen, slen);
		ASSERT(dlen > 0 || slen > 0);

		ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
		    private);

		abd_iter_unmap(&saiter);
		abd_iter_unmap(&daiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&daiter, len);
		abd_iter_advance(&saiter, len);
	}

	return (ret);
}

/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
	(void) memcpy(dbuf, sbuf, size);
	return (0);
}

/*
 * Copy from sabd to dabd starting from soff and doff.
 */
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
	(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
	    abd_copy_off_cb, NULL);
}

/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
	return (memcmp(bufa, bufb, size));
}

/*
 * Compares the first size bytes of two ABDs.
 */
int
abd_cmp(abd_t *dabd, abd_t *sabd, size_t size)
{
	return (abd_iterate_func2(dabd, sabd, 0, 0, size, abd_cmp_cb, NULL));
}