/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes; rather, they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
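/*
 * A minimal sketch of the lookup pattern implied by the rules above
 * (hypothetical caller shown for illustration; the real consumers are
 * arc_read() and friends):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		... hdr is protected by hash_lock here ...
 *		mutex_exit(hash_lock);
 *	}
 */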

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_mdcomp_disable = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to a buffer, it is
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
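/*
 * A simplified sketch of the typical transitions between these states
 * (see arc_access() below for the precise rules):
 *
 *	anon --[written, acquires DVA]--> mru --[second access]--> mfu
 *	mru --[evicted]--> mru_ghost --[ghost hit]--> mfu
 *	mfu --[evicted]--> mfu_ghost --[ghost hit]--> mfu
 */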

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

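/*
 * ARCSTAT_MAX() maintains a running maximum without taking a lock: it
 * re-reads the current value and retries the compare-and-swap until
 * either the stored maximum is already >= (val) or the CAS succeeds.
 */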
#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
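
/*
 * For example, the hit path in arc_buf_add_ref() below invokes:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * which bumps exactly one of the four
 * arcstat_{demand,prefetch}_{data,metadata}_hits counters.
 */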

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */
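/*
 * (The public flags, e.g. ARC_PREFETCH and ARC_L2CACHE declared in
 * sys/arc.h, are assumed to occupy the low bits of b_flags, which is
 * why the private flags below start at 1 << 9.)
 */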

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */
#define	ARC_STORED		(1 << 19)	/* has been store()d to */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
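
/*
 * Since the hash table always has at least 1 << 12 buckets (see
 * buf_init()), there are more buckets than the BUF_LOCKS (256) padded
 * locks above, so each ht_lock protects many buckets; HDR_LOCK() maps
 * a header to the lock shared by its bucket.
 */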

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_boost;	/* warmup write boost, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	boolean_t		l2ad_writing;	/* currently writing */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	uint64_t	b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

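/*
 * Hash a (spa, DVA, birth) tuple: fold each byte of the DVA through the
 * ZFS CRC64 table, then mix in the (shifted) spa guid and the birth txg.
 * BUF_HASH_INDEX() masks the result down to a table index.
 */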
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	rw_destroy(&buf->b_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
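	/*
	 * For example, with 8GB of physical memory the loop below
	 * settles on hsize = 8GB / 64K = 128K buckets, i.e. a 1MB
	 * table of 8-byte pointers.
	 */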
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
}

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa_guid(spa);
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted.  Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	rw_enter(&buf->b_lock, RW_READER);
	if (buf->b_data == NULL) {
		rw_exit(&buf->b_lock);
		return;
	}
	hdr = buf->b_hdr;
	ASSERT(hdr != NULL);
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	rw_exit(&buf->b_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size, ARC_SPACE_DATA);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				ARCSTAT_INCR(arcstat_data_size, -size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(!(hdr->b_flags & ARC_STORED));

	if (hdr->b_l2hdr != NULL) {
		if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
			/*
			 * To prevent arc_free() and l2arc_evict() from
			 * attempting to free the same buffer at the same time,
			 * a FREE_IN_PROGRESS flag is given to arc_free() to
			 * give it priority.  l2arc_evict() can't destroy this
			 * header while we are waiting on l2arc_buflist_mtx.
			 *
			 * The hdr may be removed from l2ad_buflist before we
			 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
			 */
			mutex_enter(&l2arc_buflist_mtx);
			if (hdr->b_l2hdr != NULL) {
				list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist,
				    hdr);
			}
			mutex_exit(&l2arc_buflist_mtx);
		} else {
			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
		}
		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
		if (hdr->b_state == arc_l2c_only)
			l2arc_hdr_stat_remove();
		hdr->b_l2hdr = NULL;
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			rw_enter(&buf->b_lock, RW_WRITER);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			rw_exit(&buf->b_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort".  It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
static void *
arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (!rw_tryenter(&buf->b_lock, RW_WRITER)) {
					missed += 1;
					break;
				}
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					rw_exit(&buf->b_lock);
				} else {
					rw_exit(&buf->b_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			if (ab->b_datacnt == 0) {
				arc_change_state(evicted_state, ab, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(ab));
				ab->b_flags |= ARC_IN_HASH_TABLE;
				ab->b_flags &= ~ARC_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %p",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * We have just evicted some data into the ghost state; make
	 * sure we also adjust the ghost state size if necessary.
	 */
	if (arc_no_grow &&
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
		    arc_mru_ghost->arcs_size - arc_c;

		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
			int64_t todelete =
			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
			arc_evict_ghost(arc_mru_ghost, 0, todelete);
		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
			    arc_mru_ghost->arcs_size +
			    arc_mfu_ghost->arcs_size - arc_c);
			arc_evict_ghost(arc_mfu_ghost, 0, todelete);
		}
	}

	return (stolen);
}
1623 
1624 /*
1625  * Remove buffers from list until we've removed the specified number of
1626  * bytes.  Destroy the buffers that are removed.
1627  */
1628 static void
1629 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1630 {
1631 	arc_buf_hdr_t *ab, *ab_prev;
1632 	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1633 	kmutex_t *hash_lock;
1634 	uint64_t bytes_deleted = 0;
1635 	uint64_t bufs_skipped = 0;
1636 
1637 	ASSERT(GHOST_STATE(state));
1638 top:
1639 	mutex_enter(&state->arcs_mtx);
1640 	for (ab = list_tail(list); ab; ab = ab_prev) {
1641 		ab_prev = list_prev(list, ab);
1642 		if (spa && ab->b_spa != spa)
1643 			continue;
1644 		hash_lock = HDR_LOCK(ab);
1645 		if (mutex_tryenter(hash_lock)) {
1646 			ASSERT(!HDR_IO_IN_PROGRESS(ab));
1647 			ASSERT(ab->b_buf == NULL);
1648 			ARCSTAT_BUMP(arcstat_deleted);
1649 			bytes_deleted += ab->b_size;
1650 
1651 			if (ab->b_l2hdr != NULL) {
1652 				/*
1653 				 * This buffer is cached on the 2nd Level ARC;
1654 				 * don't destroy the header.
1655 				 */
1656 				arc_change_state(arc_l2c_only, ab, hash_lock);
1657 				mutex_exit(hash_lock);
1658 			} else {
1659 				arc_change_state(arc_anon, ab, hash_lock);
1660 				mutex_exit(hash_lock);
1661 				arc_hdr_destroy(ab);
1662 			}
1663 
1664 			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1665 			if (bytes >= 0 && bytes_deleted >= bytes)
1666 				break;
1667 		} else {
1668 			if (bytes < 0) {
1669 				mutex_exit(&state->arcs_mtx);
1670 				mutex_enter(hash_lock);
1671 				mutex_exit(hash_lock);
1672 				goto top;
1673 			}
1674 			bufs_skipped += 1;
1675 		}
1676 	}
1677 	mutex_exit(&state->arcs_mtx);
1678 
1679 	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
1680 	    (bytes < 0 || bytes_deleted < bytes)) {
1681 		list = &state->arcs_list[ARC_BUFC_METADATA];
1682 		goto top;
1683 	}
1684 
1685 	if (bufs_skipped) {
1686 		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1687 		ASSERT(bytes >= 0);
1688 	}
1689 
1690 	if (bytes_deleted < bytes)
1691 		dprintf("only deleted %lld bytes from %p",
1692 		    (longlong_t)bytes_deleted, state);
1693 }
1694 
1695 static void
1696 arc_adjust(void)
1697 {
1698 	int64_t adjustment, delta;
1699 
1700 	/*
1701 	 * Adjust MRU size
1702 	 */
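	/*
	 * For illustration: if arc_size exceeds arc_c by, say, 300MB while
	 * anon + mru + meta_used exceeds arc_p by only 100MB, the MIN below
	 * asks the MRU side for just 100MB; roughly the remaining 200MB is
	 * then reclaimed from the MFU side further down, whose target is
	 * recomputed from the updated arc_size.
	 */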
1703 
1704 	adjustment = MIN(arc_size - arc_c,
1705 	    arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - arc_p);
1706 
1707 	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
1708 		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
1709 		(void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
1710 		adjustment -= delta;
1711 	}
1712 
1713 	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1714 		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
1715 		(void) arc_evict(arc_mru, NULL, delta, FALSE,
1716 		    ARC_BUFC_METADATA);
1717 	}
1718 
1719 	/*
1720 	 * Adjust MFU size
1721 	 */
1722 
1723 	adjustment = arc_size - arc_c;
1724 
1725 	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
1726 		delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
1727 		(void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
1728 		adjustment -= delta;
1729 	}
1730 
1731 	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1732 		int64_t delta = MIN(adjustment,
1733 		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
1734 		(void) arc_evict(arc_mfu, NULL, delta, FALSE,
1735 		    ARC_BUFC_METADATA);
1736 	}
1737 
1738 	/*
1739 	 * Adjust ghost lists
1740 	 */
1741 
1742 	adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
1743 
1744 	if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
1745 		delta = MIN(arc_mru_ghost->arcs_size, adjustment);
1746 		arc_evict_ghost(arc_mru_ghost, NULL, delta);
1747 	}
1748 
1749 	adjustment =
1750 	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
1751 
1752 	if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
1753 		delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
1754 		arc_evict_ghost(arc_mfu_ghost, NULL, delta);
1755 	}
1756 }
1757 
1758 static void
1759 arc_do_user_evicts(void)
1760 {
1761 	mutex_enter(&arc_eviction_mtx);
1762 	while (arc_eviction_list != NULL) {
1763 		arc_buf_t *buf = arc_eviction_list;
1764 		arc_eviction_list = buf->b_next;
1765 		rw_enter(&buf->b_lock, RW_WRITER);
1766 		buf->b_hdr = NULL;
1767 		rw_exit(&buf->b_lock);
1768 		mutex_exit(&arc_eviction_mtx);
1769 
1770 		if (buf->b_efunc != NULL)
1771 			VERIFY(buf->b_efunc(buf) == 0);
1772 
1773 		buf->b_efunc = NULL;
1774 		buf->b_private = NULL;
1775 		kmem_cache_free(buf_cache, buf);
1776 		mutex_enter(&arc_eviction_mtx);
1777 	}
1778 	mutex_exit(&arc_eviction_mtx);
1779 }
1780 
1781 /*
1782  * Flush all *evictable* data from the cache for the given spa.
1783  * NOTE: this will not touch "active" (i.e. referenced) data.
1784  */
1785 void
1786 arc_flush(spa_t *spa)
1787 {
1788 	uint64_t guid = 0;
1789 
1790 	if (spa)
1791 		guid = spa_guid(spa);
1792 
1793 	while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
1794 		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
1795 		if (spa)
1796 			break;
1797 	}
1798 	while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
1799 		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
1800 		if (spa)
1801 			break;
1802 	}
1803 	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
1804 		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
1805 		if (spa)
1806 			break;
1807 	}
1808 	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
1809 		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
1810 		if (spa)
1811 			break;
1812 	}
1813 
1814 	arc_evict_ghost(arc_mru_ghost, guid, -1);
1815 	arc_evict_ghost(arc_mfu_ghost, guid, -1);
1816 
1817 	mutex_enter(&arc_reclaim_thr_lock);
1818 	arc_do_user_evicts();
1819 	mutex_exit(&arc_reclaim_thr_lock);
1820 	ASSERT(spa || arc_eviction_list == NULL);
1821 }
1822 
1823 void
1824 arc_shrink(void)
1825 {
1826 	if (arc_c > arc_c_min) {
1827 		uint64_t to_free;
1828 
1829 #ifdef _KERNEL
1830 		to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
1831 #else
1832 		to_free = arc_c >> arc_shrink_shift;
1833 #endif
1834 		if (arc_c > arc_c_min + to_free)
1835 			atomic_add_64(&arc_c, -to_free);
1836 		else
1837 			arc_c = arc_c_min;
1838 
1839 		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
1840 		if (arc_c > arc_size)
1841 			arc_c = MAX(arc_size, arc_c_min);
1842 		if (arc_p > arc_c)
1843 			arc_p = (arc_c >> 1);
1844 		ASSERT(arc_c >= arc_c_min);
1845 		ASSERT((int64_t)arc_p >= 0);
1846 	}
1847 
1848 	if (arc_size > arc_c)
1849 		arc_adjust();
1850 }
1851 
1852 static int
1853 arc_reclaim_needed(void)
1854 {
1855 	uint64_t extra;
1856 
1857 #ifdef _KERNEL
1858 
1859 	if (needfree)
1860 		return (1);
1861 
1862 	/*
1863 	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1864 	 */
1865 	extra = desfree;
1866 
1867 	/*
1868 	 * check that we're out of range of the pageout scanner.  It starts to
1869 	 * schedule paging if freemem drops below lotsfree + needfree.
1870 	 * lotsfree is the high-water mark for pageout, and needfree is the
1871 	 * number of needed free pages.  We add extra pages here to make sure
1872 	 * the scanner doesn't start up while we're freeing memory.
1873 	 */
1874 	if (freemem < lotsfree + needfree + extra)
1875 		return (1);
1876 
1877 	/*
1878 	 * check to make sure that swapfs has enough space so that anon
1879 	 * reservations can still succeed. anon_resvmem() checks that the
1880 	 * availrmem is greater than swapfs_minfree, and the number of reserved
1881 	 * swap pages.  We also add a bit of extra here just to prevent
1882 	 * circumstances from getting really dire.
1883 	 */
1884 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1885 		return (1);
1886 
1887 #if defined(__i386)
1888 	/*
1889 	 * If we're on an i386 platform, it's possible that we'll exhaust the
1890 	 * kernel heap space before we ever run out of available physical
1891 	 * memory.  Most checks of the size of the heap_area compare against
1892 	 * tune.t_minarmem, which is the minimum available real memory that we
1893 	 * can have in the system.  However, this is generally fixed at 25 pages
1894 	 * which is so low that it's useless.  In this comparison, we seek to
1895 	 * calculate the total heap size, and reclaim if more than 3/4ths of the
1896 	 * heap is allocated (or, put the other way, if less than 1/4th of it
1897 	 * is free).
1898 	 */
1899 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1900 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1901 		return (1);
1902 #endif
1903 
1904 #else
1905 	if (spa_get_random(100) == 0)
1906 		return (1);
1907 #endif
1908 	return (0);
1909 }
1910 
1911 static void
1912 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1913 {
1914 	size_t			i;
1915 	kmem_cache_t		*prev_cache = NULL;
1916 	kmem_cache_t		*prev_data_cache = NULL;
1917 	extern kmem_cache_t	*zio_buf_cache[];
1918 	extern kmem_cache_t	*zio_data_buf_cache[];
1919 
1920 #ifdef _KERNEL
1921 	if (arc_meta_used >= arc_meta_limit) {
1922 		/*
1923 		 * We are exceeding our meta-data cache limit.
1924 		 * Purge some DNLC entries to release holds on meta-data.
1925 		 */
1926 		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
1927 	}
1928 #if defined(__i386)
1929 	/*
1930 	 * Reclaim unused memory from all kmem caches.
1931 	 */
1932 	kmem_reap();
1933 #endif
1934 #endif
1935 
1936 	/*
1937 	 * An aggressive reclamation will shrink the cache size as well as
1938 	 * reap free buffers from the arc kmem caches.
1939 	 */
1940 	if (strat == ARC_RECLAIM_AGGR)
1941 		arc_shrink();
1942 
1943 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1944 		if (zio_buf_cache[i] != prev_cache) {
1945 			prev_cache = zio_buf_cache[i];
1946 			kmem_cache_reap_now(zio_buf_cache[i]);
1947 		}
1948 		if (zio_data_buf_cache[i] != prev_data_cache) {
1949 			prev_data_cache = zio_data_buf_cache[i];
1950 			kmem_cache_reap_now(zio_data_buf_cache[i]);
1951 		}
1952 	}
1953 	kmem_cache_reap_now(buf_cache);
1954 	kmem_cache_reap_now(hdr_cache);
1955 }
1956 
1957 static void
1958 arc_reclaim_thread(void)
1959 {
1960 	clock_t			growtime = 0;
1961 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
1962 	callb_cpr_t		cpr;
1963 
1964 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1965 
1966 	mutex_enter(&arc_reclaim_thr_lock);
1967 	while (arc_thread_exit == 0) {
1968 		if (arc_reclaim_needed()) {
1969 
1970 			if (arc_no_grow) {
1971 				if (last_reclaim == ARC_RECLAIM_CONS) {
1972 					last_reclaim = ARC_RECLAIM_AGGR;
1973 				} else {
1974 					last_reclaim = ARC_RECLAIM_CONS;
1975 				}
1976 			} else {
1977 				arc_no_grow = TRUE;
1978 				last_reclaim = ARC_RECLAIM_AGGR;
1979 				membar_producer();
1980 			}
1981 
1982 			/* reset the growth delay for every reclaim */
1983 			growtime = lbolt + (arc_grow_retry * hz);
1984 
1985 			arc_kmem_reap_now(last_reclaim);
1986 			arc_warm = B_TRUE;
1987 
1988 		} else if (arc_no_grow && lbolt >= growtime) {
1989 			arc_no_grow = FALSE;
1990 		}
1991 
1992 		if (2 * arc_c < arc_size +
1993 		    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)
1994 			arc_adjust();
1995 
1996 		if (arc_eviction_list != NULL)
1997 			arc_do_user_evicts();
1998 
1999 		/* block until needed, or one second, whichever is shorter */
2000 		CALLB_CPR_SAFE_BEGIN(&cpr);
2001 		(void) cv_timedwait(&arc_reclaim_thr_cv,
2002 		    &arc_reclaim_thr_lock, (lbolt + hz));
2003 		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2004 	}
2005 
2006 	arc_thread_exit = 0;
2007 	cv_broadcast(&arc_reclaim_thr_cv);
2008 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
2009 	thread_exit();
2010 }
2011 
2012 /*
2013  * Adapt arc info given the number of bytes we are trying to add and
2014  * the state that we are comming from.  This function is only called
2015  * the state that we are coming from.  This function is only called
2016  */
2017 static void
2018 arc_adapt(int bytes, arc_state_t *state)
2019 {
2020 	int mult;
2021 	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2022 
2023 	if (state == arc_l2c_only)
2024 		return;
2025 
2026 	ASSERT(bytes > 0);
2027 	/*
2028 	 * Adapt the target size of the MRU list:
2029 	 *	- if we just hit in the MRU ghost list, then increase
2030 	 *	  the target size of the MRU list.
2031 	 *	- if we just hit in the MFU ghost list, then increase
2032 	 *	  the target size of the MFU list by decreasing the
2033 	 *	  target size of the MRU list.
2034 	 */
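	/*
	 * For illustration: on a hit in arc_mru_ghost when mfu_ghost is,
	 * say, four times the size of mru_ghost, mult is 4, so arc_p grows
	 * by 4 * bytes (clamped to arc_c - arc_p_min); the symmetric
	 * mfu_ghost case shrinks arc_p toward arc_p_min.
	 */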
2035 	if (state == arc_mru_ghost) {
2036 		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2037 		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2038 
2039 		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2040 	} else if (state == arc_mfu_ghost) {
2041 		uint64_t delta;
2042 
2043 		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2044 		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2045 
2046 		delta = MIN(bytes * mult, arc_p);
2047 		arc_p = MAX(arc_p_min, arc_p - delta);
2048 	}
2049 	ASSERT((int64_t)arc_p >= 0);
2050 
2051 	if (arc_reclaim_needed()) {
2052 		cv_signal(&arc_reclaim_thr_cv);
2053 		return;
2054 	}
2055 
2056 	if (arc_no_grow)
2057 		return;
2058 
2059 	if (arc_c >= arc_c_max)
2060 		return;
2061 
2062 	/*
2063 	 * If we're within (2 * maxblocksize) bytes of the target
2064 	 * cache size, increment the target cache size
2065 	 */
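	/*
	 * With the 128K maximum block size this threshold is 256K, i.e.
	 * the target only grows once arc_size comes within 256K of arc_c.
	 */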
2066 	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2067 		atomic_add_64(&arc_c, (int64_t)bytes);
2068 		if (arc_c > arc_c_max)
2069 			arc_c = arc_c_max;
2070 		else if (state == arc_anon)
2071 			atomic_add_64(&arc_p, (int64_t)bytes);
2072 		if (arc_p > arc_c)
2073 			arc_p = arc_c;
2074 	}
2075 	ASSERT((int64_t)arc_p >= 0);
2076 }
2077 
2078 /*
2079  * Check if the cache has reached its limits and eviction is required
2080  * prior to insert.
2081  */
2082 static int
2083 arc_evict_needed(arc_buf_contents_t type)
2084 {
2085 	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2086 		return (1);
2087 
2088 #ifdef _KERNEL
2089 	/*
2090 	 * If zio data pages are being allocated out of a separate heap segment,
2091 	 * then enforce that at least 1/32nd of the vmem in that segment
2092 	 * remains free.
2093 	 */
2094 	if (type == ARC_BUFC_DATA && zio_arena != NULL &&
2095 	    vmem_size(zio_arena, VMEM_FREE) <
2096 	    (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
2097 		return (1);
2098 #endif
2099 
2100 	if (arc_reclaim_needed())
2101 		return (1);
2102 
2103 	return (arc_size > arc_c);
2104 }
2105 
2106 /*
2107  * The buffer, supplied as the first argument, needs a data block.
2108  * So, if we are at cache max, determine which cache should be victimized.
2109  * We have the following cases:
2110  *
2111  * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2112  * In this situation if we're out of space, but the resident size of the MFU is
2113  * under the limit, victimize the MFU cache to satisfy this insertion request.
2114  *
2115  * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2116  * Here, we've used up all of the available space for the MRU, so we need to
2117  * evict from our own cache instead.  Evict from the set of resident MRU
2118  * entries.
2119  *
2120  * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2121  * c minus p represents the MFU space in the cache, since p is the size of the
2122  * cache that is dedicated to the MRU.  In this situation there's still space on
2123  * the MFU side, so the MRU side needs to be victimized.
2124  *
2125  * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2126  * MFU's resident set is consuming more space than it has been allotted.  In
2127  * this situation, we must victimize our own cache, the MFU, for this insertion.
2128  */
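/*
 * Worked example: with arc_c = 1GB and arc_p = 600MB, an insert destined for
 * the MRU while anon + mru already totals 650MB falls under case 2 above, so
 * the space is reclaimed from the resident MRU list itself rather than from
 * the MFU.
 */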
2129 static void
2130 arc_get_data_buf(arc_buf_t *buf)
2131 {
2132 	arc_state_t		*state = buf->b_hdr->b_state;
2133 	uint64_t		size = buf->b_hdr->b_size;
2134 	arc_buf_contents_t	type = buf->b_hdr->b_type;
2135 
2136 	arc_adapt(size, state);
2137 
2138 	/*
2139 	 * We have not yet reached cache maximum size,
2140 	 * just allocate a new buffer.
2141 	 */
2142 	if (!arc_evict_needed(type)) {
2143 		if (type == ARC_BUFC_METADATA) {
2144 			buf->b_data = zio_buf_alloc(size);
2145 			arc_space_consume(size, ARC_SPACE_DATA);
2146 		} else {
2147 			ASSERT(type == ARC_BUFC_DATA);
2148 			buf->b_data = zio_data_buf_alloc(size);
2149 			ARCSTAT_INCR(arcstat_data_size, size);
2150 			atomic_add_64(&arc_size, size);
2151 		}
2152 		goto out;
2153 	}
2154 
2155 	/*
2156 	 * If we are prefetching from the mfu ghost list, this buffer
2157 	 * will end up on the mru list; so steal space from there.
2158 	 */
2159 	if (state == arc_mfu_ghost)
2160 		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2161 	else if (state == arc_mru_ghost)
2162 		state = arc_mru;
2163 
2164 	if (state == arc_mru || state == arc_anon) {
2165 		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2166 		state = (arc_mfu->arcs_lsize[type] >= size &&
2167 		    arc_p > mru_used) ? arc_mfu : arc_mru;
2168 	} else {
2169 		/* MFU cases */
2170 		uint64_t mfu_space = arc_c - arc_p;
2171 		state =  (arc_mru->arcs_lsize[type] >= size &&
2172 		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2173 	}
2174 	if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2175 		if (type == ARC_BUFC_METADATA) {
2176 			buf->b_data = zio_buf_alloc(size);
2177 			arc_space_consume(size, ARC_SPACE_DATA);
2178 		} else {
2179 			ASSERT(type == ARC_BUFC_DATA);
2180 			buf->b_data = zio_data_buf_alloc(size);
2181 			ARCSTAT_INCR(arcstat_data_size, size);
2182 			atomic_add_64(&arc_size, size);
2183 		}
2184 		ARCSTAT_BUMP(arcstat_recycle_miss);
2185 	}
2186 	ASSERT(buf->b_data != NULL);
2187 out:
2188 	/*
2189 	 * Update the state size.  Note that ghost states have a
2190 	 * "ghost size" and so don't need to be updated.
2191 	 */
2192 	if (!GHOST_STATE(buf->b_hdr->b_state)) {
2193 		arc_buf_hdr_t *hdr = buf->b_hdr;
2194 
2195 		atomic_add_64(&hdr->b_state->arcs_size, size);
2196 		if (list_link_active(&hdr->b_arc_node)) {
2197 			ASSERT(refcount_is_zero(&hdr->b_refcnt));
2198 			atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2199 		}
2200 		/*
2201 		 * If we are growing the cache, and we are adding anonymous
2202 		 * data, and we have outgrown arc_p, update arc_p
2203 		 */
2204 		if (arc_size < arc_c && hdr->b_state == arc_anon &&
2205 		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2206 			arc_p = MIN(arc_c, arc_p + size);
2207 	}
2208 }
2209 
2210 /*
2211  * This routine is called whenever a buffer is accessed.
2212  * NOTE: the hash lock is dropped in this function.
2213  */
2214 static void
2215 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2216 {
2217 	ASSERT(MUTEX_HELD(hash_lock));
2218 
2219 	if (buf->b_state == arc_anon) {
2220 		/*
2221 		 * This buffer is not in the cache, and does not
2222 		 * appear in our "ghost" list.  Add the new buffer
2223 		 * to the MRU state.
2224 		 */
2225 
2226 		ASSERT(buf->b_arc_access == 0);
2227 		buf->b_arc_access = lbolt;
2228 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2229 		arc_change_state(arc_mru, buf, hash_lock);
2230 
2231 	} else if (buf->b_state == arc_mru) {
2232 		/*
2233 		 * If this buffer is here because of a prefetch, then either:
2234 		 * - clear the flag if this is a "referencing" read
2235 		 *   (any subsequent access will bump this into the MFU state).
2236 		 * or
2237 		 * - move the buffer to the head of the list if this is
2238 		 *   another prefetch (to make it less likely to be evicted).
2239 		 */
2240 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
2241 			if (refcount_count(&buf->b_refcnt) == 0) {
2242 				ASSERT(list_link_active(&buf->b_arc_node));
2243 			} else {
2244 				buf->b_flags &= ~ARC_PREFETCH;
2245 				ARCSTAT_BUMP(arcstat_mru_hits);
2246 			}
2247 			buf->b_arc_access = lbolt;
2248 			return;
2249 		}
2250 
2251 		/*
2252 		 * This buffer has been "accessed" only once so far,
2253 		 * but it is still in the cache.  If enough time has passed,
2254 		 * move it to the MFU state.
2255 		 */
2256 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
2257 			/*
2258 			 * More than 125ms have passed since we
2259 			 * instantiated this buffer.  Move it to the
2260 			 * most frequently used state.
2261 			 */
2262 			buf->b_arc_access = lbolt;
2263 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2264 			arc_change_state(arc_mfu, buf, hash_lock);
2265 		}
2266 		ARCSTAT_BUMP(arcstat_mru_hits);
2267 	} else if (buf->b_state == arc_mru_ghost) {
2268 		arc_state_t	*new_state;
2269 		/*
2270 		 * This buffer has been "accessed" recently, but
2271 		 * was evicted from the cache.  Move it to the
2272 		 * MFU state.
2273 		 */
2274 
2275 		if (buf->b_flags & ARC_PREFETCH) {
2276 			new_state = arc_mru;
2277 			if (refcount_count(&buf->b_refcnt) > 0)
2278 				buf->b_flags &= ~ARC_PREFETCH;
2279 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2280 		} else {
2281 			new_state = arc_mfu;
2282 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2283 		}
2284 
2285 		buf->b_arc_access = lbolt;
2286 		arc_change_state(new_state, buf, hash_lock);
2287 
2288 		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2289 	} else if (buf->b_state == arc_mfu) {
2290 		/*
2291 		 * This buffer has been accessed more than once and is
2292 		 * still in the cache.  Keep it in the MFU state.
2293 		 *
2294 		 * NOTE: an add_reference() that occurred when we did
2295 		 * the arc_read() will have kicked this off the list.
2296 		 * If it was a prefetch, we will explicitly move it to
2297 		 * the head of the list now.
2298 		 */
2299 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
2300 			ASSERT(refcount_count(&buf->b_refcnt) == 0);
2301 			ASSERT(list_link_active(&buf->b_arc_node));
2302 		}
2303 		ARCSTAT_BUMP(arcstat_mfu_hits);
2304 		buf->b_arc_access = lbolt;
2305 	} else if (buf->b_state == arc_mfu_ghost) {
2306 		arc_state_t	*new_state = arc_mfu;
2307 		/*
2308 		 * This buffer has been accessed more than once but has
2309 		 * been evicted from the cache.  Move it back to the
2310 		 * MFU state.
2311 		 */
2312 
2313 		if (buf->b_flags & ARC_PREFETCH) {
2314 			/*
2315 			 * This is a prefetch access...
2316 			 * move this block back to the MRU state.
2317 			 */
2318 			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
2319 			new_state = arc_mru;
2320 		}
2321 
2322 		buf->b_arc_access = lbolt;
2323 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2324 		arc_change_state(new_state, buf, hash_lock);
2325 
2326 		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2327 	} else if (buf->b_state == arc_l2c_only) {
2328 		/*
2329 		 * This buffer is on the 2nd Level ARC.
2330 		 */
2331 
2332 		buf->b_arc_access = lbolt;
2333 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2334 		arc_change_state(arc_mfu, buf, hash_lock);
2335 	} else {
2336 		ASSERT(!"invalid arc state");
2337 	}
2338 }
2339 
2340 /* a generic arc_done_func_t which you can use */
2341 /* ARGSUSED */
2342 void
2343 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2344 {
2345 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2346 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2347 }
2348 
2349 /* a generic arc_done_func_t */
2350 void
2351 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2352 {
2353 	arc_buf_t **bufp = arg;
2354 	if (zio && zio->io_error) {
2355 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2356 		*bufp = NULL;
2357 	} else {
2358 		*bufp = buf;
2359 	}
2360 }
2361 
2362 static void
2363 arc_read_done(zio_t *zio)
2364 {
2365 	arc_buf_hdr_t	*hdr, *found;
2366 	arc_buf_t	*buf;
2367 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
2368 	kmutex_t	*hash_lock;
2369 	arc_callback_t	*callback_list, *acb;
2370 	int		freeable = FALSE;
2371 
2372 	buf = zio->io_private;
2373 	hdr = buf->b_hdr;
2374 
2375 	/*
2376 	 * The hdr was inserted into hash-table and removed from lists
2377 	 * prior to starting I/O.  We should find this header, since
2378 	 * it's in the hash table, and it should be legit since it's
2379 	 * not possible to evict it during the I/O.  The only possible
2380 	 * reason for it not to be found is if we were freed during the
2381 	 * read.
2382 	 */
2383 	found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2384 	    &hash_lock);
2385 
2386 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2387 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2388 	    (found == hdr && HDR_L2_READING(hdr)));
2389 
2390 	hdr->b_flags &= ~ARC_L2_EVICTED;
2391 	if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2392 		hdr->b_flags &= ~ARC_L2CACHE;
2393 
2394 	/* byteswap if necessary */
2395 	callback_list = hdr->b_acb;
2396 	ASSERT(callback_list != NULL);
2397 	if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
2398 		arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2399 		    byteswap_uint64_array :
2400 		    dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap;
2401 		func(buf->b_data, hdr->b_size);
2402 	}
2403 
2404 	arc_cksum_compute(buf, B_FALSE);
2405 
2406 	/* create copies of the data buffer for the callers */
2407 	abuf = buf;
2408 	for (acb = callback_list; acb; acb = acb->acb_next) {
2409 		if (acb->acb_done) {
2410 			if (abuf == NULL)
2411 				abuf = arc_buf_clone(buf);
2412 			acb->acb_buf = abuf;
2413 			abuf = NULL;
2414 		}
2415 	}
2416 	hdr->b_acb = NULL;
2417 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2418 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
2419 	if (abuf == buf)
2420 		hdr->b_flags |= ARC_BUF_AVAILABLE;
2421 
2422 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2423 
2424 	if (zio->io_error != 0) {
2425 		hdr->b_flags |= ARC_IO_ERROR;
2426 		if (hdr->b_state != arc_anon)
2427 			arc_change_state(arc_anon, hdr, hash_lock);
2428 		if (HDR_IN_HASH_TABLE(hdr))
2429 			buf_hash_remove(hdr);
2430 		freeable = refcount_is_zero(&hdr->b_refcnt);
2431 	}
2432 
2433 	/*
2434 	 * Broadcast before we drop the hash_lock to avoid the possibility
2435 	 * that the hdr (and hence the cv) might be freed before we get to
2436 	 * the cv_broadcast().
2437 	 */
2438 	cv_broadcast(&hdr->b_cv);
2439 
2440 	if (hash_lock) {
2441 		/*
2442 		 * Only call arc_access on anonymous buffers.  This is because
2443 		 * if we've issued an I/O for an evicted buffer, we've already
2444 		 * called arc_access (to prevent any simultaneous readers from
2445 		 * getting confused).
2446 		 */
2447 		if (zio->io_error == 0 && hdr->b_state == arc_anon)
2448 			arc_access(hdr, hash_lock);
2449 		mutex_exit(hash_lock);
2450 	} else {
2451 		/*
2452 		 * This block was freed while we waited for the read to
2453 		 * complete.  It has been removed from the hash table and
2454 		 * moved to the anonymous state (so that it won't show up
2455 		 * in the cache).
2456 		 */
2457 		ASSERT3P(hdr->b_state, ==, arc_anon);
2458 		freeable = refcount_is_zero(&hdr->b_refcnt);
2459 	}
2460 
2461 	/* execute each callback and free its structure */
2462 	while ((acb = callback_list) != NULL) {
2463 		if (acb->acb_done)
2464 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2465 
2466 		if (acb->acb_zio_dummy != NULL) {
2467 			acb->acb_zio_dummy->io_error = zio->io_error;
2468 			zio_nowait(acb->acb_zio_dummy);
2469 		}
2470 
2471 		callback_list = acb->acb_next;
2472 		kmem_free(acb, sizeof (arc_callback_t));
2473 	}
2474 
2475 	if (freeable)
2476 		arc_hdr_destroy(hdr);
2477 }
2478 
2479 /*
2480  * "Read" the block at the specified DVA (in bp) via the
2481  * cache.  If the block is found in the cache, invoke the provided
2482  * callback immediately and return.  Note that the `zio' parameter
2483  * in the callback will be NULL in this case, since no IO was
2484  * required.  If the block is not in the cache pass the read request
2485  * on to the spa with a substitute callback function, so that the
2486  * requested block will be added to the cache.
2487  *
2488  * If a read request arrives for a block that has a read in-progress,
2489  * either wait for the in-progress read to complete (and return the
2490  * results); or, if this is a read with a "done" func, add a record
2491  * to the read to invoke the "done" func when the read completes,
2492  * and return; or just return.
2493  *
2494  * arc_read_done() will invoke all the requested "done" functions
2495  * for readers of this block.
2496  *
2497  * Normal callers should use arc_read and pass the arc buffer and offset
2498  * for the bp.  But if you know you don't need locking, you can use
2499  * arc_read_nolock.
2500  */
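/*
 * A rough sketch of a waiting caller (real callers such as the DMU supply
 * their own done callbacks, private data and bookmarks, and must already
 * hold a reference on pbuf; spa, bp, pbuf and zb below are placeholders):
 *
 *	int error;
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	error = arc_read(NULL, spa, bp, pbuf, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (error == 0 && abuf != NULL) {
 *		... consume abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 */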
2501 int
2502 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf,
2503     arc_done_func_t *done, void *private, int priority, int zio_flags,
2504     uint32_t *arc_flags, const zbookmark_t *zb)
2505 {
2506 	int err;
2507 	arc_buf_hdr_t *hdr = pbuf->b_hdr;
2508 
2509 	ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt));
2510 	ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size);
2511 	rw_enter(&pbuf->b_lock, RW_READER);
2512 
2513 	err = arc_read_nolock(pio, spa, bp, done, private, priority,
2514 	    zio_flags, arc_flags, zb);
2515 
2516 	ASSERT3P(hdr, ==, pbuf->b_hdr);
2517 	rw_exit(&pbuf->b_lock);
2518 	return (err);
2519 }
2520 
2521 int
2522 arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp,
2523     arc_done_func_t *done, void *private, int priority, int zio_flags,
2524     uint32_t *arc_flags, const zbookmark_t *zb)
2525 {
2526 	arc_buf_hdr_t *hdr;
2527 	arc_buf_t *buf;
2528 	kmutex_t *hash_lock;
2529 	zio_t *rzio;
2530 	uint64_t guid = spa_guid(spa);
2531 
2532 top:
2533 	hdr = buf_hash_find(guid, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2534 	if (hdr && hdr->b_datacnt > 0) {
2535 
2536 		*arc_flags |= ARC_CACHED;
2537 
2538 		if (HDR_IO_IN_PROGRESS(hdr)) {
2539 
2540 			if (*arc_flags & ARC_WAIT) {
2541 				cv_wait(&hdr->b_cv, hash_lock);
2542 				mutex_exit(hash_lock);
2543 				goto top;
2544 			}
2545 			ASSERT(*arc_flags & ARC_NOWAIT);
2546 
2547 			if (done) {
2548 				arc_callback_t	*acb = NULL;
2549 
2550 				acb = kmem_zalloc(sizeof (arc_callback_t),
2551 				    KM_SLEEP);
2552 				acb->acb_done = done;
2553 				acb->acb_private = private;
2554 				if (pio != NULL)
2555 					acb->acb_zio_dummy = zio_null(pio,
2556 					    spa, NULL, NULL, NULL, zio_flags);
2557 
2558 				ASSERT(acb->acb_done != NULL);
2559 				acb->acb_next = hdr->b_acb;
2560 				hdr->b_acb = acb;
2561 				add_reference(hdr, hash_lock, private);
2562 				mutex_exit(hash_lock);
2563 				return (0);
2564 			}
2565 			mutex_exit(hash_lock);
2566 			return (0);
2567 		}
2568 
2569 		ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2570 
2571 		if (done) {
2572 			add_reference(hdr, hash_lock, private);
2573 			/*
2574 			 * If this block is already in use, create a new
2575 			 * copy of the data so that we will be guaranteed
2576 			 * that arc_release() will always succeed.
2577 			 */
2578 			buf = hdr->b_buf;
2579 			ASSERT(buf);
2580 			ASSERT(buf->b_data);
2581 			if (HDR_BUF_AVAILABLE(hdr)) {
2582 				ASSERT(buf->b_efunc == NULL);
2583 				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2584 			} else {
2585 				buf = arc_buf_clone(buf);
2586 			}
2587 		} else if (*arc_flags & ARC_PREFETCH &&
2588 		    refcount_count(&hdr->b_refcnt) == 0) {
2589 			hdr->b_flags |= ARC_PREFETCH;
2590 		}
2591 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2592 		arc_access(hdr, hash_lock);
2593 		if (*arc_flags & ARC_L2CACHE)
2594 			hdr->b_flags |= ARC_L2CACHE;
2595 		mutex_exit(hash_lock);
2596 		ARCSTAT_BUMP(arcstat_hits);
2597 		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2598 		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2599 		    data, metadata, hits);
2600 
2601 		if (done)
2602 			done(NULL, buf, private);
2603 	} else {
2604 		uint64_t size = BP_GET_LSIZE(bp);
2605 		arc_callback_t	*acb;
2606 		vdev_t *vd = NULL;
2607 		uint64_t addr;
2608 		boolean_t devw = B_FALSE;
2609 
2610 		if (hdr == NULL) {
2611 			/* this block is not in the cache */
2612 			arc_buf_hdr_t	*exists;
2613 			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2614 			buf = arc_buf_alloc(spa, size, private, type);
2615 			hdr = buf->b_hdr;
2616 			hdr->b_dva = *BP_IDENTITY(bp);
2617 			hdr->b_birth = bp->blk_birth;
2618 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2619 			exists = buf_hash_insert(hdr, &hash_lock);
2620 			if (exists) {
2621 				/* somebody beat us to the hash insert */
2622 				mutex_exit(hash_lock);
2623 				bzero(&hdr->b_dva, sizeof (dva_t));
2624 				hdr->b_birth = 0;
2625 				hdr->b_cksum0 = 0;
2626 				(void) arc_buf_remove_ref(buf, private);
2627 				goto top; /* restart the IO request */
2628 			}
2629 			/* if this is a prefetch, we don't have a reference */
2630 			if (*arc_flags & ARC_PREFETCH) {
2631 				(void) remove_reference(hdr, hash_lock,
2632 				    private);
2633 				hdr->b_flags |= ARC_PREFETCH;
2634 			}
2635 			if (*arc_flags & ARC_L2CACHE)
2636 				hdr->b_flags |= ARC_L2CACHE;
2637 			if (BP_GET_LEVEL(bp) > 0)
2638 				hdr->b_flags |= ARC_INDIRECT;
2639 		} else {
2640 			/* this block is in the ghost cache */
2641 			ASSERT(GHOST_STATE(hdr->b_state));
2642 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2643 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
2644 			ASSERT(hdr->b_buf == NULL);
2645 
2646 			/* if this is a prefetch, we don't have a reference */
2647 			if (*arc_flags & ARC_PREFETCH)
2648 				hdr->b_flags |= ARC_PREFETCH;
2649 			else
2650 				add_reference(hdr, hash_lock, private);
2651 			if (*arc_flags & ARC_L2CACHE)
2652 				hdr->b_flags |= ARC_L2CACHE;
2653 			buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2654 			buf->b_hdr = hdr;
2655 			buf->b_data = NULL;
2656 			buf->b_efunc = NULL;
2657 			buf->b_private = NULL;
2658 			buf->b_next = NULL;
2659 			hdr->b_buf = buf;
2660 			arc_get_data_buf(buf);
2661 			ASSERT(hdr->b_datacnt == 0);
2662 			hdr->b_datacnt = 1;
2663 
2664 		}
2665 
2666 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2667 		acb->acb_done = done;
2668 		acb->acb_private = private;
2669 
2670 		ASSERT(hdr->b_acb == NULL);
2671 		hdr->b_acb = acb;
2672 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
2673 
2674 		/*
2675 		 * If the buffer has been evicted, migrate it to a present state
2676 		 * before issuing the I/O.  Once we drop the hash-table lock,
2677 		 * the header will be marked as I/O in progress and have an
2678 		 * attached buffer.  At this point, anybody who finds this
2679 		 * buffer ought to notice that it's legit but has a pending I/O.
2680 		 */
2681 
2682 		if (GHOST_STATE(hdr->b_state))
2683 			arc_access(hdr, hash_lock);
2684 
2685 		if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2686 		    (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2687 			devw = hdr->b_l2hdr->b_dev->l2ad_writing;
2688 			addr = hdr->b_l2hdr->b_daddr;
2689 			/*
2690 			 * Lock out device removal.
2691 			 */
2692 			if (vdev_is_dead(vd) ||
2693 			    !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
2694 				vd = NULL;
2695 		}
2696 
2697 		mutex_exit(hash_lock);
2698 
2699 		ASSERT3U(hdr->b_size, ==, size);
2700 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
2701 		    zbookmark_t *, zb);
2702 		ARCSTAT_BUMP(arcstat_misses);
2703 		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2704 		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2705 		    data, metadata, misses);
2706 
2707 		if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
2708 			/*
2709 			 * Read from the L2ARC if the following are true:
2710 			 * 1. The L2ARC vdev was previously cached.
2711 			 * 2. This buffer still has L2ARC metadata.
2712 			 * 3. This buffer isn't currently being written to the L2ARC.
2713 			 * 4. The L2ARC entry wasn't evicted, which may
2714 			 *    also have invalidated the vdev.
2715 			 * 5. This isn't a prefetch, or l2arc_noprefetch isn't set.
2716 			 */
2717 			if (hdr->b_l2hdr != NULL &&
2718 			    !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
2719 			    !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
2720 				l2arc_read_callback_t *cb;
2721 
2722 				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
2723 				ARCSTAT_BUMP(arcstat_l2_hits);
2724 
2725 				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
2726 				    KM_SLEEP);
2727 				cb->l2rcb_buf = buf;
2728 				cb->l2rcb_spa = spa;
2729 				cb->l2rcb_bp = *bp;
2730 				cb->l2rcb_zb = *zb;
2731 				cb->l2rcb_flags = zio_flags;
2732 
2733 				/*
2734 				 * l2arc read.  The SCL_L2ARC lock will be
2735 				 * released by l2arc_read_done().
2736 				 */
2737 				rzio = zio_read_phys(pio, vd, addr, size,
2738 				    buf->b_data, ZIO_CHECKSUM_OFF,
2739 				    l2arc_read_done, cb, priority, zio_flags |
2740 				    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
2741 				    ZIO_FLAG_DONT_PROPAGATE |
2742 				    ZIO_FLAG_DONT_RETRY, B_FALSE);
2743 				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
2744 				    zio_t *, rzio);
2745 				ARCSTAT_INCR(arcstat_l2_read_bytes, size);
2746 
2747 				if (*arc_flags & ARC_NOWAIT) {
2748 					zio_nowait(rzio);
2749 					return (0);
2750 				}
2751 
2752 				ASSERT(*arc_flags & ARC_WAIT);
2753 				if (zio_wait(rzio) == 0)
2754 					return (0);
2755 
2756 				/* l2arc read error; goto zio_read() */
2757 			} else {
2758 				DTRACE_PROBE1(l2arc__miss,
2759 				    arc_buf_hdr_t *, hdr);
2760 				ARCSTAT_BUMP(arcstat_l2_misses);
2761 				if (HDR_L2_WRITING(hdr))
2762 					ARCSTAT_BUMP(arcstat_l2_rw_clash);
2763 				spa_config_exit(spa, SCL_L2ARC, vd);
2764 			}
2765 		} else {
2766 			if (vd != NULL)
2767 				spa_config_exit(spa, SCL_L2ARC, vd);
2768 			if (l2arc_ndev != 0) {
2769 				DTRACE_PROBE1(l2arc__miss,
2770 				    arc_buf_hdr_t *, hdr);
2771 				ARCSTAT_BUMP(arcstat_l2_misses);
2772 			}
2773 		}
2774 
2775 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
2776 		    arc_read_done, buf, priority, zio_flags, zb);
2777 
2778 		if (*arc_flags & ARC_WAIT)
2779 			return (zio_wait(rzio));
2780 
2781 		ASSERT(*arc_flags & ARC_NOWAIT);
2782 		zio_nowait(rzio);
2783 	}
2784 	return (0);
2785 }
2786 
2787 /*
2788  * arc_read() variant to support pool traversal.  If the block is already
2789  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
2790  * The idea is that we don't want pool traversal filling up memory, but
2791  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
2792  */
2793 int
2794 arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
2795 {
2796 	arc_buf_hdr_t *hdr;
2797 	kmutex_t *hash_mtx;
2798 	uint64_t guid = spa_guid(spa);
2799 	int rc = 0;
2800 
2801 	hdr = buf_hash_find(guid, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
2802 
2803 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
2804 		arc_buf_t *buf = hdr->b_buf;
2805 
2806 		ASSERT(buf);
2807 		while (buf->b_data == NULL) {
2808 			buf = buf->b_next;
2809 			ASSERT(buf);
2810 		}
2811 		bcopy(buf->b_data, data, hdr->b_size);
2812 	} else {
2813 		rc = ENOENT;
2814 	}
2815 
2816 	if (hash_mtx)
2817 		mutex_exit(hash_mtx);
2818 
2819 	return (rc);
2820 }
2821 
2822 void
2823 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
2824 {
2825 	ASSERT(buf->b_hdr != NULL);
2826 	ASSERT(buf->b_hdr->b_state != arc_anon);
2827 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
2828 	buf->b_efunc = func;
2829 	buf->b_private = private;
2830 }
2831 
2832 /*
2833  * This is used by the DMU to let the ARC know that a buffer is
2834  * being evicted, so the ARC should clean up.  If this arc buf
2835  * is not yet in the evicted state, it will be put there.
2836  */
2837 int
2838 arc_buf_evict(arc_buf_t *buf)
2839 {
2840 	arc_buf_hdr_t *hdr;
2841 	kmutex_t *hash_lock;
2842 	arc_buf_t **bufp;
2843 
2844 	rw_enter(&buf->b_lock, RW_WRITER);
2845 	hdr = buf->b_hdr;
2846 	if (hdr == NULL) {
2847 		/*
2848 		 * We are in arc_do_user_evicts().
2849 		 */
2850 		ASSERT(buf->b_data == NULL);
2851 		rw_exit(&buf->b_lock);
2852 		return (0);
2853 	} else if (buf->b_data == NULL) {
2854 		arc_buf_t copy = *buf; /* structure assignment */
2855 		/*
2856 		 * We are on the eviction list; process this buffer now
2857 		 * but let arc_do_user_evicts() do the reaping.
2858 		 */
2859 		buf->b_efunc = NULL;
2860 		rw_exit(&buf->b_lock);
2861 		VERIFY(copy.b_efunc(&copy) == 0);
2862 		return (1);
2863 	}
2864 	hash_lock = HDR_LOCK(hdr);
2865 	mutex_enter(hash_lock);
2866 
2867 	ASSERT(buf->b_hdr == hdr);
2868 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2869 	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2870 
2871 	/*
2872 	 * Pull this buffer off of the hdr
2873 	 */
2874 	bufp = &hdr->b_buf;
2875 	while (*bufp != buf)
2876 		bufp = &(*bufp)->b_next;
2877 	*bufp = buf->b_next;
2878 
2879 	ASSERT(buf->b_data != NULL);
2880 	arc_buf_destroy(buf, FALSE, FALSE);
2881 
2882 	if (hdr->b_datacnt == 0) {
2883 		arc_state_t *old_state = hdr->b_state;
2884 		arc_state_t *evicted_state;
2885 
2886 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
2887 
2888 		evicted_state =
2889 		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2890 
2891 		mutex_enter(&old_state->arcs_mtx);
2892 		mutex_enter(&evicted_state->arcs_mtx);
2893 
2894 		arc_change_state(evicted_state, hdr, hash_lock);
2895 		ASSERT(HDR_IN_HASH_TABLE(hdr));
2896 		hdr->b_flags |= ARC_IN_HASH_TABLE;
2897 		hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2898 
2899 		mutex_exit(&evicted_state->arcs_mtx);
2900 		mutex_exit(&old_state->arcs_mtx);
2901 	}
2902 	mutex_exit(hash_lock);
2903 	rw_exit(&buf->b_lock);
2904 
2905 	VERIFY(buf->b_efunc(buf) == 0);
2906 	buf->b_efunc = NULL;
2907 	buf->b_private = NULL;
2908 	buf->b_hdr = NULL;
2909 	kmem_cache_free(buf_cache, buf);
2910 	return (1);
2911 }
2912 
2913 /*
2914  * Release this buffer from the cache.  This must be done
2915  * after a read and prior to modifying the buffer contents.
2916  * If the buffer has more than one reference, we must make
2917  * a new hdr for the buffer.
2918  */
2919 void
2920 arc_release(arc_buf_t *buf, void *tag)
2921 {
2922 	arc_buf_hdr_t *hdr;
2923 	kmutex_t *hash_lock;
2924 	l2arc_buf_hdr_t *l2hdr;
2925 	uint64_t buf_size;
2926 	boolean_t released = B_FALSE;
2927 
2928 	rw_enter(&buf->b_lock, RW_WRITER);
2929 	hdr = buf->b_hdr;
2930 
2931 	/* this buffer is not on any list */
2932 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2933 	ASSERT(!(hdr->b_flags & ARC_STORED));
2934 
2935 	if (hdr->b_state == arc_anon) {
2936 		/* this buffer is already released */
2937 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2938 		ASSERT(BUF_EMPTY(hdr));
2939 		ASSERT(buf->b_efunc == NULL);
2940 		arc_buf_thaw(buf);
2941 		rw_exit(&buf->b_lock);
2942 		released = B_TRUE;
2943 	} else {
2944 		hash_lock = HDR_LOCK(hdr);
2945 		mutex_enter(hash_lock);
2946 	}
2947 
2948 	l2hdr = hdr->b_l2hdr;
2949 	if (l2hdr) {
2950 		mutex_enter(&l2arc_buflist_mtx);
2951 		hdr->b_l2hdr = NULL;
2952 		buf_size = hdr->b_size;
2953 	}
2954 
2955 	if (released)
2956 		goto out;
2957 
2958 	/*
2959 	 * Do we have more than one buf?
2960 	 */
2961 	if (hdr->b_datacnt > 1) {
2962 		arc_buf_hdr_t *nhdr;
2963 		arc_buf_t **bufp;
2964 		uint64_t blksz = hdr->b_size;
2965 		uint64_t spa = hdr->b_spa;
2966 		arc_buf_contents_t type = hdr->b_type;
2967 		uint32_t flags = hdr->b_flags;
2968 
2969 		ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
2970 		/*
2971 		 * Pull the data off of this buf and attach it to
2972 		 * a new anonymous buf.
2973 		 */
2974 		(void) remove_reference(hdr, hash_lock, tag);
2975 		bufp = &hdr->b_buf;
2976 		while (*bufp != buf)
2977 			bufp = &(*bufp)->b_next;
2978 		*bufp = (*bufp)->b_next;
2979 		buf->b_next = NULL;
2980 
2981 		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
2982 		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
2983 		if (refcount_is_zero(&hdr->b_refcnt)) {
2984 			uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
2985 			ASSERT3U(*size, >=, hdr->b_size);
2986 			atomic_add_64(size, -hdr->b_size);
2987 		}
2988 		hdr->b_datacnt -= 1;
2989 		arc_cksum_verify(buf);
2990 
2991 		mutex_exit(hash_lock);
2992 
2993 		nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
2994 		nhdr->b_size = blksz;
2995 		nhdr->b_spa = spa;
2996 		nhdr->b_type = type;
2997 		nhdr->b_buf = buf;
2998 		nhdr->b_state = arc_anon;
2999 		nhdr->b_arc_access = 0;
3000 		nhdr->b_flags = flags & ARC_L2_WRITING;
3001 		nhdr->b_l2hdr = NULL;
3002 		nhdr->b_datacnt = 1;
3003 		nhdr->b_freeze_cksum = NULL;
3004 		(void) refcount_add(&nhdr->b_refcnt, tag);
3005 		buf->b_hdr = nhdr;
3006 		rw_exit(&buf->b_lock);
3007 		atomic_add_64(&arc_anon->arcs_size, blksz);
3008 	} else {
3009 		rw_exit(&buf->b_lock);
3010 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3011 		ASSERT(!list_link_active(&hdr->b_arc_node));
3012 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3013 		arc_change_state(arc_anon, hdr, hash_lock);
3014 		hdr->b_arc_access = 0;
3015 		mutex_exit(hash_lock);
3016 
3017 		bzero(&hdr->b_dva, sizeof (dva_t));
3018 		hdr->b_birth = 0;
3019 		hdr->b_cksum0 = 0;
3020 		arc_buf_thaw(buf);
3021 	}
3022 	buf->b_efunc = NULL;
3023 	buf->b_private = NULL;
3024 
3025 out:
3026 	if (l2hdr) {
3027 		list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3028 		kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3029 		ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3030 		mutex_exit(&l2arc_buflist_mtx);
3031 	}
3032 }
3033 
3034 int
3035 arc_released(arc_buf_t *buf)
3036 {
3037 	int released;
3038 
3039 	rw_enter(&buf->b_lock, RW_READER);
3040 	released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3041 	rw_exit(&buf->b_lock);
3042 	return (released);
3043 }
3044 
3045 int
3046 arc_has_callback(arc_buf_t *buf)
3047 {
3048 	int callback;
3049 
3050 	rw_enter(&buf->b_lock, RW_READER);
3051 	callback = (buf->b_efunc != NULL);
3052 	rw_exit(&buf->b_lock);
3053 	return (callback);
3054 }
3055 
3056 #ifdef ZFS_DEBUG
3057 int
3058 arc_referenced(arc_buf_t *buf)
3059 {
3060 	int referenced;
3061 
3062 	rw_enter(&buf->b_lock, RW_READER);
3063 	referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3064 	rw_exit(&buf->b_lock);
3065 	return (referenced);
3066 }
3067 #endif
3068 
3069 static void
3070 arc_write_ready(zio_t *zio)
3071 {
3072 	arc_write_callback_t *callback = zio->io_private;
3073 	arc_buf_t *buf = callback->awcb_buf;
3074 	arc_buf_hdr_t *hdr = buf->b_hdr;
3075 
3076 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3077 	callback->awcb_ready(zio, buf, callback->awcb_private);
3078 
3079 	/*
3080 	 * If the IO is already in progress, then this is a re-write
3081 	 * attempt, so we need to thaw and re-compute the cksum.
3082 	 * It is the responsibility of the callback to handle the
3083 	 * accounting for any re-write attempt.
3084 	 */
3085 	if (HDR_IO_IN_PROGRESS(hdr)) {
3086 		mutex_enter(&hdr->b_freeze_lock);
3087 		if (hdr->b_freeze_cksum != NULL) {
3088 			kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3089 			hdr->b_freeze_cksum = NULL;
3090 		}
3091 		mutex_exit(&hdr->b_freeze_lock);
3092 	}
3093 	arc_cksum_compute(buf, B_FALSE);
3094 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
3095 }
3096 
3097 static void
3098 arc_write_done(zio_t *zio)
3099 {
3100 	arc_write_callback_t *callback = zio->io_private;
3101 	arc_buf_t *buf = callback->awcb_buf;
3102 	arc_buf_hdr_t *hdr = buf->b_hdr;
3103 
3104 	hdr->b_acb = NULL;
3105 
3106 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3107 	hdr->b_birth = zio->io_bp->blk_birth;
3108 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3109 	/*
3110 	 * If the block to be written was all-zero, we may have
3111 	 * compressed it away.  In this case no write was performed
3112 	 * so there will be no dva/birth-date/checksum.  The buffer
3113  * must therefore remain anonymous (and uncached).
3114 	 */
3115 	if (!BUF_EMPTY(hdr)) {
3116 		arc_buf_hdr_t *exists;
3117 		kmutex_t *hash_lock;
3118 
3119 		arc_cksum_verify(buf);
3120 
3121 		exists = buf_hash_insert(hdr, &hash_lock);
3122 		if (exists) {
3123 			/*
3124 			 * This can only happen if we overwrite for
3125 			 * sync-to-convergence, because we remove
3126 			 * buffers from the hash table when we arc_free().
3127 			 */
3128 			ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE);
3129 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
3130 			    BP_IDENTITY(zio->io_bp)));
3131 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
3132 			    zio->io_bp->blk_birth);
3133 
3134 			ASSERT(refcount_is_zero(&exists->b_refcnt));
3135 			arc_change_state(arc_anon, exists, hash_lock);
3136 			mutex_exit(hash_lock);
3137 			arc_hdr_destroy(exists);
3138 			exists = buf_hash_insert(hdr, &hash_lock);
3139 			ASSERT3P(exists, ==, NULL);
3140 		}
3141 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3142 		/* if it's not anon, we are doing a scrub */
3143 		if (hdr->b_state == arc_anon)
3144 			arc_access(hdr, hash_lock);
3145 		mutex_exit(hash_lock);
3146 	} else if (callback->awcb_done == NULL) {
3147 		int destroy_hdr;
3148 		/*
3149 		 * This is an anonymous buffer with no user callback,
3150 		 * destroy it if there are no active references.
3151 		 */
3152 		mutex_enter(&arc_eviction_mtx);
3153 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
3154 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3155 		mutex_exit(&arc_eviction_mtx);
3156 		if (destroy_hdr)
3157 			arc_hdr_destroy(hdr);
3158 	} else {
3159 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3160 	}
3161 	hdr->b_flags &= ~ARC_STORED;
3162 
3163 	if (callback->awcb_done) {
3164 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3165 		callback->awcb_done(zio, buf, callback->awcb_private);
3166 	}
3167 
3168 	kmem_free(callback, sizeof (arc_write_callback_t));
3169 }
3170 
3171 void
3172 write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp)
3173 {
3174 	boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata);
3175 
3176 	/* Determine checksum setting */
3177 	if (ismd) {
3178 		/*
3179 		 * Metadata always gets checksummed.  If the data
3180 		 * checksum is multi-bit correctable, and it's not a
3181 		 * ZBT-style checksum, then it's suitable for metadata
3182 		 * as well.  Otherwise, the metadata checksum defaults
3183 		 * to fletcher4.
3184 		 */
3185 		if (zio_checksum_table[wp->wp_oschecksum].ci_correctable &&
3186 		    !zio_checksum_table[wp->wp_oschecksum].ci_zbt)
3187 			zp->zp_checksum = wp->wp_oschecksum;
3188 		else
3189 			zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4;
3190 	} else {
3191 		zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum,
3192 		    wp->wp_oschecksum);
3193 	}
3194 
3195 	/* Determine compression setting */
3196 	if (ismd) {
3197 		/*
3198 		 * XXX -- we should design a compression algorithm
3199 		 * that specializes in arrays of bps.
3200 		 */
3201 		zp->zp_compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY :
3202 		    ZIO_COMPRESS_LZJB;
3203 	} else {
3204 		zp->zp_compress = zio_compress_select(wp->wp_dncompress,
3205 		    wp->wp_oscompress);
3206 	}
3207 
3208 	zp->zp_type = wp->wp_type;
3209 	zp->zp_level = wp->wp_level;
3210 	zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa));
3211 }
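/*
 * For example, a metadata block written with wp_copies == 1 gets
 * zp_ndvas == 2 above (copies + 1 for metadata), capped by
 * spa_max_replication().
 */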
3212 
3213 zio_t *
3214 arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp,
3215     boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
3216     arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority,
3217     int zio_flags, const zbookmark_t *zb)
3218 {
3219 	arc_buf_hdr_t *hdr = buf->b_hdr;
3220 	arc_write_callback_t *callback;
3221 	zio_t *zio;
3222 	zio_prop_t zp;
3223 
3224 	ASSERT(ready != NULL);
3225 	ASSERT(!HDR_IO_ERROR(hdr));
3226 	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3227 	ASSERT(hdr->b_acb == 0);
3228 	if (l2arc)
3229 		hdr->b_flags |= ARC_L2CACHE;
3230 	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3231 	callback->awcb_ready = ready;
3232 	callback->awcb_done = done;
3233 	callback->awcb_private = private;
3234 	callback->awcb_buf = buf;
3235 
3236 	write_policy(spa, wp, &zp);
3237 	zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp,
3238 	    arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3239 
3240 	return (zio);
3241 }
3242 
3243 int
3244 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
3245     zio_done_func_t *done, void *private, uint32_t arc_flags)
3246 {
3247 	arc_buf_hdr_t *ab;
3248 	kmutex_t *hash_lock;
3249 	zio_t	*zio;
3250 	uint64_t guid = spa_guid(spa);
3251 
3252 	/*
3253 	 * If this buffer is in the cache, release it, so it
3254 	 * can be re-used.
3255 	 */
3256 	ab = buf_hash_find(guid, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
3257 	if (ab != NULL) {
3258 		/*
3259 		 * The checksum of blocks to free is not always
3260 		 * preserved (eg. on the deadlist).  However, if it is
3261 		 * nonzero, it should match what we have in the cache.
3262 		 */
3263 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
3264 		    bp->blk_cksum.zc_word[0] == ab->b_cksum0 ||
3265 		    bp->blk_fill == BLK_FILL_ALREADY_FREED);
3266 
3267 		if (ab->b_state != arc_anon)
3268 			arc_change_state(arc_anon, ab, hash_lock);
3269 		if (HDR_IO_IN_PROGRESS(ab)) {
3270 			/*
3271 			 * This should only happen when we prefetch.
3272 			 */
3273 			ASSERT(ab->b_flags & ARC_PREFETCH);
3274 			ASSERT3U(ab->b_datacnt, ==, 1);
3275 			ab->b_flags |= ARC_FREED_IN_READ;
3276 			if (HDR_IN_HASH_TABLE(ab))
3277 				buf_hash_remove(ab);
3278 			ab->b_arc_access = 0;
3279 			bzero(&ab->b_dva, sizeof (dva_t));
3280 			ab->b_birth = 0;
3281 			ab->b_cksum0 = 0;
3282 			ab->b_buf->b_efunc = NULL;
3283 			ab->b_buf->b_private = NULL;
3284 			mutex_exit(hash_lock);
3285 		} else if (refcount_is_zero(&ab->b_refcnt)) {
3286 			ab->b_flags |= ARC_FREE_IN_PROGRESS;
3287 			mutex_exit(hash_lock);
3288 			arc_hdr_destroy(ab);
3289 			ARCSTAT_BUMP(arcstat_deleted);
3290 		} else {
3291 			/*
3292 			 * We still have an active reference on this
3293 			 * buffer.  This can happen, e.g., from
3294 			 * dbuf_unoverride().
3295 			 */
3296 			ASSERT(!HDR_IN_HASH_TABLE(ab));
3297 			ab->b_arc_access = 0;
3298 			bzero(&ab->b_dva, sizeof (dva_t));
3299 			ab->b_birth = 0;
3300 			ab->b_cksum0 = 0;
3301 			ab->b_buf->b_efunc = NULL;
3302 			ab->b_buf->b_private = NULL;
3303 			mutex_exit(hash_lock);
3304 		}
3305 	}
3306 
3307 	zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED);
3308 
3309 	if (arc_flags & ARC_WAIT)
3310 		return (zio_wait(zio));
3311 
3312 	ASSERT(arc_flags & ARC_NOWAIT);
3313 	zio_nowait(zio);
3314 
3315 	return (0);
3316 }
3317 
3318 static int
3319 arc_memory_throttle(uint64_t reserve, uint64_t txg)
3320 {
3321 #ifdef _KERNEL
3322 	uint64_t inflight_data = arc_anon->arcs_size;
3323 	uint64_t available_memory = ptob(freemem);
3324 	static uint64_t page_load = 0;
3325 	static uint64_t last_txg = 0;
3326 
3327 #if defined(__i386)
3328 	available_memory =
3329 	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3330 #endif
3331 	if (available_memory >= zfs_write_limit_max)
3332 		return (0);
3333 
3334 	if (txg > last_txg) {
3335 		last_txg = txg;
3336 		page_load = 0;
3337 	}
3338 	/*
3339 	 * If we are in pageout, we know that memory is already tight,
3340 	 * the arc is already going to be evicting, so we just want to
3341 	 * continue to let page writes occur as quickly as possible.
3342 	 */
3343 	if (curproc == proc_pageout) {
3344 		if (page_load > MAX(ptob(minfree), available_memory) / 4)
3345 			return (ERESTART);
3346 		/* Note: reserve is inflated, so we deflate */
3347 		page_load += reserve / 8;
3348 		return (0);
3349 	} else if (page_load > 0 && arc_reclaim_needed()) {
3350 		/* memory is low, delay before restarting */
3351 		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3352 		return (EAGAIN);
3353 	}
3354 	page_load = 0;
3355 
3356 	if (arc_size > arc_c_min) {
3357 		uint64_t evictable_memory =
3358 		    arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3359 		    arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3360 		    arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3361 		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3362 		available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3363 	}
3364 
3365 	if (inflight_data > available_memory / 4) {
3366 		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3367 		return (ERESTART);
3368 	}
3369 #endif
3370 	return (0);
3371 }
3372 
3373 void
3374 arc_tempreserve_clear(uint64_t reserve)
3375 {
3376 	atomic_add_64(&arc_tempreserve, -reserve);
3377 	ASSERT((int64_t)arc_tempreserve >= 0);
3378 }
3379 
3380 int
3381 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3382 {
3383 	int error;
3384 
3385 #ifdef ZFS_DEBUG
3386 	/*
3387 	 * Once in a while, fail for no reason.  Everything should cope.
3388 	 */
3389 	if (spa_get_random(10000) == 0) {
3390 		dprintf("forcing random failure\n");
3391 		return (ERESTART);
3392 	}
3393 #endif
3394 	if (reserve > arc_c/4 && !arc_no_grow)
3395 		arc_c = MIN(arc_c_max, reserve * 4);
3396 	if (reserve > arc_c)
3397 		return (ENOMEM);
3398 
3399 	/*
3400 	 * Writes will, almost always, require additional memory allocations
3401 	 * in order to compress/encrypt/etc the data.  We therefore need to
3402 	 * make sure that there is sufficient available memory for this.
3403 	 */
3404 	if (error = arc_memory_throttle(reserve, txg))
3405 		return (error);
3406 
3407 	/*
3408 	 * Throttle writes when the amount of dirty data in the cache
3409 	 * gets too large.  We try to keep the cache less than half full
3410 	 * of dirty blocks so that our sync times don't grow too large.
3411 	 * Note: if two requests come in concurrently, we might let them
3412 	 * both succeed, when one of them should fail.  Not a huge deal.
3413 	 */
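	/*
	 * For example, with arc_c at 3GB a reservation is refused with
	 * ERESTART once anonymous (dirty) data alone exceeds 768MB and the
	 * sum of this reserve, outstanding reservations and anonymous data
	 * would exceed 1.5GB.
	 */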
3414 	if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
3415 	    arc_anon->arcs_size > arc_c / 4) {
3416 		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3417 		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3418 		    arc_tempreserve>>10,
3419 		    arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3420 		    arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3421 		    reserve>>10, arc_c>>10);
3422 		return (ERESTART);
3423 	}
3424 	atomic_add_64(&arc_tempreserve, reserve);
3425 	return (0);
3426 }
3427 
3428 void
3429 arc_init(void)
3430 {
3431 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3432 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3433 
3434 	/* Convert seconds to clock ticks */
3435 	arc_min_prefetch_lifespan = 1 * hz;
3436 
3437 	/* Start out with 1/8 of all memory */
3438 	arc_c = physmem * PAGESIZE / 8;
3439 
3440 #ifdef _KERNEL
3441 	/*
3442 	 * On architectures where the physical memory can be larger
3443 	 * than the addressable space (intel in 32-bit mode), we may
3444 	 * than the addressable space (e.g. Intel in 32-bit mode), we may
3445 	 */
3446 	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3447 #endif
3448 
3449 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3450 	arc_c_min = MAX(arc_c / 4, 64<<20);
3451 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3452 	if (arc_c * 8 >= 1<<30)
3453 		arc_c_max = (arc_c * 8) - (1<<30);
3454 	else
3455 		arc_c_max = arc_c_min;
3456 	arc_c_max = MAX(arc_c * 6, arc_c_max);
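
	/*
	 * Worked example (illustrative numbers only): with 8 Gbytes of
	 * physical memory, arc_c starts at 1 Gbyte, arc_c_min becomes
	 * 256 Mbytes, and arc_c_max becomes 7 Gbytes (all of memory but
	 * 1 Gbyte, which exceeds 3/4 of memory, i.e. 6 Gbytes, here).
	 */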
3457 
3458 	/*
3459 	 * Allow the tunables to override our calculations if they are
3460 	 * reasonable (ie. over 64MB)
3461 	 * reasonable (i.e. over 64MB)
3462 	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3463 		arc_c_max = zfs_arc_max;
3464 	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3465 		arc_c_min = zfs_arc_min;
3466 
3467 	arc_c = arc_c_max;
3468 	arc_p = (arc_c >> 1);
3469 
3470 	/* limit meta-data to 1/4 of the arc capacity */
3471 	arc_meta_limit = arc_c_max / 4;
3472 
3473 	/* Allow the tunable to override if it is reasonable */
3474 	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3475 		arc_meta_limit = zfs_arc_meta_limit;
3476 
3477 	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3478 		arc_c_min = arc_meta_limit / 2;
3479 
3480 	if (zfs_arc_grow_retry > 0)
3481 		arc_grow_retry = zfs_arc_grow_retry;
3482 
3483 	if (zfs_arc_shrink_shift > 0)
3484 		arc_shrink_shift = zfs_arc_shrink_shift;
3485 
3486 	if (zfs_arc_p_min_shift > 0)
3487 		arc_p_min_shift = zfs_arc_p_min_shift;
3488 
3489 	/* if kmem_flags are set, let's try to use less memory */
3490 	if (kmem_debugging())
3491 		arc_c = arc_c / 2;
3492 	if (arc_c < arc_c_min)
3493 		arc_c = arc_c_min;
3494 
3495 	arc_anon = &ARC_anon;
3496 	arc_mru = &ARC_mru;
3497 	arc_mru_ghost = &ARC_mru_ghost;
3498 	arc_mfu = &ARC_mfu;
3499 	arc_mfu_ghost = &ARC_mfu_ghost;
3500 	arc_l2c_only = &ARC_l2c_only;
3501 	arc_size = 0;
3502 
3503 	mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3504 	mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3505 	mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3506 	mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3507 	mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3508 	mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3509 
3510 	list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3511 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3512 	list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3513 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3514 	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3515 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3516 	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3517 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3518 	list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3519 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3520 	list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3521 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3522 	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3523 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3524 	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3525 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3526 	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3527 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3528 	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3529 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3530 
3531 	buf_init();
3532 
3533 	arc_thread_exit = 0;
3534 	arc_eviction_list = NULL;
3535 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3536 	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3537 
3538 	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3539 	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3540 
3541 	if (arc_ksp != NULL) {
3542 		arc_ksp->ks_data = &arc_stats;
3543 		kstat_install(arc_ksp);
3544 	}
3545 
3546 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3547 	    TS_RUN, minclsyspri);
3548 
3549 	arc_dead = FALSE;
3550 	arc_warm = B_FALSE;
3551 
3552 	if (zfs_write_limit_max == 0)
3553 		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3554 	else
3555 		zfs_write_limit_shift = 0;
3556 	mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3557 }
3558 
3559 void
3560 arc_fini(void)
3561 {
3562 	mutex_enter(&arc_reclaim_thr_lock);
3563 	arc_thread_exit = 1;
3564 	while (arc_thread_exit != 0)
3565 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3566 	mutex_exit(&arc_reclaim_thr_lock);
3567 
3568 	arc_flush(NULL);
3569 
3570 	arc_dead = TRUE;
3571 
3572 	if (arc_ksp != NULL) {
3573 		kstat_delete(arc_ksp);
3574 		arc_ksp = NULL;
3575 	}
3576 
3577 	mutex_destroy(&arc_eviction_mtx);
3578 	mutex_destroy(&arc_reclaim_thr_lock);
3579 	cv_destroy(&arc_reclaim_thr_cv);
3580 
3581 	list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3582 	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3583 	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3584 	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3585 	list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3586 	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3587 	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3588 	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3589 
3590 	mutex_destroy(&arc_anon->arcs_mtx);
3591 	mutex_destroy(&arc_mru->arcs_mtx);
3592 	mutex_destroy(&arc_mru_ghost->arcs_mtx);
3593 	mutex_destroy(&arc_mfu->arcs_mtx);
3594 	mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3595 	mutex_destroy(&arc_l2c_only->arcs_mtx);
3596 
3597 	mutex_destroy(&zfs_write_limit_lock);
3598 
3599 	buf_fini();
3600 }
3601 
3602 /*
3603  * Level 2 ARC
3604  *
3605  * The level 2 ARC (L2ARC) is a cache layer in between main memory and disk.
3606  * It uses dedicated storage devices to hold cached data, which are populated
3607  * using large infrequent writes.  The main role of this cache is to boost
3608  * the performance of random read workloads.  The intended L2ARC devices
3609  * include short-stroked disks, solid state disks, and other media with
3610  * substantially faster read latency than disk.
3611  *
3612  *                 +-----------------------+
3613  *                 |         ARC           |
3614  *                 +-----------------------+
3615  *                    |         ^     ^
3616  *                    |         |     |
3617  *      l2arc_feed_thread()    arc_read()
3618  *                    |         |     |
3619  *                    |  l2arc read   |
3620  *                    V         |     |
3621  *               +---------------+    |
3622  *               |     L2ARC     |    |
3623  *               +---------------+    |
3624  *                   |    ^           |
3625  *          l2arc_write() |           |
3626  *                   |    |           |
3627  *                   V    |           |
3628  *                 +-------+      +-------+
3629  *                 | vdev  |      | vdev  |
3630  *                 | cache |      | cache |
3631  *                 +-------+      +-------+
3632  *                 +=========+     .-----.
3633  *                 :  L2ARC  :    |-_____-|
3634  *                 : devices :    | Disks |
3635  *                 +=========+    `-_____-'
3636  *
3637  * Read requests are satisfied from the following sources, in order:
3638  *
3639  *	1) ARC
3640  *	2) vdev cache of L2ARC devices
3641  *	3) L2ARC devices
3642  *	4) vdev cache of disks
3643  *	5) disks
3644  *
3645  * Some L2ARC device types exhibit extremely slow write performance.
3646  * To accommodate this, there are some significant differences between
3647  * the L2ARC and a traditional cache design:
3648  *
3649  * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
3650  * the ARC behave as usual, freeing buffers and placing headers on ghost
3651  * lists.  The ARC does not send buffers to the L2ARC during eviction as
3652  * this would add inflated write latencies for all ARC memory pressure.
3653  *
3654  * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3655  * It does this by periodically scanning buffers from the eviction-end of
3656  * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3657  * not already there.  It scans until a headroom of buffers is satisfied,
3658  * which itself acts as a cushion against ARC eviction.  The thread that
3659  * does this is l2arc_feed_thread(), illustrated below; example sizes are
3660  * included to give a better sense of the ratios than the diagram alone:
3661  *
3662  *	       head -->                        tail
3663  *	        +---------------------+----------+
3664  *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
3665  *	        +---------------------+----------+   |   o L2ARC eligible
3666  *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
3667  *	        +---------------------+----------+   |
3668  *	             15.9 Gbytes      ^ 32 Mbytes    |
3669  *	                           headroom          |
3670  *	                                      l2arc_feed_thread()
3671  *	                                             |
3672  *	                 l2arc write hand <--[oooo]--'
3673  *	                         |           8 Mbyte
3674  *	                         |          write max
3675  *	                         V
3676  *		  +==============================+
3677  *	L2ARC dev |####|#|###|###|    |####| ... |
3678  *	          +==============================+
3679  *	                     32 Gbytes
3680  *
3681  * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3682  * evicted, then the L2ARC has cached a buffer much sooner than it probably
3683  * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
3684  * safe to say that this is an uncommon case, since buffers at the end of
3685  * the ARC lists have moved there due to inactivity.
3686  *
3687  * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3688  * then the L2ARC simply misses copying some buffers.  This serves as a
3689  * pressure valve to prevent heavy read workloads from both stalling the ARC
3690  * with waits and clogging the L2ARC with writes.  This also helps prevent
3691  * the potential for the L2ARC to churn if it attempts to cache content too
3692  * quickly, such as during backups of the entire pool.
3693  *
3694  * 5. After system boot and before the ARC has filled main memory, there are
3695  * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3696  * lists can remain mostly static.  Instead of searching from the tail of
3697  * these lists as pictured, l2arc_feed_thread() will search from the list
3698  * heads for eligible buffers, greatly increasing its chance of finding them.
3699  *
3700  * The L2ARC device write speed is also boosted during this time so that
3701  * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
3702  * there are no L2ARC reads, and no fear of degrading read performance
3703  * through increased writes.
3704  *
3705  * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3706  * the vdev queue can aggregate them into larger and fewer writes.  Each
3707  * device is written to in a rotor fashion, sweeping writes through
3708  * available space then repeating.
3709  *
3710  * 7. The L2ARC does not store dirty content.  It never needs to flush
3711  * write buffers back to disk-based storage.
3712  *
3713  * 8. If an ARC buffer is written (and dirtied) which also exists in the
3714  * L2ARC, the now stale L2ARC buffer is immediately dropped.
3715  *
3716  * The performance of the L2ARC can be tweaked by a number of tunables, which
3717  * may be necessary for different workloads:
3718  *
3719  *	l2arc_write_max		max write bytes per interval
3720  *	l2arc_write_boost	extra write bytes during device warmup
3721  *	l2arc_noprefetch	skip caching prefetched buffers
3722  *	l2arc_headroom		number of max device writes to precache
3723  *	l2arc_feed_secs		seconds between L2ARC writing
3724  *
3725  * Tunables may be removed or added as future performance improvements are
3726  * integrated, and also may become zpool properties.
3727  *
3728  * There are three key functions that control how the L2ARC warms up:
3729  *
3730  *	l2arc_write_eligible()	check if a buffer is eligible to cache
3731  *	l2arc_write_size()	calculate how much to write
3732  *	l2arc_write_interval()	calculate sleep delay between writes
3733  *
3734  * These three functions determine what to write, how much, and how quickly
3735  * to send writes.
3736  */
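
/*
 * Rough illustration (assumed values, for intuition only): with the
 * 8 Mbyte write max shown in the diagram above and a feed interval of
 * about one second, the L2ARC fills at roughly 8 Mbytes/s in steady
 * state, so warming the 32 Gbyte device pictured there takes on the
 * order of an hour.  l2arc_write_boost raises the per-interval write
 * size until the first ARC eviction (see arc_warm below), shortening
 * this warmup period.
 */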
3737 
3738 static boolean_t
3739 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
3740 {
3741 	/*
3742 	 * A buffer is *not* eligible for the L2ARC if it:
3743 	 * 1. belongs to a different spa.
3744 	 * 2. has no attached buffer.
3745 	 * 3. is already cached on the L2ARC.
3746 	 * 4. has an I/O in progress (it may be an incomplete read).
3747 	 * 5. is flagged not eligible (zfs property).
3748 	 */
3749 	if (ab->b_spa != spa_guid || ab->b_buf == NULL || ab->b_l2hdr != NULL ||
3750 	    HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
3751 		return (B_FALSE);
3752 
3753 	return (B_TRUE);
3754 }
3755 
3756 static uint64_t
3757 l2arc_write_size(l2arc_dev_t *dev)
3758 {
3759 	uint64_t size;
3760 
3761 	size = dev->l2ad_write;
3762 
3763 	if (arc_warm == B_FALSE)
3764 		size += dev->l2ad_boost;
3765 
3766 	return (size);
3768 }
3769 
3770 static clock_t
3771 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
3772 {
3773 	clock_t interval, next;
3774 
3775 	/*
3776 	 * If the ARC lists are busy, increase our write rate; if the
3777 	 * lists are stale, idle back.  This is achieved by checking
3778 	 * how much we previously wrote - if it was more than half of
3779 	 * what we wanted, schedule the next write much sooner.
3780 	 */
3781 	if (l2arc_feed_again && wrote > (wanted / 2))
3782 		interval = (hz * l2arc_feed_min_ms) / 1000;
3783 	else
3784 		interval = hz * l2arc_feed_secs;
3785 
3786 	next = MAX(lbolt, MIN(lbolt + interval, began + interval));
3787 
3788 	return (next);
3789 }
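
/*
 * Worked example (hypothetical tunable values): with hz = 100,
 * l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200, a pass that wrote more
 * than half of what was wanted schedules the next feed 20 ticks (200 ms)
 * after the previous pass began, or immediately if that point has already
 * passed; an idle pass waits the full 100 ticks (one second) instead.
 */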
3790 
3791 static void
3792 l2arc_hdr_stat_add(void)
3793 {
3794 	ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
3795 	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
3796 }
3797 
3798 static void
3799 l2arc_hdr_stat_remove(void)
3800 {
3801 	ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
3802 	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
3803 }
3804 
3805 /*
3806  * Cycle through L2ARC devices.  This is how the L2ARC load balances.
3807  * If a device is returned, this also returns holding the spa config lock.
3808  */
3809 static l2arc_dev_t *
3810 l2arc_dev_get_next(void)
3811 {
3812 	l2arc_dev_t *first, *next = NULL;
3813 
3814 	/*
3815 	 * Lock out the removal of spas (spa_namespace_lock), then removal
3816 	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
3817 	 * both locks will be dropped and a spa config lock held instead.
3818 	 */
3819 	mutex_enter(&spa_namespace_lock);
3820 	mutex_enter(&l2arc_dev_mtx);
3821 
3822 	/* if there are no vdevs, there is nothing to do */
3823 	if (l2arc_ndev == 0)
3824 		goto out;
3825 
3826 	first = NULL;
3827 	next = l2arc_dev_last;
3828 	do {
3829 		/* loop around the list looking for a non-faulted vdev */
3830 		if (next == NULL) {
3831 			next = list_head(l2arc_dev_list);
3832 		} else {
3833 			next = list_next(l2arc_dev_list, next);
3834 			if (next == NULL)
3835 				next = list_head(l2arc_dev_list);
3836 		}
3837 
3838 		/* if we have come back to the start, bail out */
3839 		if (first == NULL)
3840 			first = next;
3841 		else if (next == first)
3842 			break;
3843 
3844 	} while (vdev_is_dead(next->l2ad_vdev));
3845 
3846 	/* if we were unable to find any usable vdevs, return NULL */
3847 	if (vdev_is_dead(next->l2ad_vdev))
3848 		next = NULL;
3849 
3850 	l2arc_dev_last = next;
3851 
3852 out:
3853 	mutex_exit(&l2arc_dev_mtx);
3854 
3855 	/*
3856 	 * Grab the config lock to prevent the 'next' device from being
3857 	 * removed while we are writing to it.
3858 	 */
3859 	if (next != NULL)
3860 		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
3861 	mutex_exit(&spa_namespace_lock);
3862 
3863 	return (next);
3864 }
3865 
3866 /*
3867  * Free buffers that were tagged for destruction.
3868  */
3869 static void
3870 l2arc_do_free_on_write(void)
3871 {
3872 	list_t *buflist;
3873 	l2arc_data_free_t *df, *df_prev;
3874 
3875 	mutex_enter(&l2arc_free_on_write_mtx);
3876 	buflist = l2arc_free_on_write;
3877 
3878 	for (df = list_tail(buflist); df; df = df_prev) {
3879 		df_prev = list_prev(buflist, df);
3880 		ASSERT(df->l2df_data != NULL);
3881 		ASSERT(df->l2df_func != NULL);
3882 		df->l2df_func(df->l2df_data, df->l2df_size);
3883 		list_remove(buflist, df);
3884 		kmem_free(df, sizeof (l2arc_data_free_t));
3885 	}
3886 
3887 	mutex_exit(&l2arc_free_on_write_mtx);
3888 }
3889 
3890 /*
3891  * A write to a cache device has completed.  Update all headers to allow
3892  * reads from these buffers to begin.
3893  */
3894 static void
3895 l2arc_write_done(zio_t *zio)
3896 {
3897 	l2arc_write_callback_t *cb;
3898 	l2arc_dev_t *dev;
3899 	list_t *buflist;
3900 	arc_buf_hdr_t *head, *ab, *ab_prev;
3901 	l2arc_buf_hdr_t *abl2;
3902 	kmutex_t *hash_lock;
3903 
3904 	cb = zio->io_private;
3905 	ASSERT(cb != NULL);
3906 	dev = cb->l2wcb_dev;
3907 	ASSERT(dev != NULL);
3908 	head = cb->l2wcb_head;
3909 	ASSERT(head != NULL);
3910 	buflist = dev->l2ad_buflist;
3911 	ASSERT(buflist != NULL);
3912 	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
3913 	    l2arc_write_callback_t *, cb);
3914 
3915 	if (zio->io_error != 0)
3916 		ARCSTAT_BUMP(arcstat_l2_writes_error);
3917 
3918 	mutex_enter(&l2arc_buflist_mtx);
3919 
3920 	/*
3921 	 * All writes completed, or an error was hit.
3922 	 */
3923 	for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
3924 		ab_prev = list_prev(buflist, ab);
3925 
3926 		hash_lock = HDR_LOCK(ab);
3927 		if (!mutex_tryenter(hash_lock)) {
3928 			/*
3929 			 * This buffer misses out.  It may be in a stage
3930 			 * of eviction.  Its ARC_L2_WRITING flag will be
3931 			 * left set, denying reads to this buffer.
3932 			 */
3933 			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
3934 			continue;
3935 		}
3936 
3937 		if (zio->io_error != 0) {
3938 			/*
3939 			 * Error - drop L2ARC entry.
3940 			 */
3941 			list_remove(buflist, ab);
3942 			abl2 = ab->b_l2hdr;
3943 			ab->b_l2hdr = NULL;
3944 			kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
3945 			ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
3946 		}
3947 
3948 		/*
3949 		 * Allow ARC to begin reads to this L2ARC entry.
3950 		 */
3951 		ab->b_flags &= ~ARC_L2_WRITING;
3952 
3953 		mutex_exit(hash_lock);
3954 	}
3955 
3956 	atomic_inc_64(&l2arc_writes_done);
3957 	list_remove(buflist, head);
3958 	kmem_cache_free(hdr_cache, head);
3959 	mutex_exit(&l2arc_buflist_mtx);
3960 
3961 	l2arc_do_free_on_write();
3962 
3963 	kmem_free(cb, sizeof (l2arc_write_callback_t));
3964 }
3965 
3966 /*
3967  * A read from a cache device has completed.  Validate buffer contents before
3968  * handing over to the regular ARC routines.
3969  */
3970 static void
3971 l2arc_read_done(zio_t *zio)
3972 {
3973 	l2arc_read_callback_t *cb;
3974 	arc_buf_hdr_t *hdr;
3975 	arc_buf_t *buf;
3976 	kmutex_t *hash_lock;
3977 	int equal;
3978 
3979 	ASSERT(zio->io_vd != NULL);
3980 	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
3981 
3982 	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
3983 
3984 	cb = zio->io_private;
3985 	ASSERT(cb != NULL);
3986 	buf = cb->l2rcb_buf;
3987 	ASSERT(buf != NULL);
3988 	hdr = buf->b_hdr;
3989 	ASSERT(hdr != NULL);
3990 
3991 	hash_lock = HDR_LOCK(hdr);
3992 	mutex_enter(hash_lock);
3993 
3994 	/*
3995 	 * Check that this buffer survived the L2ARC journey.
3996 	 */
3997 	equal = arc_cksum_equal(buf);
3998 	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
3999 		mutex_exit(hash_lock);
4000 		zio->io_private = buf;
4001 		zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
4002 		zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
4003 		arc_read_done(zio);
4004 	} else {
4005 		mutex_exit(hash_lock);
4006 		/*
4007 		 * Buffer didn't survive caching.  Increment stats and
4008 		 * reissue to the original storage device.
4009 		 */
4010 		if (zio->io_error != 0) {
4011 			ARCSTAT_BUMP(arcstat_l2_io_error);
4012 		} else {
4013 			zio->io_error = EIO;
4014 		}
4015 		if (!equal)
4016 			ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4017 
4018 		/*
4019 		 * If there's no waiter, issue an async i/o to the primary
4020 		 * storage now.  If there *is* a waiter, the caller must
4021 		 * issue the i/o in a context where it's OK to block.
4022 		 */
4023 		if (zio->io_waiter == NULL) {
4024 			zio_t *pio = zio_unique_parent(zio);
4025 
4026 			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4027 
4028 			zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4029 			    buf->b_data, zio->io_size, arc_read_done, buf,
4030 			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4031 		}
4032 	}
4033 
4034 	kmem_free(cb, sizeof (l2arc_read_callback_t));
4035 }
4036 
4037 /*
4038  * This is the list priority from which the L2ARC will search for pages to
4039  * cache.  This is used within loops (0..3) to cycle through lists in the
4040  * desired order.  This order can have a significant effect on cache
4041  * performance.
4042  *
4043  * Currently the metadata lists are hit first, MFU then MRU, followed by
4044  * the data lists.  This function returns a locked list, and also returns
4045  * the lock pointer.
4046  */
4047 static list_t *
4048 l2arc_list_locked(int list_num, kmutex_t **lock)
4049 {
4050 	list_t *list;
4051 
4052 	ASSERT(list_num >= 0 && list_num <= 3);
4053 
4054 	switch (list_num) {
4055 	case 0:
4056 		list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4057 		*lock = &arc_mfu->arcs_mtx;
4058 		break;
4059 	case 1:
4060 		list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4061 		*lock = &arc_mru->arcs_mtx;
4062 		break;
4063 	case 2:
4064 		list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4065 		*lock = &arc_mfu->arcs_mtx;
4066 		break;
4067 	case 3:
4068 		list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4069 		*lock = &arc_mru->arcs_mtx;
4070 		break;
4071 	}
4072 
4073 	ASSERT(!(MUTEX_HELD(*lock)));
4074 	mutex_enter(*lock);
4075 	return (list);
4076 }
4077 
4078 /*
4079  * Evict buffers from the device write hand to the distance specified in
4080  * bytes.  This distance may span populated buffers, or it may span nothing.
4081  * This clears a region of the L2ARC device, making it ready for writing.
4082  * If the 'all' boolean is set, every buffer is evicted.
4083  */
4084 static void
4085 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4086 {
4087 	list_t *buflist;
4088 	l2arc_buf_hdr_t *abl2;
4089 	arc_buf_hdr_t *ab, *ab_prev;
4090 	kmutex_t *hash_lock;
4091 	uint64_t taddr;
4092 
4093 	buflist = dev->l2ad_buflist;
4094 
4095 	if (buflist == NULL)
4096 		return;
4097 
4098 	if (!all && dev->l2ad_first) {
4099 		/*
4100 		 * This is the first sweep through the device.  There is
4101 		 * nothing to evict.
4102 		 */
4103 		return;
4104 	}
4105 
4106 	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4107 		/*
4108 		 * When nearing the end of the device, evict to the end
4109 		 * before the device write hand jumps to the start.
4110 		 */
4111 		taddr = dev->l2ad_end;
4112 	} else {
4113 		taddr = dev->l2ad_hand + distance;
4114 	}
4115 	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4116 	    uint64_t, taddr, boolean_t, all);
4117 
4118 top:
4119 	mutex_enter(&l2arc_buflist_mtx);
4120 	for (ab = list_tail(buflist); ab; ab = ab_prev) {
4121 		ab_prev = list_prev(buflist, ab);
4122 
4123 		hash_lock = HDR_LOCK(ab);
4124 		if (!mutex_tryenter(hash_lock)) {
4125 			/*
4126 			 * Missed the hash lock.  Retry.
4127 			 */
4128 			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4129 			mutex_exit(&l2arc_buflist_mtx);
4130 			mutex_enter(hash_lock);
4131 			mutex_exit(hash_lock);
4132 			goto top;
4133 		}
4134 
4135 		if (HDR_L2_WRITE_HEAD(ab)) {
4136 			/*
4137 			 * We hit a write head node.  Leave it for
4138 			 * l2arc_write_done().
4139 			 */
4140 			list_remove(buflist, ab);
4141 			mutex_exit(hash_lock);
4142 			continue;
4143 		}
4144 
4145 		if (!all && ab->b_l2hdr != NULL &&
4146 		    (ab->b_l2hdr->b_daddr > taddr ||
4147 		    ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4148 			/*
4149 			 * We've evicted to the target address,
4150 			 * or the end of the device.
4151 			 */
4152 			mutex_exit(hash_lock);
4153 			break;
4154 		}
4155 
4156 		if (HDR_FREE_IN_PROGRESS(ab)) {
4157 			/*
4158 			 * Already on the path to destruction.
4159 			 */
4160 			mutex_exit(hash_lock);
4161 			continue;
4162 		}
4163 
4164 		if (ab->b_state == arc_l2c_only) {
4165 			ASSERT(!HDR_L2_READING(ab));
4166 			/*
4167 			 * This doesn't exist in the ARC.  Destroy.
4168 			 * arc_hdr_destroy() will call list_remove()
4169 			 * and decrement arcstat_l2_size.
4170 			 */
4171 			arc_change_state(arc_anon, ab, hash_lock);
4172 			arc_hdr_destroy(ab);
4173 		} else {
4174 			/*
4175 			 * Invalidate issued or about to be issued
4176 			 * reads, since we may be about to write
4177 			 * over this location.
4178 			 */
4179 			if (HDR_L2_READING(ab)) {
4180 				ARCSTAT_BUMP(arcstat_l2_evict_reading);
4181 				ab->b_flags |= ARC_L2_EVICTED;
4182 			}
4183 
4184 			/*
4185 			 * Tell ARC this no longer exists in L2ARC.
4186 			 */
4187 			if (ab->b_l2hdr != NULL) {
4188 				abl2 = ab->b_l2hdr;
4189 				ab->b_l2hdr = NULL;
4190 				kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4191 				ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4192 			}
4193 			list_remove(buflist, ab);
4194 
4195 			/*
4196 			 * This may have been leftover after a
4197 			 * This may have been left over after a
4198 			 */
4199 			ab->b_flags &= ~ARC_L2_WRITING;
4200 		}
4201 		mutex_exit(hash_lock);
4202 	}
4203 	mutex_exit(&l2arc_buflist_mtx);
4204 
4205 	spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict));
4206 	dev->l2ad_evict = taddr;
4207 }
4208 
4209 /*
4210  * Find and write ARC buffers to the L2ARC device.
4211  *
4212  * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4213  * for reading until they have completed writing.
4214  */
4215 static uint64_t
4216 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
4217 {
4218 	arc_buf_hdr_t *ab, *ab_prev, *head;
4219 	l2arc_buf_hdr_t *hdrl2;
4220 	list_t *list;
4221 	uint64_t passed_sz, write_sz, buf_sz, headroom;
4222 	void *buf_data;
4223 	kmutex_t *hash_lock, *list_lock;
4224 	boolean_t have_lock, full;
4225 	l2arc_write_callback_t *cb;
4226 	zio_t *pio, *wzio;
4227 	uint64_t guid = spa_guid(spa);
4228 
4229 	ASSERT(dev->l2ad_vdev != NULL);
4230 
4231 	pio = NULL;
4232 	write_sz = 0;
4233 	full = B_FALSE;
4234 	head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4235 	head->b_flags |= ARC_L2_WRITE_HEAD;
4236 
4237 	/*
4238 	 * Copy buffers for L2ARC writing.
4239 	 */
4240 	mutex_enter(&l2arc_buflist_mtx);
4241 	for (int try = 0; try <= 3; try++) {
4242 		list = l2arc_list_locked(try, &list_lock);
4243 		passed_sz = 0;
4244 
4245 		/*
4246 		 * L2ARC fast warmup.
4247 		 *
4248 		 * Until the ARC is warm and starts to evict, read from the
4249 		 * head of the ARC lists rather than the tail.
4250 		 */
4251 		headroom = target_sz * l2arc_headroom;
4252 		if (arc_warm == B_FALSE)
4253 			ab = list_head(list);
4254 		else
4255 			ab = list_tail(list);
4256 
4257 		for (; ab; ab = ab_prev) {
4258 			if (arc_warm == B_FALSE)
4259 				ab_prev = list_next(list, ab);
4260 			else
4261 				ab_prev = list_prev(list, ab);
4262 
4263 			hash_lock = HDR_LOCK(ab);
4264 			have_lock = MUTEX_HELD(hash_lock);
4265 			if (!have_lock && !mutex_tryenter(hash_lock)) {
4266 				/*
4267 				 * Skip this buffer rather than waiting.
4268 				 */
4269 				continue;
4270 			}
4271 
4272 			passed_sz += ab->b_size;
4273 			if (passed_sz > headroom) {
4274 				/*
4275 				 * Searched too far.
4276 				 */
4277 				mutex_exit(hash_lock);
4278 				break;
4279 			}
4280 
4281 			if (!l2arc_write_eligible(guid, ab)) {
4282 				mutex_exit(hash_lock);
4283 				continue;
4284 			}
4285 
4286 			if ((write_sz + ab->b_size) > target_sz) {
4287 				full = B_TRUE;
4288 				mutex_exit(hash_lock);
4289 				break;
4290 			}
4291 
4292 			if (pio == NULL) {
4293 				/*
4294 				 * Insert a dummy header on the buflist so
4295 				 * l2arc_write_done() can find where the
4296 				 * write buffers begin without searching.
4297 				 */
4298 				list_insert_head(dev->l2ad_buflist, head);
4299 
4300 				cb = kmem_alloc(
4301 				    sizeof (l2arc_write_callback_t), KM_SLEEP);
4302 				cb->l2wcb_dev = dev;
4303 				cb->l2wcb_head = head;
4304 				pio = zio_root(spa, l2arc_write_done, cb,
4305 				    ZIO_FLAG_CANFAIL);
4306 			}
4307 
4308 			/*
4309 			 * Create and add a new L2ARC header.
4310 			 */
4311 			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4312 			hdrl2->b_dev = dev;
4313 			hdrl2->b_daddr = dev->l2ad_hand;
4314 
4315 			ab->b_flags |= ARC_L2_WRITING;
4316 			ab->b_l2hdr = hdrl2;
4317 			list_insert_head(dev->l2ad_buflist, ab);
4318 			buf_data = ab->b_buf->b_data;
4319 			buf_sz = ab->b_size;
4320 
4321 			/*
4322 			 * Compute and store the buffer cksum before
4323 			 * writing.  On debug the cksum is verified first.
4324 			 */
4325 			arc_cksum_verify(ab->b_buf);
4326 			arc_cksum_compute(ab->b_buf, B_TRUE);
4327 
4328 			mutex_exit(hash_lock);
4329 
4330 			wzio = zio_write_phys(pio, dev->l2ad_vdev,
4331 			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4332 			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4333 			    ZIO_FLAG_CANFAIL, B_FALSE);
4334 
4335 			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4336 			    zio_t *, wzio);
4337 			(void) zio_nowait(wzio);
4338 
4339 			/*
4340 			 * Keep the clock hand suitably device-aligned.
4341 			 */
4342 			buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4343 
4344 			write_sz += buf_sz;
4345 			dev->l2ad_hand += buf_sz;
4346 		}
4347 
4348 		mutex_exit(list_lock);
4349 
4350 		if (full == B_TRUE)
4351 			break;
4352 	}
4353 	mutex_exit(&l2arc_buflist_mtx);
4354 
4355 	if (pio == NULL) {
4356 		ASSERT3U(write_sz, ==, 0);
4357 		kmem_cache_free(hdr_cache, head);
4358 		return (0);
4359 	}
4360 
4361 	ASSERT3U(write_sz, <=, target_sz);
4362 	ARCSTAT_BUMP(arcstat_l2_writes_sent);
4363 	ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
4364 	ARCSTAT_INCR(arcstat_l2_size, write_sz);
4365 	spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz);
4366 
4367 	/*
4368 	 * Bump device hand to the device start if it is approaching the end.
4369 	 * l2arc_evict() will already have evicted ahead for this case.
4370 	 */
4371 	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4372 		spa_l2cache_space_update(dev->l2ad_vdev, 0,
4373 		    dev->l2ad_end - dev->l2ad_hand);
4374 		dev->l2ad_hand = dev->l2ad_start;
4375 		dev->l2ad_evict = dev->l2ad_start;
4376 		dev->l2ad_first = B_FALSE;
4377 	}
4378 
4379 	dev->l2ad_writing = B_TRUE;
4380 	(void) zio_wait(pio);
4381 	dev->l2ad_writing = B_FALSE;
4382 
4383 	return (write_sz);
4384 }
4385 
4386 /*
4387  * This thread feeds the L2ARC at regular intervals.  This is the beating
4388  * heart of the L2ARC.
4389  */
4390 static void
4391 l2arc_feed_thread(void)
4392 {
4393 	callb_cpr_t cpr;
4394 	l2arc_dev_t *dev;
4395 	spa_t *spa;
4396 	uint64_t size, wrote;
4397 	clock_t begin, next = lbolt;
4398 
4399 	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4400 
4401 	mutex_enter(&l2arc_feed_thr_lock);
4402 
4403 	while (l2arc_thread_exit == 0) {
4404 		CALLB_CPR_SAFE_BEGIN(&cpr);
4405 		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4406 		    next);
4407 		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
4408 		next = lbolt + hz;
4409 
4410 		/*
4411 		 * Quick check for L2ARC devices.
4412 		 */
4413 		mutex_enter(&l2arc_dev_mtx);
4414 		if (l2arc_ndev == 0) {
4415 			mutex_exit(&l2arc_dev_mtx);
4416 			continue;
4417 		}
4418 		mutex_exit(&l2arc_dev_mtx);
4419 		begin = lbolt;
4420 
4421 		/*
4422 		 * This selects the next l2arc device to write to, and in
4423 		 * doing so the next spa to feed from: dev->l2ad_spa.   This
4424 		 * will return NULL if there are now no l2arc devices or if
4425 		 * they are all faulted.
4426 		 *
4427 		 * If a device is returned, its spa's config lock is also
4428 		 * held to prevent device removal.  l2arc_dev_get_next()
4429 		 * will grab and release l2arc_dev_mtx.
4430 		 */
4431 		if ((dev = l2arc_dev_get_next()) == NULL)
4432 			continue;
4433 
4434 		spa = dev->l2ad_spa;
4435 		ASSERT(spa != NULL);
4436 
4437 		/*
4438 		 * Avoid contributing to memory pressure.
4439 		 */
4440 		if (arc_reclaim_needed()) {
4441 			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4442 			spa_config_exit(spa, SCL_L2ARC, dev);
4443 			continue;
4444 		}
4445 
4446 		ARCSTAT_BUMP(arcstat_l2_feeds);
4447 
4448 		size = l2arc_write_size(dev);
4449 
4450 		/*
4451 		 * Evict L2ARC buffers that will be overwritten.
4452 		 */
4453 		l2arc_evict(dev, size, B_FALSE);
4454 
4455 		/*
4456 		 * Write ARC buffers.
4457 		 */
4458 		wrote = l2arc_write_buffers(spa, dev, size);
4459 
4460 		/*
4461 		 * Calculate interval between writes.
4462 		 */
4463 		next = l2arc_write_interval(begin, size, wrote);
4464 		spa_config_exit(spa, SCL_L2ARC, dev);
4465 	}
4466 
4467 	l2arc_thread_exit = 0;
4468 	cv_broadcast(&l2arc_feed_thr_cv);
4469 	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
4470 	thread_exit();
4471 }
4472 
4473 boolean_t
4474 l2arc_vdev_present(vdev_t *vd)
4475 {
4476 	l2arc_dev_t *dev;
4477 
4478 	mutex_enter(&l2arc_dev_mtx);
4479 	for (dev = list_head(l2arc_dev_list); dev != NULL;
4480 	    dev = list_next(l2arc_dev_list, dev)) {
4481 		if (dev->l2ad_vdev == vd)
4482 			break;
4483 	}
4484 	mutex_exit(&l2arc_dev_mtx);
4485 
4486 	return (dev != NULL);
4487 }
4488 
4489 /*
4490  * Add a vdev for use by the L2ARC.  By this point the spa has already
4491  * validated the vdev and opened it.
4492  */
4493 void
4494 l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
4495 {
4496 	l2arc_dev_t *adddev;
4497 
4498 	ASSERT(!l2arc_vdev_present(vd));
4499 
4500 	/*
4501 	 * Create a new l2arc device entry.
4502 	 */
4503 	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
4504 	adddev->l2ad_spa = spa;
4505 	adddev->l2ad_vdev = vd;
4506 	adddev->l2ad_write = l2arc_write_max;
4507 	adddev->l2ad_boost = l2arc_write_boost;
4508 	adddev->l2ad_start = start;
4509 	adddev->l2ad_end = end;
4510 	adddev->l2ad_hand = adddev->l2ad_start;
4511 	adddev->l2ad_evict = adddev->l2ad_start;
4512 	adddev->l2ad_first = B_TRUE;
4513 	adddev->l2ad_writing = B_FALSE;
4514 	ASSERT3U(adddev->l2ad_write, >, 0);
4515 
4516 	/*
4517 	 * This is a list of all ARC buffers that are still valid on the
4518 	 * device.
4519 	 */
4520 	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
4521 	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
4522 	    offsetof(arc_buf_hdr_t, b_l2node));
4523 
4524 	spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);
4525 
4526 	/*
4527 	 * Add device to global list
4528 	 */
4529 	mutex_enter(&l2arc_dev_mtx);
4530 	list_insert_head(l2arc_dev_list, adddev);
4531 	atomic_inc_64(&l2arc_ndev);
4532 	mutex_exit(&l2arc_dev_mtx);
4533 }
4534 
4535 /*
4536  * Remove a vdev from the L2ARC.
4537  */
4538 void
4539 l2arc_remove_vdev(vdev_t *vd)
4540 {
4541 	l2arc_dev_t *dev, *nextdev, *remdev = NULL;
4542 
4543 	/*
4544 	 * Find the device by vdev
4545 	 */
4546 	mutex_enter(&l2arc_dev_mtx);
4547 	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
4548 		nextdev = list_next(l2arc_dev_list, dev);
4549 		if (vd == dev->l2ad_vdev) {
4550 			remdev = dev;
4551 			break;
4552 		}
4553 	}
4554 	ASSERT(remdev != NULL);
4555 
4556 	/*
4557 	 * Remove device from global list
4558 	 */
4559 	list_remove(l2arc_dev_list, remdev);
4560 	l2arc_dev_last = NULL;		/* may have been invalidated */
4561 	atomic_dec_64(&l2arc_ndev);
4562 	mutex_exit(&l2arc_dev_mtx);
4563 
4564 	/*
4565 	 * Clear all buflists and ARC references.  L2ARC device flush.
4566 	 */
4567 	l2arc_evict(remdev, 0, B_TRUE);
4568 	list_destroy(remdev->l2ad_buflist);
4569 	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
4570 	kmem_free(remdev, sizeof (l2arc_dev_t));
4571 }
4572 
4573 void
4574 l2arc_init(void)
4575 {
4576 	l2arc_thread_exit = 0;
4577 	l2arc_ndev = 0;
4578 	l2arc_writes_sent = 0;
4579 	l2arc_writes_done = 0;
4580 
4581 	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4582 	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
4583 	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
4584 	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
4585 	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
4586 
4587 	l2arc_dev_list = &L2ARC_dev_list;
4588 	l2arc_free_on_write = &L2ARC_free_on_write;
4589 	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
4590 	    offsetof(l2arc_dev_t, l2ad_node));
4591 	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
4592 	    offsetof(l2arc_data_free_t, l2df_list_node));
4593 }
4594 
4595 void
4596 l2arc_fini(void)
4597 {
4598 	/*
4599 	 * This is called from dmu_fini(), which is called from spa_fini().
4600 	 * Because of this, we can assume that all l2arc devices have
4601 	 * already been removed when the pools themselves were removed.
4602 	 */
4603 
4604 	l2arc_do_free_on_write();
4605 
4606 	mutex_destroy(&l2arc_feed_thr_lock);
4607 	cv_destroy(&l2arc_feed_thr_cv);
4608 	mutex_destroy(&l2arc_dev_mtx);
4609 	mutex_destroy(&l2arc_buflist_mtx);
4610 	mutex_destroy(&l2arc_free_on_write_mtx);
4611 
4612 	list_destroy(l2arc_dev_list);
4613 	list_destroy(l2arc_free_on_write);
4614 }
4615 
4616 void
4617 l2arc_start(void)
4618 {
4619 	if (!(spa_mode_global & FWRITE))
4620 		return;
4621 
4622 	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
4623 	    TS_RUN, minclsyspri);
4624 }
4625 
4626 void
4627 l2arc_stop(void)
4628 {
4629 	if (!(spa_mode_global & FWRITE))
4630 		return;
4631 
4632 	mutex_enter(&l2arc_feed_thr_lock);
4633 	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
4634 	l2arc_thread_exit = 1;
4635 	while (l2arc_thread_exit != 0)
4636 		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
4637 	mutex_exit(&l2arc_feed_thr_lock);
4638 }
4639