xref: /illumos-gate/usr/src/uts/common/fs/zfs/arc.c (revision 6de76ce2a90f54fecb0dba46dca08c99cef7aa08)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
24  * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
25  * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
26  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
27  */
28 
29 /*
30  * DVA-based Adjustable Replacement Cache
31  *
32  * While much of the theory of operation used here is
33  * based on the self-tuning, low overhead replacement cache
34  * presented by Megiddo and Modha at FAST 2003, there are some
35  * significant differences:
36  *
37  * 1. The Megiddo and Modha model assumes any page is evictable.
38  * Pages in its cache cannot be "locked" into memory.  This makes
39  * the eviction algorithm simple: evict the last page in the list.
40  * This also make the performance characteristics easy to reason
41  * about.  Our cache is not so simple.  At any given moment, some
42  * subset of the blocks in the cache are un-evictable because we
43  * have handed out a reference to them.  Blocks are only evictable
44  * when there are no external references active.  This makes
45  * eviction far more problematic:  we choose to evict the evictable
46  * blocks that are the "lowest" in the list.
47  *
48  * There are times when it is not possible to evict the requested
49  * space.  In these circumstances we are unable to adjust the cache
50  * size.  To prevent the cache growing unbounded at these times we
51  * implement a "cache throttle" that slows the flow of new data
52  * into the cache until we can make space available.
53  *
54  * 2. The Megiddo and Modha model assumes a fixed cache size.
55  * Pages are evicted when the cache is full and there is a cache
56  * miss.  Our model has a variable sized cache.  It grows with
57  * high use, but also tries to react to memory pressure from the
58  * operating system: decreasing its size when system memory is
59  * tight.
60  *
61  * 3. The Megiddo and Modha model assumes a fixed page size. All
62  * elements of the cache are therefore exactly the same size.  So
63  * when adjusting the cache size following a cache miss, its simply
64  * a matter of choosing a single page to evict.  In our model, we
65  * have variable sized cache blocks (rangeing from 512 bytes to
66  * 128K bytes).  We therefore choose a set of blocks to evict to make
67  * space for a cache miss that approximates as closely as possible
68  * the space used by the new block.
69  *
70  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
71  * by N. Megiddo & D. Modha, FAST 2003
72  */
73 
74 /*
75  * The locking model:
76  *
77  * A new reference to a cache buffer can be obtained in two
78  * ways: 1) via a hash table lookup using the DVA as a key,
79  * or 2) via one of the ARC lists.  The arc_read() interface
80  * uses method 1, while the internal ARC algorithms for
81  * adjusting the cache use method 2.  We therefore provide two
82  * types of locks: 1) the hash table lock array, and 2) the
83  * ARC list locks.
84  *
85  * Buffers do not have their own mutexes, rather they rely on the
86  * hash table mutexes for the bulk of their protection (i.e. most
87  * fields in the arc_buf_hdr_t are protected by these mutexes).
88  *
89  * buf_hash_find() returns the appropriate mutex (held) when it
90  * locates the requested buffer in the hash table.  It returns
91  * NULL for the mutex if the buffer was not in the table.
92  *
93  * buf_hash_remove() expects the appropriate hash mutex to be
94  * already held before it is invoked.
95  *
96  * Each ARC state also has a mutex which is used to protect the
97  * buffer list associated with the state.  When attempting to
98  * obtain a hash table lock while holding an ARC list lock you
99  * must use: mutex_tryenter() to avoid deadlock.  Also note that
100  * the active state mutex must be held before the ghost state mutex.
101  *
102  * Note that the majority of the performance stats are manipulated
103  * with atomic operations.
104  *
105  * The L2ARC uses the l2ad_mtx on each vdev for the following:
106  *
107  *	- L2ARC buflist creation
108  *	- L2ARC buflist eviction
109  *	- L2ARC write completion, which walks L2ARC buflists
110  *	- ARC header destruction, as it removes from L2ARC buflists
111  *	- ARC header release, as it removes from L2ARC buflists
112  */
113 
114 /*
115  * ARC operation:
116  *
117  * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
118  * This structure can point either to a block that is still in the cache or to
119  * one that is only accessible in an L2 ARC device, or it can provide
120  * information about a block that was recently evicted. If a block is
121  * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
122  * information to retrieve it from the L2ARC device. This information is
123  * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
124  * that is in this state cannot access the data directly.
125  *
126  * Blocks that are actively being referenced or have not been evicted
127  * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
128  * the arc_buf_hdr_t that will point to the data block in memory. A block can
129  * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
130  * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
131  * also in the arc_buf_hdr_t's private physical data block pointer (b_pdata).
132  *
133  * The L1ARC's data pointer may or may not be uncompressed. The ARC has the
134  * ability to store the physical data (b_pdata) associated with the DVA of the
135  * arc_buf_hdr_t. Since the b_pdata is a copy of the on-disk physical block,
136  * it will match its on-disk compression characteristics. This behavior can be
137  * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
138  * compressed ARC functionality is disabled, the b_pdata will point to an
139  * uncompressed version of the on-disk data.
140  *
141  * Data in the L1ARC is not accessed by consumers of the ARC directly. Each
142  * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
143  * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
144  * consumer. The ARC will provide references to this data and will keep it
145  * cached until it is no longer in use. The ARC caches only the L1ARC's physical
146  * data block and will evict any arc_buf_t that is no longer referenced. The
147  * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
148  * "overhead_size" kstat.
149  *
150  * Depending on the consumer, an arc_buf_t can be requested in uncompressed or
151  * compressed form. The typical case is that consumers will want uncompressed
152  * data, and when that happens a new data buffer is allocated where the data is
153  * decompressed for them to use. Currently the only consumer who wants
154  * compressed arc_buf_t's is "zfs send", when it streams data exactly as it
155  * exists on disk. When this happens, the arc_buf_t's data buffer is shared
156  * with the arc_buf_hdr_t.
157  *
158  * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
159  * first one is owned by a compressed send consumer (and therefore references
160  * the same compressed data buffer as the arc_buf_hdr_t) and the second could be
161  * used by any other consumer (and has its own uncompressed copy of the data
162  * buffer).
163  *
164  *   arc_buf_hdr_t
165  *   +-----------+
166  *   | fields    |
167  *   | common to |
168  *   | L1- and   |
169  *   | L2ARC     |
170  *   +-----------+
171  *   | l2arc_buf_hdr_t
172  *   |           |
173  *   +-----------+
174  *   | l1arc_buf_hdr_t
175  *   |           |              arc_buf_t
176  *   | b_buf     +------------>+-----------+      arc_buf_t
177  *   | b_pdata   +-+           |b_next     +---->+-----------+
178  *   +-----------+ |           |-----------|     |b_next     +-->NULL
179  *                 |           |b_comp = T |     +-----------+
180  *                 |           |b_data     +-+   |b_comp = F |
181  *                 |           +-----------+ |   |b_data     +-+
182  *                 +->+------+               |   +-----------+ |
183  *        compressed  |      |               |                 |
184  *           data     |      |<--------------+                 | uncompressed
185  *                    +------+          compressed,            |     data
186  *                                        shared               +-->+------+
187  *                                         data                    |      |
188  *                                                                 |      |
189  *                                                                 +------+
190  *
191  * When a consumer reads a block, the ARC must first look to see if the
192  * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
193  * arc_buf_t and either copies uncompressed data into a new data buffer from an
194  * existing uncompressed arc_buf_t, decompresses the hdr's b_pdata buffer into a
195  * new data buffer, or shares the hdr's b_pdata buffer, depending on whether the
196  * hdr is compressed and the desired compression characteristics of the
197  * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
198  * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
199  * the last buffer in the hdr's b_buf list, however a shared compressed buf can
200  * be anywhere in the hdr's list.
201  *
202  * The diagram below shows an example of an uncompressed ARC hdr that is
203  * sharing its data with an arc_buf_t (note that the shared uncompressed buf is
204  * the last element in the buf list):
205  *
206  *                arc_buf_hdr_t
207  *                +-----------+
208  *                |           |
209  *                |           |
210  *                |           |
211  *                +-----------+
212  * l2arc_buf_hdr_t|           |
213  *                |           |
214  *                +-----------+
215  * l1arc_buf_hdr_t|           |
216  *                |           |                 arc_buf_t    (shared)
217  *                |    b_buf  +------------>+---------+      arc_buf_t
218  *                |           |             |b_next   +---->+---------+
219  *                |  b_pdata  +-+           |---------|     |b_next   +-->NULL
220  *                +-----------+ |           |         |     +---------+
221  *                              |           |b_data   +-+   |         |
222  *                              |           +---------+ |   |b_data   +-+
223  *                              +->+------+             |   +---------+ |
224  *                                 |      |             |               |
225  *                   uncompressed  |      |             |               |
226  *                        data     +------+             |               |
227  *                                    ^                 +->+------+     |
228  *                                    |       uncompressed |      |     |
229  *                                    |           data     |      |     |
230  *                                    |                    +------+     |
231  *                                    +---------------------------------+
232  *
233  * Writing to the ARC requires that the ARC first discard the hdr's b_pdata
234  * since the physical block is about to be rewritten. The new data contents
235  * will be contained in the arc_buf_t. As the I/O pipeline performs the write,
236  * it may compress the data before writing it to disk. The ARC will be called
237  * with the transformed data and will bcopy the transformed on-disk block into
238  * a newly allocated b_pdata. Writes are always done into buffers which have
239  * either been loaned (and hence are new and don't have other readers) or
240  * buffers which have been released (and hence have their own hdr, if there
241  * were originally other readers of the buf's original hdr). This ensures that
242  * the ARC only needs to update a single buf and its hdr after a write occurs.
243  *
244  * When the L2ARC is in use, it will also take advantage of the b_pdata. The
245  * L2ARC will always write the contents of b_pdata to the L2ARC. This means
246  * that when compressed ARC is enabled that the L2ARC blocks are identical
247  * to the on-disk block in the main data pool. This provides a significant
248  * advantage since the ARC can leverage the bp's checksum when reading from the
249  * L2ARC to determine if the contents are valid. However, if the compressed
250  * ARC is disabled, then the L2ARC's block must be transformed to look
251  * like the physical block in the main data pool before comparing the
252  * checksum and determining its validity.
253  */
254 
255 #include <sys/spa.h>
256 #include <sys/zio.h>
257 #include <sys/spa_impl.h>
258 #include <sys/zio_compress.h>
259 #include <sys/zio_checksum.h>
260 #include <sys/zfs_context.h>
261 #include <sys/arc.h>
262 #include <sys/refcount.h>
263 #include <sys/vdev.h>
264 #include <sys/vdev_impl.h>
265 #include <sys/dsl_pool.h>
266 #include <sys/multilist.h>
267 #ifdef _KERNEL
268 #include <sys/vmsystm.h>
269 #include <vm/anon.h>
270 #include <sys/fs/swapnode.h>
271 #include <sys/dnlc.h>
272 #endif
273 #include <sys/callb.h>
274 #include <sys/kstat.h>
275 #include <zfs_fletcher.h>
276 
277 #ifndef _KERNEL
278 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
279 boolean_t arc_watch = B_FALSE;
280 int arc_procfd;
281 #endif
282 
283 static kmutex_t		arc_reclaim_lock;
284 static kcondvar_t	arc_reclaim_thread_cv;
285 static boolean_t	arc_reclaim_thread_exit;
286 static kcondvar_t	arc_reclaim_waiters_cv;
287 
288 uint_t arc_reduce_dnlc_percent = 3;
289 
290 /*
291  * The number of headers to evict in arc_evict_state_impl() before
292  * dropping the sublist lock and evicting from another sublist. A lower
293  * value means we're more likely to evict the "correct" header (i.e. the
294  * oldest header in the arc state), but comes with higher overhead
295  * (i.e. more invocations of arc_evict_state_impl()).
296  */
297 int zfs_arc_evict_batch_limit = 10;
298 
299 /*
300  * The number of sublists used for each of the arc state lists. If this
301  * is not set to a suitable value by the user, it will be configured to
302  * the number of CPUs on the system in arc_init().
303  */
304 int zfs_arc_num_sublists_per_state = 0;
305 
306 /* number of seconds before growing cache again */
307 static int		arc_grow_retry = 60;
308 
309 /* shift of arc_c for calculating overflow limit in arc_get_data_buf */
310 int		zfs_arc_overflow_shift = 8;
311 
312 /* shift of arc_c for calculating both min and max arc_p */
313 static int		arc_p_min_shift = 4;
314 
315 /* log2(fraction of arc to reclaim) */
316 static int		arc_shrink_shift = 7;
317 
318 /*
319  * log2(fraction of ARC which must be free to allow growing).
320  * I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
321  * when reading a new block into the ARC, we will evict an equal-sized block
322  * from the ARC.
323  *
324  * This must be less than arc_shrink_shift, so that when we shrink the ARC,
325  * we will still not allow it to grow.
326  */
327 int			arc_no_grow_shift = 5;
328 
329 
330 /*
331  * minimum lifespan of a prefetch block in clock ticks
332  * (initialized in arc_init())
333  */
334 static int		arc_min_prefetch_lifespan;
335 
336 /*
337  * If this percent of memory is free, don't throttle.
338  */
339 int arc_lotsfree_percent = 10;
340 
341 static int arc_dead;
342 
343 /*
344  * The arc has filled available memory and has now warmed up.
345  */
346 static boolean_t arc_warm;
347 
348 /*
349  * log2 fraction of the zio arena to keep free.
350  */
351 int arc_zio_arena_free_shift = 2;
352 
353 /*
354  * These tunables are for performance analysis.
355  */
356 uint64_t zfs_arc_max;
357 uint64_t zfs_arc_min;
358 uint64_t zfs_arc_meta_limit = 0;
359 uint64_t zfs_arc_meta_min = 0;
360 int zfs_arc_grow_retry = 0;
361 int zfs_arc_shrink_shift = 0;
362 int zfs_arc_p_min_shift = 0;
363 int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
364 
365 boolean_t zfs_compressed_arc_enabled = B_TRUE;
366 
367 /*
368  * Note that buffers can be in one of 6 states:
369  *	ARC_anon	- anonymous (discussed below)
370  *	ARC_mru		- recently used, currently cached
371  *	ARC_mru_ghost	- recentely used, no longer in cache
372  *	ARC_mfu		- frequently used, currently cached
373  *	ARC_mfu_ghost	- frequently used, no longer in cache
374  *	ARC_l2c_only	- exists in L2ARC but not other states
375  * When there are no active references to the buffer, they are
376  * are linked onto a list in one of these arc states.  These are
377  * the only buffers that can be evicted or deleted.  Within each
378  * state there are multiple lists, one for meta-data and one for
379  * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
380  * etc.) is tracked separately so that it can be managed more
381  * explicitly: favored over data, limited explicitly.
382  *
383  * Anonymous buffers are buffers that are not associated with
384  * a DVA.  These are buffers that hold dirty block copies
385  * before they are written to stable storage.  By definition,
386  * they are "ref'd" and are considered part of arc_mru
387  * that cannot be freed.  Generally, they will aquire a DVA
388  * as they are written and migrate onto the arc_mru list.
389  *
390  * The ARC_l2c_only state is for buffers that are in the second
391  * level ARC but no longer in any of the ARC_m* lists.  The second
392  * level ARC itself may also contain buffers that are in any of
393  * the ARC_m* states - meaning that a buffer can exist in two
394  * places.  The reason for the ARC_l2c_only state is to keep the
395  * buffer header in the hash table, so that reads that hit the
396  * second level ARC benefit from these fast lookups.
397  */
398 
399 typedef struct arc_state {
400 	/*
401 	 * list of evictable buffers
402 	 */
403 	multilist_t arcs_list[ARC_BUFC_NUMTYPES];
404 	/*
405 	 * total amount of evictable data in this state
406 	 */
407 	refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
408 	/*
409 	 * total amount of data in this state; this includes: evictable,
410 	 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
411 	 */
412 	refcount_t arcs_size;
413 } arc_state_t;
414 
415 /* The 6 states: */
416 static arc_state_t ARC_anon;
417 static arc_state_t ARC_mru;
418 static arc_state_t ARC_mru_ghost;
419 static arc_state_t ARC_mfu;
420 static arc_state_t ARC_mfu_ghost;
421 static arc_state_t ARC_l2c_only;
422 
423 typedef struct arc_stats {
424 	kstat_named_t arcstat_hits;
425 	kstat_named_t arcstat_misses;
426 	kstat_named_t arcstat_demand_data_hits;
427 	kstat_named_t arcstat_demand_data_misses;
428 	kstat_named_t arcstat_demand_metadata_hits;
429 	kstat_named_t arcstat_demand_metadata_misses;
430 	kstat_named_t arcstat_prefetch_data_hits;
431 	kstat_named_t arcstat_prefetch_data_misses;
432 	kstat_named_t arcstat_prefetch_metadata_hits;
433 	kstat_named_t arcstat_prefetch_metadata_misses;
434 	kstat_named_t arcstat_mru_hits;
435 	kstat_named_t arcstat_mru_ghost_hits;
436 	kstat_named_t arcstat_mfu_hits;
437 	kstat_named_t arcstat_mfu_ghost_hits;
438 	kstat_named_t arcstat_deleted;
439 	/*
440 	 * Number of buffers that could not be evicted because the hash lock
441 	 * was held by another thread.  The lock may not necessarily be held
442 	 * by something using the same buffer, since hash locks are shared
443 	 * by multiple buffers.
444 	 */
445 	kstat_named_t arcstat_mutex_miss;
446 	/*
447 	 * Number of buffers skipped because they have I/O in progress, are
448 	 * indrect prefetch buffers that have not lived long enough, or are
449 	 * not from the spa we're trying to evict from.
450 	 */
451 	kstat_named_t arcstat_evict_skip;
452 	/*
453 	 * Number of times arc_evict_state() was unable to evict enough
454 	 * buffers to reach it's target amount.
455 	 */
456 	kstat_named_t arcstat_evict_not_enough;
457 	kstat_named_t arcstat_evict_l2_cached;
458 	kstat_named_t arcstat_evict_l2_eligible;
459 	kstat_named_t arcstat_evict_l2_ineligible;
460 	kstat_named_t arcstat_evict_l2_skip;
461 	kstat_named_t arcstat_hash_elements;
462 	kstat_named_t arcstat_hash_elements_max;
463 	kstat_named_t arcstat_hash_collisions;
464 	kstat_named_t arcstat_hash_chains;
465 	kstat_named_t arcstat_hash_chain_max;
466 	kstat_named_t arcstat_p;
467 	kstat_named_t arcstat_c;
468 	kstat_named_t arcstat_c_min;
469 	kstat_named_t arcstat_c_max;
470 	kstat_named_t arcstat_size;
471 	/*
472 	 * Number of compressed bytes stored in the arc_buf_hdr_t's b_pdata.
473 	 * Note that the compressed bytes may match the uncompressed bytes
474 	 * if the block is either not compressed or compressed arc is disabled.
475 	 */
476 	kstat_named_t arcstat_compressed_size;
477 	/*
478 	 * Uncompressed size of the data stored in b_pdata. If compressed
479 	 * arc is disabled then this value will be identical to the stat
480 	 * above.
481 	 */
482 	kstat_named_t arcstat_uncompressed_size;
483 	/*
484 	 * Number of bytes stored in all the arc_buf_t's. This is classified
485 	 * as "overhead" since this data is typically short-lived and will
486 	 * be evicted from the arc when it becomes unreferenced unless the
487 	 * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
488 	 * values have been set (see comment in dbuf.c for more information).
489 	 */
490 	kstat_named_t arcstat_overhead_size;
491 	/*
492 	 * Number of bytes consumed by internal ARC structures necessary
493 	 * for tracking purposes; these structures are not actually
494 	 * backed by ARC buffers. This includes arc_buf_hdr_t structures
495 	 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
496 	 * caches), and arc_buf_t structures (allocated via arc_buf_t
497 	 * cache).
498 	 */
499 	kstat_named_t arcstat_hdr_size;
500 	/*
501 	 * Number of bytes consumed by ARC buffers of type equal to
502 	 * ARC_BUFC_DATA. This is generally consumed by buffers backing
503 	 * on disk user data (e.g. plain file contents).
504 	 */
505 	kstat_named_t arcstat_data_size;
506 	/*
507 	 * Number of bytes consumed by ARC buffers of type equal to
508 	 * ARC_BUFC_METADATA. This is generally consumed by buffers
509 	 * backing on disk data that is used for internal ZFS
510 	 * structures (e.g. ZAP, dnode, indirect blocks, etc).
511 	 */
512 	kstat_named_t arcstat_metadata_size;
513 	/*
514 	 * Number of bytes consumed by various buffers and structures
515 	 * not actually backed with ARC buffers. This includes bonus
516 	 * buffers (allocated directly via zio_buf_* functions),
517 	 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
518 	 * cache), and dnode_t structures (allocated via dnode_t cache).
519 	 */
520 	kstat_named_t arcstat_other_size;
521 	/*
522 	 * Total number of bytes consumed by ARC buffers residing in the
523 	 * arc_anon state. This includes *all* buffers in the arc_anon
524 	 * state; e.g. data, metadata, evictable, and unevictable buffers
525 	 * are all included in this value.
526 	 */
527 	kstat_named_t arcstat_anon_size;
528 	/*
529 	 * Number of bytes consumed by ARC buffers that meet the
530 	 * following criteria: backing buffers of type ARC_BUFC_DATA,
531 	 * residing in the arc_anon state, and are eligible for eviction
532 	 * (e.g. have no outstanding holds on the buffer).
533 	 */
534 	kstat_named_t arcstat_anon_evictable_data;
535 	/*
536 	 * Number of bytes consumed by ARC buffers that meet the
537 	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
538 	 * residing in the arc_anon state, and are eligible for eviction
539 	 * (e.g. have no outstanding holds on the buffer).
540 	 */
541 	kstat_named_t arcstat_anon_evictable_metadata;
542 	/*
543 	 * Total number of bytes consumed by ARC buffers residing in the
544 	 * arc_mru state. This includes *all* buffers in the arc_mru
545 	 * state; e.g. data, metadata, evictable, and unevictable buffers
546 	 * are all included in this value.
547 	 */
548 	kstat_named_t arcstat_mru_size;
549 	/*
550 	 * Number of bytes consumed by ARC buffers that meet the
551 	 * following criteria: backing buffers of type ARC_BUFC_DATA,
552 	 * residing in the arc_mru state, and are eligible for eviction
553 	 * (e.g. have no outstanding holds on the buffer).
554 	 */
555 	kstat_named_t arcstat_mru_evictable_data;
556 	/*
557 	 * Number of bytes consumed by ARC buffers that meet the
558 	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
559 	 * residing in the arc_mru state, and are eligible for eviction
560 	 * (e.g. have no outstanding holds on the buffer).
561 	 */
562 	kstat_named_t arcstat_mru_evictable_metadata;
563 	/*
564 	 * Total number of bytes that *would have been* consumed by ARC
565 	 * buffers in the arc_mru_ghost state. The key thing to note
566 	 * here, is the fact that this size doesn't actually indicate
567 	 * RAM consumption. The ghost lists only consist of headers and
568 	 * don't actually have ARC buffers linked off of these headers.
569 	 * Thus, *if* the headers had associated ARC buffers, these
570 	 * buffers *would have* consumed this number of bytes.
571 	 */
572 	kstat_named_t arcstat_mru_ghost_size;
573 	/*
574 	 * Number of bytes that *would have been* consumed by ARC
575 	 * buffers that are eligible for eviction, of type
576 	 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
577 	 */
578 	kstat_named_t arcstat_mru_ghost_evictable_data;
579 	/*
580 	 * Number of bytes that *would have been* consumed by ARC
581 	 * buffers that are eligible for eviction, of type
582 	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
583 	 */
584 	kstat_named_t arcstat_mru_ghost_evictable_metadata;
585 	/*
586 	 * Total number of bytes consumed by ARC buffers residing in the
587 	 * arc_mfu state. This includes *all* buffers in the arc_mfu
588 	 * state; e.g. data, metadata, evictable, and unevictable buffers
589 	 * are all included in this value.
590 	 */
591 	kstat_named_t arcstat_mfu_size;
592 	/*
593 	 * Number of bytes consumed by ARC buffers that are eligible for
594 	 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
595 	 * state.
596 	 */
597 	kstat_named_t arcstat_mfu_evictable_data;
598 	/*
599 	 * Number of bytes consumed by ARC buffers that are eligible for
600 	 * eviction, of type ARC_BUFC_METADATA, and reside in the
601 	 * arc_mfu state.
602 	 */
603 	kstat_named_t arcstat_mfu_evictable_metadata;
604 	/*
605 	 * Total number of bytes that *would have been* consumed by ARC
606 	 * buffers in the arc_mfu_ghost state. See the comment above
607 	 * arcstat_mru_ghost_size for more details.
608 	 */
609 	kstat_named_t arcstat_mfu_ghost_size;
610 	/*
611 	 * Number of bytes that *would have been* consumed by ARC
612 	 * buffers that are eligible for eviction, of type
613 	 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
614 	 */
615 	kstat_named_t arcstat_mfu_ghost_evictable_data;
616 	/*
617 	 * Number of bytes that *would have been* consumed by ARC
618 	 * buffers that are eligible for eviction, of type
619 	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
620 	 */
621 	kstat_named_t arcstat_mfu_ghost_evictable_metadata;
622 	kstat_named_t arcstat_l2_hits;
623 	kstat_named_t arcstat_l2_misses;
624 	kstat_named_t arcstat_l2_feeds;
625 	kstat_named_t arcstat_l2_rw_clash;
626 	kstat_named_t arcstat_l2_read_bytes;
627 	kstat_named_t arcstat_l2_write_bytes;
628 	kstat_named_t arcstat_l2_writes_sent;
629 	kstat_named_t arcstat_l2_writes_done;
630 	kstat_named_t arcstat_l2_writes_error;
631 	kstat_named_t arcstat_l2_writes_lock_retry;
632 	kstat_named_t arcstat_l2_evict_lock_retry;
633 	kstat_named_t arcstat_l2_evict_reading;
634 	kstat_named_t arcstat_l2_evict_l1cached;
635 	kstat_named_t arcstat_l2_free_on_write;
636 	kstat_named_t arcstat_l2_abort_lowmem;
637 	kstat_named_t arcstat_l2_cksum_bad;
638 	kstat_named_t arcstat_l2_io_error;
639 	kstat_named_t arcstat_l2_size;
640 	kstat_named_t arcstat_l2_asize;
641 	kstat_named_t arcstat_l2_hdr_size;
642 	kstat_named_t arcstat_memory_throttle_count;
643 	kstat_named_t arcstat_meta_used;
644 	kstat_named_t arcstat_meta_limit;
645 	kstat_named_t arcstat_meta_max;
646 	kstat_named_t arcstat_meta_min;
647 	kstat_named_t arcstat_sync_wait_for_async;
648 	kstat_named_t arcstat_demand_hit_predictive_prefetch;
649 } arc_stats_t;
650 
651 static arc_stats_t arc_stats = {
652 	{ "hits",			KSTAT_DATA_UINT64 },
653 	{ "misses",			KSTAT_DATA_UINT64 },
654 	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
655 	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
656 	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
657 	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
658 	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
659 	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
660 	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
661 	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
662 	{ "mru_hits",			KSTAT_DATA_UINT64 },
663 	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
664 	{ "mfu_hits",			KSTAT_DATA_UINT64 },
665 	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
666 	{ "deleted",			KSTAT_DATA_UINT64 },
667 	{ "mutex_miss",			KSTAT_DATA_UINT64 },
668 	{ "evict_skip",			KSTAT_DATA_UINT64 },
669 	{ "evict_not_enough",		KSTAT_DATA_UINT64 },
670 	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
671 	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
672 	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
673 	{ "evict_l2_skip",		KSTAT_DATA_UINT64 },
674 	{ "hash_elements",		KSTAT_DATA_UINT64 },
675 	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
676 	{ "hash_collisions",		KSTAT_DATA_UINT64 },
677 	{ "hash_chains",		KSTAT_DATA_UINT64 },
678 	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
679 	{ "p",				KSTAT_DATA_UINT64 },
680 	{ "c",				KSTAT_DATA_UINT64 },
681 	{ "c_min",			KSTAT_DATA_UINT64 },
682 	{ "c_max",			KSTAT_DATA_UINT64 },
683 	{ "size",			KSTAT_DATA_UINT64 },
684 	{ "compressed_size",		KSTAT_DATA_UINT64 },
685 	{ "uncompressed_size",		KSTAT_DATA_UINT64 },
686 	{ "overhead_size",		KSTAT_DATA_UINT64 },
687 	{ "hdr_size",			KSTAT_DATA_UINT64 },
688 	{ "data_size",			KSTAT_DATA_UINT64 },
689 	{ "metadata_size",		KSTAT_DATA_UINT64 },
690 	{ "other_size",			KSTAT_DATA_UINT64 },
691 	{ "anon_size",			KSTAT_DATA_UINT64 },
692 	{ "anon_evictable_data",	KSTAT_DATA_UINT64 },
693 	{ "anon_evictable_metadata",	KSTAT_DATA_UINT64 },
694 	{ "mru_size",			KSTAT_DATA_UINT64 },
695 	{ "mru_evictable_data",		KSTAT_DATA_UINT64 },
696 	{ "mru_evictable_metadata",	KSTAT_DATA_UINT64 },
697 	{ "mru_ghost_size",		KSTAT_DATA_UINT64 },
698 	{ "mru_ghost_evictable_data",	KSTAT_DATA_UINT64 },
699 	{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
700 	{ "mfu_size",			KSTAT_DATA_UINT64 },
701 	{ "mfu_evictable_data",		KSTAT_DATA_UINT64 },
702 	{ "mfu_evictable_metadata",	KSTAT_DATA_UINT64 },
703 	{ "mfu_ghost_size",		KSTAT_DATA_UINT64 },
704 	{ "mfu_ghost_evictable_data",	KSTAT_DATA_UINT64 },
705 	{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
706 	{ "l2_hits",			KSTAT_DATA_UINT64 },
707 	{ "l2_misses",			KSTAT_DATA_UINT64 },
708 	{ "l2_feeds",			KSTAT_DATA_UINT64 },
709 	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
710 	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
711 	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
712 	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
713 	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
714 	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
715 	{ "l2_writes_lock_retry",	KSTAT_DATA_UINT64 },
716 	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
717 	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
718 	{ "l2_evict_l1cached",		KSTAT_DATA_UINT64 },
719 	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
720 	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
721 	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
722 	{ "l2_io_error",		KSTAT_DATA_UINT64 },
723 	{ "l2_size",			KSTAT_DATA_UINT64 },
724 	{ "l2_asize",			KSTAT_DATA_UINT64 },
725 	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
726 	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
727 	{ "arc_meta_used",		KSTAT_DATA_UINT64 },
728 	{ "arc_meta_limit",		KSTAT_DATA_UINT64 },
729 	{ "arc_meta_max",		KSTAT_DATA_UINT64 },
730 	{ "arc_meta_min",		KSTAT_DATA_UINT64 },
731 	{ "sync_wait_for_async",	KSTAT_DATA_UINT64 },
732 	{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
733 };
734 
735 #define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)
736 
737 #define	ARCSTAT_INCR(stat, val) \
738 	atomic_add_64(&arc_stats.stat.value.ui64, (val))
739 
740 #define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
741 #define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)
742 
743 #define	ARCSTAT_MAX(stat, val) {					\
744 	uint64_t m;							\
745 	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
746 	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
747 		continue;						\
748 }
749 
750 #define	ARCSTAT_MAXSTAT(stat) \
751 	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
752 
753 /*
754  * We define a macro to allow ARC hits/misses to be easily broken down by
755  * two separate conditions, giving a total of four different subtypes for
756  * each of hits and misses (so eight statistics total).
757  */
758 #define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
759 	if (cond1) {							\
760 		if (cond2) {						\
761 			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
762 		} else {						\
763 			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
764 		}							\
765 	} else {							\
766 		if (cond2) {						\
767 			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
768 		} else {						\
769 			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
770 		}							\
771 	}
772 
773 kstat_t			*arc_ksp;
774 static arc_state_t	*arc_anon;
775 static arc_state_t	*arc_mru;
776 static arc_state_t	*arc_mru_ghost;
777 static arc_state_t	*arc_mfu;
778 static arc_state_t	*arc_mfu_ghost;
779 static arc_state_t	*arc_l2c_only;
780 
781 /*
782  * There are several ARC variables that are critical to export as kstats --
783  * but we don't want to have to grovel around in the kstat whenever we wish to
784  * manipulate them.  For these variables, we therefore define them to be in
785  * terms of the statistic variable.  This assures that we are not introducing
786  * the possibility of inconsistency by having shadow copies of the variables,
787  * while still allowing the code to be readable.
788  */
789 #define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
790 #define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
791 #define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
792 #define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
793 #define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
794 #define	arc_meta_limit	ARCSTAT(arcstat_meta_limit) /* max size for metadata */
795 #define	arc_meta_min	ARCSTAT(arcstat_meta_min) /* min size for metadata */
796 #define	arc_meta_used	ARCSTAT(arcstat_meta_used) /* size of metadata */
797 #define	arc_meta_max	ARCSTAT(arcstat_meta_max) /* max size of metadata */
798 
799 /* compressed size of entire arc */
800 #define	arc_compressed_size	ARCSTAT(arcstat_compressed_size)
801 /* uncompressed size of entire arc */
802 #define	arc_uncompressed_size	ARCSTAT(arcstat_uncompressed_size)
803 /* number of bytes in the arc from arc_buf_t's */
804 #define	arc_overhead_size	ARCSTAT(arcstat_overhead_size)
805 
806 static int		arc_no_grow;	/* Don't try to grow cache size */
807 static uint64_t		arc_tempreserve;
808 static uint64_t		arc_loaned_bytes;
809 
810 typedef struct arc_callback arc_callback_t;
811 
812 struct arc_callback {
813 	void			*acb_private;
814 	arc_done_func_t		*acb_done;
815 	arc_buf_t		*acb_buf;
816 	boolean_t		acb_compressed;
817 	zio_t			*acb_zio_dummy;
818 	arc_callback_t		*acb_next;
819 };
820 
821 typedef struct arc_write_callback arc_write_callback_t;
822 
823 struct arc_write_callback {
824 	void		*awcb_private;
825 	arc_done_func_t	*awcb_ready;
826 	arc_done_func_t	*awcb_children_ready;
827 	arc_done_func_t	*awcb_physdone;
828 	arc_done_func_t	*awcb_done;
829 	arc_buf_t	*awcb_buf;
830 };
831 
832 /*
833  * ARC buffers are separated into multiple structs as a memory saving measure:
834  *   - Common fields struct, always defined, and embedded within it:
835  *       - L2-only fields, always allocated but undefined when not in L2ARC
836  *       - L1-only fields, only allocated when in L1ARC
837  *
838  *           Buffer in L1                     Buffer only in L2
839  *    +------------------------+          +------------------------+
840  *    | arc_buf_hdr_t          |          | arc_buf_hdr_t          |
841  *    |                        |          |                        |
842  *    |                        |          |                        |
843  *    |                        |          |                        |
844  *    +------------------------+          +------------------------+
845  *    | l2arc_buf_hdr_t        |          | l2arc_buf_hdr_t        |
846  *    | (undefined if L1-only) |          |                        |
847  *    +------------------------+          +------------------------+
848  *    | l1arc_buf_hdr_t        |
849  *    |                        |
850  *    |                        |
851  *    |                        |
852  *    |                        |
853  *    +------------------------+
854  *
855  * Because it's possible for the L2ARC to become extremely large, we can wind
856  * up eating a lot of memory in L2ARC buffer headers, so the size of a header
857  * is minimized by only allocating the fields necessary for an L1-cached buffer
858  * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
859  * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple
860  * words in pointers. arc_hdr_realloc() is used to switch a header between
861  * these two allocation states.
862  */
863 typedef struct l1arc_buf_hdr {
864 	kmutex_t		b_freeze_lock;
865 	zio_cksum_t		*b_freeze_cksum;
866 #ifdef ZFS_DEBUG
867 	/*
868 	 * Used for debugging with kmem_flags - by allocating and freeing
869 	 * b_thawed when the buffer is thawed, we get a record of the stack
870 	 * trace that thawed it.
871 	 */
872 	void			*b_thawed;
873 #endif
874 
875 	arc_buf_t		*b_buf;
876 	uint32_t		b_bufcnt;
877 	/* for waiting on writes to complete */
878 	kcondvar_t		b_cv;
879 	uint8_t			b_byteswap;
880 
881 	/* protected by arc state mutex */
882 	arc_state_t		*b_state;
883 	multilist_node_t	b_arc_node;
884 
885 	/* updated atomically */
886 	clock_t			b_arc_access;
887 
888 	/* self protecting */
889 	refcount_t		b_refcnt;
890 
891 	arc_callback_t		*b_acb;
892 	void			*b_pdata;
893 } l1arc_buf_hdr_t;
894 
895 typedef struct l2arc_dev l2arc_dev_t;
896 
897 typedef struct l2arc_buf_hdr {
898 	/* protected by arc_buf_hdr mutex */
899 	l2arc_dev_t		*b_dev;		/* L2ARC device */
900 	uint64_t		b_daddr;	/* disk address, offset byte */
901 
902 	list_node_t		b_l2node;
903 } l2arc_buf_hdr_t;
904 
905 struct arc_buf_hdr {
906 	/* protected by hash lock */
907 	dva_t			b_dva;
908 	uint64_t		b_birth;
909 
910 	arc_buf_contents_t	b_type;
911 	arc_buf_hdr_t		*b_hash_next;
912 	arc_flags_t		b_flags;
913 
914 	/*
915 	 * This field stores the size of the data buffer after
916 	 * compression, and is set in the arc's zio completion handlers.
917 	 * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes).
918 	 *
919 	 * While the block pointers can store up to 32MB in their psize
920 	 * field, we can only store up to 32MB minus 512B. This is due
921 	 * to the bp using a bias of 1, whereas we use a bias of 0 (i.e.
922 	 * a field of zeros represents 512B in the bp). We can't use a
923 	 * bias of 1 since we need to reserve a psize of zero, here, to
924 	 * represent holes and embedded blocks.
925 	 *
926 	 * This isn't a problem in practice, since the maximum size of a
927 	 * buffer is limited to 16MB, so we never need to store 32MB in
928 	 * this field. Even in the upstream illumos code base, the
929 	 * maximum size of a buffer is limited to 16MB.
930 	 */
931 	uint16_t		b_psize;
932 
933 	/*
934 	 * This field stores the size of the data buffer before
935 	 * compression, and cannot change once set. It is in units
936 	 * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes)
937 	 */
938 	uint16_t		b_lsize;	/* immutable */
939 	uint64_t		b_spa;		/* immutable */
940 
941 	/* L2ARC fields. Undefined when not in L2ARC. */
942 	l2arc_buf_hdr_t		b_l2hdr;
943 	/* L1ARC fields. Undefined when in l2arc_only state */
944 	l1arc_buf_hdr_t		b_l1hdr;
945 };
946 
947 #define	GHOST_STATE(state)	\
948 	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
949 	(state) == arc_l2c_only)
950 
951 #define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
952 #define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
953 #define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_FLAG_IO_ERROR)
954 #define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_FLAG_PREFETCH)
955 #define	HDR_COMPRESSION_ENABLED(hdr)	\
956 	((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
957 
958 #define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_FLAG_L2CACHE)
959 #define	HDR_L2_READING(hdr)	\
960 	(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&	\
961 	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
962 #define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITING)
963 #define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
964 #define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
965 #define	HDR_SHARED_DATA(hdr)	((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
966 
967 #define	HDR_ISTYPE_METADATA(hdr)	\
968 	((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
969 #define	HDR_ISTYPE_DATA(hdr)	(!HDR_ISTYPE_METADATA(hdr))
970 
971 #define	HDR_HAS_L1HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
972 #define	HDR_HAS_L2HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
973 
974 /* For storing compression mode in b_flags */
975 #define	HDR_COMPRESS_OFFSET	(highbit64(ARC_FLAG_COMPRESS_0) - 1)
976 
977 #define	HDR_GET_COMPRESS(hdr)	((enum zio_compress)BF32_GET((hdr)->b_flags, \
978 	HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
979 #define	HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
980 	HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
981 
982 #define	ARC_BUF_LAST(buf)	((buf)->b_next == NULL)
983 #define	ARC_BUF_SHARED(buf)	((buf)->b_flags & ARC_BUF_FLAG_SHARED)
984 #define	ARC_BUF_COMPRESSED(buf)	((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
985 
986 /*
987  * Other sizes
988  */
989 
990 #define	HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
991 #define	HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
992 
993 /*
994  * Hash table routines
995  */
996 
997 #define	HT_LOCK_PAD	64
998 
999 struct ht_lock {
1000 	kmutex_t	ht_lock;
1001 #ifdef _KERNEL
1002 	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
1003 #endif
1004 };
1005 
1006 #define	BUF_LOCKS 256
1007 typedef struct buf_hash_table {
1008 	uint64_t ht_mask;
1009 	arc_buf_hdr_t **ht_table;
1010 	struct ht_lock ht_locks[BUF_LOCKS];
1011 } buf_hash_table_t;
1012 
1013 static buf_hash_table_t buf_hash_table;
1014 
1015 #define	BUF_HASH_INDEX(spa, dva, birth) \
1016 	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
1017 #define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
1018 #define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
1019 #define	HDR_LOCK(hdr) \
1020 	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
1021 
1022 uint64_t zfs_crc64_table[256];
1023 
1024 /*
1025  * Level 2 ARC
1026  */
1027 
1028 #define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
1029 #define	L2ARC_HEADROOM		2			/* num of writes */
1030 /*
1031  * If we discover during ARC scan any buffers to be compressed, we boost
1032  * our headroom for the next scanning cycle by this percentage multiple.
1033  */
1034 #define	L2ARC_HEADROOM_BOOST	200
1035 #define	L2ARC_FEED_SECS		1		/* caching interval secs */
1036 #define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */
1037 
1038 #define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
1039 #define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)
1040 
1041 /* L2ARC Performance Tunables */
1042 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
1043 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
1044 uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
1045 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
1046 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
1047 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
1048 boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
1049 boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
1050 boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */
1051 
1052 /*
1053  * L2ARC Internals
1054  */
1055 struct l2arc_dev {
1056 	vdev_t			*l2ad_vdev;	/* vdev */
1057 	spa_t			*l2ad_spa;	/* spa */
1058 	uint64_t		l2ad_hand;	/* next write location */
1059 	uint64_t		l2ad_start;	/* first addr on device */
1060 	uint64_t		l2ad_end;	/* last addr on device */
1061 	boolean_t		l2ad_first;	/* first sweep through */
1062 	boolean_t		l2ad_writing;	/* currently writing */
1063 	kmutex_t		l2ad_mtx;	/* lock for buffer list */
1064 	list_t			l2ad_buflist;	/* buffer list */
1065 	list_node_t		l2ad_node;	/* device list node */
1066 	refcount_t		l2ad_alloc;	/* allocated bytes */
1067 };
1068 
1069 static list_t L2ARC_dev_list;			/* device list */
1070 static list_t *l2arc_dev_list;			/* device list pointer */
1071 static kmutex_t l2arc_dev_mtx;			/* device list mutex */
1072 static l2arc_dev_t *l2arc_dev_last;		/* last device used */
1073 static list_t L2ARC_free_on_write;		/* free after write buf list */
1074 static list_t *l2arc_free_on_write;		/* free after write list ptr */
1075 static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
1076 static uint64_t l2arc_ndev;			/* number of devices */
1077 
1078 typedef struct l2arc_read_callback {
1079 	arc_buf_hdr_t		*l2rcb_hdr;		/* read header */
1080 	blkptr_t		l2rcb_bp;		/* original blkptr */
1081 	zbookmark_phys_t	l2rcb_zb;		/* original bookmark */
1082 	int			l2rcb_flags;		/* original flags */
1083 } l2arc_read_callback_t;
1084 
1085 typedef struct l2arc_write_callback {
1086 	l2arc_dev_t	*l2wcb_dev;		/* device info */
1087 	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
1088 } l2arc_write_callback_t;
1089 
1090 typedef struct l2arc_data_free {
1091 	/* protected by l2arc_free_on_write_mtx */
1092 	void		*l2df_data;
1093 	size_t		l2df_size;
1094 	arc_buf_contents_t l2df_type;
1095 	list_node_t	l2df_list_node;
1096 } l2arc_data_free_t;
1097 
1098 static kmutex_t l2arc_feed_thr_lock;
1099 static kcondvar_t l2arc_feed_thr_cv;
1100 static uint8_t l2arc_thread_exit;
1101 
1102 static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
1103 static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
1104 static void arc_hdr_free_pdata(arc_buf_hdr_t *hdr);
1105 static void arc_hdr_alloc_pdata(arc_buf_hdr_t *);
1106 static void arc_access(arc_buf_hdr_t *, kmutex_t *);
1107 static boolean_t arc_is_overflowing();
1108 static void arc_buf_watch(arc_buf_t *);
1109 
1110 static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
1111 static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
1112 static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
1113 static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
1114 
1115 static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
1116 static void l2arc_read_done(zio_t *);
1117 
1118 static uint64_t
1119 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
1120 {
1121 	uint8_t *vdva = (uint8_t *)dva;
1122 	uint64_t crc = -1ULL;
1123 	int i;
1124 
1125 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
1126 
1127 	for (i = 0; i < sizeof (dva_t); i++)
1128 		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
1129 
1130 	crc ^= (spa>>8) ^ birth;
1131 
1132 	return (crc);
1133 }
1134 
1135 #define	HDR_EMPTY(hdr)						\
1136 	((hdr)->b_dva.dva_word[0] == 0 &&			\
1137 	(hdr)->b_dva.dva_word[1] == 0)
1138 
1139 #define	HDR_EQUAL(spa, dva, birth, hdr)				\
1140 	((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
1141 	((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
1142 	((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
1143 
1144 static void
1145 buf_discard_identity(arc_buf_hdr_t *hdr)
1146 {
1147 	hdr->b_dva.dva_word[0] = 0;
1148 	hdr->b_dva.dva_word[1] = 0;
1149 	hdr->b_birth = 0;
1150 }
1151 
1152 static arc_buf_hdr_t *
1153 buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
1154 {
1155 	const dva_t *dva = BP_IDENTITY(bp);
1156 	uint64_t birth = BP_PHYSICAL_BIRTH(bp);
1157 	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
1158 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
1159 	arc_buf_hdr_t *hdr;
1160 
1161 	mutex_enter(hash_lock);
1162 	for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
1163 	    hdr = hdr->b_hash_next) {
1164 		if (HDR_EQUAL(spa, dva, birth, hdr)) {
1165 			*lockp = hash_lock;
1166 			return (hdr);
1167 		}
1168 	}
1169 	mutex_exit(hash_lock);
1170 	*lockp = NULL;
1171 	return (NULL);
1172 }
1173 
1174 /*
1175  * Insert an entry into the hash table.  If there is already an element
1176  * equal to elem in the hash table, then the already existing element
1177  * will be returned and the new element will not be inserted.
1178  * Otherwise returns NULL.
1179  * If lockp == NULL, the caller is assumed to already hold the hash lock.
1180  */
1181 static arc_buf_hdr_t *
1182 buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
1183 {
1184 	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1185 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
1186 	arc_buf_hdr_t *fhdr;
1187 	uint32_t i;
1188 
1189 	ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
1190 	ASSERT(hdr->b_birth != 0);
1191 	ASSERT(!HDR_IN_HASH_TABLE(hdr));
1192 
1193 	if (lockp != NULL) {
1194 		*lockp = hash_lock;
1195 		mutex_enter(hash_lock);
1196 	} else {
1197 		ASSERT(MUTEX_HELD(hash_lock));
1198 	}
1199 
1200 	for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
1201 	    fhdr = fhdr->b_hash_next, i++) {
1202 		if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
1203 			return (fhdr);
1204 	}
1205 
1206 	hdr->b_hash_next = buf_hash_table.ht_table[idx];
1207 	buf_hash_table.ht_table[idx] = hdr;
1208 	arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
1209 
1210 	/* collect some hash table performance data */
1211 	if (i > 0) {
1212 		ARCSTAT_BUMP(arcstat_hash_collisions);
1213 		if (i == 1)
1214 			ARCSTAT_BUMP(arcstat_hash_chains);
1215 
1216 		ARCSTAT_MAX(arcstat_hash_chain_max, i);
1217 	}
1218 
1219 	ARCSTAT_BUMP(arcstat_hash_elements);
1220 	ARCSTAT_MAXSTAT(arcstat_hash_elements);
1221 
1222 	return (NULL);
1223 }
1224 
1225 static void
1226 buf_hash_remove(arc_buf_hdr_t *hdr)
1227 {
1228 	arc_buf_hdr_t *fhdr, **hdrp;
1229 	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1230 
1231 	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
1232 	ASSERT(HDR_IN_HASH_TABLE(hdr));
1233 
1234 	hdrp = &buf_hash_table.ht_table[idx];
1235 	while ((fhdr = *hdrp) != hdr) {
1236 		ASSERT3P(fhdr, !=, NULL);
1237 		hdrp = &fhdr->b_hash_next;
1238 	}
1239 	*hdrp = hdr->b_hash_next;
1240 	hdr->b_hash_next = NULL;
1241 	arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
1242 
1243 	/* collect some hash table performance data */
1244 	ARCSTAT_BUMPDOWN(arcstat_hash_elements);
1245 
1246 	if (buf_hash_table.ht_table[idx] &&
1247 	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
1248 		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
1249 }
1250 
1251 /*
1252  * Global data structures and functions for the buf kmem cache.
1253  */
1254 static kmem_cache_t *hdr_full_cache;
1255 static kmem_cache_t *hdr_l2only_cache;
1256 static kmem_cache_t *buf_cache;
1257 
1258 static void
1259 buf_fini(void)
1260 {
1261 	int i;
1262 
1263 	kmem_free(buf_hash_table.ht_table,
1264 	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
1265 	for (i = 0; i < BUF_LOCKS; i++)
1266 		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
1267 	kmem_cache_destroy(hdr_full_cache);
1268 	kmem_cache_destroy(hdr_l2only_cache);
1269 	kmem_cache_destroy(buf_cache);
1270 }
1271 
1272 /*
1273  * Constructor callback - called when the cache is empty
1274  * and a new buf is requested.
1275  */
1276 /* ARGSUSED */
1277 static int
1278 hdr_full_cons(void *vbuf, void *unused, int kmflag)
1279 {
1280 	arc_buf_hdr_t *hdr = vbuf;
1281 
1282 	bzero(hdr, HDR_FULL_SIZE);
1283 	cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
1284 	refcount_create(&hdr->b_l1hdr.b_refcnt);
1285 	mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
1286 	multilist_link_init(&hdr->b_l1hdr.b_arc_node);
1287 	arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1288 
1289 	return (0);
1290 }
1291 
1292 /* ARGSUSED */
1293 static int
1294 hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
1295 {
1296 	arc_buf_hdr_t *hdr = vbuf;
1297 
1298 	bzero(hdr, HDR_L2ONLY_SIZE);
1299 	arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1300 
1301 	return (0);
1302 }
1303 
1304 /* ARGSUSED */
1305 static int
1306 buf_cons(void *vbuf, void *unused, int kmflag)
1307 {
1308 	arc_buf_t *buf = vbuf;
1309 
1310 	bzero(buf, sizeof (arc_buf_t));
1311 	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1312 	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1313 
1314 	return (0);
1315 }
1316 
1317 /*
1318  * Destructor callback - called when a cached buf is
1319  * no longer required.
1320  */
1321 /* ARGSUSED */
1322 static void
1323 hdr_full_dest(void *vbuf, void *unused)
1324 {
1325 	arc_buf_hdr_t *hdr = vbuf;
1326 
1327 	ASSERT(HDR_EMPTY(hdr));
1328 	cv_destroy(&hdr->b_l1hdr.b_cv);
1329 	refcount_destroy(&hdr->b_l1hdr.b_refcnt);
1330 	mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
1331 	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
1332 	arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1333 }
1334 
1335 /* ARGSUSED */
1336 static void
1337 hdr_l2only_dest(void *vbuf, void *unused)
1338 {
1339 	arc_buf_hdr_t *hdr = vbuf;
1340 
1341 	ASSERT(HDR_EMPTY(hdr));
1342 	arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1343 }
1344 
1345 /* ARGSUSED */
1346 static void
1347 buf_dest(void *vbuf, void *unused)
1348 {
1349 	arc_buf_t *buf = vbuf;
1350 
1351 	mutex_destroy(&buf->b_evict_lock);
1352 	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1353 }
1354 
1355 /*
1356  * Reclaim callback -- invoked when memory is low.
1357  */
1358 /* ARGSUSED */
1359 static void
1360 hdr_recl(void *unused)
1361 {
1362 	dprintf("hdr_recl called\n");
1363 	/*
1364 	 * umem calls the reclaim func when we destroy the buf cache,
1365 	 * which is after we do arc_fini().
1366 	 */
1367 	if (!arc_dead)
1368 		cv_signal(&arc_reclaim_thread_cv);
1369 }
1370 
1371 static void
1372 buf_init(void)
1373 {
1374 	uint64_t *ct;
1375 	uint64_t hsize = 1ULL << 12;
1376 	int i, j;
1377 
1378 	/*
1379 	 * The hash table is big enough to fill all of physical memory
1380 	 * with an average block size of zfs_arc_average_blocksize (default 8K).
1381 	 * By default, the table will take up
1382 	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
1383 	 */
1384 	while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
1385 		hsize <<= 1;
1386 retry:
1387 	buf_hash_table.ht_mask = hsize - 1;
1388 	buf_hash_table.ht_table =
1389 	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1390 	if (buf_hash_table.ht_table == NULL) {
1391 		ASSERT(hsize > (1ULL << 8));
1392 		hsize >>= 1;
1393 		goto retry;
1394 	}
1395 
1396 	hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
1397 	    0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);
1398 	hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
1399 	    HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl,
1400 	    NULL, NULL, 0);
1401 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1402 	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1403 
1404 	for (i = 0; i < 256; i++)
1405 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1406 			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1407 
1408 	for (i = 0; i < BUF_LOCKS; i++) {
1409 		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1410 		    NULL, MUTEX_DEFAULT, NULL);
1411 	}
1412 }
1413 
1414 /*
1415  * This is the size that the buf occupies in memory. If the buf is compressed,
1416  * it will correspond to the compressed size. You should use this method of
1417  * getting the buf size unless you explicitly need the logical size.
1418  */
1419 int32_t
1420 arc_buf_size(arc_buf_t *buf)
1421 {
1422 	return (ARC_BUF_COMPRESSED(buf) ?
1423 	    HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
1424 }
1425 
1426 int32_t
1427 arc_buf_lsize(arc_buf_t *buf)
1428 {
1429 	return (HDR_GET_LSIZE(buf->b_hdr));
1430 }
1431 
1432 enum zio_compress
1433 arc_get_compression(arc_buf_t *buf)
1434 {
1435 	return (ARC_BUF_COMPRESSED(buf) ?
1436 	    HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
1437 }
1438 
1439 #define	ARC_MINTIME	(hz>>4) /* 62 ms */
1440 
1441 static inline boolean_t
1442 arc_buf_is_shared(arc_buf_t *buf)
1443 {
1444 	boolean_t shared = (buf->b_data != NULL &&
1445 	    buf->b_data == buf->b_hdr->b_l1hdr.b_pdata);
1446 	IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
1447 	IMPLY(shared, ARC_BUF_SHARED(buf));
1448 	IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
1449 
1450 	/*
1451 	 * It would be nice to assert arc_can_share() too, but the "hdr isn't
1452 	 * already being shared" requirement prevents us from doing that.
1453 	 */
1454 
1455 	return (shared);
1456 }
1457 
1458 /*
1459  * Free the checksum associated with this header. If there is no checksum, this
1460  * is a no-op.
1461  */
1462 static inline void
1463 arc_cksum_free(arc_buf_hdr_t *hdr)
1464 {
1465 	ASSERT(HDR_HAS_L1HDR(hdr));
1466 	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
1467 	if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
1468 		kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
1469 		hdr->b_l1hdr.b_freeze_cksum = NULL;
1470 	}
1471 	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1472 }
1473 
1474 /*
1475  * Return true iff at least one of the bufs on hdr is not compressed.
1476  */
1477 static boolean_t
1478 arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
1479 {
1480 	for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
1481 		if (!ARC_BUF_COMPRESSED(b)) {
1482 			return (B_TRUE);
1483 		}
1484 	}
1485 	return (B_FALSE);
1486 }
1487 
1488 /*
1489  * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
1490  * matches the checksum that is stored in the hdr. If there is no checksum,
1491  * or if the buf is compressed, this is a no-op.
1492  */
1493 static void
1494 arc_cksum_verify(arc_buf_t *buf)
1495 {
1496 	arc_buf_hdr_t *hdr = buf->b_hdr;
1497 	zio_cksum_t zc;
1498 
1499 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1500 		return;
1501 
1502 	if (ARC_BUF_COMPRESSED(buf)) {
1503 		ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
1504 		    arc_hdr_has_uncompressed_buf(hdr));
1505 		return;
1506 	}
1507 
1508 	ASSERT(HDR_HAS_L1HDR(hdr));
1509 
1510 	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
1511 	if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
1512 		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1513 		return;
1514 	}
1515 
1516 	fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
1517 	if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
1518 		panic("buffer modified while frozen!");
1519 	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1520 }
1521 
1522 static boolean_t
1523 arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
1524 {
1525 	enum zio_compress compress = BP_GET_COMPRESS(zio->io_bp);
1526 	boolean_t valid_cksum;
1527 
1528 	ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
1529 	VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
1530 
1531 	/*
1532 	 * We rely on the blkptr's checksum to determine if the block
1533 	 * is valid or not. When compressed arc is enabled, the l2arc
1534 	 * writes the block to the l2arc just as it appears in the pool.
1535 	 * This allows us to use the blkptr's checksum to validate the
1536 	 * data that we just read off of the l2arc without having to store
1537 	 * a separate checksum in the arc_buf_hdr_t. However, if compressed
1538 	 * arc is disabled, then the data written to the l2arc is always
1539 	 * uncompressed and won't match the block as it exists in the main
1540 	 * pool. When this is the case, we must first compress it if it is
1541 	 * compressed on the main pool before we can validate the checksum.
1542 	 */
1543 	if (!HDR_COMPRESSION_ENABLED(hdr) && compress != ZIO_COMPRESS_OFF) {
1544 		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
1545 		uint64_t lsize = HDR_GET_LSIZE(hdr);
1546 		uint64_t csize;
1547 
1548 		void *cbuf = zio_buf_alloc(HDR_GET_PSIZE(hdr));
1549 		csize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
1550 		ASSERT3U(csize, <=, HDR_GET_PSIZE(hdr));
1551 		if (csize < HDR_GET_PSIZE(hdr)) {
1552 			/*
1553 			 * Compressed blocks are always a multiple of the
1554 			 * smallest ashift in the pool. Ideally, we would
1555 			 * like to round up the csize to the next
1556 			 * spa_min_ashift but that value may have changed
1557 			 * since the block was last written. Instead,
1558 			 * we rely on the fact that the hdr's psize
1559 			 * was set to the psize of the block when it was
1560 			 * last written. We set the csize to that value
1561 			 * and zero out any part that should not contain
1562 			 * data.
1563 			 */
1564 			bzero((char *)cbuf + csize, HDR_GET_PSIZE(hdr) - csize);
1565 			csize = HDR_GET_PSIZE(hdr);
1566 		}
1567 		zio_push_transform(zio, cbuf, csize, HDR_GET_PSIZE(hdr), NULL);
1568 	}
1569 
1570 	/*
1571 	 * Block pointers always store the checksum for the logical data.
1572 	 * If the block pointer has the gang bit set, then the checksum
1573 	 * it represents is for the reconstituted data and not for an
1574 	 * individual gang member. The zio pipeline, however, must be able to
1575 	 * determine the checksum of each of the gang constituents so it
1576 	 * treats the checksum comparison differently than what we need
1577 	 * for l2arc blocks. This prevents us from using the
1578 	 * zio_checksum_error() interface directly. Instead we must call the
1579 	 * zio_checksum_error_impl() so that we can ensure the checksum is
1580 	 * generated using the correct checksum algorithm and accounts for the
1581 	 * logical I/O size and not just a gang fragment.
1582 	 */
1583 	valid_cksum = (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
1584 	    BP_GET_CHECKSUM(zio->io_bp), zio->io_data, zio->io_size,
1585 	    zio->io_offset, NULL) == 0);
1586 	zio_pop_transforms(zio);
1587 	return (valid_cksum);
1588 }
1589 
1590 /*
1591  * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
1592  * checksum and attaches it to the buf's hdr so that we can ensure that the buf
1593  * isn't modified later on. If buf is compressed or there is already a checksum
1594  * on the hdr, this is a no-op (we only checksum uncompressed bufs).
1595  */
1596 static void
1597 arc_cksum_compute(arc_buf_t *buf)
1598 {
1599 	arc_buf_hdr_t *hdr = buf->b_hdr;
1600 
1601 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1602 		return;
1603 
1604 	ASSERT(HDR_HAS_L1HDR(hdr));
1605 
1606 	mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1607 	if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
1608 		ASSERT(arc_hdr_has_uncompressed_buf(hdr));
1609 		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1610 		return;
1611 	} else if (ARC_BUF_COMPRESSED(buf)) {
1612 		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1613 		return;
1614 	}
1615 
1616 	ASSERT(!ARC_BUF_COMPRESSED(buf));
1617 	hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
1618 	    KM_SLEEP);
1619 	fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
1620 	    hdr->b_l1hdr.b_freeze_cksum);
1621 	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1622 	arc_buf_watch(buf);
1623 }
1624 
1625 #ifndef _KERNEL
1626 typedef struct procctl {
1627 	long cmd;
1628 	prwatch_t prwatch;
1629 } procctl_t;
1630 #endif
1631 
1632 /* ARGSUSED */
1633 static void
1634 arc_buf_unwatch(arc_buf_t *buf)
1635 {
1636 #ifndef _KERNEL
1637 	if (arc_watch) {
1638 		int result;
1639 		procctl_t ctl;
1640 		ctl.cmd = PCWATCH;
1641 		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1642 		ctl.prwatch.pr_size = 0;
1643 		ctl.prwatch.pr_wflags = 0;
1644 		result = write(arc_procfd, &ctl, sizeof (ctl));
1645 		ASSERT3U(result, ==, sizeof (ctl));
1646 	}
1647 #endif
1648 }
1649 
1650 /* ARGSUSED */
1651 static void
1652 arc_buf_watch(arc_buf_t *buf)
1653 {
1654 #ifndef _KERNEL
1655 	if (arc_watch) {
1656 		int result;
1657 		procctl_t ctl;
1658 		ctl.cmd = PCWATCH;
1659 		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1660 		ctl.prwatch.pr_size = arc_buf_size(buf);
1661 		ctl.prwatch.pr_wflags = WA_WRITE;
1662 		result = write(arc_procfd, &ctl, sizeof (ctl));
1663 		ASSERT3U(result, ==, sizeof (ctl));
1664 	}
1665 #endif
1666 }
1667 
1668 static arc_buf_contents_t
1669 arc_buf_type(arc_buf_hdr_t *hdr)
1670 {
1671 	arc_buf_contents_t type;
1672 	if (HDR_ISTYPE_METADATA(hdr)) {
1673 		type = ARC_BUFC_METADATA;
1674 	} else {
1675 		type = ARC_BUFC_DATA;
1676 	}
1677 	VERIFY3U(hdr->b_type, ==, type);
1678 	return (type);
1679 }
1680 
1681 boolean_t
1682 arc_is_metadata(arc_buf_t *buf)
1683 {
1684 	return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
1685 }
1686 
1687 static uint32_t
1688 arc_bufc_to_flags(arc_buf_contents_t type)
1689 {
1690 	switch (type) {
1691 	case ARC_BUFC_DATA:
1692 		/* metadata field is 0 if buffer contains normal data */
1693 		return (0);
1694 	case ARC_BUFC_METADATA:
1695 		return (ARC_FLAG_BUFC_METADATA);
1696 	default:
1697 		break;
1698 	}
1699 	panic("undefined ARC buffer type!");
1700 	return ((uint32_t)-1);
1701 }
1702 
1703 void
1704 arc_buf_thaw(arc_buf_t *buf)
1705 {
1706 	arc_buf_hdr_t *hdr = buf->b_hdr;
1707 
1708 	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
1709 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1710 
1711 	arc_cksum_verify(buf);
1712 
1713 	/*
1714 	 * Compressed buffers do not manipulate the b_freeze_cksum or
1715 	 * allocate b_thawed.
1716 	 */
1717 	if (ARC_BUF_COMPRESSED(buf)) {
1718 		ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
1719 		    arc_hdr_has_uncompressed_buf(hdr));
1720 		return;
1721 	}
1722 
1723 	ASSERT(HDR_HAS_L1HDR(hdr));
1724 	arc_cksum_free(hdr);
1725 
1726 	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
1727 #ifdef ZFS_DEBUG
1728 	if (zfs_flags & ZFS_DEBUG_MODIFY) {
1729 		if (hdr->b_l1hdr.b_thawed != NULL)
1730 			kmem_free(hdr->b_l1hdr.b_thawed, 1);
1731 		hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP);
1732 	}
1733 #endif
1734 
1735 	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1736 
1737 	arc_buf_unwatch(buf);
1738 }
1739 
1740 void
1741 arc_buf_freeze(arc_buf_t *buf)
1742 {
1743 	arc_buf_hdr_t *hdr = buf->b_hdr;
1744 	kmutex_t *hash_lock;
1745 
1746 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1747 		return;
1748 
1749 	if (ARC_BUF_COMPRESSED(buf)) {
1750 		ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
1751 		    arc_hdr_has_uncompressed_buf(hdr));
1752 		return;
1753 	}
1754 
1755 	hash_lock = HDR_LOCK(hdr);
1756 	mutex_enter(hash_lock);
1757 
1758 	ASSERT(HDR_HAS_L1HDR(hdr));
1759 	ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL ||
1760 	    hdr->b_l1hdr.b_state == arc_anon);
1761 	arc_cksum_compute(buf);
1762 	mutex_exit(hash_lock);
1763 }
1764 
1765 /*
1766  * The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
1767  * the following functions should be used to ensure that the flags are
1768  * updated in a thread-safe way. When manipulating the flags either
1769  * the hash_lock must be held or the hdr must be undiscoverable. This
1770  * ensures that we're not racing with any other threads when updating
1771  * the flags.
1772  */
1773 static inline void
1774 arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
1775 {
1776 	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
1777 	hdr->b_flags |= flags;
1778 }
1779 
1780 static inline void
1781 arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
1782 {
1783 	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
1784 	hdr->b_flags &= ~flags;
1785 }
1786 
1787 /*
1788  * Setting the compression bits in the arc_buf_hdr_t's b_flags is
1789  * done in a special way since we have to clear and set bits
1790  * at the same time. Consumers that wish to set the compression bits
1791  * must use this function to ensure that the flags are updated in
1792  * thread-safe manner.
1793  */
1794 static void
1795 arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
1796 {
1797 	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
1798 
1799 	/*
1800 	 * Holes and embedded blocks will always have a psize = 0 so
1801 	 * we ignore the compression of the blkptr and set the
1802 	 * arc_buf_hdr_t's compression to ZIO_COMPRESS_OFF.
1803 	 * Holes and embedded blocks remain anonymous so we don't
1804 	 * want to uncompress them. Mark them as uncompressed.
1805 	 */
1806 	if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
1807 		arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
1808 		HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
1809 		ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
1810 		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
1811 	} else {
1812 		arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
1813 		HDR_SET_COMPRESS(hdr, cmp);
1814 		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
1815 		ASSERT(HDR_COMPRESSION_ENABLED(hdr));
1816 	}
1817 }
1818 
1819 /*
1820  * Looks for another buf on the same hdr which has the data decompressed, copies
1821  * from it, and returns true. If no such buf exists, returns false.
1822  */
1823 static boolean_t
1824 arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
1825 {
1826 	arc_buf_hdr_t *hdr = buf->b_hdr;
1827 	boolean_t copied = B_FALSE;
1828 
1829 	ASSERT(HDR_HAS_L1HDR(hdr));
1830 	ASSERT3P(buf->b_data, !=, NULL);
1831 	ASSERT(!ARC_BUF_COMPRESSED(buf));
1832 
1833 	for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
1834 	    from = from->b_next) {
1835 		/* can't use our own data buffer */
1836 		if (from == buf) {
1837 			continue;
1838 		}
1839 
1840 		if (!ARC_BUF_COMPRESSED(from)) {
1841 			bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
1842 			copied = B_TRUE;
1843 			break;
1844 		}
1845 	}
1846 
1847 	/*
1848 	 * There were no decompressed bufs, so there should not be a
1849 	 * checksum on the hdr either.
1850 	 */
1851 	EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
1852 
1853 	return (copied);
1854 }
1855 
1856 /*
1857  * Given a buf that has a data buffer attached to it, this function will
1858  * efficiently fill the buf with data of the specified compression setting from
1859  * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
1860  * are already sharing a data buf, no copy is performed.
1861  *
1862  * If the buf is marked as compressed but uncompressed data was requested, this
1863  * will allocate a new data buffer for the buf, remove that flag, and fill the
1864  * buf with uncompressed data. You can't request a compressed buf on a hdr with
1865  * uncompressed data, and (since we haven't added support for it yet) if you
1866  * want compressed data your buf must already be marked as compressed and have
1867  * the correct-sized data buffer.
1868  */
1869 static int
1870 arc_buf_fill(arc_buf_t *buf, boolean_t compressed)
1871 {
1872 	arc_buf_hdr_t *hdr = buf->b_hdr;
1873 	boolean_t hdr_compressed = (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
1874 	dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
1875 
1876 	ASSERT3P(buf->b_data, !=, NULL);
1877 	IMPLY(compressed, hdr_compressed);
1878 	IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
1879 
1880 	if (hdr_compressed == compressed) {
1881 		if (!arc_buf_is_shared(buf)) {
1882 			bcopy(hdr->b_l1hdr.b_pdata, buf->b_data,
1883 			    arc_buf_size(buf));
1884 		}
1885 	} else {
1886 		ASSERT(hdr_compressed);
1887 		ASSERT(!compressed);
1888 		ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr));
1889 
1890 		/*
1891 		 * If the buf is sharing its data with the hdr, unlink it and
1892 		 * allocate a new data buffer for the buf.
1893 		 */
1894 		if (arc_buf_is_shared(buf)) {
1895 			ASSERT(ARC_BUF_COMPRESSED(buf));
1896 
1897 			/* We need to give the buf it's own b_data */
1898 			buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
1899 			buf->b_data =
1900 			    arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
1901 			arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
1902 
1903 			/* Previously overhead was 0; just add new overhead */
1904 			ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
1905 		} else if (ARC_BUF_COMPRESSED(buf)) {
1906 			/* We need to reallocate the buf's b_data */
1907 			arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
1908 			    buf);
1909 			buf->b_data =
1910 			    arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
1911 
1912 			/* We increased the size of b_data; update overhead */
1913 			ARCSTAT_INCR(arcstat_overhead_size,
1914 			    HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
1915 		}
1916 
1917 		/*
1918 		 * Regardless of the buf's previous compression settings, it
1919 		 * should not be compressed at the end of this function.
1920 		 */
1921 		buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
1922 
1923 		/*
1924 		 * Try copying the data from another buf which already has a
1925 		 * decompressed version. If that's not possible, it's time to
1926 		 * bite the bullet and decompress the data from the hdr.
1927 		 */
1928 		if (arc_buf_try_copy_decompressed_data(buf)) {
1929 			/* Skip byteswapping and checksumming (already done) */
1930 			ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, !=, NULL);
1931 			return (0);
1932 		} else {
1933 			int error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
1934 			    hdr->b_l1hdr.b_pdata, buf->b_data,
1935 			    HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
1936 
1937 			/*
1938 			 * Absent hardware errors or software bugs, this should
1939 			 * be impossible, but log it anyway so we can debug it.
1940 			 */
1941 			if (error != 0) {
1942 				zfs_dbgmsg(
1943 				    "hdr %p, compress %d, psize %d, lsize %d",
1944 				    hdr, HDR_GET_COMPRESS(hdr),
1945 				    HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
1946 				return (SET_ERROR(EIO));
1947 			}
1948 		}
1949 	}
1950 
1951 	/* Byteswap the buf's data if necessary */
1952 	if (bswap != DMU_BSWAP_NUMFUNCS) {
1953 		ASSERT(!HDR_SHARED_DATA(hdr));
1954 		ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
1955 		dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
1956 	}
1957 
1958 	/* Compute the hdr's checksum if necessary */
1959 	arc_cksum_compute(buf);
1960 
1961 	return (0);
1962 }
1963 
1964 int
1965 arc_decompress(arc_buf_t *buf)
1966 {
1967 	return (arc_buf_fill(buf, B_FALSE));
1968 }
1969 
1970 /*
1971  * Return the size of the block, b_pdata, that is stored in the arc_buf_hdr_t.
1972  */
1973 static uint64_t
1974 arc_hdr_size(arc_buf_hdr_t *hdr)
1975 {
1976 	uint64_t size;
1977 
1978 	if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
1979 	    HDR_GET_PSIZE(hdr) > 0) {
1980 		size = HDR_GET_PSIZE(hdr);
1981 	} else {
1982 		ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
1983 		size = HDR_GET_LSIZE(hdr);
1984 	}
1985 	return (size);
1986 }
1987 
1988 /*
1989  * Increment the amount of evictable space in the arc_state_t's refcount.
1990  * We account for the space used by the hdr and the arc buf individually
1991  * so that we can add and remove them from the refcount individually.
1992  */
1993 static void
1994 arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
1995 {
1996 	arc_buf_contents_t type = arc_buf_type(hdr);
1997 
1998 	ASSERT(HDR_HAS_L1HDR(hdr));
1999 
2000 	if (GHOST_STATE(state)) {
2001 		ASSERT0(hdr->b_l1hdr.b_bufcnt);
2002 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2003 		ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
2004 		(void) refcount_add_many(&state->arcs_esize[type],
2005 		    HDR_GET_LSIZE(hdr), hdr);
2006 		return;
2007 	}
2008 
2009 	ASSERT(!GHOST_STATE(state));
2010 	if (hdr->b_l1hdr.b_pdata != NULL) {
2011 		(void) refcount_add_many(&state->arcs_esize[type],
2012 		    arc_hdr_size(hdr), hdr);
2013 	}
2014 	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2015 	    buf = buf->b_next) {
2016 		if (arc_buf_is_shared(buf))
2017 			continue;
2018 		(void) refcount_add_many(&state->arcs_esize[type],
2019 		    arc_buf_size(buf), buf);
2020 	}
2021 }
2022 
2023 /*
2024  * Decrement the amount of evictable space in the arc_state_t's refcount.
2025  * We account for the space used by the hdr and the arc buf individually
2026  * so that we can add and remove them from the refcount individually.
2027  */
2028 static void
2029 arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
2030 {
2031 	arc_buf_contents_t type = arc_buf_type(hdr);
2032 
2033 	ASSERT(HDR_HAS_L1HDR(hdr));
2034 
2035 	if (GHOST_STATE(state)) {
2036 		ASSERT0(hdr->b_l1hdr.b_bufcnt);
2037 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2038 		ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
2039 		(void) refcount_remove_many(&state->arcs_esize[type],
2040 		    HDR_GET_LSIZE(hdr), hdr);
2041 		return;
2042 	}
2043 
2044 	ASSERT(!GHOST_STATE(state));
2045 	if (hdr->b_l1hdr.b_pdata != NULL) {
2046 		(void) refcount_remove_many(&state->arcs_esize[type],
2047 		    arc_hdr_size(hdr), hdr);
2048 	}
2049 	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2050 	    buf = buf->b_next) {
2051 		if (arc_buf_is_shared(buf))
2052 			continue;
2053 		(void) refcount_remove_many(&state->arcs_esize[type],
2054 		    arc_buf_size(buf), buf);
2055 	}
2056 }
2057 
2058 /*
2059  * Add a reference to this hdr indicating that someone is actively
2060  * referencing that memory. When the refcount transitions from 0 to 1,
2061  * we remove it from the respective arc_state_t list to indicate that
2062  * it is not evictable.
2063  */
2064 static void
2065 add_reference(arc_buf_hdr_t *hdr, void *tag)
2066 {
2067 	ASSERT(HDR_HAS_L1HDR(hdr));
2068 	if (!MUTEX_HELD(HDR_LOCK(hdr))) {
2069 		ASSERT(hdr->b_l1hdr.b_state == arc_anon);
2070 		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2071 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2072 	}
2073 
2074 	arc_state_t *state = hdr->b_l1hdr.b_state;
2075 
2076 	if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
2077 	    (state != arc_anon)) {
2078 		/* We don't use the L2-only state list. */
2079 		if (state != arc_l2c_only) {
2080 			multilist_remove(&state->arcs_list[arc_buf_type(hdr)],
2081 			    hdr);
2082 			arc_evictable_space_decrement(hdr, state);
2083 		}
2084 		/* remove the prefetch flag if we get a reference */
2085 		arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
2086 	}
2087 }
2088 
2089 /*
2090  * Remove a reference from this hdr. When the reference transitions from
2091  * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
2092  * list making it eligible for eviction.
2093  */
2094 static int
2095 remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
2096 {
2097 	int cnt;
2098 	arc_state_t *state = hdr->b_l1hdr.b_state;
2099 
2100 	ASSERT(HDR_HAS_L1HDR(hdr));
2101 	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
2102 	ASSERT(!GHOST_STATE(state));
2103 
2104 	/*
2105 	 * arc_l2c_only counts as a ghost state so we don't need to explicitly
2106 	 * check to prevent usage of the arc_l2c_only list.
2107 	 */
2108 	if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
2109 	    (state != arc_anon)) {
2110 		multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
2111 		ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
2112 		arc_evictable_space_increment(hdr, state);
2113 	}
2114 	return (cnt);
2115 }
2116 
2117 /*
2118  * Move the supplied buffer to the indicated state. The hash lock
2119  * for the buffer must be held by the caller.
2120  */
2121 static void
2122 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
2123     kmutex_t *hash_lock)
2124 {
2125 	arc_state_t *old_state;
2126 	int64_t refcnt;
2127 	uint32_t bufcnt;
2128 	boolean_t update_old, update_new;
2129 	arc_buf_contents_t buftype = arc_buf_type(hdr);
2130 
2131 	/*
2132 	 * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
2133 	 * in arc_read() when bringing a buffer out of the L2ARC.  However, the
2134 	 * L1 hdr doesn't always exist when we change state to arc_anon before
2135 	 * destroying a header, in which case reallocating to add the L1 hdr is
2136 	 * pointless.
2137 	 */
2138 	if (HDR_HAS_L1HDR(hdr)) {
2139 		old_state = hdr->b_l1hdr.b_state;
2140 		refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
2141 		bufcnt = hdr->b_l1hdr.b_bufcnt;
2142 		update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pdata != NULL);
2143 	} else {
2144 		old_state = arc_l2c_only;
2145 		refcnt = 0;
2146 		bufcnt = 0;
2147 		update_old = B_FALSE;
2148 	}
2149 	update_new = update_old;
2150 
2151 	ASSERT(MUTEX_HELD(hash_lock));
2152 	ASSERT3P(new_state, !=, old_state);
2153 	ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
2154 	ASSERT(old_state != arc_anon || bufcnt <= 1);
2155 
2156 	/*
2157 	 * If this buffer is evictable, transfer it from the
2158 	 * old state list to the new state list.
2159 	 */
2160 	if (refcnt == 0) {
2161 		if (old_state != arc_anon && old_state != arc_l2c_only) {
2162 			ASSERT(HDR_HAS_L1HDR(hdr));
2163 			multilist_remove(&old_state->arcs_list[buftype], hdr);
2164 
2165 			if (GHOST_STATE(old_state)) {
2166 				ASSERT0(bufcnt);
2167 				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2168 				update_old = B_TRUE;
2169 			}
2170 			arc_evictable_space_decrement(hdr, old_state);
2171 		}
2172 		if (new_state != arc_anon && new_state != arc_l2c_only) {
2173 
2174 			/*
2175 			 * An L1 header always exists here, since if we're
2176 			 * moving to some L1-cached state (i.e. not l2c_only or
2177 			 * anonymous), we realloc the header to add an L1hdr
2178 			 * beforehand.
2179 			 */
2180 			ASSERT(HDR_HAS_L1HDR(hdr));
2181 			multilist_insert(&new_state->arcs_list[buftype], hdr);
2182 
2183 			if (GHOST_STATE(new_state)) {
2184 				ASSERT0(bufcnt);
2185 				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2186 				update_new = B_TRUE;
2187 			}
2188 			arc_evictable_space_increment(hdr, new_state);
2189 		}
2190 	}
2191 
2192 	ASSERT(!HDR_EMPTY(hdr));
2193 	if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
2194 		buf_hash_remove(hdr);
2195 
2196 	/* adjust state sizes (ignore arc_l2c_only) */
2197 
2198 	if (update_new && new_state != arc_l2c_only) {
2199 		ASSERT(HDR_HAS_L1HDR(hdr));
2200 		if (GHOST_STATE(new_state)) {
2201 			ASSERT0(bufcnt);
2202 
2203 			/*
2204 			 * When moving a header to a ghost state, we first
2205 			 * remove all arc buffers. Thus, we'll have a
2206 			 * bufcnt of zero, and no arc buffer to use for
2207 			 * the reference. As a result, we use the arc
2208 			 * header pointer for the reference.
2209 			 */
2210 			(void) refcount_add_many(&new_state->arcs_size,
2211 			    HDR_GET_LSIZE(hdr), hdr);
2212 			ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
2213 		} else {
2214 			uint32_t buffers = 0;
2215 
2216 			/*
2217 			 * Each individual buffer holds a unique reference,
2218 			 * thus we must remove each of these references one
2219 			 * at a time.
2220 			 */
2221 			for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2222 			    buf = buf->b_next) {
2223 				ASSERT3U(bufcnt, !=, 0);
2224 				buffers++;
2225 
2226 				/*
2227 				 * When the arc_buf_t is sharing the data
2228 				 * block with the hdr, the owner of the
2229 				 * reference belongs to the hdr. Only
2230 				 * add to the refcount if the arc_buf_t is
2231 				 * not shared.
2232 				 */
2233 				if (arc_buf_is_shared(buf))
2234 					continue;
2235 
2236 				(void) refcount_add_many(&new_state->arcs_size,
2237 				    arc_buf_size(buf), buf);
2238 			}
2239 			ASSERT3U(bufcnt, ==, buffers);
2240 
2241 			if (hdr->b_l1hdr.b_pdata != NULL) {
2242 				(void) refcount_add_many(&new_state->arcs_size,
2243 				    arc_hdr_size(hdr), hdr);
2244 			} else {
2245 				ASSERT(GHOST_STATE(old_state));
2246 			}
2247 		}
2248 	}
2249 
2250 	if (update_old && old_state != arc_l2c_only) {
2251 		ASSERT(HDR_HAS_L1HDR(hdr));
2252 		if (GHOST_STATE(old_state)) {
2253 			ASSERT0(bufcnt);
2254 			ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
2255 
2256 			/*
2257 			 * When moving a header off of a ghost state,
2258 			 * the header will not contain any arc buffers.
2259 			 * We use the arc header pointer for the reference
2260 			 * which is exactly what we did when we put the
2261 			 * header on the ghost state.
2262 			 */
2263 
2264 			(void) refcount_remove_many(&old_state->arcs_size,
2265 			    HDR_GET_LSIZE(hdr), hdr);
2266 		} else {
2267 			uint32_t buffers = 0;
2268 
2269 			/*
2270 			 * Each individual buffer holds a unique reference,
2271 			 * thus we must remove each of these references one
2272 			 * at a time.
2273 			 */
2274 			for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2275 			    buf = buf->b_next) {
2276 				ASSERT3U(bufcnt, !=, 0);
2277 				buffers++;
2278 
2279 				/*
2280 				 * When the arc_buf_t is sharing the data
2281 				 * block with the hdr, the owner of the
2282 				 * reference belongs to the hdr. Only
2283 				 * add to the refcount if the arc_buf_t is
2284 				 * not shared.
2285 				 */
2286 				if (arc_buf_is_shared(buf))
2287 					continue;
2288 
2289 				(void) refcount_remove_many(
2290 				    &old_state->arcs_size, arc_buf_size(buf),
2291 				    buf);
2292 			}
2293 			ASSERT3U(bufcnt, ==, buffers);
2294 			ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
2295 			(void) refcount_remove_many(
2296 			    &old_state->arcs_size, arc_hdr_size(hdr), hdr);
2297 		}
2298 	}
2299 
2300 	if (HDR_HAS_L1HDR(hdr))
2301 		hdr->b_l1hdr.b_state = new_state;
2302 
2303 	/*
2304 	 * L2 headers should never be on the L2 state list since they don't
2305 	 * have L1 headers allocated.
2306 	 */
2307 	ASSERT(multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
2308 	    multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
2309 }
2310 
2311 void
2312 arc_space_consume(uint64_t space, arc_space_type_t type)
2313 {
2314 	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
2315 
2316 	switch (type) {
2317 	case ARC_SPACE_DATA:
2318 		ARCSTAT_INCR(arcstat_data_size, space);
2319 		break;
2320 	case ARC_SPACE_META:
2321 		ARCSTAT_INCR(arcstat_metadata_size, space);
2322 		break;
2323 	case ARC_SPACE_OTHER:
2324 		ARCSTAT_INCR(arcstat_other_size, space);
2325 		break;
2326 	case ARC_SPACE_HDRS:
2327 		ARCSTAT_INCR(arcstat_hdr_size, space);
2328 		break;
2329 	case ARC_SPACE_L2HDRS:
2330 		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
2331 		break;
2332 	}
2333 
2334 	if (type != ARC_SPACE_DATA)
2335 		ARCSTAT_INCR(arcstat_meta_used, space);
2336 
2337 	atomic_add_64(&arc_size, space);
2338 }
2339 
2340 void
2341 arc_space_return(uint64_t space, arc_space_type_t type)
2342 {
2343 	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
2344 
2345 	switch (type) {
2346 	case ARC_SPACE_DATA:
2347 		ARCSTAT_INCR(arcstat_data_size, -space);
2348 		break;
2349 	case ARC_SPACE_META:
2350 		ARCSTAT_INCR(arcstat_metadata_size, -space);
2351 		break;
2352 	case ARC_SPACE_OTHER:
2353 		ARCSTAT_INCR(arcstat_other_size, -space);
2354 		break;
2355 	case ARC_SPACE_HDRS:
2356 		ARCSTAT_INCR(arcstat_hdr_size, -space);
2357 		break;
2358 	case ARC_SPACE_L2HDRS:
2359 		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
2360 		break;
2361 	}
2362 
2363 	if (type != ARC_SPACE_DATA) {
2364 		ASSERT(arc_meta_used >= space);
2365 		if (arc_meta_max < arc_meta_used)
2366 			arc_meta_max = arc_meta_used;
2367 		ARCSTAT_INCR(arcstat_meta_used, -space);
2368 	}
2369 
2370 	ASSERT(arc_size >= space);
2371 	atomic_add_64(&arc_size, -space);
2372 }
2373 
2374 /*
2375  * Given a hdr and a buf, returns whether that buf can share its b_data buffer
2376  * with the hdr's b_pdata.
2377  */
2378 static boolean_t
2379 arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
2380 {
2381 	/*
2382 	 * The criteria for sharing a hdr's data are:
2383 	 * 1. the hdr's compression matches the buf's compression
2384 	 * 2. the hdr doesn't need to be byteswapped
2385 	 * 3. the hdr isn't already being shared
2386 	 * 4. the buf is either compressed or it is the last buf in the hdr list
2387 	 *
2388 	 * Criterion #4 maintains the invariant that shared uncompressed
2389 	 * bufs must be the final buf in the hdr's b_buf list. Reading this, you
2390 	 * might ask, "if a compressed buf is allocated first, won't that be the
2391 	 * last thing in the list?", but in that case it's impossible to create
2392 	 * a shared uncompressed buf anyway (because the hdr must be compressed
2393 	 * to have the compressed buf). You might also think that #3 is
2394 	 * sufficient to make this guarantee, however it's possible
2395 	 * (specifically in the rare L2ARC write race mentioned in
2396 	 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
2397 	 * is sharable, but wasn't at the time of its allocation. Rather than
2398 	 * allow a new shared uncompressed buf to be created and then shuffle
2399 	 * the list around to make it the last element, this simply disallows
2400 	 * sharing if the new buf isn't the first to be added.
2401 	 */
2402 	ASSERT3P(buf->b_hdr, ==, hdr);
2403 	boolean_t hdr_compressed = HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF;
2404 	boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
2405 	return (buf_compressed == hdr_compressed &&
2406 	    hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
2407 	    !HDR_SHARED_DATA(hdr) &&
2408 	    (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
2409 }
2410 
2411 /*
2412  * Allocate a buf for this hdr. If you care about the data that's in the hdr,
2413  * or if you want a compressed buffer, pass those flags in. Returns 0 if the
2414  * copy was made successfully, or an error code otherwise.
2415  */
2416 static int
2417 arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed,
2418     boolean_t fill, arc_buf_t **ret)
2419 {
2420 	arc_buf_t *buf;
2421 
2422 	ASSERT(HDR_HAS_L1HDR(hdr));
2423 	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
2424 	VERIFY(hdr->b_type == ARC_BUFC_DATA ||
2425 	    hdr->b_type == ARC_BUFC_METADATA);
2426 	ASSERT3P(ret, !=, NULL);
2427 	ASSERT3P(*ret, ==, NULL);
2428 
2429 	buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2430 	buf->b_hdr = hdr;
2431 	buf->b_data = NULL;
2432 	buf->b_next = hdr->b_l1hdr.b_buf;
2433 	buf->b_flags = 0;
2434 
2435 	add_reference(hdr, tag);
2436 
2437 	/*
2438 	 * We're about to change the hdr's b_flags. We must either
2439 	 * hold the hash_lock or be undiscoverable.
2440 	 */
2441 	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2442 
2443 	/*
2444 	 * Only honor requests for compressed bufs if the hdr is actually
2445 	 * compressed.
2446 	 */
2447 	if (compressed && HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
2448 		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
2449 
2450 	/*
2451 	 * If the hdr's data can be shared then we share the data buffer and
2452 	 * set the appropriate bit in the hdr's b_flags to indicate the hdr is
2453 	 * sharing it's b_pdata with the arc_buf_t. Otherwise, we allocate a new
2454 	 * buffer to store the buf's data.
2455 	 *
2456 	 * There is one additional restriction here because we're sharing
2457 	 * hdr -> buf instead of the usual buf -> hdr: the hdr can't be actively
2458 	 * involved in an L2ARC write, because if this buf is used by an
2459 	 * arc_write() then the hdr's data buffer will be released when the
2460 	 * write completes, even though the L2ARC write might still be using it.
2461 	 */
2462 	boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr);
2463 
2464 	/* Set up b_data and sharing */
2465 	if (can_share) {
2466 		buf->b_data = hdr->b_l1hdr.b_pdata;
2467 		buf->b_flags |= ARC_BUF_FLAG_SHARED;
2468 		arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
2469 	} else {
2470 		buf->b_data =
2471 		    arc_get_data_buf(hdr, arc_buf_size(buf), buf);
2472 		ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
2473 	}
2474 	VERIFY3P(buf->b_data, !=, NULL);
2475 
2476 	hdr->b_l1hdr.b_buf = buf;
2477 	hdr->b_l1hdr.b_bufcnt += 1;
2478 
2479 	/*
2480 	 * If the user wants the data from the hdr, we need to either copy or
2481 	 * decompress the data.
2482 	 */
2483 	if (fill) {
2484 		return (arc_buf_fill(buf, ARC_BUF_COMPRESSED(buf) != 0));
2485 	}
2486 
2487 	return (0);
2488 }
2489 
2490 static char *arc_onloan_tag = "onloan";
2491 
2492 static inline void
2493 arc_loaned_bytes_update(int64_t delta)
2494 {
2495 	atomic_add_64(&arc_loaned_bytes, delta);
2496 
2497 	/* assert that it did not wrap around */
2498 	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
2499 }
2500 
2501 /*
2502  * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
2503  * flight data by arc_tempreserve_space() until they are "returned". Loaned
2504  * buffers must be returned to the arc before they can be used by the DMU or
2505  * freed.
2506  */
2507 arc_buf_t *
2508 arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
2509 {
2510 	arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
2511 	    is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
2512 
2513 	arc_loaned_bytes_update(size);
2514 
2515 	return (buf);
2516 }
2517 
2518 arc_buf_t *
2519 arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
2520     enum zio_compress compression_type)
2521 {
2522 	arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
2523 	    psize, lsize, compression_type);
2524 
2525 	arc_loaned_bytes_update(psize);
2526 
2527 	return (buf);
2528 }
2529 
2530 
2531 /*
2532  * Return a loaned arc buffer to the arc.
2533  */
2534 void
2535 arc_return_buf(arc_buf_t *buf, void *tag)
2536 {
2537 	arc_buf_hdr_t *hdr = buf->b_hdr;
2538 
2539 	ASSERT3P(buf->b_data, !=, NULL);
2540 	ASSERT(HDR_HAS_L1HDR(hdr));
2541 	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
2542 	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
2543 
2544 	arc_loaned_bytes_update(-arc_buf_size(buf));
2545 }
2546 
2547 /* Detach an arc_buf from a dbuf (tag) */
2548 void
2549 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
2550 {
2551 	arc_buf_hdr_t *hdr = buf->b_hdr;
2552 
2553 	ASSERT3P(buf->b_data, !=, NULL);
2554 	ASSERT(HDR_HAS_L1HDR(hdr));
2555 	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
2556 	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
2557 
2558 	arc_loaned_bytes_update(arc_buf_size(buf));
2559 }
2560 
2561 static void
2562 l2arc_free_data_on_write(void *data, size_t size, arc_buf_contents_t type)
2563 {
2564 	l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
2565 
2566 	df->l2df_data = data;
2567 	df->l2df_size = size;
2568 	df->l2df_type = type;
2569 	mutex_enter(&l2arc_free_on_write_mtx);
2570 	list_insert_head(l2arc_free_on_write, df);
2571 	mutex_exit(&l2arc_free_on_write_mtx);
2572 }
2573 
2574 static void
2575 arc_hdr_free_on_write(arc_buf_hdr_t *hdr)
2576 {
2577 	arc_state_t *state = hdr->b_l1hdr.b_state;
2578 	arc_buf_contents_t type = arc_buf_type(hdr);
2579 	uint64_t size = arc_hdr_size(hdr);
2580 
2581 	/* protected by hash lock, if in the hash table */
2582 	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
2583 		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2584 		ASSERT(state != arc_anon && state != arc_l2c_only);
2585 
2586 		(void) refcount_remove_many(&state->arcs_esize[type],
2587 		    size, hdr);
2588 	}
2589 	(void) refcount_remove_many(&state->arcs_size, size, hdr);
2590 	if (type == ARC_BUFC_METADATA) {
2591 		arc_space_return(size, ARC_SPACE_META);
2592 	} else {
2593 		ASSERT(type == ARC_BUFC_DATA);
2594 		arc_space_return(size, ARC_SPACE_DATA);
2595 	}
2596 
2597 	l2arc_free_data_on_write(hdr->b_l1hdr.b_pdata, size, type);
2598 }
2599 
2600 /*
2601  * Share the arc_buf_t's data with the hdr. Whenever we are sharing the
2602  * data buffer, we transfer the refcount ownership to the hdr and update
2603  * the appropriate kstats.
2604  */
2605 static void
2606 arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
2607 {
2608 	arc_state_t *state = hdr->b_l1hdr.b_state;
2609 
2610 	ASSERT(arc_can_share(hdr, buf));
2611 	ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
2612 	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2613 
2614 	/*
2615 	 * Start sharing the data buffer. We transfer the
2616 	 * refcount ownership to the hdr since it always owns
2617 	 * the refcount whenever an arc_buf_t is shared.
2618 	 */
2619 	refcount_transfer_ownership(&state->arcs_size, buf, hdr);
2620 	hdr->b_l1hdr.b_pdata = buf->b_data;
2621 	arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
2622 	buf->b_flags |= ARC_BUF_FLAG_SHARED;
2623 
2624 	/*
2625 	 * Since we've transferred ownership to the hdr we need
2626 	 * to increment its compressed and uncompressed kstats and
2627 	 * decrement the overhead size.
2628 	 */
2629 	ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
2630 	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
2631 	ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
2632 }
2633 
2634 static void
2635 arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
2636 {
2637 	arc_state_t *state = hdr->b_l1hdr.b_state;
2638 
2639 	ASSERT(arc_buf_is_shared(buf));
2640 	ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
2641 	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2642 
2643 	/*
2644 	 * We are no longer sharing this buffer so we need
2645 	 * to transfer its ownership to the rightful owner.
2646 	 */
2647 	refcount_transfer_ownership(&state->arcs_size, hdr, buf);
2648 	arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
2649 	hdr->b_l1hdr.b_pdata = NULL;
2650 	buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
2651 
2652 	/*
2653 	 * Since the buffer is no longer shared between
2654 	 * the arc buf and the hdr, count it as overhead.
2655 	 */
2656 	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
2657 	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
2658 	ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
2659 }
2660 
2661 /*
2662  * Remove an arc_buf_t from the hdr's buf list and return the last
2663  * arc_buf_t on the list. If no buffers remain on the list then return
2664  * NULL.
2665  */
2666 static arc_buf_t *
2667 arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
2668 {
2669 	ASSERT(HDR_HAS_L1HDR(hdr));
2670 	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2671 
2672 	arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
2673 	arc_buf_t *lastbuf = NULL;
2674 
2675 	/*
2676 	 * Remove the buf from the hdr list and locate the last
2677 	 * remaining buffer on the list.
2678 	 */
2679 	while (*bufp != NULL) {
2680 		if (*bufp == buf)
2681 			*bufp = buf->b_next;
2682 
2683 		/*
2684 		 * If we've removed a buffer in the middle of
2685 		 * the list then update the lastbuf and update
2686 		 * bufp.
2687 		 */
2688 		if (*bufp != NULL) {
2689 			lastbuf = *bufp;
2690 			bufp = &(*bufp)->b_next;
2691 		}
2692 	}
2693 	buf->b_next = NULL;
2694 	ASSERT3P(lastbuf, !=, buf);
2695 	IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
2696 	IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
2697 	IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
2698 
2699 	return (lastbuf);
2700 }
2701 
2702 /*
2703  * Free up buf->b_data and pull the arc_buf_t off of the the arc_buf_hdr_t's
2704  * list and free it.
2705  */
2706 static void
2707 arc_buf_destroy_impl(arc_buf_t *buf)
2708 {
2709 	arc_buf_hdr_t *hdr = buf->b_hdr;
2710 
2711 	/*
2712 	 * Free up the data associated with the buf but only if we're not
2713 	 * sharing this with the hdr. If we are sharing it with the hdr, the
2714 	 * hdr is responsible for doing the free.
2715 	 */
2716 	if (buf->b_data != NULL) {
2717 		/*
2718 		 * We're about to change the hdr's b_flags. We must either
2719 		 * hold the hash_lock or be undiscoverable.
2720 		 */
2721 		ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2722 
2723 		arc_cksum_verify(buf);
2724 		arc_buf_unwatch(buf);
2725 
2726 		if (arc_buf_is_shared(buf)) {
2727 			arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
2728 		} else {
2729 			uint64_t size = arc_buf_size(buf);
2730 			arc_free_data_buf(hdr, buf->b_data, size, buf);
2731 			ARCSTAT_INCR(arcstat_overhead_size, -size);
2732 		}
2733 		buf->b_data = NULL;
2734 
2735 		ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
2736 		hdr->b_l1hdr.b_bufcnt -= 1;
2737 	}
2738 
2739 	arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
2740 
2741 	if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
2742 		/*
2743 		 * If the current arc_buf_t is sharing its data buffer with the
2744 		 * hdr, then reassign the hdr's b_pdata to share it with the new
2745 		 * buffer at the end of the list. The shared buffer is always
2746 		 * the last one on the hdr's buffer list.
2747 		 *
2748 		 * There is an equivalent case for compressed bufs, but since
2749 		 * they aren't guaranteed to be the last buf in the list and
2750 		 * that is an exceedingly rare case, we just allow that space be
2751 		 * wasted temporarily.
2752 		 */
2753 		if (lastbuf != NULL) {
2754 			/* Only one buf can be shared at once */
2755 			VERIFY(!arc_buf_is_shared(lastbuf));
2756 			/* hdr is uncompressed so can't have compressed buf */
2757 			VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
2758 
2759 			ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
2760 			arc_hdr_free_pdata(hdr);
2761 
2762 			/*
2763 			 * We must setup a new shared block between the
2764 			 * last buffer and the hdr. The data would have
2765 			 * been allocated by the arc buf so we need to transfer
2766 			 * ownership to the hdr since it's now being shared.
2767 			 */
2768 			arc_share_buf(hdr, lastbuf);
2769 		}
2770 	} else if (HDR_SHARED_DATA(hdr)) {
2771 		/*
2772 		 * Uncompressed shared buffers are always at the end
2773 		 * of the list. Compressed buffers don't have the
2774 		 * same requirements. This makes it hard to
2775 		 * simply assert that the lastbuf is shared so
2776 		 * we rely on the hdr's compression flags to determine
2777 		 * if we have a compressed, shared buffer.
2778 		 */
2779 		ASSERT3P(lastbuf, !=, NULL);
2780 		ASSERT(arc_buf_is_shared(lastbuf) ||
2781 		    HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
2782 	}
2783 
2784 	/*
2785 	 * Free the checksum if we're removing the last uncompressed buf from
2786 	 * this hdr.
2787 	 */
2788 	if (!arc_hdr_has_uncompressed_buf(hdr)) {
2789 		arc_cksum_free(hdr);
2790 	}
2791 
2792 	/* clean up the buf */
2793 	buf->b_hdr = NULL;
2794 	kmem_cache_free(buf_cache, buf);
2795 }
2796 
2797 static void
2798 arc_hdr_alloc_pdata(arc_buf_hdr_t *hdr)
2799 {
2800 	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
2801 	ASSERT(HDR_HAS_L1HDR(hdr));
2802 	ASSERT(!HDR_SHARED_DATA(hdr));
2803 
2804 	ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
2805 	hdr->b_l1hdr.b_pdata = arc_get_data_buf(hdr, arc_hdr_size(hdr), hdr);
2806 	hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
2807 	ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
2808 
2809 	ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
2810 	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
2811 }
2812 
2813 static void
2814 arc_hdr_free_pdata(arc_buf_hdr_t *hdr)
2815 {
2816 	ASSERT(HDR_HAS_L1HDR(hdr));
2817 	ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
2818 
2819 	/*
2820 	 * If the hdr is currently being written to the l2arc then
2821 	 * we defer freeing the data by adding it to the l2arc_free_on_write
2822 	 * list. The l2arc will free the data once it's finished
2823 	 * writing it to the l2arc device.
2824 	 */
2825 	if (HDR_L2_WRITING(hdr)) {
2826 		arc_hdr_free_on_write(hdr);
2827 		ARCSTAT_BUMP(arcstat_l2_free_on_write);
2828 	} else {
2829 		arc_free_data_buf(hdr, hdr->b_l1hdr.b_pdata,
2830 		    arc_hdr_size(hdr), hdr);
2831 	}
2832 	hdr->b_l1hdr.b_pdata = NULL;
2833 	hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
2834 
2835 	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
2836 	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
2837 }
2838 
2839 static arc_buf_hdr_t *
2840 arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
2841     enum zio_compress compression_type, arc_buf_contents_t type)
2842 {
2843 	arc_buf_hdr_t *hdr;
2844 
2845 	VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
2846 
2847 	hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
2848 	ASSERT(HDR_EMPTY(hdr));
2849 	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
2850 	ASSERT3P(hdr->b_l1hdr.b_thawed, ==, NULL);
2851 	HDR_SET_PSIZE(hdr, psize);
2852 	HDR_SET_LSIZE(hdr, lsize);
2853 	hdr->b_spa = spa;
2854 	hdr->b_type = type;
2855 	hdr->b_flags = 0;
2856 	arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
2857 	arc_hdr_set_compress(hdr, compression_type);
2858 
2859 	hdr->b_l1hdr.b_state = arc_anon;
2860 	hdr->b_l1hdr.b_arc_access = 0;
2861 	hdr->b_l1hdr.b_bufcnt = 0;
2862 	hdr->b_l1hdr.b_buf = NULL;
2863 
2864 	/*
2865 	 * Allocate the hdr's buffer. This will contain either
2866 	 * the compressed or uncompressed data depending on the block
2867 	 * it references and compressed arc enablement.
2868 	 */
2869 	arc_hdr_alloc_pdata(hdr);
2870 	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2871 
2872 	return (hdr);
2873 }
2874 
2875 /*
2876  * Transition between the two allocation states for the arc_buf_hdr struct.
2877  * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
2878  * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
2879  * version is used when a cache buffer is only in the L2ARC in order to reduce
2880  * memory usage.
2881  */
2882 static arc_buf_hdr_t *
2883 arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
2884 {
2885 	ASSERT(HDR_HAS_L2HDR(hdr));
2886 
2887 	arc_buf_hdr_t *nhdr;
2888 	l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
2889 
2890 	ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
2891 	    (old == hdr_l2only_cache && new == hdr_full_cache));
2892 
2893 	nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
2894 
2895 	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
2896 	buf_hash_remove(hdr);
2897 
2898 	bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
2899 
2900 	if (new == hdr_full_cache) {
2901 		arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
2902 		/*
2903 		 * arc_access and arc_change_state need to be aware that a
2904 		 * header has just come out of L2ARC, so we set its state to
2905 		 * l2c_only even though it's about to change.
2906 		 */
2907 		nhdr->b_l1hdr.b_state = arc_l2c_only;
2908 
2909 		/* Verify previous threads set to NULL before freeing */
2910 		ASSERT3P(nhdr->b_l1hdr.b_pdata, ==, NULL);
2911 	} else {
2912 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2913 		ASSERT0(hdr->b_l1hdr.b_bufcnt);
2914 		ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
2915 
2916 		/*
2917 		 * If we've reached here, We must have been called from
2918 		 * arc_evict_hdr(), as such we should have already been
2919 		 * removed from any ghost list we were previously on
2920 		 * (which protects us from racing with arc_evict_state),
2921 		 * thus no locking is needed during this check.
2922 		 */
2923 		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
2924 
2925 		/*
2926 		 * A buffer must not be moved into the arc_l2c_only
2927 		 * state if it's not finished being written out to the
2928 		 * l2arc device. Otherwise, the b_l1hdr.b_pdata field
2929 		 * might try to be accessed, even though it was removed.
2930 		 */
2931 		VERIFY(!HDR_L2_WRITING(hdr));
2932 		VERIFY3P(hdr->b_l1hdr.b_pdata, ==, NULL);
2933 
2934 #ifdef ZFS_DEBUG
2935 		if (hdr->b_l1hdr.b_thawed != NULL) {
2936 			kmem_free(hdr->b_l1hdr.b_thawed, 1);
2937 			hdr->b_l1hdr.b_thawed = NULL;
2938 		}
2939 #endif
2940 
2941 		arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
2942 	}
2943 	/*
2944 	 * The header has been reallocated so we need to re-insert it into any
2945 	 * lists it was on.
2946 	 */
2947 	(void) buf_hash_insert(nhdr, NULL);
2948 
2949 	ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
2950 
2951 	mutex_enter(&dev->l2ad_mtx);
2952 
2953 	/*
2954 	 * We must place the realloc'ed header back into the list at
2955 	 * the same spot. Otherwise, if it's placed earlier in the list,
2956 	 * l2arc_write_buffers() could find it during the function's
2957 	 * write phase, and try to write it out to the l2arc.
2958 	 */
2959 	list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
2960 	list_remove(&dev->l2ad_buflist, hdr);
2961 
2962 	mutex_exit(&dev->l2ad_mtx);
2963 
2964 	/*
2965 	 * Since we're using the pointer address as the tag when
2966 	 * incrementing and decrementing the l2ad_alloc refcount, we
2967 	 * must remove the old pointer (that we're about to destroy) and
2968 	 * add the new pointer to the refcount. Otherwise we'd remove
2969 	 * the wrong pointer address when calling arc_hdr_destroy() later.
2970 	 */
2971 
2972 	(void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
2973 	(void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
2974 
2975 	buf_discard_identity(hdr);
2976 	kmem_cache_free(old, hdr);
2977 
2978 	return (nhdr);
2979 }
2980 
2981 /*
2982  * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
2983  * The buf is returned thawed since we expect the consumer to modify it.
2984  */
2985 arc_buf_t *
2986 arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
2987 {
2988 	arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
2989 	    ZIO_COMPRESS_OFF, type);
2990 	ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
2991 
2992 	arc_buf_t *buf = NULL;
2993 	VERIFY0(arc_buf_alloc_impl(hdr, tag, B_FALSE, B_FALSE, &buf));
2994 	arc_buf_thaw(buf);
2995 
2996 	return (buf);
2997 }
2998 
2999 /*
3000  * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
3001  * for bufs containing metadata.
3002  */
3003 arc_buf_t *
3004 arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
3005     enum zio_compress compression_type)
3006 {
3007 	ASSERT3U(lsize, >, 0);
3008 	ASSERT3U(lsize, >=, psize);
3009 	ASSERT(compression_type > ZIO_COMPRESS_OFF);
3010 	ASSERT(compression_type < ZIO_COMPRESS_FUNCTIONS);
3011 
3012 	arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
3013 	    compression_type, ARC_BUFC_DATA);
3014 	ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
3015 
3016 	arc_buf_t *buf = NULL;
3017 	VERIFY0(arc_buf_alloc_impl(hdr, tag, B_TRUE, B_FALSE, &buf));
3018 	arc_buf_thaw(buf);
3019 	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
3020 
3021 	return (buf);
3022 }
3023 
3024 static void
3025 arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
3026 {
3027 	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
3028 	l2arc_dev_t *dev = l2hdr->b_dev;
3029 	uint64_t asize = arc_hdr_size(hdr);
3030 
3031 	ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
3032 	ASSERT(HDR_HAS_L2HDR(hdr));
3033 
3034 	list_remove(&dev->l2ad_buflist, hdr);
3035 
3036 	ARCSTAT_INCR(arcstat_l2_asize, -asize);
3037 	ARCSTAT_INCR(arcstat_l2_size, -HDR_GET_LSIZE(hdr));
3038 
3039 	vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
3040 
3041 	(void) refcount_remove_many(&dev->l2ad_alloc, asize, hdr);
3042 	arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
3043 }
3044 
3045 static void
3046 arc_hdr_destroy(arc_buf_hdr_t *hdr)
3047 {
3048 	if (HDR_HAS_L1HDR(hdr)) {
3049 		ASSERT(hdr->b_l1hdr.b_buf == NULL ||
3050 		    hdr->b_l1hdr.b_bufcnt > 0);
3051 		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3052 		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
3053 	}
3054 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3055 	ASSERT(!HDR_IN_HASH_TABLE(hdr));
3056 
3057 	if (!HDR_EMPTY(hdr))
3058 		buf_discard_identity(hdr);
3059 
3060 	if (HDR_HAS_L2HDR(hdr)) {
3061 		l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
3062 		boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
3063 
3064 		if (!buflist_held)
3065 			mutex_enter(&dev->l2ad_mtx);
3066 
3067 		/*
3068 		 * Even though we checked this conditional above, we
3069 		 * need to check this again now that we have the
3070 		 * l2ad_mtx. This is because we could be racing with
3071 		 * another thread calling l2arc_evict() which might have
3072 		 * destroyed this header's L2 portion as we were waiting
3073 		 * to acquire the l2ad_mtx. If that happens, we don't
3074 		 * want to re-destroy the header's L2 portion.
3075 		 */
3076 		if (HDR_HAS_L2HDR(hdr))
3077 			arc_hdr_l2hdr_destroy(hdr);
3078 
3079 		if (!buflist_held)
3080 			mutex_exit(&dev->l2ad_mtx);
3081 	}
3082 
3083 	if (HDR_HAS_L1HDR(hdr)) {
3084 		arc_cksum_free(hdr);
3085 
3086 		while (hdr->b_l1hdr.b_buf != NULL)
3087 			arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
3088 
3089 #ifdef ZFS_DEBUG
3090 		if (hdr->b_l1hdr.b_thawed != NULL) {
3091 			kmem_free(hdr->b_l1hdr.b_thawed, 1);
3092 			hdr->b_l1hdr.b_thawed = NULL;
3093 		}
3094 #endif
3095 
3096 		if (hdr->b_l1hdr.b_pdata != NULL) {
3097 			arc_hdr_free_pdata(hdr);
3098 		}
3099 	}
3100 
3101 	ASSERT3P(hdr->b_hash_next, ==, NULL);
3102 	if (HDR_HAS_L1HDR(hdr)) {
3103 		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
3104 		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
3105 		kmem_cache_free(hdr_full_cache, hdr);
3106 	} else {
3107 		kmem_cache_free(hdr_l2only_cache, hdr);
3108 	}
3109 }
3110 
3111 void
3112 arc_buf_destroy(arc_buf_t *buf, void* tag)
3113 {
3114 	arc_buf_hdr_t *hdr = buf->b_hdr;
3115 	kmutex_t *hash_lock = HDR_LOCK(hdr);
3116 
3117 	if (hdr->b_l1hdr.b_state == arc_anon) {
3118 		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
3119 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3120 		VERIFY0(remove_reference(hdr, NULL, tag));
3121 		arc_hdr_destroy(hdr);
3122 		return;
3123 	}
3124 
3125 	mutex_enter(hash_lock);
3126 	ASSERT3P(hdr, ==, buf->b_hdr);
3127 	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
3128 	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3129 	ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
3130 	ASSERT3P(buf->b_data, !=, NULL);
3131 
3132 	(void) remove_reference(hdr, hash_lock, tag);
3133 	arc_buf_destroy_impl(buf);
3134 	mutex_exit(hash_lock);
3135 }
3136 
3137 /*
3138  * Evict the arc_buf_hdr that is provided as a parameter. The resultant
3139  * state of the header is dependent on it's state prior to entering this
3140  * function. The following transitions are possible:
3141  *
3142  *    - arc_mru -> arc_mru_ghost
3143  *    - arc_mfu -> arc_mfu_ghost
3144  *    - arc_mru_ghost -> arc_l2c_only
3145  *    - arc_mru_ghost -> deleted
3146  *    - arc_mfu_ghost -> arc_l2c_only
3147  *    - arc_mfu_ghost -> deleted
3148  */
3149 static int64_t
3150 arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
3151 {
3152 	arc_state_t *evicted_state, *state;
3153 	int64_t bytes_evicted = 0;
3154 
3155 	ASSERT(MUTEX_HELD(hash_lock));
3156 	ASSERT(HDR_HAS_L1HDR(hdr));
3157 
3158 	state = hdr->b_l1hdr.b_state;
3159 	if (GHOST_STATE(state)) {
3160 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3161 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
3162 
3163 		/*
3164 		 * l2arc_write_buffers() relies on a header's L1 portion
3165 		 * (i.e. its b_pdata field) during its write phase.
3166 		 * Thus, we cannot push a header onto the arc_l2c_only
3167 		 * state (removing it's L1 piece) until the header is
3168 		 * done being written to the l2arc.
3169 		 */
3170 		if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
3171 			ARCSTAT_BUMP(arcstat_evict_l2_skip);
3172 			return (bytes_evicted);
3173 		}
3174 
3175 		ARCSTAT_BUMP(arcstat_deleted);
3176 		bytes_evicted += HDR_GET_LSIZE(hdr);
3177 
3178 		DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
3179 
3180 		ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
3181 		if (HDR_HAS_L2HDR(hdr)) {
3182 			/*
3183 			 * This buffer is cached on the 2nd Level ARC;
3184 			 * don't destroy the header.
3185 			 */
3186 			arc_change_state(arc_l2c_only, hdr, hash_lock);
3187 			/*
3188 			 * dropping from L1+L2 cached to L2-only,
3189 			 * realloc to remove the L1 header.
3190 			 */
3191 			hdr = arc_hdr_realloc(hdr, hdr_full_cache,
3192 			    hdr_l2only_cache);
3193 		} else {
3194 			arc_change_state(arc_anon, hdr, hash_lock);
3195 			arc_hdr_destroy(hdr);
3196 		}
3197 		return (bytes_evicted);
3198 	}
3199 
3200 	ASSERT(state == arc_mru || state == arc_mfu);
3201 	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3202 
3203 	/* prefetch buffers have a minimum lifespan */
3204 	if (HDR_IO_IN_PROGRESS(hdr) ||
3205 	    ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
3206 	    ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
3207 	    arc_min_prefetch_lifespan)) {
3208 		ARCSTAT_BUMP(arcstat_evict_skip);
3209 		return (bytes_evicted);
3210 	}
3211 
3212 	ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
3213 	while (hdr->b_l1hdr.b_buf) {
3214 		arc_buf_t *buf = hdr->b_l1hdr.b_buf;
3215 		if (!mutex_tryenter(&buf->b_evict_lock)) {
3216 			ARCSTAT_BUMP(arcstat_mutex_miss);
3217 			break;
3218 		}
3219 		if (buf->b_data != NULL)
3220 			bytes_evicted += HDR_GET_LSIZE(hdr);
3221 		mutex_exit(&buf->b_evict_lock);
3222 		arc_buf_destroy_impl(buf);
3223 	}
3224 
3225 	if (HDR_HAS_L2HDR(hdr)) {
3226 		ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
3227 	} else {
3228 		if (l2arc_write_eligible(hdr->b_spa, hdr)) {
3229 			ARCSTAT_INCR(arcstat_evict_l2_eligible,
3230 			    HDR_GET_LSIZE(hdr));
3231 		} else {
3232 			ARCSTAT_INCR(arcstat_evict_l2_ineligible,
3233 			    HDR_GET_LSIZE(hdr));
3234 		}
3235 	}
3236 
3237 	if (hdr->b_l1hdr.b_bufcnt == 0) {
3238 		arc_cksum_free(hdr);
3239 
3240 		bytes_evicted += arc_hdr_size(hdr);
3241 
3242 		/*
3243 		 * If this hdr is being evicted and has a compressed
3244 		 * buffer then we discard it here before we change states.
3245 		 * This ensures that the accounting is updated correctly
3246 		 * in arc_free_data_buf().
3247 		 */
3248 		arc_hdr_free_pdata(hdr);
3249 
3250 		arc_change_state(evicted_state, hdr, hash_lock);
3251 		ASSERT(HDR_IN_HASH_TABLE(hdr));
3252 		arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
3253 		DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
3254 	}
3255 
3256 	return (bytes_evicted);
3257 }
3258 
3259 static uint64_t
3260 arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
3261     uint64_t spa, int64_t bytes)
3262 {
3263 	multilist_sublist_t *mls;
3264 	uint64_t bytes_evicted = 0;
3265 	arc_buf_hdr_t *hdr;
3266 	kmutex_t *hash_lock;
3267 	int evict_count = 0;
3268 
3269 	ASSERT3P(marker, !=, NULL);
3270 	IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
3271 
3272 	mls = multilist_sublist_lock(ml, idx);
3273 
3274 	for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL;
3275 	    hdr = multilist_sublist_prev(mls, marker)) {
3276 		if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) ||
3277 		    (evict_count >= zfs_arc_evict_batch_limit))
3278 			break;
3279 
3280 		/*
3281 		 * To keep our iteration location, move the marker
3282 		 * forward. Since we're not holding hdr's hash lock, we
3283 		 * must be very careful and not remove 'hdr' from the
3284 		 * sublist. Otherwise, other consumers might mistake the
3285 		 * 'hdr' as not being on a sublist when they call the
3286 		 * multilist_link_active() function (they all rely on
3287 		 * the hash lock protecting concurrent insertions and
3288 		 * removals). multilist_sublist_move_forward() was
3289 		 * specifically implemented to ensure this is the case
3290 		 * (only 'marker' will be removed and re-inserted).
3291 		 */
3292 		multilist_sublist_move_forward(mls, marker);
3293 
3294 		/*
3295 		 * The only case where the b_spa field should ever be
3296 		 * zero, is the marker headers inserted by
3297 		 * arc_evict_state(). It's possible for multiple threads
3298 		 * to be calling arc_evict_state() concurrently (e.g.
3299 		 * dsl_pool_close() and zio_inject_fault()), so we must
3300 		 * skip any markers we see from these other threads.
3301 		 */
3302 		if (hdr->b_spa == 0)
3303 			continue;
3304 
3305 		/* we're only interested in evicting buffers of a certain spa */
3306 		if (spa != 0 && hdr->b_spa != spa) {
3307 			ARCSTAT_BUMP(arcstat_evict_skip);
3308 			continue;
3309 		}
3310 
3311 		hash_lock = HDR_LOCK(hdr);
3312 
3313 		/*
3314 		 * We aren't calling this function from any code path
3315 		 * that would already be holding a hash lock, so we're
3316 		 * asserting on this assumption to be defensive in case
3317 		 * this ever changes. Without this check, it would be
3318 		 * possible to incorrectly increment arcstat_mutex_miss
3319 		 * below (e.g. if the code changed such that we called
3320 		 * this function with a hash lock held).
3321 		 */
3322 		ASSERT(!MUTEX_HELD(hash_lock));
3323 
3324 		if (mutex_tryenter(hash_lock)) {
3325 			uint64_t evicted = arc_evict_hdr(hdr, hash_lock);
3326 			mutex_exit(hash_lock);
3327 
3328 			bytes_evicted += evicted;
3329 
3330 			/*
3331 			 * If evicted is zero, arc_evict_hdr() must have
3332 			 * decided to skip this header, don't increment
3333 			 * evict_count in this case.
3334 			 */
3335 			if (evicted != 0)
3336 				evict_count++;
3337 
3338 			/*
3339 			 * If arc_size isn't overflowing, signal any
3340 			 * threads that might happen to be waiting.
3341 			 *
3342 			 * For each header evicted, we wake up a single
3343 			 * thread. If we used cv_broadcast, we could
3344 			 * wake up "too many" threads causing arc_size
3345 			 * to significantly overflow arc_c; since
3346 			 * arc_get_data_buf() doesn't check for overflow
3347 			 * when it's woken up (it doesn't because it's
3348 			 * possible for the ARC to be overflowing while
3349 			 * full of un-evictable buffers, and the
3350 			 * function should proceed in this case).
3351 			 *
3352 			 * If threads are left sleeping, due to not
3353 			 * using cv_broadcast, they will be woken up
3354 			 * just before arc_reclaim_thread() sleeps.
3355 			 */
3356 			mutex_enter(&arc_reclaim_lock);
3357 			if (!arc_is_overflowing())
3358 				cv_signal(&arc_reclaim_waiters_cv);
3359 			mutex_exit(&arc_reclaim_lock);
3360 		} else {
3361 			ARCSTAT_BUMP(arcstat_mutex_miss);
3362 		}
3363 	}
3364 
3365 	multilist_sublist_unlock(mls);
3366 
3367 	return (bytes_evicted);
3368 }
3369 
3370 /*
3371  * Evict buffers from the given arc state, until we've removed the
3372  * specified number of bytes. Move the removed buffers to the
3373  * appropriate evict state.
3374  *
3375  * This function makes a "best effort". It skips over any buffers
3376  * it can't get a hash_lock on, and so, may not catch all candidates.
3377  * It may also return without evicting as much space as requested.
3378  *
3379  * If bytes is specified using the special value ARC_EVICT_ALL, this
3380  * will evict all available (i.e. unlocked and evictable) buffers from
3381  * the given arc state; which is used by arc_flush().
3382  */
3383 static uint64_t
3384 arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
3385     arc_buf_contents_t type)
3386 {
3387 	uint64_t total_evicted = 0;
3388 	multilist_t *ml = &state->arcs_list[type];
3389 	int num_sublists;
3390 	arc_buf_hdr_t **markers;
3391 
3392 	IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
3393 
3394 	num_sublists = multilist_get_num_sublists(ml);
3395 
3396 	/*
3397 	 * If we've tried to evict from each sublist, made some
3398 	 * progress, but still have not hit the target number of bytes
3399 	 * to evict, we want to keep trying. The markers allow us to
3400 	 * pick up where we left off for each individual sublist, rather
3401 	 * than starting from the tail each time.
3402 	 */
3403 	markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
3404 	for (int i = 0; i < num_sublists; i++) {
3405 		markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
3406 
3407 		/*
3408 		 * A b_spa of 0 is used to indicate that this header is
3409 		 * a marker. This fact is used in arc_adjust_type() and
3410 		 * arc_evict_state_impl().
3411 		 */
3412 		markers[i]->b_spa = 0;
3413 
3414 		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
3415 		multilist_sublist_insert_tail(mls, markers[i]);
3416 		multilist_sublist_unlock(mls);
3417 	}
3418 
3419 	/*
3420 	 * While we haven't hit our target number of bytes to evict, or
3421 	 * we're evicting all available buffers.
3422 	 */
3423 	while (total_evicted < bytes || bytes == ARC_EVICT_ALL) {
3424 		/*
3425 		 * Start eviction using a randomly selected sublist,
3426 		 * this is to try and evenly balance eviction across all
3427 		 * sublists. Always starting at the same sublist
3428 		 * (e.g. index 0) would cause evictions to favor certain
3429 		 * sublists over others.
3430 		 */
3431 		int sublist_idx = multilist_get_random_index(ml);
3432 		uint64_t scan_evicted = 0;
3433 
3434 		for (int i = 0; i < num_sublists; i++) {
3435 			uint64_t bytes_remaining;
3436 			uint64_t bytes_evicted;
3437 
3438 			if (bytes == ARC_EVICT_ALL)
3439 				bytes_remaining = ARC_EVICT_ALL;
3440 			else if (total_evicted < bytes)
3441 				bytes_remaining = bytes - total_evicted;
3442 			else
3443 				break;
3444 
3445 			bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
3446 			    markers[sublist_idx], spa, bytes_remaining);
3447 
3448 			scan_evicted += bytes_evicted;
3449 			total_evicted += bytes_evicted;
3450 
3451 			/* we've reached the end, wrap to the beginning */
3452 			if (++sublist_idx >= num_sublists)
3453 				sublist_idx = 0;
3454 		}
3455 
3456 		/*
3457 		 * If we didn't evict anything during this scan, we have
3458 		 * no reason to believe we'll evict more during another
3459 		 * scan, so break the loop.
3460 		 */
3461 		if (scan_evicted == 0) {
3462 			/* This isn't possible, let's make that obvious */
3463 			ASSERT3S(bytes, !=, 0);
3464 
3465 			/*
3466 			 * When bytes is ARC_EVICT_ALL, the only way to
3467 			 * break the loop is when scan_evicted is zero.
3468 			 * In that case, we actually have evicted enough,
3469 			 * so we don't want to increment the kstat.
3470 			 */
3471 			if (bytes != ARC_EVICT_ALL) {
3472 				ASSERT3S(total_evicted, <, bytes);
3473 				ARCSTAT_BUMP(arcstat_evict_not_enough);
3474 			}
3475 
3476 			break;
3477 		}
3478 	}
3479 
3480 	for (int i = 0; i < num_sublists; i++) {
3481 		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
3482 		multilist_sublist_remove(mls, markers[i]);
3483 		multilist_sublist_unlock(mls);
3484 
3485 		kmem_cache_free(hdr_full_cache, markers[i]);
3486 	}
3487 	kmem_free(markers, sizeof (*markers) * num_sublists);
3488 
3489 	return (total_evicted);
3490 }
3491 
3492 /*
3493  * Flush all "evictable" data of the given type from the arc state
3494  * specified. This will not evict any "active" buffers (i.e. referenced).
3495  *
3496  * When 'retry' is set to B_FALSE, the function will make a single pass
3497  * over the state and evict any buffers that it can. Since it doesn't
3498  * continually retry the eviction, it might end up leaving some buffers
3499  * in the ARC due to lock misses.
3500  *
3501  * When 'retry' is set to B_TRUE, the function will continually retry the
3502  * eviction until *all* evictable buffers have been removed from the
3503  * state. As a result, if concurrent insertions into the state are
3504  * allowed (e.g. if the ARC isn't shutting down), this function might
3505  * wind up in an infinite loop, continually trying to evict buffers.
3506  */
3507 static uint64_t
3508 arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
3509     boolean_t retry)
3510 {
3511 	uint64_t evicted = 0;
3512 
3513 	while (refcount_count(&state->arcs_esize[type]) != 0) {
3514 		evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
3515 
3516 		if (!retry)
3517 			break;
3518 	}
3519 
3520 	return (evicted);
3521 }
3522 
3523 /*
3524  * Evict the specified number of bytes from the state specified,
3525  * restricting eviction to the spa and type given. This function
3526  * prevents us from trying to evict more from a state's list than
3527  * is "evictable", and to skip evicting altogether when passed a
3528  * negative value for "bytes". In contrast, arc_evict_state() will
3529  * evict everything it can, when passed a negative value for "bytes".
3530  */
3531 static uint64_t
3532 arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
3533     arc_buf_contents_t type)
3534 {
3535 	int64_t delta;
3536 
3537 	if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
3538 		delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
3539 		return (arc_evict_state(state, spa, delta, type));
3540 	}
3541 
3542 	return (0);
3543 }
3544 
3545 /*
3546  * Evict metadata buffers from the cache, such that arc_meta_used is
3547  * capped by the arc_meta_limit tunable.
3548  */
3549 static uint64_t
3550 arc_adjust_meta(void)
3551 {
3552 	uint64_t total_evicted = 0;
3553 	int64_t target;
3554 
3555 	/*
3556 	 * If we're over the meta limit, we want to evict enough
3557 	 * metadata to get back under the meta limit. We don't want to
3558 	 * evict so much that we drop the MRU below arc_p, though. If
3559 	 * we're over the meta limit more than we're over arc_p, we
3560 	 * evict some from the MRU here, and some from the MFU below.
3561 	 */
3562 	target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
3563 	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
3564 	    refcount_count(&arc_mru->arcs_size) - arc_p));
3565 
3566 	total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
3567 
3568 	/*
3569 	 * Similar to the above, we want to evict enough bytes to get us
3570 	 * below the meta limit, but not so much as to drop us below the
3571 	 * space allotted to the MFU (which is defined as arc_c - arc_p).
3572 	 */
3573 	target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
3574 	    (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p)));
3575 
3576 	total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
3577 
3578 	return (total_evicted);
3579 }
3580 
3581 /*
3582  * Return the type of the oldest buffer in the given arc state
3583  *
3584  * This function will select a random sublist of type ARC_BUFC_DATA and
3585  * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
3586  * is compared, and the type which contains the "older" buffer will be
3587  * returned.
3588  */
3589 static arc_buf_contents_t
3590 arc_adjust_type(arc_state_t *state)
3591 {
3592 	multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA];
3593 	multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA];
3594 	int data_idx = multilist_get_random_index(data_ml);
3595 	int meta_idx = multilist_get_random_index(meta_ml);
3596 	multilist_sublist_t *data_mls;
3597 	multilist_sublist_t *meta_mls;
3598 	arc_buf_contents_t type;
3599 	arc_buf_hdr_t *data_hdr;
3600 	arc_buf_hdr_t *meta_hdr;
3601 
3602 	/*
3603 	 * We keep the sublist lock until we're finished, to prevent
3604 	 * the headers from being destroyed via arc_evict_state().
3605 	 */
3606 	data_mls = multilist_sublist_lock(data_ml, data_idx);
3607 	meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
3608 
3609 	/*
3610 	 * These two loops are to ensure we skip any markers that
3611 	 * might be at the tail of the lists due to arc_evict_state().
3612 	 */
3613 
3614 	for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
3615 	    data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
3616 		if (data_hdr->b_spa != 0)
3617 			break;
3618 	}
3619 
3620 	for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
3621 	    meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
3622 		if (meta_hdr->b_spa != 0)
3623 			break;
3624 	}
3625 
3626 	if (data_hdr == NULL && meta_hdr == NULL) {
3627 		type = ARC_BUFC_DATA;
3628 	} else if (data_hdr == NULL) {
3629 		ASSERT3P(meta_hdr, !=, NULL);
3630 		type = ARC_BUFC_METADATA;
3631 	} else if (meta_hdr == NULL) {
3632 		ASSERT3P(data_hdr, !=, NULL);
3633 		type = ARC_BUFC_DATA;
3634 	} else {
3635 		ASSERT3P(data_hdr, !=, NULL);
3636 		ASSERT3P(meta_hdr, !=, NULL);
3637 
3638 		/* The headers can't be on the sublist without an L1 header */
3639 		ASSERT(HDR_HAS_L1HDR(data_hdr));
3640 		ASSERT(HDR_HAS_L1HDR(meta_hdr));
3641 
3642 		if (data_hdr->b_l1hdr.b_arc_access <
3643 		    meta_hdr->b_l1hdr.b_arc_access) {
3644 			type = ARC_BUFC_DATA;
3645 		} else {
3646 			type = ARC_BUFC_METADATA;
3647 		}
3648 	}
3649 
3650 	multilist_sublist_unlock(meta_mls);
3651 	multilist_sublist_unlock(data_mls);
3652 
3653 	return (type);
3654 }
3655 
3656 /*
3657  * Evict buffers from the cache, such that arc_size is capped by arc_c.
3658  */
3659 static uint64_t
3660 arc_adjust(void)
3661 {
3662 	uint64_t total_evicted = 0;
3663 	uint64_t bytes;
3664 	int64_t target;
3665 
3666 	/*
3667 	 * If we're over arc_meta_limit, we want to correct that before
3668 	 * potentially evicting data buffers below.
3669 	 */
3670 	total_evicted += arc_adjust_meta();
3671 
3672 	/*
3673 	 * Adjust MRU size
3674 	 *
3675 	 * If we're over the target cache size, we want to evict enough
3676 	 * from the list to get back to our target size. We don't want
3677 	 * to evict too much from the MRU, such that it drops below
3678 	 * arc_p. So, if we're over our target cache size more than
3679 	 * the MRU is over arc_p, we'll evict enough to get back to
3680 	 * arc_p here, and then evict more from the MFU below.
3681 	 */
3682 	target = MIN((int64_t)(arc_size - arc_c),
3683 	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
3684 	    refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));
3685 
3686 	/*
3687 	 * If we're below arc_meta_min, always prefer to evict data.
3688 	 * Otherwise, try to satisfy the requested number of bytes to
3689 	 * evict from the type which contains older buffers; in an
3690 	 * effort to keep newer buffers in the cache regardless of their
3691 	 * type. If we cannot satisfy the number of bytes from this
3692 	 * type, spill over into the next type.
3693 	 */
3694 	if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
3695 	    arc_meta_used > arc_meta_min) {
3696 		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
3697 		total_evicted += bytes;
3698 
3699 		/*
3700 		 * If we couldn't evict our target number of bytes from
3701 		 * metadata, we try to get the rest from data.
3702 		 */
3703 		target -= bytes;
3704 
3705 		total_evicted +=
3706 		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
3707 	} else {
3708 		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
3709 		total_evicted += bytes;
3710 
3711 		/*
3712 		 * If we couldn't evict our target number of bytes from
3713 		 * data, we try to get the rest from metadata.
3714 		 */
3715 		target -= bytes;
3716 
3717 		total_evicted +=
3718 		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
3719 	}
3720 
3721 	/*
3722 	 * Adjust MFU size
3723 	 *
3724 	 * Now that we've tried to evict enough from the MRU to get its
3725 	 * size back to arc_p, if we're still above the target cache
3726 	 * size, we evict the rest from the MFU.
3727 	 */
3728 	target = arc_size - arc_c;
3729 
3730 	if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
3731 	    arc_meta_used > arc_meta_min) {
3732 		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
3733 		total_evicted += bytes;
3734 
3735 		/*
3736 		 * If we couldn't evict our target number of bytes from
3737 		 * metadata, we try to get the rest from data.
3738 		 */
3739 		target -= bytes;
3740 
3741 		total_evicted +=
3742 		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
3743 	} else {
3744 		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
3745 		total_evicted += bytes;
3746 
3747 		/*
3748 		 * If we couldn't evict our target number of bytes from
3749 		 * data, we try to get the rest from data.
3750 		 */
3751 		target -= bytes;
3752 
3753 		total_evicted +=
3754 		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
3755 	}
3756 
3757 	/*
3758 	 * Adjust ghost lists
3759 	 *
3760 	 * In addition to the above, the ARC also defines target values
3761 	 * for the ghost lists. The sum of the mru list and mru ghost
3762 	 * list should never exceed the target size of the cache, and
3763 	 * the sum of the mru list, mfu list, mru ghost list, and mfu
3764 	 * ghost list should never exceed twice the target size of the
3765 	 * cache. The following logic enforces these limits on the ghost
3766 	 * caches, and evicts from them as needed.
3767 	 */
3768 	target = refcount_count(&arc_mru->arcs_size) +
3769 	    refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
3770 
3771 	bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
3772 	total_evicted += bytes;
3773 
3774 	target -= bytes;
3775 
3776 	total_evicted +=
3777 	    arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
3778 
3779 	/*
3780 	 * We assume the sum of the mru list and mfu list is less than
3781 	 * or equal to arc_c (we enforced this above), which means we
3782 	 * can use the simpler of the two equations below:
3783 	 *
3784 	 *	mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
3785 	 *		    mru ghost + mfu ghost <= arc_c
3786 	 */
3787 	target = refcount_count(&arc_mru_ghost->arcs_size) +
3788 	    refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
3789 
3790 	bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
3791 	total_evicted += bytes;
3792 
3793 	target -= bytes;
3794 
3795 	total_evicted +=
3796 	    arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
3797 
3798 	return (total_evicted);
3799 }
3800 
3801 void
3802 arc_flush(spa_t *spa, boolean_t retry)
3803 {
3804 	uint64_t guid = 0;
3805 
3806 	/*
3807 	 * If retry is B_TRUE, a spa must not be specified since we have
3808 	 * no good way to determine if all of a spa's buffers have been
3809 	 * evicted from an arc state.
3810 	 */
3811 	ASSERT(!retry || spa == 0);
3812 
3813 	if (spa != NULL)
3814 		guid = spa_load_guid(spa);
3815 
3816 	(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
3817 	(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
3818 
3819 	(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
3820 	(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
3821 
3822 	(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
3823 	(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
3824 
3825 	(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
3826 	(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
3827 }
3828 
3829 void
3830 arc_shrink(int64_t to_free)
3831 {
3832 	if (arc_c > arc_c_min) {
3833 
3834 		if (arc_c > arc_c_min + to_free)
3835 			atomic_add_64(&arc_c, -to_free);
3836 		else
3837 			arc_c = arc_c_min;
3838 
3839 		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
3840 		if (arc_c > arc_size)
3841 			arc_c = MAX(arc_size, arc_c_min);
3842 		if (arc_p > arc_c)
3843 			arc_p = (arc_c >> 1);
3844 		ASSERT(arc_c >= arc_c_min);
3845 		ASSERT((int64_t)arc_p >= 0);
3846 	}
3847 
3848 	if (arc_size > arc_c)
3849 		(void) arc_adjust();
3850 }
3851 
3852 typedef enum free_memory_reason_t {
3853 	FMR_UNKNOWN,
3854 	FMR_NEEDFREE,
3855 	FMR_LOTSFREE,
3856 	FMR_SWAPFS_MINFREE,
3857 	FMR_PAGES_PP_MAXIMUM,
3858 	FMR_HEAP_ARENA,
3859 	FMR_ZIO_ARENA,
3860 } free_memory_reason_t;
3861 
3862 int64_t last_free_memory;
3863 free_memory_reason_t last_free_reason;
3864 
3865 /*
3866  * Additional reserve of pages for pp_reserve.
3867  */
3868 int64_t arc_pages_pp_reserve = 64;
3869 
3870 /*
3871  * Additional reserve of pages for swapfs.
3872  */
3873 int64_t arc_swapfs_reserve = 64;
3874 
3875 /*
3876  * Return the amount of memory that can be consumed before reclaim will be
3877  * needed.  Positive if there is sufficient free memory, negative indicates
3878  * the amount of memory that needs to be freed up.
3879  */
3880 static int64_t
3881 arc_available_memory(void)
3882 {
3883 	int64_t lowest = INT64_MAX;
3884 	int64_t n;
3885 	free_memory_reason_t r = FMR_UNKNOWN;
3886 
3887 #ifdef _KERNEL
3888 	if (needfree > 0) {
3889 		n = PAGESIZE * (-needfree);
3890 		if (n < lowest) {
3891 			lowest = n;
3892 			r = FMR_NEEDFREE;
3893 		}
3894 	}
3895 
3896 	/*
3897 	 * check that we're out of range of the pageout scanner.  It starts to
3898 	 * schedule paging if freemem is less than lotsfree and needfree.
3899 	 * lotsfree is the high-water mark for pageout, and needfree is the
3900 	 * number of needed free pages.  We add extra pages here to make sure
3901 	 * the scanner doesn't start up while we're freeing memory.
3902 	 */
3903 	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
3904 	if (n < lowest) {
3905 		lowest = n;
3906 		r = FMR_LOTSFREE;
3907 	}
3908 
3909 	/*
3910 	 * check to make sure that swapfs has enough space so that anon
3911 	 * reservations can still succeed. anon_resvmem() checks that the
3912 	 * availrmem is greater than swapfs_minfree, and the number of reserved
3913 	 * swap pages.  We also add a bit of extra here just to prevent
3914 	 * circumstances from getting really dire.
3915 	 */
3916 	n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
3917 	    desfree - arc_swapfs_reserve);
3918 	if (n < lowest) {
3919 		lowest = n;
3920 		r = FMR_SWAPFS_MINFREE;
3921 	}
3922 
3923 
3924 	/*
3925 	 * Check that we have enough availrmem that memory locking (e.g., via
3926 	 * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
3927 	 * stores the number of pages that cannot be locked; when availrmem
3928 	 * drops below pages_pp_maximum, page locking mechanisms such as
3929 	 * page_pp_lock() will fail.)
3930 	 */
3931 	n = PAGESIZE * (availrmem - pages_pp_maximum -
3932 	    arc_pages_pp_reserve);
3933 	if (n < lowest) {
3934 		lowest = n;
3935 		r = FMR_PAGES_PP_MAXIMUM;
3936 	}
3937 
3938 #if defined(__i386)
3939 	/*
3940 	 * If we're on an i386 platform, it's possible that we'll exhaust the
3941 	 * kernel heap space before we ever run out of available physical
3942 	 * memory.  Most checks of the size of the heap_area compare against
3943 	 * tune.t_minarmem, which is the minimum available real memory that we
3944 	 * can have in the system.  However, this is generally fixed at 25 pages
3945 	 * which is so low that it's useless.  In this comparison, we seek to
3946 	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
3947 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
3948 	 * free)
3949 	 */
3950 	n = (int64_t)vmem_size(heap_arena, VMEM_FREE) -
3951 	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
3952 	if (n < lowest) {
3953 		lowest = n;
3954 		r = FMR_HEAP_ARENA;
3955 	}
3956 #endif
3957 
3958 	/*
3959 	 * If zio data pages are being allocated out of a separate heap segment,
3960 	 * then enforce that the size of available vmem for this arena remains
3961 	 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
3962 	 *
3963 	 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
3964 	 * memory (in the zio_arena) free, which can avoid memory
3965 	 * fragmentation issues.
3966 	 */
3967 	if (zio_arena != NULL) {
3968 		n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
3969 		    (vmem_size(zio_arena, VMEM_ALLOC) >>
3970 		    arc_zio_arena_free_shift);
3971 		if (n < lowest) {
3972 			lowest = n;
3973 			r = FMR_ZIO_ARENA;
3974 		}
3975 	}
3976 #else
3977 	/* Every 100 calls, free a small amount */
3978 	if (spa_get_random(100) == 0)
3979 		lowest = -1024;
3980 #endif
3981 
3982 	last_free_memory = lowest;
3983 	last_free_reason = r;
3984 
3985 	return (lowest);
3986 }
3987 
3988 
3989 /*
3990  * Determine if the system is under memory pressure and is asking
3991  * to reclaim memory. A return value of B_TRUE indicates that the system
3992  * is under memory pressure and that the arc should adjust accordingly.
3993  */
3994 static boolean_t
3995 arc_reclaim_needed(void)
3996 {
3997 	return (arc_available_memory() < 0);
3998 }
3999 
4000 static void
4001 arc_kmem_reap_now(void)
4002 {
4003 	size_t			i;
4004 	kmem_cache_t		*prev_cache = NULL;
4005 	kmem_cache_t		*prev_data_cache = NULL;
4006 	extern kmem_cache_t	*zio_buf_cache[];
4007 	extern kmem_cache_t	*zio_data_buf_cache[];
4008 	extern kmem_cache_t	*range_seg_cache;
4009 
4010 #ifdef _KERNEL
4011 	if (arc_meta_used >= arc_meta_limit) {
4012 		/*
4013 		 * We are exceeding our meta-data cache limit.
4014 		 * Purge some DNLC entries to release holds on meta-data.
4015 		 */
4016 		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
4017 	}
4018 #if defined(__i386)
4019 	/*
4020 	 * Reclaim unused memory from all kmem caches.
4021 	 */
4022 	kmem_reap();
4023 #endif
4024 #endif
4025 
4026 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
4027 		if (zio_buf_cache[i] != prev_cache) {
4028 			prev_cache = zio_buf_cache[i];
4029 			kmem_cache_reap_now(zio_buf_cache[i]);
4030 		}
4031 		if (zio_data_buf_cache[i] != prev_data_cache) {
4032 			prev_data_cache = zio_data_buf_cache[i];
4033 			kmem_cache_reap_now(zio_data_buf_cache[i]);
4034 		}
4035 	}
4036 	kmem_cache_reap_now(buf_cache);
4037 	kmem_cache_reap_now(hdr_full_cache);
4038 	kmem_cache_reap_now(hdr_l2only_cache);
4039 	kmem_cache_reap_now(range_seg_cache);
4040 
4041 	if (zio_arena != NULL) {
4042 		/*
4043 		 * Ask the vmem arena to reclaim unused memory from its
4044 		 * quantum caches.
4045 		 */
4046 		vmem_qcache_reap(zio_arena);
4047 	}
4048 }
4049 
4050 /*
4051  * Threads can block in arc_get_data_buf() waiting for this thread to evict
4052  * enough data and signal them to proceed. When this happens, the threads in
4053  * arc_get_data_buf() are sleeping while holding the hash lock for their
4054  * particular arc header. Thus, we must be careful to never sleep on a
4055  * hash lock in this thread. This is to prevent the following deadlock:
4056  *
4057  *  - Thread A sleeps on CV in arc_get_data_buf() holding hash lock "L",
4058  *    waiting for the reclaim thread to signal it.
4059  *
4060  *  - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
4061  *    fails, and goes to sleep forever.
4062  *
4063  * This possible deadlock is avoided by always acquiring a hash lock
4064  * using mutex_tryenter() from arc_reclaim_thread().
4065  */
4066 static void
4067 arc_reclaim_thread(void)
4068 {
4069 	hrtime_t		growtime = 0;
4070 	callb_cpr_t		cpr;
4071 
4072 	CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG);
4073 
4074 	mutex_enter(&arc_reclaim_lock);
4075 	while (!arc_reclaim_thread_exit) {
4076 		uint64_t evicted = 0;
4077 
4078 		/*
4079 		 * This is necessary in order for the mdb ::arc dcmd to
4080 		 * show up to date information. Since the ::arc command
4081 		 * does not call the kstat's update function, without
4082 		 * this call, the command may show stale stats for the
4083 		 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
4084 		 * with this change, the data might be up to 1 second
4085 		 * out of date; but that should suffice. The arc_state_t
4086 		 * structures can be queried directly if more accurate
4087 		 * information is needed.
4088 		 */
4089 		if (arc_ksp != NULL)
4090 			arc_ksp->ks_update(arc_ksp, KSTAT_READ);
4091 
4092 		mutex_exit(&arc_reclaim_lock);
4093 
4094 		/*
4095 		 * We call arc_adjust() before (possibly) calling
4096 		 * arc_kmem_reap_now(), so that we can wake up
4097 		 * arc_get_data_buf() sooner.
4098 		 */
4099 		evicted = arc_adjust();
4100 
4101 		int64_t free_memory = arc_available_memory();
4102 		if (free_memory < 0) {
4103 
4104 			arc_no_grow = B_TRUE;
4105 			arc_warm = B_TRUE;
4106 
4107 			/*
4108 			 * Wait at least zfs_grow_retry (default 60) seconds
4109 			 * before considering growing.
4110 			 */
4111 			growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
4112 
4113 			arc_kmem_reap_now();
4114 
4115 			/*
4116 			 * If we are still low on memory, shrink the ARC
4117 			 * so that we have arc_shrink_min free space.
4118 			 */
4119 			free_memory = arc_available_memory();
4120 
4121 			int64_t to_free =
4122 			    (arc_c >> arc_shrink_shift) - free_memory;
4123 			if (to_free > 0) {
4124 #ifdef _KERNEL
4125 				to_free = MAX(to_free, ptob(needfree));
4126 #endif
4127 				arc_shrink(to_free);
4128 			}
4129 		} else if (free_memory < arc_c >> arc_no_grow_shift) {
4130 			arc_no_grow = B_TRUE;
4131 		} else if (gethrtime() >= growtime) {
4132 			arc_no_grow = B_FALSE;
4133 		}
4134 
4135 		mutex_enter(&arc_reclaim_lock);
4136 
4137 		/*
4138 		 * If evicted is zero, we couldn't evict anything via
4139 		 * arc_adjust(). This could be due to hash lock
4140 		 * collisions, but more likely due to the majority of
4141 		 * arc buffers being unevictable. Therefore, even if
4142 		 * arc_size is above arc_c, another pass is unlikely to
4143 		 * be helpful and could potentially cause us to enter an
4144 		 * infinite loop.
4145 		 */
4146 		if (arc_size <= arc_c || evicted == 0) {
4147 			/*
4148 			 * We're either no longer overflowing, or we
4149 			 * can't evict anything more, so we should wake
4150 			 * up any threads before we go to sleep.
4151 			 */
4152 			cv_broadcast(&arc_reclaim_waiters_cv);
4153 
4154 			/*
4155 			 * Block until signaled, or after one second (we
4156 			 * might need to perform arc_kmem_reap_now()
4157 			 * even if we aren't being signalled)
4158 			 */
4159 			CALLB_CPR_SAFE_BEGIN(&cpr);
4160 			(void) cv_timedwait_hires(&arc_reclaim_thread_cv,
4161 			    &arc_reclaim_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
4162 			CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock);
4163 		}
4164 	}
4165 
4166 	arc_reclaim_thread_exit = B_FALSE;
4167 	cv_broadcast(&arc_reclaim_thread_cv);
4168 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_lock */
4169 	thread_exit();
4170 }
4171 
4172 /*
4173  * Adapt arc info given the number of bytes we are trying to add and
4174  * the state that we are comming from.  This function is only called
4175  * when we are adding new content to the cache.
4176  */
4177 static void
4178 arc_adapt(int bytes, arc_state_t *state)
4179 {
4180 	int mult;
4181 	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
4182 	int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
4183 	int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
4184 
4185 	if (state == arc_l2c_only)
4186 		return;
4187 
4188 	ASSERT(bytes > 0);
4189 	/*
4190 	 * Adapt the target size of the MRU list:
4191 	 *	- if we just hit in the MRU ghost list, then increase
4192 	 *	  the target size of the MRU list.
4193 	 *	- if we just hit in the MFU ghost list, then increase
4194 	 *	  the target size of the MFU list by decreasing the
4195 	 *	  target size of the MRU list.
4196 	 */
4197 	if (state == arc_mru_ghost) {
4198 		mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
4199 		mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
4200 
4201 		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
4202 	} else if (state == arc_mfu_ghost) {
4203 		uint64_t delta;
4204 
4205 		mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
4206 		mult = MIN(mult, 10);
4207 
4208 		delta = MIN(bytes * mult, arc_p);
4209 		arc_p = MAX(arc_p_min, arc_p - delta);
4210 	}
4211 	ASSERT((int64_t)arc_p >= 0);
4212 
4213 	if (arc_reclaim_needed()) {
4214 		cv_signal(&arc_reclaim_thread_cv);
4215 		return;
4216 	}
4217 
4218 	if (arc_no_grow)
4219 		return;
4220 
4221 	if (arc_c >= arc_c_max)
4222 		return;
4223 
4224 	/*
4225 	 * If we're within (2 * maxblocksize) bytes of the target
4226 	 * cache size, increment the target cache size
4227 	 */
4228 	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
4229 		atomic_add_64(&arc_c, (int64_t)bytes);
4230 		if (arc_c > arc_c_max)
4231 			arc_c = arc_c_max;
4232 		else if (state == arc_anon)
4233 			atomic_add_64(&arc_p, (int64_t)bytes);
4234 		if (arc_p > arc_c)
4235 			arc_p = arc_c;
4236 	}
4237 	ASSERT((int64_t)arc_p >= 0);
4238 }
4239 
4240 /*
4241  * Check if arc_size has grown past our upper threshold, determined by
4242  * zfs_arc_overflow_shift.
4243  */
4244 static boolean_t
4245 arc_is_overflowing(void)
4246 {
4247 	/* Always allow at least one block of overflow */
4248 	uint64_t overflow = MAX(SPA_MAXBLOCKSIZE,
4249 	    arc_c >> zfs_arc_overflow_shift);
4250 
4251 	return (arc_size >= arc_c + overflow);
4252 }
4253 
4254 /*
4255  * Allocate a block and return it to the caller. If we are hitting the
4256  * hard limit for the cache size, we must sleep, waiting for the eviction
4257  * thread to catch up. If we're past the target size but below the hard
4258  * limit, we'll only signal the reclaim thread and continue on.
4259  */
4260 static void *
4261 arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
4262 {
4263 	void *datap = NULL;
4264 	arc_state_t		*state = hdr->b_l1hdr.b_state;
4265 	arc_buf_contents_t	type = arc_buf_type(hdr);
4266 
4267 	arc_adapt(size, state);
4268 
4269 	/*
4270 	 * If arc_size is currently overflowing, and has grown past our
4271 	 * upper limit, we must be adding data faster than the evict
4272 	 * thread can evict. Thus, to ensure we don't compound the
4273 	 * problem by adding more data and forcing arc_size to grow even
4274 	 * further past it's target size, we halt and wait for the
4275 	 * eviction thread to catch up.
4276 	 *
4277 	 * It's also possible that the reclaim thread is unable to evict
4278 	 * enough buffers to get arc_size below the overflow limit (e.g.
4279 	 * due to buffers being un-evictable, or hash lock collisions).
4280 	 * In this case, we want to proceed regardless if we're
4281 	 * overflowing; thus we don't use a while loop here.
4282 	 */
4283 	if (arc_is_overflowing()) {
4284 		mutex_enter(&arc_reclaim_lock);
4285 
4286 		/*
4287 		 * Now that we've acquired the lock, we may no longer be
4288 		 * over the overflow limit, lets check.
4289 		 *
4290 		 * We're ignoring the case of spurious wake ups. If that
4291 		 * were to happen, it'd let this thread consume an ARC
4292 		 * buffer before it should have (i.e. before we're under
4293 		 * the overflow limit and were signalled by the reclaim
4294 		 * thread). As long as that is a rare occurrence, it
4295 		 * shouldn't cause any harm.
4296 		 */
4297 		if (arc_is_overflowing()) {
4298 			cv_signal(&arc_reclaim_thread_cv);
4299 			cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock);
4300 		}
4301 
4302 		mutex_exit(&arc_reclaim_lock);
4303 	}
4304 
4305 	VERIFY3U(hdr->b_type, ==, type);
4306 	if (type == ARC_BUFC_METADATA) {
4307 		datap = zio_buf_alloc(size);
4308 		arc_space_consume(size, ARC_SPACE_META);
4309 	} else {
4310 		ASSERT(type == ARC_BUFC_DATA);
4311 		datap = zio_data_buf_alloc(size);
4312 		arc_space_consume(size, ARC_SPACE_DATA);
4313 	}
4314 
4315 	/*
4316 	 * Update the state size.  Note that ghost states have a
4317 	 * "ghost size" and so don't need to be updated.
4318 	 */
4319 	if (!GHOST_STATE(state)) {
4320 
4321 		(void) refcount_add_many(&state->arcs_size, size, tag);
4322 
4323 		/*
4324 		 * If this is reached via arc_read, the link is
4325 		 * protected by the hash lock. If reached via
4326 		 * arc_buf_alloc, the header should not be accessed by
4327 		 * any other thread. And, if reached via arc_read_done,
4328 		 * the hash lock will protect it if it's found in the
4329 		 * hash table; otherwise no other thread should be
4330 		 * trying to [add|remove]_reference it.
4331 		 */
4332 		if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
4333 			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
4334 			(void) refcount_add_many(&state->arcs_esize[type],
4335 			    size, tag);
4336 		}
4337 
4338 		/*
4339 		 * If we are growing the cache, and we are adding anonymous
4340 		 * data, and we have outgrown arc_p, update arc_p
4341 		 */
4342 		if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
4343 		    (refcount_count(&arc_anon->arcs_size) +
4344 		    refcount_count(&arc_mru->arcs_size) > arc_p))
4345 			arc_p = MIN(arc_c, arc_p + size);
4346 	}
4347 	return (datap);
4348 }
4349 
4350 /*
4351  * Free the arc data buffer.
4352  */
4353 static void
4354 arc_free_data_buf(arc_buf_hdr_t *hdr, void *data, uint64_t size, void *tag)
4355 {
4356 	arc_state_t *state = hdr->b_l1hdr.b_state;
4357 	arc_buf_contents_t type = arc_buf_type(hdr);
4358 
4359 	/* protected by hash lock, if in the hash table */
4360 	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
4361 		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
4362 		ASSERT(state != arc_anon && state != arc_l2c_only);
4363 
4364 		(void) refcount_remove_many(&state->arcs_esize[type],
4365 		    size, tag);
4366 	}
4367 	(void) refcount_remove_many(&state->arcs_size, size, tag);
4368 
4369 	VERIFY3U(hdr->b_type, ==, type);
4370 	if (type == ARC_BUFC_METADATA) {
4371 		zio_buf_free(data, size);
4372 		arc_space_return(size, ARC_SPACE_META);
4373 	} else {
4374 		ASSERT(type == ARC_BUFC_DATA);
4375 		zio_data_buf_free(data, size);
4376 		arc_space_return(size, ARC_SPACE_DATA);
4377 	}
4378 }
4379 
4380 /*
4381  * This routine is called whenever a buffer is accessed.
4382  * NOTE: the hash lock is dropped in this function.
4383  */
4384 static void
4385 arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
4386 {
4387 	clock_t now;
4388 
4389 	ASSERT(MUTEX_HELD(hash_lock));
4390 	ASSERT(HDR_HAS_L1HDR(hdr));
4391 
4392 	if (hdr->b_l1hdr.b_state == arc_anon) {
4393 		/*
4394 		 * This buffer is not in the cache, and does not
4395 		 * appear in our "ghost" list.  Add the new buffer
4396 		 * to the MRU state.
4397 		 */
4398 
4399 		ASSERT0(hdr->b_l1hdr.b_arc_access);
4400 		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
4401 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
4402 		arc_change_state(arc_mru, hdr, hash_lock);
4403 
4404 	} else if (hdr->b_l1hdr.b_state == arc_mru) {
4405 		now = ddi_get_lbolt();
4406 
4407 		/*
4408 		 * If this buffer is here because of a prefetch, then either:
4409 		 * - clear the flag if this is a "referencing" read
4410 		 *   (any subsequent access will bump this into the MFU state).
4411 		 * or
4412 		 * - move the buffer to the head of the list if this is
4413 		 *   another prefetch (to make it less likely to be evicted).
4414 		 */
4415 		if (HDR_PREFETCH(hdr)) {
4416 			if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
4417 				/* link protected by hash lock */
4418 				ASSERT(multilist_link_active(
4419 				    &hdr->b_l1hdr.b_arc_node));
4420 			} else {
4421 				arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
4422 				ARCSTAT_BUMP(arcstat_mru_hits);
4423 			}
4424 			hdr->b_l1hdr.b_arc_access = now;
4425 			return;
4426 		}
4427 
4428 		/*
4429 		 * This buffer has been "accessed" only once so far,
4430 		 * but it is still in the cache. Move it to the MFU
4431 		 * state.
4432 		 */
4433 		if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) {
4434 			/*
4435 			 * More than 125ms have passed since we
4436 			 * instantiated this buffer.  Move it to the
4437 			 * most frequently used state.
4438 			 */
4439 			hdr->b_l1hdr.b_arc_access = now;
4440 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
4441 			arc_change_state(arc_mfu, hdr, hash_lock);
4442 		}
4443 		ARCSTAT_BUMP(arcstat_mru_hits);
4444 	} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
4445 		arc_state_t	*new_state;
4446 		/*
4447 		 * This buffer has been "accessed" recently, but
4448 		 * was evicted from the cache.  Move it to the
4449 		 * MFU state.
4450 		 */
4451 
4452 		if (HDR_PREFETCH(hdr)) {
4453 			new_state = arc_mru;
4454 			if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
4455 				arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
4456 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
4457 		} else {
4458 			new_state = arc_mfu;
4459 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
4460 		}
4461 
4462 		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
4463 		arc_change_state(new_state, hdr, hash_lock);
4464 
4465 		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
4466 	} else if (hdr->b_l1hdr.b_state == arc_mfu) {
4467 		/*
4468 		 * This buffer has been accessed more than once and is
4469 		 * still in the cache.  Keep it in the MFU state.
4470 		 *
4471 		 * NOTE: an add_reference() that occurred when we did
4472 		 * the arc_read() will have kicked this off the list.
4473 		 * If it was a prefetch, we will explicitly move it to
4474 		 * the head of the list now.
4475 		 */
4476 		if ((HDR_PREFETCH(hdr)) != 0) {
4477 			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
4478 			/* link protected by hash_lock */
4479 			ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
4480 		}
4481 		ARCSTAT_BUMP(arcstat_mfu_hits);
4482 		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
4483 	} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
4484 		arc_state_t	*new_state = arc_mfu;
4485 		/*
4486 		 * This buffer has been accessed more than once but has
4487 		 * been evicted from the cache.  Move it back to the
4488 		 * MFU state.
4489 		 */
4490 
4491 		if (HDR_PREFETCH(hdr)) {
4492 			/*
4493 			 * This is a prefetch access...
4494 			 * move this block back to the MRU state.
4495 			 */
4496 			ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
4497 			new_state = arc_mru;
4498 		}
4499 
4500 		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
4501 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
4502 		arc_change_state(new_state, hdr, hash_lock);
4503 
4504 		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
4505 	} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
4506 		/*
4507 		 * This buffer is on the 2nd Level ARC.
4508 		 */
4509 
4510 		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
4511 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
4512 		arc_change_state(arc_mfu, hdr, hash_lock);
4513 	} else {
4514 		ASSERT(!"invalid arc state");
4515 	}
4516 }
4517 
4518 /* a generic arc_done_func_t which you can use */
4519 /* ARGSUSED */
4520 void
4521 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
4522 {
4523 	if (zio == NULL || zio->io_error == 0)
4524 		bcopy(buf->b_data, arg, arc_buf_size(buf));
4525 	arc_buf_destroy(buf, arg);
4526 }
4527 
4528 /* a generic arc_done_func_t */
4529 void
4530 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
4531 {
4532 	arc_buf_t **bufp = arg;
4533 	if (zio && zio->io_error) {
4534 		arc_buf_destroy(buf, arg);
4535 		*bufp = NULL;
4536 	} else {
4537 		*bufp = buf;
4538 		ASSERT(buf->b_data);
4539 	}
4540 }
4541 
4542 static void
4543 arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
4544 {
4545 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
4546 		ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
4547 		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
4548 	} else {
4549 		if (HDR_COMPRESSION_ENABLED(hdr)) {
4550 			ASSERT3U(HDR_GET_COMPRESS(hdr), ==,
4551 			    BP_GET_COMPRESS(bp));
4552 		}
4553 		ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
4554 		ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
4555 	}
4556 }
4557 
4558 static void
4559 arc_read_done(zio_t *zio)
4560 {
4561 	arc_buf_hdr_t	*hdr = zio->io_private;
4562 	kmutex_t	*hash_lock = NULL;
4563 	arc_callback_t	*callback_list;
4564 	arc_callback_t	*acb;
4565 	boolean_t	freeable = B_FALSE;
4566 	boolean_t	no_zio_error = (zio->io_error == 0);
4567 
4568 	/*
4569 	 * The hdr was inserted into hash-table and removed from lists
4570 	 * prior to starting I/O.  We should find this header, since
4571 	 * it's in the hash table, and it should be legit since it's
4572 	 * not possible to evict it during the I/O.  The only possible
4573 	 * reason for it not to be found is if we were freed during the
4574 	 * read.
4575 	 */
4576 	if (HDR_IN_HASH_TABLE(hdr)) {
4577 		ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
4578 		ASSERT3U(hdr->b_dva.dva_word[0], ==,
4579 		    BP_IDENTITY(zio->io_bp)->dva_word[0]);
4580 		ASSERT3U(hdr->b_dva.dva_word[1], ==,
4581 		    BP_IDENTITY(zio->io_bp)->dva_word[1]);
4582 
4583 		arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
4584 		    &hash_lock);
4585 
4586 		ASSERT((found == hdr &&
4587 		    DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
4588 		    (found == hdr && HDR_L2_READING(hdr)));
4589 		ASSERT3P(hash_lock, !=, NULL);
4590 	}
4591 
4592 	if (no_zio_error) {
4593 		/* byteswap if necessary */
4594 		if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
4595 			if (BP_GET_LEVEL(zio->io_bp) > 0) {
4596 				hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
4597 			} else {
4598 				hdr->b_l1hdr.b_byteswap =
4599 				    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
4600 			}
4601 		} else {
4602 			hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
4603 		}
4604 	}
4605 
4606 	arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
4607 	if (l2arc_noprefetch && HDR_PREFETCH(hdr))
4608 		arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
4609 
4610 	callback_list = hdr->b_l1hdr.b_acb;
4611 	ASSERT3P(callback_list, !=, NULL);
4612 
4613 	if (hash_lock && no_zio_error && hdr->b_l1hdr.b_state == arc_anon) {
4614 		/*
4615 		 * Only call arc_access on anonymous buffers.  This is because
4616 		 * if we've issued an I/O for an evicted buffer, we've already
4617 		 * called arc_access (to prevent any simultaneous readers from
4618 		 * getting confused).
4619 		 */
4620 		arc_access(hdr, hash_lock);
4621 	}
4622 
4623 	/*
4624 	 * If a read request has a callback (i.e. acb_done is not NULL), then we
4625 	 * make a buf containing the data according to the parameters which were
4626 	 * passed in. The implementation of arc_buf_alloc_impl() ensures that we
4627 	 * aren't needlessly decompressing the data multiple times.
4628 	 */
4629 	int callback_cnt = 0;
4630 	for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
4631 		if (!acb->acb_done)
4632 			continue;
4633 
4634 		/* This is a demand read since prefetches don't use callbacks */
4635 		callback_cnt++;
4636 
4637 		int error = arc_buf_alloc_impl(hdr, acb->acb_private,
4638 		    acb->acb_compressed, no_zio_error, &acb->acb_buf);
4639 		if (no_zio_error) {
4640 			zio->io_error = error;
4641 		}
4642 	}
4643 	hdr->b_l1hdr.b_acb = NULL;
4644 	arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
4645 	if (callback_cnt == 0) {
4646 		ASSERT(HDR_PREFETCH(hdr));
4647 		ASSERT0(hdr->b_l1hdr.b_bufcnt);
4648 		ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
4649 	}
4650 
4651 	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
4652 	    callback_list != NULL);
4653 
4654 	if (no_zio_error) {
4655 		arc_hdr_verify(hdr, zio->io_bp);
4656 	} else {
4657 		arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
4658 		if (hdr->b_l1hdr.b_state != arc_anon)
4659 			arc_change_state(arc_anon, hdr, hash_lock);
4660 		if (HDR_IN_HASH_TABLE(hdr))
4661 			buf_hash_remove(hdr);
4662 		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
4663 	}
4664 
4665 	/*
4666 	 * Broadcast before we drop the hash_lock to avoid the possibility
4667 	 * that the hdr (and hence the cv) might be freed before we get to
4668 	 * the cv_broadcast().
4669 	 */
4670 	cv_broadcast(&hdr->b_l1hdr.b_cv);
4671 
4672 	if (hash_lock != NULL) {
4673 		mutex_exit(hash_lock);
4674 	} else {
4675 		/*
4676 		 * This block was freed while we waited for the read to
4677 		 * complete.  It has been removed from the hash table and
4678 		 * moved to the anonymous state (so that it won't show up
4679 		 * in the cache).
4680 		 */
4681 		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
4682 		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
4683 	}
4684 
4685 	/* execute each callback and free its structure */
4686 	while ((acb = callback_list) != NULL) {
4687 		if (acb->acb_done)
4688 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
4689 
4690 		if (acb->acb_zio_dummy != NULL) {
4691 			acb->acb_zio_dummy->io_error = zio->io_error;
4692 			zio_nowait(acb->acb_zio_dummy);
4693 		}
4694 
4695 		callback_list = acb->acb_next;
4696 		kmem_free(acb, sizeof (arc_callback_t));
4697 	}
4698 
4699 	if (freeable)
4700 		arc_hdr_destroy(hdr);
4701 }
4702 
4703 /*
4704  * "Read" the block at the specified DVA (in bp) via the
4705  * cache.  If the block is found in the cache, invoke the provided
4706  * callback immediately and return.  Note that the `zio' parameter
4707  * in the callback will be NULL in this case, since no IO was
4708  * required.  If the block is not in the cache pass the read request
4709  * on to the spa with a substitute callback function, so that the
4710  * requested block will be added to the cache.
4711  *
4712  * If a read request arrives for a block that has a read in-progress,
4713  * either wait for the in-progress read to complete (and return the
4714  * results); or, if this is a read with a "done" func, add a record
4715  * to the read to invoke the "done" func when the read completes,
4716  * and return; or just return.
4717  *
4718  * arc_read_done() will invoke all the requested "done" functions
4719  * for readers of this block.
4720  */
4721 int
4722 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
4723     void *private, zio_priority_t priority, int zio_flags,
4724     arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
4725 {
4726 	arc_buf_hdr_t *hdr = NULL;
4727 	kmutex_t *hash_lock = NULL;
4728 	zio_t *rzio;
4729 	uint64_t guid = spa_load_guid(spa);
4730 	boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW) != 0;
4731 
4732 	ASSERT(!BP_IS_EMBEDDED(bp) ||
4733 	    BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
4734 
4735 top:
4736 	if (!BP_IS_EMBEDDED(bp)) {
4737 		/*
4738 		 * Embedded BP's have no DVA and require no I/O to "read".
4739 		 * Create an anonymous arc buf to back it.
4740 		 */
4741 		hdr = buf_hash_find(guid, bp, &hash_lock);
4742 	}
4743 
4744 	if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_pdata != NULL) {
4745 		arc_buf_t *buf = NULL;
4746 		*arc_flags |= ARC_FLAG_CACHED;
4747 
4748 		if (HDR_IO_IN_PROGRESS(hdr)) {
4749 
4750 			if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
4751 			    priority == ZIO_PRIORITY_SYNC_READ) {
4752 				/*
4753 				 * This sync read must wait for an
4754 				 * in-progress async read (e.g. a predictive
4755 				 * prefetch).  Async reads are queued
4756 				 * separately at the vdev_queue layer, so
4757 				 * this is a form of priority inversion.
4758 				 * Ideally, we would "inherit" the demand
4759 				 * i/o's priority by moving the i/o from
4760 				 * the async queue to the synchronous queue,
4761 				 * but there is currently no mechanism to do
4762 				 * so.  Track this so that we can evaluate
4763 				 * the magnitude of this potential performance
4764 				 * problem.
4765 				 *
4766 				 * Note that if the prefetch i/o is already
4767 				 * active (has been issued to the device),
4768 				 * the prefetch improved performance, because
4769 				 * we issued it sooner than we would have
4770 				 * without the prefetch.
4771 				 */
4772 				DTRACE_PROBE1(arc__sync__wait__for__async,
4773 				    arc_buf_hdr_t *, hdr);
4774 				ARCSTAT_BUMP(arcstat_sync_wait_for_async);
4775 			}
4776 			if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
4777 				arc_hdr_clear_flags(hdr,
4778 				    ARC_FLAG_PREDICTIVE_PREFETCH);
4779 			}
4780 
4781 			if (*arc_flags & ARC_FLAG_WAIT) {
4782 				cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
4783 				mutex_exit(hash_lock);
4784 				goto top;
4785 			}
4786 			ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
4787 
4788 			if (done) {
4789 				arc_callback_t *acb = NULL;
4790 
4791 				acb = kmem_zalloc(sizeof (arc_callback_t),
4792 				    KM_SLEEP);
4793 				acb->acb_done = done;
4794 				acb->acb_private = private;
4795 				acb->acb_compressed = compressed_read;
4796 				if (pio != NULL)
4797 					acb->acb_zio_dummy = zio_null(pio,
4798 					    spa, NULL, NULL, NULL, zio_flags);
4799 
4800 				ASSERT3P(acb->acb_done, !=, NULL);
4801 				acb->acb_next = hdr->b_l1hdr.b_acb;
4802 				hdr->b_l1hdr.b_acb = acb;
4803 				mutex_exit(hash_lock);
4804 				return (0);
4805 			}
4806 			mutex_exit(hash_lock);
4807 			return (0);
4808 		}
4809 
4810 		ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
4811 		    hdr->b_l1hdr.b_state == arc_mfu);
4812 
4813 		if (done) {
4814 			if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
4815 				/*
4816 				 * This is a demand read which does not have to
4817 				 * wait for i/o because we did a predictive
4818 				 * prefetch i/o for it, which has completed.
4819 				 */
4820 				DTRACE_PROBE1(
4821 				    arc__demand__hit__predictive__prefetch,
4822 				    arc_buf_hdr_t *, hdr);
4823 				ARCSTAT_BUMP(
4824 				    arcstat_demand_hit_predictive_prefetch);
4825 				arc_hdr_clear_flags(hdr,
4826 				    ARC_FLAG_PREDICTIVE_PREFETCH);
4827 			}
4828 			ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp));
4829 
4830 			/* Get a buf with the desired data in it. */
4831 			VERIFY0(arc_buf_alloc_impl(hdr, private,
4832 			    compressed_read, B_TRUE, &buf));
4833 		} else if (*arc_flags & ARC_FLAG_PREFETCH &&
4834 		    refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
4835 			arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
4836 		}
4837 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
4838 		arc_access(hdr, hash_lock);
4839 		if (*arc_flags & ARC_FLAG_L2CACHE)
4840 			arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
4841 		mutex_exit(hash_lock);
4842 		ARCSTAT_BUMP(arcstat_hits);
4843 		ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
4844 		    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
4845 		    data, metadata, hits);
4846 
4847 		if (done)
4848 			done(NULL, buf, private);
4849 	} else {
4850 		uint64_t lsize = BP_GET_LSIZE(bp);
4851 		uint64_t psize = BP_GET_PSIZE(bp);
4852 		arc_callback_t *acb;
4853 		vdev_t *vd = NULL;
4854 		uint64_t addr = 0;
4855 		boolean_t devw = B_FALSE;
4856 		uint64_t size;
4857 
4858 		if (hdr == NULL) {
4859 			/* this block is not in the cache */
4860 			arc_buf_hdr_t *exists = NULL;
4861 			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
4862 			hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
4863 			    BP_GET_COMPRESS(bp), type);
4864 
4865 			if (!BP_IS_EMBEDDED(bp)) {
4866 				hdr->b_dva = *BP_IDENTITY(bp);
4867 				hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
4868 				exists = buf_hash_insert(hdr, &hash_lock);
4869 			}
4870 			if (exists != NULL) {
4871 				/* somebody beat us to the hash insert */
4872 				mutex_exit(hash_lock);
4873 				buf_discard_identity(hdr);
4874 				arc_hdr_destroy(hdr);
4875 				goto top; /* restart the IO request */
4876 			}
4877 		} else {
4878 			/*
4879 			 * This block is in the ghost cache. If it was L2-only
4880 			 * (and thus didn't have an L1 hdr), we realloc the
4881 			 * header to add an L1 hdr.
4882 			 */
4883 			if (!HDR_HAS_L1HDR(hdr)) {
4884 				hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
4885 				    hdr_full_cache);
4886 			}
4887 			ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
4888 			ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
4889 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
4890 			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
4891 			ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
4892 			ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
4893 
4894 			/*
4895 			 * This is a delicate dance that we play here.
4896 			 * This hdr is in the ghost list so we access it
4897 			 * to move it out of the ghost list before we
4898 			 * initiate the read. If it's a prefetch then
4899 			 * it won't have a callback so we'll remove the
4900 			 * reference that arc_buf_alloc_impl() created. We
4901 			 * do this after we've called arc_access() to
4902 			 * avoid hitting an assert in remove_reference().
4903 			 */
4904 			arc_access(hdr, hash_lock);
4905 			arc_hdr_alloc_pdata(hdr);
4906 		}
4907 		ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
4908 		size = arc_hdr_size(hdr);
4909 
4910 		/*
4911 		 * If compression is enabled on the hdr, then will do
4912 		 * RAW I/O and will store the compressed data in the hdr's
4913 		 * data block. Otherwise, the hdr's data block will contain
4914 		 * the uncompressed data.
4915 		 */
4916 		if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) {
4917 			zio_flags |= ZIO_FLAG_RAW;
4918 		}
4919 
4920 		if (*arc_flags & ARC_FLAG_PREFETCH)
4921 			arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
4922 		if (*arc_flags & ARC_FLAG_L2CACHE)
4923 			arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
4924 		if (BP_GET_LEVEL(bp) > 0)
4925 			arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
4926 		if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
4927 			arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
4928 		ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
4929 
4930 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
4931 		acb->acb_done = done;
4932 		acb->acb_private = private;
4933 		acb->acb_compressed = compressed_read;
4934 
4935 		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
4936 		hdr->b_l1hdr.b_acb = acb;
4937 		arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
4938 
4939 		if (HDR_HAS_L2HDR(hdr) &&
4940 		    (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
4941 			devw = hdr->b_l2hdr.b_dev->l2ad_writing;
4942 			addr = hdr->b_l2hdr.b_daddr;
4943 			/*
4944 			 * Lock out device removal.
4945 			 */
4946 			if (vdev_is_dead(vd) ||
4947 			    !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
4948 				vd = NULL;
4949 		}
4950 
4951 		if (priority == ZIO_PRIORITY_ASYNC_READ)
4952 			arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
4953 		else
4954 			arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
4955 
4956 		if (hash_lock != NULL)
4957 			mutex_exit(hash_lock);
4958 
4959 		/*
4960 		 * At this point, we have a level 1 cache miss.  Try again in
4961 		 * L2ARC if possible.
4962 		 */
4963 		ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
4964 
4965 		DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
4966 		    uint64_t, lsize, zbookmark_phys_t *, zb);
4967 		ARCSTAT_BUMP(arcstat_misses);
4968 		ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
4969 		    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
4970 		    data, metadata, misses);
4971 
4972 		if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
4973 			/*
4974 			 * Read from the L2ARC if the following are true:
4975 			 * 1. The L2ARC vdev was previously cached.
4976 			 * 2. This buffer still has L2ARC metadata.
4977 			 * 3. This buffer isn't currently writing to the L2ARC.
4978 			 * 4. The L2ARC entry wasn't evicted, which may
4979 			 *    also have invalidated the vdev.
4980 			 * 5. This isn't prefetch and l2arc_noprefetch is set.
4981 			 */
4982 			if (HDR_HAS_L2HDR(hdr) &&
4983 			    !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
4984 			    !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
4985 				l2arc_read_callback_t *cb;
4986 
4987 				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
4988 				ARCSTAT_BUMP(arcstat_l2_hits);
4989 
4990 				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
4991 				    KM_SLEEP);
4992 				cb->l2rcb_hdr = hdr;
4993 				cb->l2rcb_bp = *bp;
4994 				cb->l2rcb_zb = *zb;
4995 				cb->l2rcb_flags = zio_flags;
4996 
4997 				ASSERT(addr >= VDEV_LABEL_START_SIZE &&
4998 				    addr + lsize < vd->vdev_psize -
4999 				    VDEV_LABEL_END_SIZE);
5000 
5001 				/*
5002 				 * l2arc read.  The SCL_L2ARC lock will be
5003 				 * released by l2arc_read_done().
5004 				 * Issue a null zio if the underlying buffer
5005 				 * was squashed to zero size by compression.
5006 				 */
5007 				ASSERT3U(HDR_GET_COMPRESS(hdr), !=,
5008 				    ZIO_COMPRESS_EMPTY);
5009 				rzio = zio_read_phys(pio, vd, addr,
5010 				    size, hdr->b_l1hdr.b_pdata,
5011 				    ZIO_CHECKSUM_OFF,
5012 				    l2arc_read_done, cb, priority,
5013 				    zio_flags | ZIO_FLAG_DONT_CACHE |
5014 				    ZIO_FLAG_CANFAIL |
5015 				    ZIO_FLAG_DONT_PROPAGATE |
5016 				    ZIO_FLAG_DONT_RETRY, B_FALSE);
5017 				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
5018 				    zio_t *, rzio);
5019 				ARCSTAT_INCR(arcstat_l2_read_bytes, size);
5020 
5021 				if (*arc_flags & ARC_FLAG_NOWAIT) {
5022 					zio_nowait(rzio);
5023 					return (0);
5024 				}
5025 
5026 				ASSERT(*arc_flags & ARC_FLAG_WAIT);
5027 				if (zio_wait(rzio) == 0)
5028 					return (0);
5029 
5030 				/* l2arc read error; goto zio_read() */
5031 			} else {
5032 				DTRACE_PROBE1(l2arc__miss,
5033 				    arc_buf_hdr_t *, hdr);
5034 				ARCSTAT_BUMP(arcstat_l2_misses);
5035 				if (HDR_L2_WRITING(hdr))
5036 					ARCSTAT_BUMP(arcstat_l2_rw_clash);
5037 				spa_config_exit(spa, SCL_L2ARC, vd);
5038 			}
5039 		} else {
5040 			if (vd != NULL)
5041 				spa_config_exit(spa, SCL_L2ARC, vd);
5042 			if (l2arc_ndev != 0) {
5043 				DTRACE_PROBE1(l2arc__miss,
5044 				    arc_buf_hdr_t *, hdr);
5045 				ARCSTAT_BUMP(arcstat_l2_misses);
5046 			}
5047 		}
5048 
5049 		rzio = zio_read(pio, spa, bp, hdr->b_l1hdr.b_pdata, size,
5050 		    arc_read_done, hdr, priority, zio_flags, zb);
5051 
5052 		if (*arc_flags & ARC_FLAG_WAIT)
5053 			return (zio_wait(rzio));
5054 
5055 		ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
5056 		zio_nowait(rzio);
5057 	}
5058 	return (0);
5059 }
5060 
5061 /*
5062  * Notify the arc that a block was freed, and thus will never be used again.
5063  */
5064 void
5065 arc_freed(spa_t *spa, const blkptr_t *bp)
5066 {
5067 	arc_buf_hdr_t *hdr;
5068 	kmutex_t *hash_lock;
5069 	uint64_t guid = spa_load_guid(spa);
5070 
5071 	ASSERT(!BP_IS_EMBEDDED(bp));
5072 
5073 	hdr = buf_hash_find(guid, bp, &hash_lock);
5074 	if (hdr == NULL)
5075 		return;
5076 
5077 	/*
5078 	 * We might be trying to free a block that is still doing I/O
5079 	 * (i.e. prefetch) or has a reference (i.e. a dedup-ed,
5080 	 * dmu_sync-ed block). If this block is being prefetched, then it
5081 	 * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
5082 	 * until the I/O completes. A block may also have a reference if it is
5083 	 * part of a dedup-ed, dmu_synced write. The dmu_sync() function would
5084 	 * have written the new block to its final resting place on disk but
5085 	 * without the dedup flag set. This would have left the hdr in the MRU
5086 	 * state and discoverable. When the txg finally syncs it detects that
5087 	 * the block was overridden in open context and issues an override I/O.
5088 	 * Since this is a dedup block, the override I/O will determine if the
5089 	 * block is already in the DDT. If so, then it will replace the io_bp
5090 	 * with the bp from the DDT and allow the I/O to finish. When the I/O
5091 	 * reaches the done callback, dbuf_write_override_done, it will
5092 	 * check to see if the io_bp and io_bp_override are identical.
5093 	 * If they are not, then it indicates that the bp was replaced with
5094 	 * the bp in the DDT and the override bp is freed. This allows
5095 	 * us to arrive here with a reference on a block that is being
5096 	 * freed. So if we have an I/O in progress, or a reference to
5097 	 * this hdr, then we don't destroy the hdr.
5098 	 */
5099 	if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
5100 	    refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
5101 		arc_change_state(arc_anon, hdr, hash_lock);
5102 		arc_hdr_destroy(hdr);
5103 		mutex_exit(hash_lock);
5104 	} else {
5105 		mutex_exit(hash_lock);
5106 	}
5107 
5108 }
5109 
5110 /*
5111  * Release this buffer from the cache, making it an anonymous buffer.  This
5112  * must be done after a read and prior to modifying the buffer contents.
5113  * If the buffer has more than one reference, we must make
5114  * a new hdr for the buffer.
5115  */
5116 void
5117 arc_release(arc_buf_t *buf, void *tag)
5118 {
5119 	arc_buf_hdr_t *hdr = buf->b_hdr;
5120 
5121 	/*
5122 	 * It would be nice to assert that if it's DMU metadata (level >
5123 	 * 0 || it's the dnode file), then it must be syncing context.
5124 	 * But we don't know that information at this level.
5125 	 */
5126 
5127 	mutex_enter(&buf->b_evict_lock);
5128 
5129 	ASSERT(HDR_HAS_L1HDR(hdr));
5130 
5131 	/*
5132 	 * We don't grab the hash lock prior to this check, because if
5133 	 * the buffer's header is in the arc_anon state, it won't be
5134 	 * linked into the hash table.
5135 	 */
5136 	if (hdr->b_l1hdr.b_state == arc_anon) {
5137 		mutex_exit(&buf->b_evict_lock);
5138 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
5139 		ASSERT(!HDR_IN_HASH_TABLE(hdr));
5140 		ASSERT(!HDR_HAS_L2HDR(hdr));
5141 		ASSERT(HDR_EMPTY(hdr));
5142 
5143 		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
5144 		ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
5145 		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
5146 
5147 		hdr->b_l1hdr.b_arc_access = 0;
5148 
5149 		/*
5150 		 * If the buf is being overridden then it may already
5151 		 * have a hdr that is not empty.
5152 		 */
5153 		buf_discard_identity(hdr);
5154 		arc_buf_thaw(buf);
5155 
5156 		return;
5157 	}
5158 
5159 	kmutex_t *hash_lock = HDR_LOCK(hdr);
5160 	mutex_enter(hash_lock);
5161 
5162 	/*
5163 	 * This assignment is only valid as long as the hash_lock is
5164 	 * held, we must be careful not to reference state or the
5165 	 * b_state field after dropping the lock.
5166 	 */
5167 	arc_state_t *state = hdr->b_l1hdr.b_state;
5168 	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
5169 	ASSERT3P(state, !=, arc_anon);
5170 
5171 	/* this buffer is not on any list */
5172 	ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
5173 
5174 	if (HDR_HAS_L2HDR(hdr)) {
5175 		mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
5176 
5177 		/*
5178 		 * We have to recheck this conditional again now that
5179 		 * we're holding the l2ad_mtx to prevent a race with
5180 		 * another thread which might be concurrently calling
5181 		 * l2arc_evict(). In that case, l2arc_evict() might have
5182 		 * destroyed the header's L2 portion as we were waiting
5183 		 * to acquire the l2ad_mtx.
5184 		 */
5185 		if (HDR_HAS_L2HDR(hdr))
5186 			arc_hdr_l2hdr_destroy(hdr);
5187 
5188 		mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
5189 	}
5190 
5191 	/*
5192 	 * Do we have more than one buf?
5193 	 */
5194 	if (hdr->b_l1hdr.b_bufcnt > 1) {
5195 		arc_buf_hdr_t *nhdr;
5196 		uint64_t spa = hdr->b_spa;
5197 		uint64_t psize = HDR_GET_PSIZE(hdr);
5198 		uint64_t lsize = HDR_GET_LSIZE(hdr);
5199 		enum zio_compress compress = HDR_GET_COMPRESS(hdr);
5200 		arc_buf_contents_t type = arc_buf_type(hdr);
5201 		VERIFY3U(hdr->b_type, ==, type);
5202 
5203 		ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
5204 		(void) remove_reference(hdr, hash_lock, tag);
5205 
5206 		if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
5207 			ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
5208 			ASSERT(ARC_BUF_LAST(buf));
5209 		}
5210 
5211 		/*
5212 		 * Pull the data off of this hdr and attach it to
5213 		 * a new anonymous hdr. Also find the last buffer
5214 		 * in the hdr's buffer list.
5215 		 */
5216 		arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
5217 		ASSERT3P(lastbuf, !=, NULL);
5218 
5219 		/*
5220 		 * If the current arc_buf_t and the hdr are sharing their data
5221 		 * buffer, then we must stop sharing that block.
5222 		 */
5223 		if (arc_buf_is_shared(buf)) {
5224 			VERIFY(!arc_buf_is_shared(lastbuf));
5225 
5226 			/*
5227 			 * First, sever the block sharing relationship between
5228 			 * buf and the arc_buf_hdr_t.
5229 			 */
5230 			arc_unshare_buf(hdr, buf);
5231 
5232 			/*
5233 			 * Now we need to recreate the hdr's b_pdata. Since we
5234 			 * have lastbuf handy, we try to share with it, but if
5235 			 * we can't then we allocate a new b_pdata and copy the
5236 			 * data from buf into it.
5237 			 */
5238 			if (arc_can_share(hdr, lastbuf)) {
5239 				arc_share_buf(hdr, lastbuf);
5240 			} else {
5241 				arc_hdr_alloc_pdata(hdr);
5242 				bcopy(buf->b_data, hdr->b_l1hdr.b_pdata, psize);
5243 			}
5244 			VERIFY3P(lastbuf->b_data, !=, NULL);
5245 		} else if (HDR_SHARED_DATA(hdr)) {
5246 			/*
5247 			 * Uncompressed shared buffers are always at the end
5248 			 * of the list. Compressed buffers don't have the
5249 			 * same requirements. This makes it hard to
5250 			 * simply assert that the lastbuf is shared so
5251 			 * we rely on the hdr's compression flags to determine
5252 			 * if we have a compressed, shared buffer.
5253 			 */
5254 			ASSERT(arc_buf_is_shared(lastbuf) ||
5255 			    HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
5256 			ASSERT(!ARC_BUF_SHARED(buf));
5257 		}
5258 		ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
5259 		ASSERT3P(state, !=, arc_l2c_only);
5260 
5261 		(void) refcount_remove_many(&state->arcs_size,
5262 		    arc_buf_size(buf), buf);
5263 
5264 		if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
5265 			ASSERT3P(state, !=, arc_l2c_only);
5266 			(void) refcount_remove_many(&state->arcs_esize[type],
5267 			    arc_buf_size(buf), buf);
5268 		}
5269 
5270 		hdr->b_l1hdr.b_bufcnt -= 1;
5271 		arc_cksum_verify(buf);
5272 		arc_buf_unwatch(buf);
5273 
5274 		mutex_exit(hash_lock);
5275 
5276 		/*
5277 		 * Allocate a new hdr. The new hdr will contain a b_pdata
5278 		 * buffer which will be freed in arc_write().
5279 		 */
5280 		nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
5281 		ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
5282 		ASSERT0(nhdr->b_l1hdr.b_bufcnt);
5283 		ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
5284 		VERIFY3U(nhdr->b_type, ==, type);
5285 		ASSERT(!HDR_SHARED_DATA(nhdr));
5286 
5287 		nhdr->b_l1hdr.b_buf = buf;
5288 		nhdr->b_l1hdr.b_bufcnt = 1;
5289 		(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
5290 		buf->b_hdr = nhdr;
5291 
5292 		mutex_exit(&buf->b_evict_lock);
5293 		(void) refcount_add_many(&arc_anon->arcs_size,
5294 		    arc_buf_size(buf), buf);
5295 	} else {
5296 		mutex_exit(&buf->b_evict_lock);
5297 		ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
5298 		/* protected by hash lock, or hdr is on arc_anon */
5299 		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
5300 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
5301 		arc_change_state(arc_anon, hdr, hash_lock);
5302 		hdr->b_l1hdr.b_arc_access = 0;
5303 		mutex_exit(hash_lock);
5304 
5305 		buf_discard_identity(hdr);
5306 		arc_buf_thaw(buf);
5307 	}
5308 }
5309 
5310 int
5311 arc_released(arc_buf_t *buf)
5312 {
5313 	int released;
5314 
5315 	mutex_enter(&buf->b_evict_lock);
5316 	released = (buf->b_data != NULL &&
5317 	    buf->b_hdr->b_l1hdr.b_state == arc_anon);
5318 	mutex_exit(&buf->b_evict_lock);
5319 	return (released);
5320 }
5321 
5322 #ifdef ZFS_DEBUG
5323 int
5324 arc_referenced(arc_buf_t *buf)
5325 {
5326 	int referenced;
5327 
5328 	mutex_enter(&buf->b_evict_lock);
5329 	referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
5330 	mutex_exit(&buf->b_evict_lock);
5331 	return (referenced);
5332 }
5333 #endif
5334 
5335 static void
5336 arc_write_ready(zio_t *zio)
5337 {
5338 	arc_write_callback_t *callback = zio->io_private;
5339 	arc_buf_t *buf = callback->awcb_buf;
5340 	arc_buf_hdr_t *hdr = buf->b_hdr;
5341 	uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp);
5342 
5343 	ASSERT(HDR_HAS_L1HDR(hdr));
5344 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
5345 	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
5346 
5347 	/*
5348 	 * If we're reexecuting this zio because the pool suspended, then
5349 	 * cleanup any state that was previously set the first time the
5350 	 * callback was invoked.
5351 	 */
5352 	if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
5353 		arc_cksum_free(hdr);
5354 		arc_buf_unwatch(buf);
5355 		if (hdr->b_l1hdr.b_pdata != NULL) {
5356 			if (arc_buf_is_shared(buf)) {
5357 				arc_unshare_buf(hdr, buf);
5358 			} else {
5359 				arc_hdr_free_pdata(hdr);
5360 			}
5361 		}
5362 	}
5363 	ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
5364 	ASSERT(!HDR_SHARED_DATA(hdr));
5365 	ASSERT(!arc_buf_is_shared(buf));
5366 
5367 	callback->awcb_ready(zio, buf, callback->awcb_private);
5368 
5369 	if (HDR_IO_IN_PROGRESS(hdr))
5370 		ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
5371 
5372 	arc_cksum_compute(buf);
5373 	arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
5374 
5375 	enum zio_compress compress;
5376 	if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
5377 		compress = ZIO_COMPRESS_OFF;
5378 	} else {
5379 		ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(zio->io_bp));
5380 		compress = BP_GET_COMPRESS(zio->io_bp);
5381 	}
5382 	HDR_SET_PSIZE(hdr, psize);
5383 	arc_hdr_set_compress(hdr, compress);
5384 
5385 	/*
5386 	 * If the hdr is compressed, then copy the compressed
5387 	 * zio contents into arc_buf_hdr_t. Otherwise, copy the original
5388 	 * data buf into the hdr. Ideally, we would like to always copy the
5389 	 * io_data into b_pdata but the user may have disabled compressed
5390 	 * arc thus the on-disk block may or may not match what we maintain
5391 	 * in the hdr's b_pdata field.
5392 	 */
5393 	if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
5394 	    !ARC_BUF_COMPRESSED(buf)) {
5395 		ASSERT3U(BP_GET_COMPRESS(zio->io_bp), !=, ZIO_COMPRESS_OFF);
5396 		ASSERT3U(psize, >, 0);
5397 		arc_hdr_alloc_pdata(hdr);
5398 		bcopy(zio->io_data, hdr->b_l1hdr.b_pdata, psize);
5399 	} else {
5400 		ASSERT3P(buf->b_data, ==, zio->io_orig_data);
5401 		ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
5402 		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
5403 
5404 		/*
5405 		 * This hdr is not compressed so we're able to share
5406 		 * the arc_buf_t data buffer with the hdr.
5407 		 */
5408 		arc_share_buf(hdr, buf);
5409 		ASSERT0(bcmp(zio->io_orig_data, hdr->b_l1hdr.b_pdata,
5410 		    HDR_GET_LSIZE(hdr)));
5411 	}
5412 	arc_hdr_verify(hdr, zio->io_bp);
5413 }
5414 
5415 static void
5416 arc_write_children_ready(zio_t *zio)
5417 {
5418 	arc_write_callback_t *callback = zio->io_private;
5419 	arc_buf_t *buf = callback->awcb_buf;
5420 
5421 	callback->awcb_children_ready(zio, buf, callback->awcb_private);
5422 }
5423 
5424 /*
5425  * The SPA calls this callback for each physical write that happens on behalf
5426  * of a logical write.  See the comment in dbuf_write_physdone() for details.
5427  */
5428 static void
5429 arc_write_physdone(zio_t *zio)
5430 {
5431 	arc_write_callback_t *cb = zio->io_private;
5432 	if (cb->awcb_physdone != NULL)
5433 		cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
5434 }
5435 
5436 static void
5437 arc_write_done(zio_t *zio)
5438 {
5439 	arc_write_callback_t *callback = zio->io_private;
5440 	arc_buf_t *buf = callback->awcb_buf;
5441 	arc_buf_hdr_t *hdr = buf->b_hdr;
5442 
5443 	ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
5444 
5445 	if (zio->io_error == 0) {
5446 		arc_hdr_verify(hdr, zio->io_bp);
5447 
5448 		if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
5449 			buf_discard_identity(hdr);
5450 		} else {
5451 			hdr->b_dva = *BP_IDENTITY(zio->io_bp);
5452 			hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
5453 		}
5454 	} else {
5455 		ASSERT(HDR_EMPTY(hdr));
5456 	}
5457 
5458 	/*
5459 	 * If the block to be written was all-zero or compressed enough to be
5460 	 * embedded in the BP, no write was performed so there will be no
5461 	 * dva/birth/checksum.  The buffer must therefore remain anonymous
5462 	 * (and uncached).
5463 	 */
5464 	if (!HDR_EMPTY(hdr)) {
5465 		arc_buf_hdr_t *exists;
5466 		kmutex_t *hash_lock;
5467 
5468 		ASSERT3U(zio->io_error, ==, 0);
5469 
5470 		arc_cksum_verify(buf);
5471 
5472 		exists = buf_hash_insert(hdr, &hash_lock);
5473 		if (exists != NULL) {
5474 			/*
5475 			 * This can only happen if we overwrite for
5476 			 * sync-to-convergence, because we remove
5477 			 * buffers from the hash table when we arc_free().
5478 			 */
5479 			if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
5480 				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
5481 					panic("bad overwrite, hdr=%p exists=%p",
5482 					    (void *)hdr, (void *)exists);
5483 				ASSERT(refcount_is_zero(
5484 				    &exists->b_l1hdr.b_refcnt));
5485 				arc_change_state(arc_anon, exists, hash_lock);
5486 				mutex_exit(hash_lock);
5487 				arc_hdr_destroy(exists);
5488 				exists = buf_hash_insert(hdr, &hash_lock);
5489 				ASSERT3P(exists, ==, NULL);
5490 			} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
5491 				/* nopwrite */
5492 				ASSERT(zio->io_prop.zp_nopwrite);
5493 				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
5494 					panic("bad nopwrite, hdr=%p exists=%p",
5495 					    (void *)hdr, (void *)exists);
5496 			} else {
5497 				/* Dedup */
5498 				ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
5499 				ASSERT(hdr->b_l1hdr.b_state == arc_anon);
5500 				ASSERT(BP_GET_DEDUP(zio->io_bp));
5501 				ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
5502 			}
5503 		}
5504 		arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
5505 		/* if it's not anon, we are doing a scrub */
5506 		if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
5507 			arc_access(hdr, hash_lock);
5508 		mutex_exit(hash_lock);
5509 	} else {
5510 		arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
5511 	}
5512 
5513 	ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
5514 	callback->awcb_done(zio, buf, callback->awcb_private);
5515 
5516 	kmem_free(callback, sizeof (arc_write_callback_t));
5517 }
5518 
5519 zio_t *
5520 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
5521     boolean_t l2arc, const zio_prop_t *zp, arc_done_func_t *ready,
5522     arc_done_func_t *children_ready, arc_done_func_t *physdone,
5523     arc_done_func_t *done, void *private, zio_priority_t priority,
5524     int zio_flags, const zbookmark_phys_t *zb)
5525 {
5526 	arc_buf_hdr_t *hdr = buf->b_hdr;
5527 	arc_write_callback_t *callback;
5528 	zio_t *zio;
5529 
5530 	ASSERT3P(ready, !=, NULL);
5531 	ASSERT3P(done, !=, NULL);
5532 	ASSERT(!HDR_IO_ERROR(hdr));
5533 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
5534 	ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
5535 	ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
5536 	if (l2arc)
5537 		arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
5538 	if (ARC_BUF_COMPRESSED(buf)) {
5539 		ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_OFF);
5540 		ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
5541 		zio_flags |= ZIO_FLAG_RAW;
5542 	}
5543 	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
5544 	callback->awcb_ready = ready;
5545 	callback->awcb_children_ready = children_ready;
5546 	callback->awcb_physdone = physdone;
5547 	callback->awcb_done = done;
5548 	callback->awcb_private = private;
5549 	callback->awcb_buf = buf;
5550 
5551 	/*
5552 	 * The hdr's b_pdata is now stale, free it now. A new data block
5553 	 * will be allocated when the zio pipeline calls arc_write_ready().
5554 	 */
5555 	if (hdr->b_l1hdr.b_pdata != NULL) {
5556 		/*
5557 		 * If the buf is currently sharing the data block with
5558 		 * the hdr then we need to break that relationship here.
5559 		 * The hdr will remain with a NULL data pointer and the
5560 		 * buf will take sole ownership of the block.
5561 		 */
5562 		if (arc_buf_is_shared(buf)) {
5563 			arc_unshare_buf(hdr, buf);
5564 		} else {
5565 			arc_hdr_free_pdata(hdr);
5566 		}
5567 		VERIFY3P(buf->b_data, !=, NULL);
5568 		arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
5569 	}
5570 	ASSERT(!arc_buf_is_shared(buf));
5571 	ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL);
5572 
5573 	zio = zio_write(pio, spa, txg, bp, buf->b_data,
5574 	    HDR_GET_LSIZE(hdr), arc_buf_size(buf), zp, arc_write_ready,
5575 	    (children_ready != NULL) ? arc_write_children_ready : NULL,
5576 	    arc_write_physdone, arc_write_done, callback,
5577 	    priority, zio_flags, zb);
5578 
5579 	return (zio);
5580 }
5581 
5582 static int
5583 arc_memory_throttle(uint64_t reserve, uint64_t txg)
5584 {
5585 #ifdef _KERNEL
5586 	uint64_t available_memory = ptob(freemem);
5587 	static uint64_t page_load = 0;
5588 	static uint64_t last_txg = 0;
5589 
5590 #if defined(__i386)
5591 	available_memory =
5592 	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
5593 #endif
5594 
5595 	if (freemem > physmem * arc_lotsfree_percent / 100)
5596 		return (0);
5597 
5598 	if (txg > last_txg) {
5599 		last_txg = txg;
5600 		page_load = 0;
5601 	}
5602 	/*
5603 	 * If we are in pageout, we know that memory is already tight,
5604 	 * the arc is already going to be evicting, so we just want to
5605 	 * continue to let page writes occur as quickly as possible.
5606 	 */
5607 	if (curproc == proc_pageout) {
5608 		if (page_load > MAX(ptob(minfree), available_memory) / 4)
5609 			return (SET_ERROR(ERESTART));
5610 		/* Note: reserve is inflated, so we deflate */
5611 		page_load += reserve / 8;
5612 		return (0);
5613 	} else if (page_load > 0 && arc_reclaim_needed()) {
5614 		/* memory is low, delay before restarting */
5615 		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
5616 		return (SET_ERROR(EAGAIN));
5617 	}
5618 	page_load = 0;
5619 #endif
5620 	return (0);
5621 }
5622 
5623 void
5624 arc_tempreserve_clear(uint64_t reserve)
5625 {
5626 	atomic_add_64(&arc_tempreserve, -reserve);
5627 	ASSERT((int64_t)arc_tempreserve >= 0);
5628 }
5629 
5630 int
5631 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
5632 {
5633 	int error;
5634 	uint64_t anon_size;
5635 
5636 	if (reserve > arc_c/4 && !arc_no_grow)
5637 		arc_c = MIN(arc_c_max, reserve * 4);
5638 	if (reserve > arc_c)
5639 		return (SET_ERROR(ENOMEM));
5640 
5641 	/*
5642 	 * Don't count loaned bufs as in flight dirty data to prevent long
5643 	 * network delays from blocking transactions that are ready to be
5644 	 * assigned to a txg.
5645 	 */
5646 
5647 	/* assert that it has not wrapped around */
5648 	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
5649 
5650 	anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
5651 	    arc_loaned_bytes), 0);
5652 
5653 	/*
5654 	 * Writes will, almost always, require additional memory allocations
5655 	 * in order to compress/encrypt/etc the data.  We therefore need to
5656 	 * make sure that there is sufficient available memory for this.
5657 	 */
5658 	error = arc_memory_throttle(reserve, txg);
5659 	if (error != 0)
5660 		return (error);
5661 
5662 	/*
5663 	 * Throttle writes when the amount of dirty data in the cache
5664 	 * gets too large.  We try to keep the cache less than half full
5665 	 * of dirty blocks so that our sync times don't grow too large.
5666 	 * Note: if two requests come in concurrently, we might let them
5667 	 * both succeed, when one of them should fail.  Not a huge deal.
5668 	 */
5669 
5670 	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
5671 	    anon_size > arc_c / 4) {
5672 		uint64_t meta_esize =
5673 		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
5674 		uint64_t data_esize =
5675 		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
5676 		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
5677 		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
5678 		    arc_tempreserve >> 10, meta_esize >> 10,
5679 		    data_esize >> 10, reserve >> 10, arc_c >> 10);
5680 		return (SET_ERROR(ERESTART));
5681 	}
5682 	atomic_add_64(&arc_tempreserve, reserve);
5683 	return (0);
5684 }
5685 
5686 static void
5687 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
5688     kstat_named_t *evict_data, kstat_named_t *evict_metadata)
5689 {
5690 	size->value.ui64 = refcount_count(&state->arcs_size);
5691 	evict_data->value.ui64 =
5692 	    refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
5693 	evict_metadata->value.ui64 =
5694 	    refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
5695 }
5696 
5697 static int
5698 arc_kstat_update(kstat_t *ksp, int rw)
5699 {
5700 	arc_stats_t *as = ksp->ks_data;
5701 
5702 	if (rw == KSTAT_WRITE) {
5703 		return (EACCES);
5704 	} else {
5705 		arc_kstat_update_state(arc_anon,
5706 		    &as->arcstat_anon_size,
5707 		    &as->arcstat_anon_evictable_data,
5708 		    &as->arcstat_anon_evictable_metadata);
5709 		arc_kstat_update_state(arc_mru,
5710 		    &as->arcstat_mru_size,
5711 		    &as->arcstat_mru_evictable_data,
5712 		    &as->arcstat_mru_evictable_metadata);
5713 		arc_kstat_update_state(arc_mru_ghost,
5714 		    &as->arcstat_mru_ghost_size,
5715 		    &as->arcstat_mru_ghost_evictable_data,
5716 		    &as->arcstat_mru_ghost_evictable_metadata);
5717 		arc_kstat_update_state(arc_mfu,
5718 		    &as->arcstat_mfu_size,
5719 		    &as->arcstat_mfu_evictable_data,
5720 		    &as->arcstat_mfu_evictable_metadata);
5721 		arc_kstat_update_state(arc_mfu_ghost,
5722 		    &as->arcstat_mfu_ghost_size,
5723 		    &as->arcstat_mfu_ghost_evictable_data,
5724 		    &as->arcstat_mfu_ghost_evictable_metadata);
5725 	}
5726 
5727 	return (0);
5728 }
5729 
5730 /*
5731  * This function *must* return indices evenly distributed between all
5732  * sublists of the multilist. This is needed due to how the ARC eviction
5733  * code is laid out; arc_evict_state() assumes ARC buffers are evenly
5734  * distributed between all sublists and uses this assumption when
5735  * deciding which sublist to evict from and how much to evict from it.
5736  */
5737 unsigned int
5738 arc_state_multilist_index_func(multilist_t *ml, void *obj)
5739 {
5740 	arc_buf_hdr_t *hdr = obj;
5741 
5742 	/*
5743 	 * We rely on b_dva to generate evenly distributed index
5744 	 * numbers using buf_hash below. So, as an added precaution,
5745 	 * let's make sure we never add empty buffers to the arc lists.
5746 	 */
5747 	ASSERT(!HDR_EMPTY(hdr));
5748 
5749 	/*
5750 	 * The assumption here, is the hash value for a given
5751 	 * arc_buf_hdr_t will remain constant throughout it's lifetime
5752 	 * (i.e. it's b_spa, b_dva, and b_birth fields don't change).
5753 	 * Thus, we don't need to store the header's sublist index
5754 	 * on insertion, as this index can be recalculated on removal.
5755 	 *
5756 	 * Also, the low order bits of the hash value are thought to be
5757 	 * distributed evenly. Otherwise, in the case that the multilist
5758 	 * has a power of two number of sublists, each sublists' usage
5759 	 * would not be evenly distributed.
5760 	 */
5761 	return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
5762 	    multilist_get_num_sublists(ml));
5763 }
5764 
5765 static void
5766 arc_state_init(void)
5767 {
5768 	arc_anon = &ARC_anon;
5769 	arc_mru = &ARC_mru;
5770 	arc_mru_ghost = &ARC_mru_ghost;
5771 	arc_mfu = &ARC_mfu;
5772 	arc_mfu_ghost = &ARC_mfu_ghost;
5773 	arc_l2c_only = &ARC_l2c_only;
5774 
5775 	multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
5776 	    sizeof (arc_buf_hdr_t),
5777 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5778 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5779 	multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
5780 	    sizeof (arc_buf_hdr_t),
5781 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5782 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5783 	multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
5784 	    sizeof (arc_buf_hdr_t),
5785 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5786 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5787 	multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
5788 	    sizeof (arc_buf_hdr_t),
5789 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5790 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5791 	multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
5792 	    sizeof (arc_buf_hdr_t),
5793 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5794 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5795 	multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
5796 	    sizeof (arc_buf_hdr_t),
5797 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5798 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5799 	multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
5800 	    sizeof (arc_buf_hdr_t),
5801 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5802 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5803 	multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
5804 	    sizeof (arc_buf_hdr_t),
5805 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5806 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5807 	multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
5808 	    sizeof (arc_buf_hdr_t),
5809 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5810 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5811 	multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
5812 	    sizeof (arc_buf_hdr_t),
5813 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5814 	    zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5815 
5816 	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
5817 	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
5818 	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
5819 	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
5820 	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
5821 	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
5822 	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
5823 	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
5824 	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
5825 	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
5826 	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
5827 	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
5828 
5829 	refcount_create(&arc_anon->arcs_size);
5830 	refcount_create(&arc_mru->arcs_size);
5831 	refcount_create(&arc_mru_ghost->arcs_size);
5832 	refcount_create(&arc_mfu->arcs_size);
5833 	refcount_create(&arc_mfu_ghost->arcs_size);
5834 	refcount_create(&arc_l2c_only->arcs_size);
5835 }
5836 
5837 static void
5838 arc_state_fini(void)
5839 {
5840 	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
5841 	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
5842 	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
5843 	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
5844 	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
5845 	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
5846 	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
5847 	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
5848 	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
5849 	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
5850 	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
5851 	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
5852 
5853 	refcount_destroy(&arc_anon->arcs_size);
5854 	refcount_destroy(&arc_mru->arcs_size);
5855 	refcount_destroy(&arc_mru_ghost->arcs_size);
5856 	refcount_destroy(&arc_mfu->arcs_size);
5857 	refcount_destroy(&arc_mfu_ghost->arcs_size);
5858 	refcount_destroy(&arc_l2c_only->arcs_size);
5859 
5860 	multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
5861 	multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
5862 	multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
5863 	multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
5864 	multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
5865 	multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
5866 	multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
5867 	multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
5868 }
5869 
5870 uint64_t
5871 arc_max_bytes(void)
5872 {
5873 	return (arc_c_max);
5874 }
5875 
5876 void
5877 arc_init(void)
5878 {
5879 	/*
5880 	 * allmem is "all memory that we could possibly use".
5881 	 */
5882 #ifdef _KERNEL
5883 	uint64_t allmem = ptob(physmem - swapfs_minfree);
5884 #else
5885 	uint64_t allmem = (physmem * PAGESIZE) / 2;
5886 #endif
5887 
5888 	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
5889 	cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL);
5890 	cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL);
5891 
5892 	/* Convert seconds to clock ticks */
5893 	arc_min_prefetch_lifespan = 1 * hz;
5894 
5895 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
5896 	arc_c_min = MAX(allmem / 32, 64 << 20);
5897 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
5898 	if (allmem >= 1 << 30)
5899 		arc_c_max = allmem - (1 << 30);
5900 	else
5901 		arc_c_max = arc_c_min;
5902 	arc_c_max = MAX(allmem * 3 / 4, arc_c_max);
5903 
5904 	/*
5905 	 * In userland, there's only the memory pressure that we artificially
5906 	 * create (see arc_available_memory()).  Don't let arc_c get too
5907 	 * small, because it can cause transactions to be larger than
5908 	 * arc_c, causing arc_tempreserve_space() to fail.
5909 	 */
5910 #ifndef _KERNEL
5911 	arc_c_min = arc_c_max / 2;
5912 #endif
5913 
5914 	/*
5915 	 * Allow the tunables to override our calculations if they are
5916 	 * reasonable (ie. over 64MB)
5917 	 */
5918 	if (zfs_arc_max > 64 << 20 && zfs_arc_max < allmem) {
5919 		arc_c_max = zfs_arc_max;
5920 		arc_c_min = MIN(arc_c_min, arc_c_max);
5921 	}
5922 	if (zfs_arc_min > 64 << 20 && zfs_arc_min <= arc_c_max)
5923 		arc_c_min = zfs_arc_min;
5924 
5925 	arc_c = arc_c_max;
5926 	arc_p = (arc_c >> 1);
5927 	arc_size = 0;
5928 
5929 	/* limit meta-data to 1/4 of the arc capacity */
5930 	arc_meta_limit = arc_c_max / 4;
5931 
5932 #ifdef _KERNEL
5933 	/*
5934 	 * Metadata is stored in the kernel's heap.  Don't let us
5935 	 * use more than half the heap for the ARC.
5936 	 */
5937 	arc_meta_limit = MIN(arc_meta_limit,
5938 	    vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 2);
5939 #endif
5940 
5941 	/* Allow the tunable to override if it is reasonable */
5942 	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
5943 		arc_meta_limit = zfs_arc_meta_limit;
5944 
5945 	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
5946 		arc_c_min = arc_meta_limit / 2;
5947 
5948 	if (zfs_arc_meta_min > 0) {
5949 		arc_meta_min = zfs_arc_meta_min;
5950 	} else {
5951 		arc_meta_min = arc_c_min / 2;
5952 	}
5953 
5954 	if (zfs_arc_grow_retry > 0)
5955 		arc_grow_retry = zfs_arc_grow_retry;
5956 
5957 	if (zfs_arc_shrink_shift > 0)
5958 		arc_shrink_shift = zfs_arc_shrink_shift;
5959 
5960 	/*
5961 	 * Ensure that arc_no_grow_shift is less than arc_shrink_shift.
5962 	 */
5963 	if (arc_no_grow_shift >= arc_shrink_shift)
5964 		arc_no_grow_shift = arc_shrink_shift - 1;
5965 
5966 	if (zfs_arc_p_min_shift > 0)
5967 		arc_p_min_shift = zfs_arc_p_min_shift;
5968 
5969 	if (zfs_arc_num_sublists_per_state < 1)
5970 		zfs_arc_num_sublists_per_state = MAX(boot_ncpus, 1);
5971 
5972 	/* if kmem_flags are set, lets try to use less memory */
5973 	if (kmem_debugging())
5974 		arc_c = arc_c / 2;
5975 	if (arc_c < arc_c_min)
5976 		arc_c = arc_c_min;
5977 
5978 	arc_state_init();
5979 	buf_init();
5980 
5981 	arc_reclaim_thread_exit = B_FALSE;
5982 
5983 	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
5984 	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
5985 
5986 	if (arc_ksp != NULL) {
5987 		arc_ksp->ks_data = &arc_stats;
5988 		arc_ksp->ks_update = arc_kstat_update;
5989 		kstat_install(arc_ksp);
5990 	}
5991 
5992 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
5993 	    TS_RUN, minclsyspri);
5994 
5995 	arc_dead = B_FALSE;
5996 	arc_warm = B_FALSE;
5997 
5998 	/*
5999 	 * Calculate maximum amount of dirty data per pool.
6000 	 *
6001 	 * If it has been set by /etc/system, take that.
6002 	 * Otherwise, use a percentage of physical memory defined by
6003 	 * zfs_dirty_data_max_percent (default 10%) with a cap at
6004 	 * zfs_dirty_data_max_max (default 4GB).
6005 	 */
6006 	if (zfs_dirty_data_max == 0) {
6007 		zfs_dirty_data_max = physmem * PAGESIZE *
6008 		    zfs_dirty_data_max_percent / 100;
6009 		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
6010 		    zfs_dirty_data_max_max);
6011 	}
6012 }
6013 
6014 void
6015 arc_fini(void)
6016 {
6017 	mutex_enter(&arc_reclaim_lock);
6018 	arc_reclaim_thread_exit = B_TRUE;
6019 	/*
6020 	 * The reclaim thread will set arc_reclaim_thread_exit back to
6021 	 * B_FALSE when it is finished exiting; we're waiting for that.
6022 	 */
6023 	while (arc_reclaim_thread_exit) {
6024 		cv_signal(&arc_reclaim_thread_cv);
6025 		cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock);
6026 	}
6027 	mutex_exit(&arc_reclaim_lock);
6028 
6029 	/* Use B_TRUE to ensure *all* buffers are evicted */
6030 	arc_flush(NULL, B_TRUE);
6031 
6032 	arc_dead = B_TRUE;
6033 
6034 	if (arc_ksp != NULL) {
6035 		kstat_delete(arc_ksp);
6036 		arc_ksp = NULL;
6037 	}
6038 
6039 	mutex_destroy(&arc_reclaim_lock);
6040 	cv_destroy(&arc_reclaim_thread_cv);
6041 	cv_destroy(&arc_reclaim_waiters_cv);
6042 
6043 	arc_state_fini();
6044 	buf_fini();
6045 
6046 	ASSERT0(arc_loaned_bytes);
6047 }
6048 
6049 /*
6050  * Level 2 ARC
6051  *
6052  * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
6053  * It uses dedicated storage devices to hold cached data, which are populated
6054  * using large infrequent writes.  The main role of this cache is to boost
6055  * the performance of random read workloads.  The intended L2ARC devices
6056  * include short-stroked disks, solid state disks, and other media with
6057  * substantially faster read latency than disk.
6058  *
6059  *                 +-----------------------+
6060  *                 |         ARC           |
6061  *                 +-----------------------+
6062  *                    |         ^     ^
6063  *                    |         |     |
6064  *      l2arc_feed_thread()    arc_read()
6065  *                    |         |     |
6066  *                    |  l2arc read   |
6067  *                    V         |     |
6068  *               +---------------+    |
6069  *               |     L2ARC     |    |
6070  *               +---------------+    |
6071  *                   |    ^           |
6072  *          l2arc_write() |           |
6073  *                   |    |           |
6074  *                   V    |           |
6075  *                 +-------+      +-------+
6076  *                 | vdev  |      | vdev  |
6077  *                 | cache |      | cache |
6078  *                 +-------+      +-------+
6079  *                 +=========+     .-----.
6080  *                 :  L2ARC  :    |-_____-|
6081  *                 : devices :    | Disks |
6082  *                 +=========+    `-_____-'
6083  *
6084  * Read requests are satisfied from the following sources, in order:
6085  *
6086  *	1) ARC
6087  *	2) vdev cache of L2ARC devices
6088  *	3) L2ARC devices
6089  *	4) vdev cache of disks
6090  *	5) disks
6091  *
6092  * Some L2ARC device types exhibit extremely slow write performance.
6093  * To accommodate for this there are some significant differences between
6094  * the L2ARC and traditional cache design:
6095  *
6096  * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
6097  * the ARC behave as usual, freeing buffers and placing headers on ghost
6098  * lists.  The ARC does not send buffers to the L2ARC during eviction as
6099  * this would add inflated write latencies for all ARC memory pressure.
6100  *
6101  * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
6102  * It does this by periodically scanning buffers from the eviction-end of
6103  * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
6104  * not already there. It scans until a headroom of buffers is satisfied,
6105  * which itself is a buffer for ARC eviction. If a compressible buffer is
6106  * found during scanning and selected for writing to an L2ARC device, we
6107  * temporarily boost scanning headroom during the next scan cycle to make
6108  * sure we adapt to compression effects (which might significantly reduce
6109  * the data volume we write to L2ARC). The thread that does this is
6110  * l2arc_feed_thread(), illustrated below; example sizes are included to
6111  * provide a better sense of ratio than this diagram:
6112  *
6113  *	       head -->                        tail
6114  *	        +---------------------+----------+
6115  *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
6116  *	        +---------------------+----------+   |   o L2ARC eligible
6117  *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
6118  *	        +---------------------+----------+   |
6119  *	             15.9 Gbytes      ^ 32 Mbytes    |
6120  *	                           headroom          |
6121  *	                                      l2arc_feed_thread()
6122  *	                                             |
6123  *	                 l2arc write hand <--[oooo]--'
6124  *	                         |           8 Mbyte
6125  *	                         |          write max
6126  *	                         V
6127  *		  +==============================+
6128  *	L2ARC dev |####|#|###|###|    |####| ... |
6129  *	          +==============================+
6130  *	                     32 Gbytes
6131  *
6132  * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
6133  * evicted, then the L2ARC has cached a buffer much sooner than it probably
6134  * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
6135  * safe to say that this is an uncommon case, since buffers at the end of
6136  * the ARC lists have moved there due to inactivity.
6137  *
6138  * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
6139  * then the L2ARC simply misses copying some buffers.  This serves as a
6140  * pressure valve to prevent heavy read workloads from both stalling the ARC
6141  * with waits and clogging the L2ARC with writes.  This also helps prevent
6142  * the potential for the L2ARC to churn if it attempts to cache content too
6143  * quickly, such as during backups of the entire pool.
6144  *
6145  * 5. After system boot and before the ARC has filled main memory, there are
6146  * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
6147  * lists can remain mostly static.  Instead of searching from tail of these
6148  * lists as pictured, the l2arc_feed_thread() will search from the list heads
6149  * for eligible buffers, greatly increasing its chance of finding them.
6150  *
6151  * The L2ARC device write speed is also boosted during this time so that
6152  * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
6153  * there are no L2ARC reads, and no fear of degrading read performance
6154  * through increased writes.
6155  *
6156  * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
6157  * the vdev queue can aggregate them into larger and fewer writes.  Each
6158  * device is written to in a rotor fashion, sweeping writes through
6159  * available space then repeating.
6160  *
6161  * 7. The L2ARC does not store dirty content.  It never needs to flush
6162  * write buffers back to disk based storage.
6163  *
6164  * 8. If an ARC buffer is written (and dirtied) which also exists in the
6165  * L2ARC, the now stale L2ARC buffer is immediately dropped.
6166  *
6167  * The performance of the L2ARC can be tweaked by a number of tunables, which
6168  * may be necessary for different workloads:
6169  *
6170  *	l2arc_write_max		max write bytes per interval
6171  *	l2arc_write_boost	extra write bytes during device warmup
6172  *	l2arc_noprefetch	skip caching prefetched buffers
6173  *	l2arc_headroom		number of max device writes to precache
6174  *	l2arc_headroom_boost	when we find compressed buffers during ARC
6175  *				scanning, we multiply headroom by this
6176  *				percentage factor for the next scan cycle,
6177  *				since more compressed buffers are likely to
6178  *				be present
6179  *	l2arc_feed_secs		seconds between L2ARC writing
6180  *
6181  * Tunables may be removed or added as future performance improvements are
6182  * integrated, and also may become zpool properties.
6183  *
6184  * There are three key functions that control how the L2ARC warms up:
6185  *
6186  *	l2arc_write_eligible()	check if a buffer is eligible to cache
6187  *	l2arc_write_size()	calculate how much to write
6188  *	l2arc_write_interval()	calculate sleep delay between writes
6189  *
6190  * These three functions determine what to write, how much, and how quickly
6191  * to send writes.
6192  */
6193 
6194 static boolean_t
6195 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
6196 {
6197 	/*
6198 	 * A buffer is *not* eligible for the L2ARC if it:
6199 	 * 1. belongs to a different spa.
6200 	 * 2. is already cached on the L2ARC.
6201 	 * 3. has an I/O in progress (it may be an incomplete read).
6202 	 * 4. is flagged not eligible (zfs property).
6203 	 */
6204 	if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
6205 	    HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
6206 		return (B_FALSE);
6207 
6208 	return (B_TRUE);
6209 }
6210 
6211 static uint64_t
6212 l2arc_write_size(void)
6213 {
6214 	uint64_t size;
6215 
6216 	/*
6217 	 * Make sure our globals have meaningful values in case the user
6218 	 * altered them.
6219 	 */
6220 	size = l2arc_write_max;
6221 	if (size == 0) {
6222 		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
6223 		    "be greater than zero, resetting it to the default (%d)",
6224 		    L2ARC_WRITE_SIZE);
6225 		size = l2arc_write_max = L2ARC_WRITE_SIZE;
6226 	}
6227 
6228 	if (arc_warm == B_FALSE)
6229 		size += l2arc_write_boost;
6230 
6231 	return (size);
6232 
6233 }
6234 
6235 static clock_t
6236 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
6237 {
6238 	clock_t interval, next, now;
6239 
6240 	/*
6241 	 * If the ARC lists are busy, increase our write rate; if the
6242 	 * lists are stale, idle back.  This is achieved by checking
6243 	 * how much we previously wrote - if it was more than half of
6244 	 * what we wanted, schedule the next write much sooner.
6245 	 */
6246 	if (l2arc_feed_again && wrote > (wanted / 2))
6247 		interval = (hz * l2arc_feed_min_ms) / 1000;
6248 	else
6249 		interval = hz * l2arc_feed_secs;
6250 
6251 	now = ddi_get_lbolt();
6252 	next = MAX(now, MIN(now + interval, began + interval));
6253 
6254 	return (next);
6255 }
6256 
6257 /*
6258  * Cycle through L2ARC devices.  This is how L2ARC load balances.
6259  * If a device is returned, this also returns holding the spa config lock.
6260  */
6261 static l2arc_dev_t *
6262 l2arc_dev_get_next(void)
6263 {
6264 	l2arc_dev_t *first, *next = NULL;
6265 
6266 	/*
6267 	 * Lock out the removal of spas (spa_namespace_lock), then removal
6268 	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
6269 	 * both locks will be dropped and a spa config lock held instead.
6270 	 */
6271 	mutex_enter(&spa_namespace_lock);
6272 	mutex_enter(&l2arc_dev_mtx);
6273 
6274 	/* if there are no vdevs, there is nothing to do */
6275 	if (l2arc_ndev == 0)
6276 		goto out;
6277 
6278 	first = NULL;
6279 	next = l2arc_dev_last;
6280 	do {
6281 		/* loop around the list looking for a non-faulted vdev */
6282 		if (next == NULL) {
6283 			next = list_head(l2arc_dev_list);
6284 		} else {
6285 			next = list_next(l2arc_dev_list, next);
6286 			if (next == NULL)
6287 				next = list_head(l2arc_dev_list);
6288 		}
6289 
6290 		/* if we have come back to the start, bail out */
6291 		if (first == NULL)
6292 			first = next;
6293 		else if (next == first)
6294 			break;
6295 
6296 	} while (vdev_is_dead(next->l2ad_vdev));
6297 
6298 	/* if we were unable to find any usable vdevs, return NULL */
6299 	if (vdev_is_dead(next->l2ad_vdev))
6300 		next = NULL;
6301 
6302 	l2arc_dev_last = next;
6303 
6304 out:
6305 	mutex_exit(&l2arc_dev_mtx);
6306 
6307 	/*
6308 	 * Grab the config lock to prevent the 'next' device from being
6309 	 * removed while we are writing to it.
6310 	 */
6311 	if (next != NULL)
6312 		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
6313 	mutex_exit(&spa_namespace_lock);
6314 
6315 	return (next);
6316 }
6317 
6318 /*
6319  * Free buffers that were tagged for destruction.
6320  */
6321 static void
6322 l2arc_do_free_on_write()
6323 {
6324 	list_t *buflist;
6325 	l2arc_data_free_t *df, *df_prev;
6326 
6327 	mutex_enter(&l2arc_free_on_write_mtx);
6328 	buflist = l2arc_free_on_write;
6329 
6330 	for (df = list_tail(buflist); df; df = df_prev) {
6331 		df_prev = list_prev(buflist, df);
6332 		ASSERT3P(df->l2df_data, !=, NULL);
6333 		if (df->l2df_type == ARC_BUFC_METADATA) {
6334 			zio_buf_free(df->l2df_data, df->l2df_size);
6335 		} else {
6336 			ASSERT(df->l2df_type == ARC_BUFC_DATA);
6337 			zio_data_buf_free(df->l2df_data, df->l2df_size);
6338 		}
6339 		list_remove(buflist, df);
6340 		kmem_free(df, sizeof (l2arc_data_free_t));
6341 	}
6342 
6343 	mutex_exit(&l2arc_free_on_write_mtx);
6344 }
6345 
6346 /*
6347  * A write to a cache device has completed.  Update all headers to allow
6348  * reads from these buffers to begin.
6349  */
6350 static void
6351 l2arc_write_done(zio_t *zio)
6352 {
6353 	l2arc_write_callback_t *cb;
6354 	l2arc_dev_t *dev;
6355 	list_t *buflist;
6356 	arc_buf_hdr_t *head, *hdr, *hdr_prev;
6357 	kmutex_t *hash_lock;
6358 	int64_t bytes_dropped = 0;
6359 
6360 	cb = zio->io_private;
6361 	ASSERT3P(cb, !=, NULL);
6362 	dev = cb->l2wcb_dev;
6363 	ASSERT3P(dev, !=, NULL);
6364 	head = cb->l2wcb_head;
6365 	ASSERT3P(head, !=, NULL);
6366 	buflist = &dev->l2ad_buflist;
6367 	ASSERT3P(buflist, !=, NULL);
6368 	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
6369 	    l2arc_write_callback_t *, cb);
6370 
6371 	if (zio->io_error != 0)
6372 		ARCSTAT_BUMP(arcstat_l2_writes_error);
6373 
6374 	/*
6375 	 * All writes completed, or an error was hit.
6376 	 */
6377 top:
6378 	mutex_enter(&dev->l2ad_mtx);
6379 	for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
6380 		hdr_prev = list_prev(buflist, hdr);
6381 
6382 		hash_lock = HDR_LOCK(hdr);
6383 
6384 		/*
6385 		 * We cannot use mutex_enter or else we can deadlock
6386 		 * with l2arc_write_buffers (due to swapping the order
6387 		 * the hash lock and l2ad_mtx are taken).
6388 		 */
6389 		if (!mutex_tryenter(hash_lock)) {
6390 			/*
6391 			 * Missed the hash lock. We must retry so we
6392 			 * don't leave the ARC_FLAG_L2_WRITING bit set.
6393 			 */
6394 			ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
6395 
6396 			/*
6397 			 * We don't want to rescan the headers we've
6398 			 * already marked as having been written out, so
6399 			 * we reinsert the head node so we can pick up
6400 			 * where we left off.
6401 			 */
6402 			list_remove(buflist, head);
6403 			list_insert_after(buflist, hdr, head);
6404 
6405 			mutex_exit(&dev->l2ad_mtx);
6406 
6407 			/*
6408 			 * We wait for the hash lock to become available
6409 			 * to try and prevent busy waiting, and increase
6410 			 * the chance we'll be able to acquire the lock
6411 			 * the next time around.
6412 			 */
6413 			mutex_enter(hash_lock);
6414 			mutex_exit(hash_lock);
6415 			goto top;
6416 		}
6417 
6418 		/*
6419 		 * We could not have been moved into the arc_l2c_only
6420 		 * state while in-flight due to our ARC_FLAG_L2_WRITING
6421 		 * bit being set. Let's just ensure that's being enforced.
6422 		 */
6423 		ASSERT(HDR_HAS_L1HDR(hdr));
6424 
6425 		if (zio->io_error != 0) {
6426 			/*
6427 			 * Error - drop L2ARC entry.
6428 			 */
6429 			list_remove(buflist, hdr);
6430 			arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
6431 
6432 			ARCSTAT_INCR(arcstat_l2_asize, -arc_hdr_size(hdr));
6433 			ARCSTAT_INCR(arcstat_l2_size, -HDR_GET_LSIZE(hdr));
6434 
6435 			bytes_dropped += arc_hdr_size(hdr);
6436 			(void) refcount_remove_many(&dev->l2ad_alloc,
6437 			    arc_hdr_size(hdr), hdr);
6438 		}
6439 
6440 		/*
6441 		 * Allow ARC to begin reads and ghost list evictions to
6442 		 * this L2ARC entry.
6443 		 */
6444 		arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
6445 
6446 		mutex_exit(hash_lock);
6447 	}
6448 
6449 	atomic_inc_64(&l2arc_writes_done);
6450 	list_remove(buflist, head);
6451 	ASSERT(!HDR_HAS_L1HDR(head));
6452 	kmem_cache_free(hdr_l2only_cache, head);
6453 	mutex_exit(&dev->l2ad_mtx);
6454 
6455 	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
6456 
6457 	l2arc_do_free_on_write();
6458 
6459 	kmem_free(cb, sizeof (l2arc_write_callback_t));
6460 }
6461 
6462 /*
6463  * A read to a cache device completed.  Validate buffer contents before
6464  * handing over to the regular ARC routines.
6465  */
6466 static void
6467 l2arc_read_done(zio_t *zio)
6468 {
6469 	l2arc_read_callback_t *cb;
6470 	arc_buf_hdr_t *hdr;
6471 	kmutex_t *hash_lock;
6472 	boolean_t valid_cksum;
6473 
6474 	ASSERT3P(zio->io_vd, !=, NULL);
6475 	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
6476 
6477 	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
6478 
6479 	cb = zio->io_private;
6480 	ASSERT3P(cb, !=, NULL);
6481 	hdr = cb->l2rcb_hdr;
6482 	ASSERT3P(hdr, !=, NULL);
6483 
6484 	hash_lock = HDR_LOCK(hdr);
6485 	mutex_enter(hash_lock);
6486 	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
6487 
6488 	ASSERT3P(zio->io_data, !=, NULL);
6489 
6490 	/*
6491 	 * Check this survived the L2ARC journey.
6492 	 */
6493 	ASSERT3P(zio->io_data, ==, hdr->b_l1hdr.b_pdata);
6494 	zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
6495 	zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
6496 
6497 	valid_cksum = arc_cksum_is_equal(hdr, zio);
6498 	if (valid_cksum && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
6499 		mutex_exit(hash_lock);
6500 		zio->io_private = hdr;
6501 		arc_read_done(zio);
6502 	} else {
6503 		mutex_exit(hash_lock);
6504 		/*
6505 		 * Buffer didn't survive caching.  Increment stats and
6506 		 * reissue to the original storage device.
6507 		 */
6508 		if (zio->io_error != 0) {
6509 			ARCSTAT_BUMP(arcstat_l2_io_error);
6510 		} else {
6511 			zio->io_error = SET_ERROR(EIO);
6512 		}
6513 		if (!valid_cksum)
6514 			ARCSTAT_BUMP(arcstat_l2_cksum_bad);
6515 
6516 		/*
6517 		 * If there's no waiter, issue an async i/o to the primary
6518 		 * storage now.  If there *is* a waiter, the caller must
6519 		 * issue the i/o in a context where it's OK to block.
6520 		 */
6521 		if (zio->io_waiter == NULL) {
6522 			zio_t *pio = zio_unique_parent(zio);
6523 
6524 			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
6525 
6526 			zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp,
6527 			    hdr->b_l1hdr.b_pdata, zio->io_size, arc_read_done,
6528 			    hdr, zio->io_priority, cb->l2rcb_flags,
6529 			    &cb->l2rcb_zb));
6530 		}
6531 	}
6532 
6533 	kmem_free(cb, sizeof (l2arc_read_callback_t));
6534 }
6535 
6536 /*
6537  * This is the list priority from which the L2ARC will search for pages to
6538  * cache.  This is used within loops (0..3) to cycle through lists in the
6539  * desired order.  This order can have a significant effect on cache
6540  * performance.
6541  *
6542  * Currently the metadata lists are hit first, MFU then MRU, followed by
6543  * the data lists.  This function returns a locked list, and also returns
6544  * the lock pointer.
6545  */
6546 static multilist_sublist_t *
6547 l2arc_sublist_lock(int list_num)
6548 {
6549 	multilist_t *ml = NULL;
6550 	unsigned int idx;
6551 
6552 	ASSERT(list_num >= 0 && list_num <= 3);
6553 
6554 	switch (list_num) {
6555 	case 0:
6556 		ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
6557 		break;
6558 	case 1:
6559 		ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
6560 		break;
6561 	case 2:
6562 		ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
6563 		break;
6564 	case 3:
6565 		ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
6566 		break;
6567 	}
6568 
6569 	/*
6570 	 * Return a randomly-selected sublist. This is acceptable
6571 	 * because the caller feeds only a little bit of data for each
6572 	 * call (8MB). Subsequent calls will result in different
6573 	 * sublists being selected.
6574 	 */
6575 	idx = multilist_get_random_index(ml);
6576 	return (multilist_sublist_lock(ml, idx));
6577 }
6578 
6579 /*
6580  * Evict buffers from the device write hand to the distance specified in
6581  * bytes.  This distance may span populated buffers, it may span nothing.
6582  * This is clearing a region on the L2ARC device ready for writing.
6583  * If the 'all' boolean is set, every buffer is evicted.
6584  */
6585 static void
6586 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
6587 {
6588 	list_t *buflist;
6589 	arc_buf_hdr_t *hdr, *hdr_prev;
6590 	kmutex_t *hash_lock;
6591 	uint64_t taddr;
6592 
6593 	buflist = &dev->l2ad_buflist;
6594 
6595 	if (!all && dev->l2ad_first) {
6596 		/*
6597 		 * This is the first sweep through the device.  There is
6598 		 * nothing to evict.
6599 		 */
6600 		return;
6601 	}
6602 
6603 	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
6604 		/*
6605 		 * When nearing the end of the device, evict to the end
6606 		 * before the device write hand jumps to the start.
6607 		 */
6608 		taddr = dev->l2ad_end;
6609 	} else {
6610 		taddr = dev->l2ad_hand + distance;
6611 	}
6612 	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
6613 	    uint64_t, taddr, boolean_t, all);
6614 
6615 top:
6616 	mutex_enter(&dev->l2ad_mtx);
6617 	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
6618 		hdr_prev = list_prev(buflist, hdr);
6619 
6620 		hash_lock = HDR_LOCK(hdr);
6621 
6622 		/*
6623 		 * We cannot use mutex_enter or else we can deadlock
6624 		 * with l2arc_write_buffers (due to swapping the order
6625 		 * the hash lock and l2ad_mtx are taken).
6626 		 */
6627 		if (!mutex_tryenter(hash_lock)) {
6628 			/*
6629 			 * Missed the hash lock.  Retry.
6630 			 */
6631 			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
6632 			mutex_exit(&dev->l2ad_mtx);
6633 			mutex_enter(hash_lock);
6634 			mutex_exit(hash_lock);
6635 			goto top;
6636 		}
6637 
6638 		if (HDR_L2_WRITE_HEAD(hdr)) {
6639 			/*
6640 			 * We hit a write head node.  Leave it for
6641 			 * l2arc_write_done().
6642 			 */
6643 			list_remove(buflist, hdr);
6644 			mutex_exit(hash_lock);
6645 			continue;
6646 		}
6647 
6648 		if (!all && HDR_HAS_L2HDR(hdr) &&
6649 		    (hdr->b_l2hdr.b_daddr > taddr ||
6650 		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
6651 			/*
6652 			 * We've evicted to the target address,
6653 			 * or the end of the device.
6654 			 */
6655 			mutex_exit(hash_lock);
6656 			break;
6657 		}
6658 
6659 		ASSERT(HDR_HAS_L2HDR(hdr));
6660 		if (!HDR_HAS_L1HDR(hdr)) {
6661 			ASSERT(!HDR_L2_READING(hdr));
6662 			/*
6663 			 * This doesn't exist in the ARC.  Destroy.
6664 			 * arc_hdr_destroy() will call list_remove()
6665 			 * and decrement arcstat_l2_size.
6666 			 */
6667 			arc_change_state(arc_anon, hdr, hash_lock);
6668 			arc_hdr_destroy(hdr);
6669 		} else {
6670 			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
6671 			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
6672 			/*
6673 			 * Invalidate issued or about to be issued
6674 			 * reads, since we may be about to write
6675 			 * over this location.
6676 			 */
6677 			if (HDR_L2_READING(hdr)) {
6678 				ARCSTAT_BUMP(arcstat_l2_evict_reading);
6679 				arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
6680 			}
6681 
6682 			/* Ensure this header has finished being written */
6683 			ASSERT(!HDR_L2_WRITING(hdr));
6684 
6685 			arc_hdr_l2hdr_destroy(hdr);
6686 		}
6687 		mutex_exit(hash_lock);
6688 	}
6689 	mutex_exit(&dev->l2ad_mtx);
6690 }
6691 
6692 /*
6693  * Find and write ARC buffers to the L2ARC device.
6694  *
6695  * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
6696  * for reading until they have completed writing.
6697  * The headroom_boost is an in-out parameter used to maintain headroom boost
6698  * state between calls to this function.
6699  *
6700  * Returns the number of bytes actually written (which may be smaller than
6701  * the delta by which the device hand has changed due to alignment).
6702  */
6703 static uint64_t
6704 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
6705 {
6706 	arc_buf_hdr_t *hdr, *hdr_prev, *head;
6707 	uint64_t write_asize, write_psize, write_sz, headroom;
6708 	boolean_t full;
6709 	l2arc_write_callback_t *cb;
6710 	zio_t *pio, *wzio;
6711 	uint64_t guid = spa_load_guid(spa);
6712 
6713 	ASSERT3P(dev->l2ad_vdev, !=, NULL);
6714 
6715 	pio = NULL;
6716 	write_sz = write_asize = write_psize = 0;
6717 	full = B_FALSE;
6718 	head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
6719 	arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
6720 
6721 	/*
6722 	 * Copy buffers for L2ARC writing.
6723 	 */
6724 	for (int try = 0; try <= 3; try++) {
6725 		multilist_sublist_t *mls = l2arc_sublist_lock(try);
6726 		uint64_t passed_sz = 0;
6727 
6728 		/*
6729 		 * L2ARC fast warmup.
6730 		 *
6731 		 * Until the ARC is warm and starts to evict, read from the
6732 		 * head of the ARC lists rather than the tail.
6733 		 */
6734 		if (arc_warm == B_FALSE)
6735 			hdr = multilist_sublist_head(mls);
6736 		else
6737 			hdr = multilist_sublist_tail(mls);
6738 
6739 		headroom = target_sz * l2arc_headroom;
6740 		if (zfs_compressed_arc_enabled)
6741 			headroom = (headroom * l2arc_headroom_boost) / 100;
6742 
6743 		for (; hdr; hdr = hdr_prev) {
6744 			kmutex_t *hash_lock;
6745 
6746 			if (arc_warm == B_FALSE)
6747 				hdr_prev = multilist_sublist_next(mls, hdr);
6748 			else
6749 				hdr_prev = multilist_sublist_prev(mls, hdr);
6750 
6751 			hash_lock = HDR_LOCK(hdr);
6752 			if (!mutex_tryenter(hash_lock)) {
6753 				/*
6754 				 * Skip this buffer rather than waiting.
6755 				 */
6756 				continue;
6757 			}
6758 
6759 			passed_sz += HDR_GET_LSIZE(hdr);
6760 			if (passed_sz > headroom) {
6761 				/*
6762 				 * Searched too far.
6763 				 */
6764 				mutex_exit(hash_lock);
6765 				break;
6766 			}
6767 
6768 			if (!l2arc_write_eligible(guid, hdr)) {
6769 				mutex_exit(hash_lock);
6770 				continue;
6771 			}
6772 
6773 			if ((write_asize + HDR_GET_LSIZE(hdr)) > target_sz) {
6774 				full = B_TRUE;
6775 				mutex_exit(hash_lock);
6776 				break;
6777 			}
6778 
6779 			if (pio == NULL) {
6780 				/*
6781 				 * Insert a dummy header on the buflist so
6782 				 * l2arc_write_done() can find where the
6783 				 * write buffers begin without searching.
6784 				 */
6785 				mutex_enter(&dev->l2ad_mtx);
6786 				list_insert_head(&dev->l2ad_buflist, head);
6787 				mutex_exit(&dev->l2ad_mtx);
6788 
6789 				cb = kmem_alloc(
6790 				    sizeof (l2arc_write_callback_t), KM_SLEEP);
6791 				cb->l2wcb_dev = dev;
6792 				cb->l2wcb_head = head;
6793 				pio = zio_root(spa, l2arc_write_done, cb,
6794 				    ZIO_FLAG_CANFAIL);
6795 			}
6796 
6797 			hdr->b_l2hdr.b_dev = dev;
6798 			hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
6799 			arc_hdr_set_flags(hdr,
6800 			    ARC_FLAG_L2_WRITING | ARC_FLAG_HAS_L2HDR);
6801 
6802 			mutex_enter(&dev->l2ad_mtx);
6803 			list_insert_head(&dev->l2ad_buflist, hdr);
6804 			mutex_exit(&dev->l2ad_mtx);
6805 
6806 			/*
6807 			 * We rely on the L1 portion of the header below, so
6808 			 * it's invalid for this header to have been evicted out
6809 			 * of the ghost cache, prior to being written out. The
6810 			 * ARC_FLAG_L2_WRITING bit ensures this won't happen.
6811 			 */
6812 			ASSERT(HDR_HAS_L1HDR(hdr));
6813 
6814 			ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
6815 			ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
6816 			ASSERT3U(arc_hdr_size(hdr), >, 0);
6817 			uint64_t size = arc_hdr_size(hdr);
6818 
6819 			(void) refcount_add_many(&dev->l2ad_alloc, size, hdr);
6820 
6821 			/*
6822 			 * Normally the L2ARC can use the hdr's data, but if
6823 			 * we're sharing data between the hdr and one of its
6824 			 * bufs, L2ARC needs its own copy of the data so that
6825 			 * the ZIO below can't race with the buf consumer. To
6826 			 * ensure that this copy will be available for the
6827 			 * lifetime of the ZIO and be cleaned up afterwards, we
6828 			 * add it to the l2arc_free_on_write queue.
6829 			 */
6830 			void *to_write;
6831 			if (!HDR_SHARED_DATA(hdr)) {
6832 				to_write = hdr->b_l1hdr.b_pdata;
6833 			} else {
6834 				arc_buf_contents_t type = arc_buf_type(hdr);
6835 				if (type == ARC_BUFC_METADATA) {
6836 					to_write = zio_buf_alloc(size);
6837 				} else {
6838 					ASSERT3U(type, ==, ARC_BUFC_DATA);
6839 					to_write = zio_data_buf_alloc(size);
6840 				}
6841 
6842 				bcopy(hdr->b_l1hdr.b_pdata, to_write, size);
6843 				l2arc_free_data_on_write(to_write, size, type);
6844 			}
6845 			wzio = zio_write_phys(pio, dev->l2ad_vdev,
6846 			    hdr->b_l2hdr.b_daddr, size, to_write,
6847 			    ZIO_CHECKSUM_OFF, NULL, hdr,
6848 			    ZIO_PRIORITY_ASYNC_WRITE,
6849 			    ZIO_FLAG_CANFAIL, B_FALSE);
6850 
6851 			write_sz += HDR_GET_LSIZE(hdr);
6852 			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
6853 			    zio_t *, wzio);
6854 
6855 			write_asize += size;
6856 			/*
6857 			 * Keep the clock hand suitably device-aligned.
6858 			 */
6859 			uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
6860 			    size);
6861 			write_psize += asize;
6862 			dev->l2ad_hand += asize;
6863 
6864 			mutex_exit(hash_lock);
6865 
6866 			(void) zio_nowait(wzio);
6867 		}
6868 
6869 		multilist_sublist_unlock(mls);
6870 
6871 		if (full == B_TRUE)
6872 			break;
6873 	}
6874 
6875 	/* No buffers selected for writing? */
6876 	if (pio == NULL) {
6877 		ASSERT0(write_sz);
6878 		ASSERT(!HDR_HAS_L1HDR(head));
6879 		kmem_cache_free(hdr_l2only_cache, head);
6880 		return (0);
6881 	}
6882 
6883 	ASSERT3U(write_asize, <=, target_sz);
6884 	ARCSTAT_BUMP(arcstat_l2_writes_sent);
6885 	ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
6886 	ARCSTAT_INCR(arcstat_l2_size, write_sz);
6887 	ARCSTAT_INCR(arcstat_l2_asize, write_asize);
6888 	vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0);
6889 
6890 	/*
6891 	 * Bump device hand to the device start if it is approaching the end.
6892 	 * l2arc_evict() will already have evicted ahead for this case.
6893 	 */
6894 	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
6895 		dev->l2ad_hand = dev->l2ad_start;
6896 		dev->l2ad_first = B_FALSE;
6897 	}
6898 
6899 	dev->l2ad_writing = B_TRUE;
6900 	(void) zio_wait(pio);
6901 	dev->l2ad_writing = B_FALSE;
6902 
6903 	return (write_asize);
6904 }
6905 
6906 /*
6907  * This thread feeds the L2ARC at regular intervals.  This is the beating
6908  * heart of the L2ARC.
6909  */
6910 static void
6911 l2arc_feed_thread(void)
6912 {
6913 	callb_cpr_t cpr;
6914 	l2arc_dev_t *dev;
6915 	spa_t *spa;
6916 	uint64_t size, wrote;
6917 	clock_t begin, next = ddi_get_lbolt();
6918 
6919 	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
6920 
6921 	mutex_enter(&l2arc_feed_thr_lock);
6922 
6923 	while (l2arc_thread_exit == 0) {
6924 		CALLB_CPR_SAFE_BEGIN(&cpr);
6925 		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
6926 		    next);
6927 		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
6928 		next = ddi_get_lbolt() + hz;
6929 
6930 		/*
6931 		 * Quick check for L2ARC devices.
6932 		 */
6933 		mutex_enter(&l2arc_dev_mtx);
6934 		if (l2arc_ndev == 0) {
6935 			mutex_exit(&l2arc_dev_mtx);
6936 			continue;
6937 		}
6938 		mutex_exit(&l2arc_dev_mtx);
6939 		begin = ddi_get_lbolt();
6940 
6941 		/*
6942 		 * This selects the next l2arc device to write to, and in
6943 		 * doing so the next spa to feed from: dev->l2ad_spa.   This
6944 		 * will return NULL if there are now no l2arc devices or if
6945 		 * they are all faulted.
6946 		 *
6947 		 * If a device is returned, its spa's config lock is also
6948 		 * held to prevent device removal.  l2arc_dev_get_next()
6949 		 * will grab and release l2arc_dev_mtx.
6950 		 */
6951 		if ((dev = l2arc_dev_get_next()) == NULL)
6952 			continue;
6953 
6954 		spa = dev->l2ad_spa;
6955 		ASSERT3P(spa, !=, NULL);
6956 
6957 		/*
6958 		 * If the pool is read-only then force the feed thread to
6959 		 * sleep a little longer.
6960 		 */
6961 		if (!spa_writeable(spa)) {
6962 			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
6963 			spa_config_exit(spa, SCL_L2ARC, dev);
6964 			continue;
6965 		}
6966 
6967 		/*
6968 		 * Avoid contributing to memory pressure.
6969 		 */
6970 		if (arc_reclaim_needed()) {
6971 			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
6972 			spa_config_exit(spa, SCL_L2ARC, dev);
6973 			continue;
6974 		}
6975 
6976 		ARCSTAT_BUMP(arcstat_l2_feeds);
6977 
6978 		size = l2arc_write_size();
6979 
6980 		/*
6981 		 * Evict L2ARC buffers that will be overwritten.
6982 		 */
6983 		l2arc_evict(dev, size, B_FALSE);
6984 
6985 		/*
6986 		 * Write ARC buffers.
6987 		 */
6988 		wrote = l2arc_write_buffers(spa, dev, size);
6989 
6990 		/*
6991 		 * Calculate interval between writes.
6992 		 */
6993 		next = l2arc_write_interval(begin, size, wrote);
6994 		spa_config_exit(spa, SCL_L2ARC, dev);
6995 	}
6996 
6997 	l2arc_thread_exit = 0;
6998 	cv_broadcast(&l2arc_feed_thr_cv);
6999 	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
7000 	thread_exit();
7001 }
7002 
7003 boolean_t
7004 l2arc_vdev_present(vdev_t *vd)
7005 {
7006 	l2arc_dev_t *dev;
7007 
7008 	mutex_enter(&l2arc_dev_mtx);
7009 	for (dev = list_head(l2arc_dev_list); dev != NULL;
7010 	    dev = list_next(l2arc_dev_list, dev)) {
7011 		if (dev->l2ad_vdev == vd)
7012 			break;
7013 	}
7014 	mutex_exit(&l2arc_dev_mtx);
7015 
7016 	return (dev != NULL);
7017 }
7018 
7019 /*
7020  * Add a vdev for use by the L2ARC.  By this point the spa has already
7021  * validated the vdev and opened it.
7022  */
7023 void
7024 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
7025 {
7026 	l2arc_dev_t *adddev;
7027 
7028 	ASSERT(!l2arc_vdev_present(vd));
7029 
7030 	/*
7031 	 * Create a new l2arc device entry.
7032 	 */
7033 	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
7034 	adddev->l2ad_spa = spa;
7035 	adddev->l2ad_vdev = vd;
7036 	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
7037 	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
7038 	adddev->l2ad_hand = adddev->l2ad_start;
7039 	adddev->l2ad_first = B_TRUE;
7040 	adddev->l2ad_writing = B_FALSE;
7041 
7042 	mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
7043 	/*
7044 	 * This is a list of all ARC buffers that are still valid on the
7045 	 * device.
7046 	 */
7047 	list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
7048 	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
7049 
7050 	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
7051 	refcount_create(&adddev->l2ad_alloc);
7052 
7053 	/*
7054 	 * Add device to global list
7055 	 */
7056 	mutex_enter(&l2arc_dev_mtx);
7057 	list_insert_head(l2arc_dev_list, adddev);
7058 	atomic_inc_64(&l2arc_ndev);
7059 	mutex_exit(&l2arc_dev_mtx);
7060 }
7061 
7062 /*
7063  * Remove a vdev from the L2ARC.
7064  */
7065 void
7066 l2arc_remove_vdev(vdev_t *vd)
7067 {
7068 	l2arc_dev_t *dev, *nextdev, *remdev = NULL;
7069 
7070 	/*
7071 	 * Find the device by vdev
7072 	 */
7073 	mutex_enter(&l2arc_dev_mtx);
7074 	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
7075 		nextdev = list_next(l2arc_dev_list, dev);
7076 		if (vd == dev->l2ad_vdev) {
7077 			remdev = dev;
7078 			break;
7079 		}
7080 	}
7081 	ASSERT3P(remdev, !=, NULL);
7082 
7083 	/*
7084 	 * Remove device from global list
7085 	 */
7086 	list_remove(l2arc_dev_list, remdev);
7087 	l2arc_dev_last = NULL;		/* may have been invalidated */
7088 	atomic_dec_64(&l2arc_ndev);
7089 	mutex_exit(&l2arc_dev_mtx);
7090 
7091 	/*
7092 	 * Clear all buflists and ARC references.  L2ARC device flush.
7093 	 */
7094 	l2arc_evict(remdev, 0, B_TRUE);
7095 	list_destroy(&remdev->l2ad_buflist);
7096 	mutex_destroy(&remdev->l2ad_mtx);
7097 	refcount_destroy(&remdev->l2ad_alloc);
7098 	kmem_free(remdev, sizeof (l2arc_dev_t));
7099 }
7100 
7101 void
7102 l2arc_init(void)
7103 {
7104 	l2arc_thread_exit = 0;
7105 	l2arc_ndev = 0;
7106 	l2arc_writes_sent = 0;
7107 	l2arc_writes_done = 0;
7108 
7109 	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
7110 	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
7111 	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
7112 	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
7113 
7114 	l2arc_dev_list = &L2ARC_dev_list;
7115 	l2arc_free_on_write = &L2ARC_free_on_write;
7116 	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
7117 	    offsetof(l2arc_dev_t, l2ad_node));
7118 	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
7119 	    offsetof(l2arc_data_free_t, l2df_list_node));
7120 }
7121 
7122 void
7123 l2arc_fini(void)
7124 {
7125 	/*
7126 	 * This is called from dmu_fini(), which is called from spa_fini();
7127 	 * Because of this, we can assume that all l2arc devices have
7128 	 * already been removed when the pools themselves were removed.
7129 	 */
7130 
7131 	l2arc_do_free_on_write();
7132 
7133 	mutex_destroy(&l2arc_feed_thr_lock);
7134 	cv_destroy(&l2arc_feed_thr_cv);
7135 	mutex_destroy(&l2arc_dev_mtx);
7136 	mutex_destroy(&l2arc_free_on_write_mtx);
7137 
7138 	list_destroy(l2arc_dev_list);
7139 	list_destroy(l2arc_free_on_write);
7140 }
7141 
7142 void
7143 l2arc_start(void)
7144 {
7145 	if (!(spa_mode_global & FWRITE))
7146 		return;
7147 
7148 	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
7149 	    TS_RUN, minclsyspri);
7150 }
7151 
7152 void
7153 l2arc_stop(void)
7154 {
7155 	if (!(spa_mode_global & FWRITE))
7156 		return;
7157 
7158 	mutex_enter(&l2arc_feed_thr_lock);
7159 	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
7160 	l2arc_thread_exit = 1;
7161 	while (l2arc_thread_exit != 0)
7162 		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
7163 	mutex_exit(&l2arc_feed_thr_lock);
7164 }
7165