xref: /illumos-gate/usr/src/uts/common/fs/zfs/arc.c (revision 77ed85091c75f96e0c776b6b222bc51695e3ee0c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * DVA-based Adjustable Replacement Cache
30  *
31  * While much of the theory of operation used here is
32  * based on the self-tuning, low overhead replacement cache
33  * presented by Megiddo and Modha at FAST 2003, there are some
34  * significant differences:
35  *
36  * 1. The Megiddo and Modha model assumes any page is evictable.
37  * Pages in its cache cannot be "locked" into memory.  This makes
38  * the eviction algorithm simple: evict the last page in the list.
39  * This also makes the performance characteristics easy to reason
40  * about.  Our cache is not so simple.  At any given moment, some
41  * subset of the blocks in the cache are un-evictable because we
42  * have handed out a reference to them.  Blocks are only evictable
43  * when there are no external references active.  This makes
44  * eviction far more problematic:  we choose to evict the evictable
45  * blocks that are the "lowest" in the list.
46  *
47  * There are times when it is not possible to evict the requested
48  * space.  In these circumstances we are unable to adjust the cache
49  * size.  To prevent the cache growing unbounded at these times we
50  * implement a "cache throttle" that slows the flow of new data
51  * into the cache until we can make space available.
52  *
53  * 2. The Megiddo and Modha model assumes a fixed cache size.
54  * Pages are evicted when the cache is full and there is a cache
55  * miss.  Our model has a variable sized cache.  It grows with
56  * high use, but also tries to react to memory pressure from the
57  * operating system: decreasing its size when system memory is
58  * tight.
59  *
60  * 3. The Megiddo and Modha model assumes a fixed page size. All
61  * elements of the cache are therefore exactly the same size.  So
62  * when adjusting the cache size following a cache miss, it's simply
63  * a matter of choosing a single page to evict.  In our model, we
64  * have variable sized cache blocks (ranging from 512 bytes to
65  * 128K bytes).  We therefore choose a set of blocks to evict to make
66  * space for a cache miss that approximates as closely as possible
67  * the space used by the new block.
68  *
69  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70  * by N. Megiddo & D. Modha, FAST 2003
71  */
72 
73 /*
74  * The locking model:
75  *
76  * A new reference to a cache buffer can be obtained in two
77  * ways: 1) via a hash table lookup using the DVA as a key,
78  * or 2) via one of the ARC lists.  The arc_read() interface
79  * uses method 1, while the internal arc algorithms for
80  * adjusting the cache use method 2.  We therefore provide two
81  * types of locks: 1) the hash table lock array, and 2) the
82  * arc list locks.
83  *
84  * Buffers do not have their own mutexes; rather, they rely on the
85  * hash table mutexes for the bulk of their protection (i.e. most
86  * fields in the arc_buf_hdr_t are protected by these mutexes).
87  *
88  * buf_hash_find() returns the appropriate mutex (held) when it
89  * locates the requested buffer in the hash table.  It returns
90  * NULL for the mutex if the buffer was not in the table.
91  *
92  * buf_hash_remove() expects the appropriate hash mutex to be
93  * already held before it is invoked.
94  *
95  * Each arc state also has a mutex which is used to protect the
96  * buffer list associated with the state.  When attempting to
97  * obtain a hash table lock while holding an arc list lock you
98  * must use mutex_tryenter() to avoid deadlock.  Also note that
99  * the "top" state mutex must be held before the "bot" state mutex.
100  *
101  * Arc buffers may have an associated eviction callback function.
102  * This function will be invoked prior to removing the buffer (e.g.
103  * in arc_do_user_evicts()).  Note however that the data associated
104  * with the buffer may be evicted prior to the callback.  The callback
105  * must be made with *no locks held* (to prevent deadlock).  Additionally,
106  * the users of callbacks must ensure that their private data is
107  * protected from simultaneous callbacks from arc_buf_evict()
108  * and arc_do_user_evicts().
109  *
110  * Note that the majority of the performance stats are manipulated
111  * with atomic operations.
112  */
113 
114 #include <sys/spa.h>
115 #include <sys/zio.h>
116 #include <sys/zfs_context.h>
117 #include <sys/arc.h>
118 #include <sys/refcount.h>
119 #ifdef _KERNEL
120 #include <sys/vmsystm.h>
121 #include <vm/anon.h>
122 #include <sys/fs/swapnode.h>
123 #include <sys/dnlc.h>
124 #endif
125 #include <sys/callb.h>
126 
127 static kmutex_t		arc_reclaim_thr_lock;
128 static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
129 static uint8_t		arc_thread_exit;
130 
131 #define	ARC_REDUCE_DNLC_PERCENT	3
132 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
133 
134 typedef enum arc_reclaim_strategy {
135 	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
136 	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
137 } arc_reclaim_strategy_t;
138 
139 /* number of seconds before growing cache again */
140 static int		arc_grow_retry = 60;
141 
142 static kmutex_t arc_reclaim_lock;
143 static int arc_dead;
144 
145 /*
146  * Note that buffers can be in one of 5 states:
147  *	ARC_anon	- anonymous (discussed below)
148  *	ARC_mru		- recently used, currently cached
149  *	ARC_mru_ghost	- recently used, no longer in cache
150  *	ARC_mfu		- frequently used, currently cached
151  *	ARC_mfu_ghost	- frequently used, no longer in cache
152  * When there are no active references to a buffer, it is
153  * linked onto one of the lists in arc.  These are the
154  * only buffers that can be evicted or deleted.
155  *
156  * Anonymous buffers are buffers that are not associated with
157  * a DVA.  These are buffers that hold dirty block copies
158  * before they are written to stable storage.  By definition,
159  * they are "ref'd" and are considered part of arc_mru
160  * that cannot be freed.  Generally, they will acquire a DVA
161  * as they are written and migrate onto the arc_mru list.
162  */
163 
164 typedef struct arc_state {
165 	list_t	list;	/* linked list of evictable buffers in state */
166 	uint64_t lsize;	/* total size of buffers in the linked list */
167 	uint64_t size;	/* total size of all buffers in this state */
168 	uint64_t hits;
169 	kmutex_t mtx;
170 } arc_state_t;
171 
172 /* The 5 states: */
173 static arc_state_t ARC_anon;
174 static arc_state_t ARC_mru;
175 static arc_state_t ARC_mru_ghost;
176 static arc_state_t ARC_mfu;
177 static arc_state_t ARC_mfu_ghost;
178 
179 static struct arc {
180 	arc_state_t 	*anon;
181 	arc_state_t	*mru;
182 	arc_state_t	*mru_ghost;
183 	arc_state_t	*mfu;
184 	arc_state_t	*mfu_ghost;
185 	uint64_t	size;		/* Actual total arc size */
186 	uint64_t	p;		/* Target size (in bytes) of mru */
187 	uint64_t	c;		/* Target size of cache (in bytes) */
188 	uint64_t	c_min;		/* Minimum target cache size */
189 	uint64_t	c_max;		/* Maximum target cache size */
190 
191 	/* performance stats */
192 	uint64_t	hits;
193 	uint64_t	misses;
194 	uint64_t	deleted;
195 	uint64_t	skipped;
196 	uint64_t	hash_elements;
197 	uint64_t	hash_elements_max;
198 	uint64_t	hash_collisions;
199 	uint64_t	hash_chains;
200 	uint32_t	hash_chain_max;
201 
202 	int		no_grow;	/* Don't try to grow cache size */
203 } arc;
204 
205 static uint64_t arc_tempreserve;
206 
207 typedef struct arc_callback arc_callback_t;
208 
209 struct arc_callback {
210 	arc_done_func_t		*acb_done;
211 	void			*acb_private;
212 	arc_byteswap_func_t	*acb_byteswap;
213 	arc_buf_t		*acb_buf;
214 	zio_t			*acb_zio_dummy;
215 	arc_callback_t		*acb_next;
216 };
217 
218 struct arc_buf_hdr {
219 	/* immutable */
220 	uint64_t		b_size;
221 	spa_t			*b_spa;
222 
223 	/* protected by hash lock */
224 	dva_t			b_dva;
225 	uint64_t		b_birth;
226 	uint64_t		b_cksum0;
227 
228 	arc_buf_hdr_t		*b_hash_next;
229 	arc_buf_t		*b_buf;
230 	uint32_t		b_flags;
231 	uint32_t		b_datacnt;
232 
233 	kcondvar_t		b_cv;
234 	arc_callback_t		*b_acb;
235 
236 	/* protected by arc state mutex */
237 	arc_state_t		*b_state;
238 	list_node_t		b_arc_node;
239 
240 	/* updated atomically */
241 	clock_t			b_arc_access;
242 
243 	/* self protecting */
244 	refcount_t		b_refcnt;
245 };
246 
247 static arc_buf_t *arc_eviction_list;
248 static kmutex_t arc_eviction_mtx;
249 static void arc_access_and_exit(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
250 
251 #define	GHOST_STATE(state)	\
252 	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)
253 
254 /*
255  * Private ARC flags.  These flags are private ARC only flags that will show up
256  * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
257  * be passed in as arc_flags in things like arc_read.  However, these flags
258  * should never be passed and should only be set by ARC code.  When adding new
259  * public flags, make sure not to smash the private ones.
260  */
261 
262 #define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
263 #define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
264 #define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
265 #define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
266 #define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
267 
268 #define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
269 #define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
270 #define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
271 #define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
272 #define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
273 
274 /*
275  * Hash table routines
276  */
277 
278 #define	HT_LOCK_PAD	64
279 
280 struct ht_lock {
281 	kmutex_t	ht_lock;
282 #ifdef _KERNEL
283 	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
284 #endif
285 };
286 
287 #define	BUF_LOCKS 256
288 typedef struct buf_hash_table {
289 	uint64_t ht_mask;
290 	arc_buf_hdr_t **ht_table;
291 	struct ht_lock ht_locks[BUF_LOCKS];
292 } buf_hash_table_t;
293 
294 static buf_hash_table_t buf_hash_table;
295 
296 #define	BUF_HASH_INDEX(spa, dva, birth) \
297 	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
298 #define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
299 #define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
300 #define	HDR_LOCK(buf) \
301 	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
302 
303 uint64_t zfs_crc64_table[256];
304 
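/*
 * Hash a (spa, DVA, birth txg) triple: compute a CRC64 over the DVA
 * bytes, then fold in the spa pointer and the birth txg.
 */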
305 static uint64_t
306 buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
307 {
308 	uintptr_t spav = (uintptr_t)spa;
309 	uint8_t *vdva = (uint8_t *)dva;
310 	uint64_t crc = -1ULL;
311 	int i;
312 
313 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
314 
315 	for (i = 0; i < sizeof (dva_t); i++)
316 		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
317 
318 	crc ^= (spav>>8) ^ birth;
319 
320 	return (crc);
321 }
322 
323 #define	BUF_EMPTY(buf)						\
324 	((buf)->b_dva.dva_word[0] == 0 &&			\
325 	(buf)->b_dva.dva_word[1] == 0 &&			\
326 	(buf)->b_birth == 0)
327 
328 #define	BUF_EQUAL(spa, dva, birth, buf)				\
329 	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
330 	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
331 	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
332 
333 static arc_buf_hdr_t *
334 buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
335 {
336 	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
337 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
338 	arc_buf_hdr_t *buf;
339 
340 	mutex_enter(hash_lock);
341 	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
342 	    buf = buf->b_hash_next) {
343 		if (BUF_EQUAL(spa, dva, birth, buf)) {
344 			*lockp = hash_lock;
345 			return (buf);
346 		}
347 	}
348 	mutex_exit(hash_lock);
349 	*lockp = NULL;
350 	return (NULL);
351 }
352 
353 /*
354  * Insert an entry into the hash table.  If there is already an element
355  * equal to elem in the hash table, then the already existing element
356  * will be returned and the new element will not be inserted.
357  * Otherwise returns NULL.
358  */
359 static arc_buf_hdr_t *fbufs[4]; /* XXX to find 6341326 */
360 static kthread_t *fbufs_lastthread;
361 static arc_buf_hdr_t *
362 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
363 {
364 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
365 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
366 	arc_buf_hdr_t *fbuf;
367 	uint32_t max, i;
368 
369 	ASSERT(!HDR_IN_HASH_TABLE(buf));
370 	fbufs_lastthread = curthread;
371 	*lockp = hash_lock;
372 	mutex_enter(hash_lock);
373 	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
374 	    fbuf = fbuf->b_hash_next, i++) {
375 		if (i < sizeof (fbufs) / sizeof (fbufs[0]))
376 			fbufs[i] = fbuf;
377 		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
378 			return (fbuf);
379 	}
380 
381 	buf->b_hash_next = buf_hash_table.ht_table[idx];
382 	buf_hash_table.ht_table[idx] = buf;
383 	buf->b_flags |= ARC_IN_HASH_TABLE;
384 
385 	/* collect some hash table performance data */
386 	if (i > 0) {
387 		atomic_add_64(&arc.hash_collisions, 1);
388 		if (i == 1)
389 			atomic_add_64(&arc.hash_chains, 1);
390 	}
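	/*
	 * Lock-free update of the longest-chain statistic: retry the
	 * compare-and-swap until our chain length is no longer the
	 * maximum or until the swap succeeds.
	 */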
391 	while (i > (max = arc.hash_chain_max) &&
392 	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
393 		continue;
394 	}
395 	atomic_add_64(&arc.hash_elements, 1);
396 	if (arc.hash_elements > arc.hash_elements_max)
397 		atomic_add_64(&arc.hash_elements_max, 1);
398 
399 	return (NULL);
400 }
401 
402 static void
403 buf_hash_remove(arc_buf_hdr_t *buf)
404 {
405 	arc_buf_hdr_t *fbuf, **bufp;
406 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
407 
408 	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
409 	ASSERT(HDR_IN_HASH_TABLE(buf));
410 
411 	bufp = &buf_hash_table.ht_table[idx];
412 	while ((fbuf = *bufp) != buf) {
413 		ASSERT(fbuf != NULL);
414 		bufp = &fbuf->b_hash_next;
415 	}
416 	*bufp = buf->b_hash_next;
417 	buf->b_hash_next = NULL;
418 	buf->b_flags &= ~ARC_IN_HASH_TABLE;
419 
420 	/* collect some hash table performance data */
421 	atomic_add_64(&arc.hash_elements, -1);
422 	if (buf_hash_table.ht_table[idx] &&
423 	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
424 		atomic_add_64(&arc.hash_chains, -1);
425 }
426 
427 /*
428  * Global data structures and functions for the buf kmem cache.
429  */
430 static kmem_cache_t *hdr_cache;
431 static kmem_cache_t *buf_cache;
432 
433 static void
434 buf_fini(void)
435 {
436 	int i;
437 
438 	kmem_free(buf_hash_table.ht_table,
439 	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
440 	for (i = 0; i < BUF_LOCKS; i++)
441 		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
442 	kmem_cache_destroy(hdr_cache);
443 	kmem_cache_destroy(buf_cache);
444 }
445 
446 /*
447  * Constructor callback - called when the cache is empty
448  * and a new buf is requested.
449  */
450 /* ARGSUSED */
451 static int
452 hdr_cons(void *vbuf, void *unused, int kmflag)
453 {
454 	arc_buf_hdr_t *buf = vbuf;
455 
456 	bzero(buf, sizeof (arc_buf_hdr_t));
457 	refcount_create(&buf->b_refcnt);
458 	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
459 	return (0);
460 }
461 
462 /*
463  * Destructor callback - called when a cached buf is
464  * no longer required.
465  */
466 /* ARGSUSED */
467 static void
468 hdr_dest(void *vbuf, void *unused)
469 {
470 	arc_buf_hdr_t *buf = vbuf;
471 
472 	refcount_destroy(&buf->b_refcnt);
473 	cv_destroy(&buf->b_cv);
474 }
475 
476 static int arc_reclaim_needed(void);
477 void arc_kmem_reclaim(void);
478 
479 /*
480  * Reclaim callback -- invoked when memory is low.
481  */
482 /* ARGSUSED */
483 static void
484 hdr_recl(void *unused)
485 {
486 	dprintf("hdr_recl called\n");
487 	if (arc_reclaim_needed())
488 		arc_kmem_reclaim();
489 }
490 
491 static void
492 buf_init(void)
493 {
494 	uint64_t *ct;
495 	uint64_t hsize = 1ULL << 12;
496 	int i, j;
497 
498 	/*
499 	 * The hash table is big enough to fill all of physical memory
500 	 * with an average 64K block size.  The table will take up
501  * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
502 	 */
503 	while (hsize * 65536 < physmem * PAGESIZE)
504 		hsize <<= 1;
505 retry:
506 	buf_hash_table.ht_mask = hsize - 1;
507 	buf_hash_table.ht_table =
508 	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
509 	if (buf_hash_table.ht_table == NULL) {
510 		ASSERT(hsize > (1ULL << 8));
511 		hsize >>= 1;
512 		goto retry;
513 	}
514 
515 	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
516 	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
517 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
518 	    0, NULL, NULL, NULL, NULL, NULL, 0);
519 
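	/* fill in the zfs_crc64_table used by buf_hash() */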
520 	for (i = 0; i < 256; i++)
521 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
522 			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
523 
524 	for (i = 0; i < BUF_LOCKS; i++) {
525 		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
526 		    NULL, MUTEX_DEFAULT, NULL);
527 	}
528 }
529 
530 #define	ARC_MINTIME	(hz>>4) /* 62 ms */
531 
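/*
 * Take a reference on a buffer.  On the first reference to a buffer in
 * a non-anonymous state, the buffer becomes un-evictable: it is removed
 * from its state's list and its size is subtracted from the state's
 * lsize (the evictable size).
 */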
532 static void
533 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
534 {
535 	ASSERT(MUTEX_HELD(hash_lock));
536 
537 	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
538 	    (ab->b_state != arc.anon)) {
539 		int delta = ab->b_size * ab->b_datacnt;
540 
541 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
542 		mutex_enter(&ab->b_state->mtx);
543 		ASSERT(refcount_count(&ab->b_refcnt) > 0);
544 		ASSERT(list_link_active(&ab->b_arc_node));
545 		list_remove(&ab->b_state->list, ab);
546 		if (GHOST_STATE(ab->b_state)) {
547 			ASSERT3U(ab->b_datacnt, ==, 0);
548 			ASSERT3P(ab->b_buf, ==, NULL);
549 			delta = ab->b_size;
550 		}
551 		ASSERT(delta > 0);
552 		ASSERT3U(ab->b_state->lsize, >=, delta);
553 		atomic_add_64(&ab->b_state->lsize, -delta);
554 		mutex_exit(&ab->b_state->mtx);
555 	}
556 }
557 
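/*
 * Drop a reference on a buffer.  When the last reference on a buffer in
 * a non-anonymous state is dropped, the buffer becomes evictable again:
 * it is linked back onto its state's list and its size is added to the
 * state's lsize.
 */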
558 static int
559 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
560 {
561 	int cnt;
562 
563 	ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock));
564 	ASSERT(!GHOST_STATE(ab->b_state));
565 
566 	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
567 	    (ab->b_state != arc.anon)) {
568 
569 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
570 		mutex_enter(&ab->b_state->mtx);
571 		ASSERT(!list_link_active(&ab->b_arc_node));
572 		list_insert_head(&ab->b_state->list, ab);
573 		ASSERT(ab->b_datacnt > 0);
574 		atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt);
575 		ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize);
576 		mutex_exit(&ab->b_state->mtx);
577 	}
578 	return (cnt);
579 }
580 
581 /*
582  * Move the supplied buffer to the indicated state.  The mutex
583  * for the buffer must be held by the caller.
584  */
585 static void
586 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
587 {
588 	arc_state_t *old_state = ab->b_state;
589 	int refcnt = refcount_count(&ab->b_refcnt);
590 	int from_delta, to_delta;
591 
592 	ASSERT(MUTEX_HELD(hash_lock));
593 	ASSERT(new_state != old_state);
594 	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
595 	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
596 
597 	from_delta = to_delta = ab->b_datacnt * ab->b_size;
598 
599 	/*
600 	 * If this buffer is evictable, transfer it from the
601 	 * old state list to the new state list.
602 	 */
603 	if (refcnt == 0) {
604 		if (old_state != arc.anon) {
605 			int use_mutex = !MUTEX_HELD(&old_state->mtx);
606 
607 			if (use_mutex)
608 				mutex_enter(&old_state->mtx);
609 
610 			ASSERT(list_link_active(&ab->b_arc_node));
611 			list_remove(&old_state->list, ab);
612 
613 			/* ghost elements have a ghost size */
614 			if (GHOST_STATE(old_state)) {
615 				ASSERT(ab->b_datacnt == 0);
616 				ASSERT(ab->b_buf == NULL);
617 				from_delta = ab->b_size;
618 			}
619 			ASSERT3U(old_state->lsize, >=, from_delta);
620 			atomic_add_64(&old_state->lsize, -from_delta);
621 
622 			if (use_mutex)
623 				mutex_exit(&old_state->mtx);
624 		}
625 		if (new_state != arc.anon) {
626 			int use_mutex = !MUTEX_HELD(&new_state->mtx);
627 
628 			if (use_mutex)
629 				mutex_enter(&new_state->mtx);
630 
631 			list_insert_head(&new_state->list, ab);
632 
633 			/* ghost elements have a ghost size */
634 			if (GHOST_STATE(new_state)) {
635 				ASSERT(ab->b_datacnt == 0);
636 				ASSERT(ab->b_buf == NULL);
637 				to_delta = ab->b_size;
638 			}
639 			atomic_add_64(&new_state->lsize, to_delta);
640 			ASSERT3U(new_state->size + to_delta, >=,
641 			    new_state->lsize);
642 
643 			if (use_mutex)
644 				mutex_exit(&new_state->mtx);
645 		}
646 	}
647 
648 	ASSERT(!BUF_EMPTY(ab));
649 	if (new_state == arc.anon && old_state != arc.anon) {
650 		buf_hash_remove(ab);
651 	}
652 
653 	/*
654 	 * If this buffer isn't being transferred to the MRU-top
655 	 * state, it's safe to clear its prefetch flag
656 	 */
657 	if ((new_state != arc.mru) && (new_state != arc.mru_ghost)) {
658 		ab->b_flags &= ~ARC_PREFETCH;
659 	}
660 
661 	/* adjust state sizes */
662 	if (to_delta)
663 		atomic_add_64(&new_state->size, to_delta);
664 	if (from_delta) {
665 		ASSERT3U(old_state->size, >=, from_delta);
666 		atomic_add_64(&old_state->size, -from_delta);
667 	}
668 	ab->b_state = new_state;
669 }
670 
671 arc_buf_t *
672 arc_buf_alloc(spa_t *spa, int size, void *tag)
673 {
674 	arc_buf_hdr_t *hdr;
675 	arc_buf_t *buf;
676 
677 	ASSERT3U(size, >, 0);
678 	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
679 	ASSERT(BUF_EMPTY(hdr));
680 	hdr->b_size = size;
681 	hdr->b_spa = spa;
682 	hdr->b_state = arc.anon;
683 	hdr->b_arc_access = 0;
684 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
685 	buf->b_hdr = hdr;
686 	buf->b_efunc = NULL;
687 	buf->b_private = NULL;
688 	buf->b_next = NULL;
689 	buf->b_data = zio_buf_alloc(size);
690 	hdr->b_buf = buf;
691 	hdr->b_datacnt = 1;
692 	hdr->b_flags = 0;
693 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
694 	(void) refcount_add(&hdr->b_refcnt, tag);
695 
696 	atomic_add_64(&arc.size, size);
697 	atomic_add_64(&arc.anon->size, size);
698 
699 	return (buf);
700 }
701 
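/*
 * Allocate a fresh data buffer of hdr->b_size bytes, copy old_data into
 * it, and charge the new space to both the total arc size and the
 * hdr's current state.
 */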
702 static void *
703 arc_data_copy(arc_buf_hdr_t *hdr, void *old_data)
704 {
705 	void *new_data = zio_buf_alloc(hdr->b_size);
706 
707 	atomic_add_64(&arc.size, hdr->b_size);
708 	bcopy(old_data, new_data, hdr->b_size);
709 	atomic_add_64(&hdr->b_state->size, hdr->b_size);
710 	if (list_link_active(&hdr->b_arc_node)) {
711 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
712 		atomic_add_64(&hdr->b_state->lsize, hdr->b_size);
713 	}
714 	return (new_data);
715 }
716 
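/*
 * Take a hold on a buffer that may be on the arc eviction list.  The
 * hdr pointer is detached under arc_eviction_mtx so that a racing
 * eviction will leave this buffer alone while we take our reference.
 */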
717 void
718 arc_buf_add_ref(arc_buf_t *buf, void* tag)
719 {
720 	arc_buf_hdr_t *hdr;
721 	kmutex_t *hash_lock;
722 
723 	mutex_enter(&arc_eviction_mtx);
724 	hdr = buf->b_hdr;
725 	if (buf->b_data == NULL) {
726 		/*
727 		 * This buffer is evicted.
728 		 */
729 		mutex_exit(&arc_eviction_mtx);
730 		return;
731 	} else {
732 		/*
733 		 * Prevent this buffer from being evicted
734 		 * while we add a reference.
735 		 */
736 		buf->b_hdr = NULL;
737 	}
738 	mutex_exit(&arc_eviction_mtx);
739 
740 	ASSERT(hdr->b_state != arc.anon);
741 	hash_lock = HDR_LOCK(hdr);
742 	mutex_enter(hash_lock);
743 	ASSERT(!GHOST_STATE(hdr->b_state));
744 	buf->b_hdr = hdr;
745 	add_reference(hdr, hash_lock, tag);
746 	arc_access_and_exit(hdr, hash_lock);
747 	atomic_add_64(&arc.hits, 1);
748 }
749 
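/*
 * Free the data associated with buf.  If `all' is set, also unlink the
 * buf from its hdr and return it to the buf cache.
 */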
750 static void
751 arc_buf_destroy(arc_buf_t *buf, boolean_t all)
752 {
753 	arc_buf_t **bufp;
754 
755 	/* free up data associated with the buf */
756 	if (buf->b_data) {
757 		arc_state_t *state = buf->b_hdr->b_state;
758 		uint64_t size = buf->b_hdr->b_size;
759 
760 		zio_buf_free(buf->b_data, size);
761 		atomic_add_64(&arc.size, -size);
762 		if (list_link_active(&buf->b_hdr->b_arc_node)) {
763 			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
764 			ASSERT(state != arc.anon);
765 			ASSERT3U(state->lsize, >=, size);
766 			atomic_add_64(&state->lsize, -size);
767 		}
768 		ASSERT3U(state->size, >=, size);
769 		atomic_add_64(&state->size, -size);
770 		buf->b_data = NULL;
771 		ASSERT(buf->b_hdr->b_datacnt > 0);
772 		buf->b_hdr->b_datacnt -= 1;
773 	}
774 
775 	/* only remove the buf if requested */
776 	if (!all)
777 		return;
778 
779 	/* remove the buf from the hdr list */
780 	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
781 		continue;
782 	*bufp = buf->b_next;
783 
784 	ASSERT(buf->b_efunc == NULL);
785 
786 	/* clean up the buf */
787 	buf->b_hdr = NULL;
788 	kmem_cache_free(buf_cache, buf);
789 }
790 
791 static void
792 arc_hdr_destroy(arc_buf_hdr_t *hdr)
793 {
794 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
795 	ASSERT3P(hdr->b_state, ==, arc.anon);
796 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
797 
798 	if (!BUF_EMPTY(hdr)) {
799 		ASSERT(!HDR_IN_HASH_TABLE(hdr));
800 		bzero(&hdr->b_dva, sizeof (dva_t));
801 		hdr->b_birth = 0;
802 		hdr->b_cksum0 = 0;
803 	}
804 	while (hdr->b_buf) {
805 		arc_buf_t *buf = hdr->b_buf;
806 
807 		if (buf->b_efunc) {
808 			mutex_enter(&arc_eviction_mtx);
809 			ASSERT(buf->b_hdr != NULL);
810 			arc_buf_destroy(hdr->b_buf, FALSE);
811 			hdr->b_buf = buf->b_next;
812 			buf->b_next = arc_eviction_list;
813 			arc_eviction_list = buf;
814 			mutex_exit(&arc_eviction_mtx);
815 		} else {
816 			arc_buf_destroy(hdr->b_buf, TRUE);
817 		}
818 	}
819 
820 	ASSERT(!list_link_active(&hdr->b_arc_node));
821 	ASSERT3P(hdr->b_hash_next, ==, NULL);
822 	ASSERT3P(hdr->b_acb, ==, NULL);
823 	kmem_cache_free(hdr_cache, hdr);
824 }
825 
826 void
827 arc_buf_free(arc_buf_t *buf, void *tag)
828 {
829 	arc_buf_hdr_t *hdr = buf->b_hdr;
830 	int hashed = hdr->b_state != arc.anon;
831 
832 	ASSERT(buf->b_efunc == NULL);
833 	ASSERT(buf->b_data != NULL);
834 
835 	if (hashed) {
836 		kmutex_t *hash_lock = HDR_LOCK(hdr);
837 
838 		mutex_enter(hash_lock);
839 		(void) remove_reference(hdr, hash_lock, tag);
840 		if (hdr->b_datacnt > 1)
841 			arc_buf_destroy(buf, TRUE);
842 		else
843 			hdr->b_flags |= ARC_BUF_AVAILABLE;
844 		mutex_exit(hash_lock);
845 	} else if (HDR_IO_IN_PROGRESS(hdr)) {
846 		int destroy_hdr;
847 		/*
848 		 * We are in the middle of an async write.  Don't destroy
849 		 * this buffer unless the write completes before we finish
850 		 * decrementing the reference count.
851 		 */
852 		mutex_enter(&arc_eviction_mtx);
853 		(void) remove_reference(hdr, NULL, tag);
854 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
855 		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
856 		mutex_exit(&arc_eviction_mtx);
857 		if (destroy_hdr)
858 			arc_hdr_destroy(hdr);
859 	} else {
860 		if (remove_reference(hdr, NULL, tag) > 0) {
861 			ASSERT(HDR_IO_ERROR(hdr));
862 			arc_buf_destroy(buf, TRUE);
863 		} else {
864 			arc_hdr_destroy(hdr);
865 		}
866 	}
867 }
868 
869 int
870 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
871 {
872 	arc_buf_hdr_t *hdr = buf->b_hdr;
873 	kmutex_t *hash_lock = HDR_LOCK(hdr);
874 	int no_callback = (buf->b_efunc == NULL);
875 
876 	if (hdr->b_state == arc.anon) {
877 		arc_buf_free(buf, tag);
878 		return (no_callback);
879 	}
880 
881 	mutex_enter(hash_lock);
882 	ASSERT(hdr->b_state != arc.anon);
883 	ASSERT(buf->b_data != NULL);
884 
885 	(void) remove_reference(hdr, hash_lock, tag);
886 	if (hdr->b_datacnt > 1) {
887 		if (no_callback)
888 			arc_buf_destroy(buf, TRUE);
889 	} else if (no_callback) {
890 		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
891 		hdr->b_flags |= ARC_BUF_AVAILABLE;
892 	}
893 	ASSERT(no_callback || hdr->b_datacnt > 1 ||
894 	    refcount_is_zero(&hdr->b_refcnt));
895 	mutex_exit(hash_lock);
896 	return (no_callback);
897 }
898 
899 int
900 arc_buf_size(arc_buf_t *buf)
901 {
902 	return (buf->b_hdr->b_size);
903 }
904 
905 /*
906  * Evict buffers from list until we've removed the specified number of
907  * bytes.  Move the removed buffers to the appropriate evict state.
908  */
909 static uint64_t
910 arc_evict(arc_state_t *state, int64_t bytes)
911 {
912 	arc_state_t *evicted_state;
913 	uint64_t bytes_evicted = 0, skipped = 0;
914 	arc_buf_hdr_t *ab, *ab_prev;
915 	kmutex_t *hash_lock;
916 
917 	ASSERT(state == arc.mru || state == arc.mfu);
918 
919 	evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
920 
921 	mutex_enter(&state->mtx);
922 	mutex_enter(&evicted_state->mtx);
923 
924 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
925 		ab_prev = list_prev(&state->list, ab);
926 		hash_lock = HDR_LOCK(ab);
927 		if (mutex_tryenter(hash_lock)) {
928 			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
929 			ASSERT(ab->b_datacnt > 0);
930 			while (ab->b_buf) {
931 				arc_buf_t *buf = ab->b_buf;
932 				if (buf->b_data)
933 					bytes_evicted += ab->b_size;
934 				if (buf->b_efunc) {
935 					mutex_enter(&arc_eviction_mtx);
936 					/*
937 					 * arc_buf_add_ref() could derail
938 					 * this eviction.
939 					 */
940 					if (buf->b_hdr == NULL) {
941 						mutex_exit(&arc_eviction_mtx);
942 						mutex_exit(hash_lock);
943 						goto skip;
944 					}
945 					arc_buf_destroy(buf, FALSE);
946 					ab->b_buf = buf->b_next;
947 					buf->b_next = arc_eviction_list;
948 					arc_eviction_list = buf;
949 					mutex_exit(&arc_eviction_mtx);
950 				} else {
951 					arc_buf_destroy(buf, TRUE);
952 				}
953 			}
954 			ASSERT(ab->b_datacnt == 0);
955 			arc_change_state(evicted_state, ab, hash_lock);
956 			ASSERT(HDR_IN_HASH_TABLE(ab));
957 			ab->b_flags = ARC_IN_HASH_TABLE;
958 			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
959 			mutex_exit(hash_lock);
960 			if (bytes >= 0 && bytes_evicted >= bytes)
961 				break;
962 		} else {
963 skip:
964 			skipped += 1;
965 		}
966 	}
967 	mutex_exit(&evicted_state->mtx);
968 	mutex_exit(&state->mtx);
969 
970 	if (bytes_evicted < bytes)
971 		dprintf("only evicted %lld bytes from %p",
972 		    (longlong_t)bytes_evicted, state);
973 
974 	atomic_add_64(&arc.skipped, skipped);
975 	if (bytes < 0)
976 		return (skipped);
977 	return (bytes_evicted);
978 }
979 
980 /*
981  * Remove buffers from list until we've removed the specified number of
982  * bytes.  Destroy the buffers that are removed.
983  */
984 static void
985 arc_evict_ghost(arc_state_t *state, int64_t bytes)
986 {
987 	arc_buf_hdr_t *ab, *ab_prev;
988 	kmutex_t *hash_lock;
989 	uint64_t bytes_deleted = 0;
990 	uint_t bufs_skipped = 0;
991 
992 	ASSERT(GHOST_STATE(state));
993 top:
994 	mutex_enter(&state->mtx);
995 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
996 		ab_prev = list_prev(&state->list, ab);
997 		hash_lock = HDR_LOCK(ab);
998 		if (mutex_tryenter(hash_lock)) {
999 			ASSERT(ab->b_buf == NULL);
1000 			arc_change_state(arc.anon, ab, hash_lock);
1001 			mutex_exit(hash_lock);
1002 			atomic_add_64(&arc.deleted, 1);
1003 			bytes_deleted += ab->b_size;
1004 			arc_hdr_destroy(ab);
1005 			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1006 			if (bytes >= 0 && bytes_deleted >= bytes)
1007 				break;
1008 		} else {
1009 			if (bytes < 0) {
1010 				mutex_exit(&state->mtx);
1011 				mutex_enter(hash_lock);
1012 				mutex_exit(hash_lock);
1013 				goto top;
1014 			}
1015 			bufs_skipped += 1;
1016 		}
1017 	}
1018 	mutex_exit(&state->mtx);
1019 
1020 	if (bufs_skipped) {
1021 		atomic_add_64(&arc.skipped, bufs_skipped);
1022 		ASSERT(bytes >= 0);
1023 	}
1024 
1025 	if (bytes_deleted < bytes)
1026 		dprintf("only deleted %lld bytes from %p",
1027 		    (longlong_t)bytes_deleted, state);
1028 }
1029 
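/*
 * Evict and delete as needed to restore the cache targets: keep
 * anon+mru within p, anon+mru+mru_ghost within c, the total arc size
 * within c, and arc size plus the ghost lists within 2c.
 */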
1030 static void
1031 arc_adjust(void)
1032 {
1033 	int64_t top_sz, mru_over, arc_over;
1034 
1035 	top_sz = arc.anon->size + arc.mru->size;
1036 
1037 	if (top_sz > arc.p && arc.mru->lsize > 0) {
1038 		int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p);
1039 		(void) arc_evict(arc.mru, toevict);
1040 		top_sz = arc.anon->size + arc.mru->size;
1041 	}
1042 
1043 	mru_over = top_sz + arc.mru_ghost->size - arc.c;
1044 
1045 	if (mru_over > 0) {
1046 		if (arc.mru_ghost->lsize > 0) {
1047 			int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over);
1048 			arc_evict_ghost(arc.mru_ghost, todelete);
1049 		}
1050 	}
1051 
1052 	if ((arc_over = arc.size - arc.c) > 0) {
1053 		int64_t tbl_over;
1054 
1055 		if (arc.mfu->lsize > 0) {
1056 			int64_t toevict = MIN(arc.mfu->lsize, arc_over);
1057 			(void) arc_evict(arc.mfu, toevict);
1058 		}
1059 
1060 		tbl_over = arc.size + arc.mru_ghost->lsize +
1061 		    arc.mfu_ghost->lsize - arc.c*2;
1062 
1063 		if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) {
1064 			int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over);
1065 			arc_evict_ghost(arc.mfu_ghost, todelete);
1066 		}
1067 	}
1068 }
1069 
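/*
 * Run the pending eviction callbacks.  arc_eviction_mtx is dropped
 * around each callback, since eviction callbacks must be invoked with
 * no locks held.
 */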
1070 static void
1071 arc_do_user_evicts(void)
1072 {
1073 	mutex_enter(&arc_eviction_mtx);
1074 	while (arc_eviction_list != NULL) {
1075 		arc_buf_t *buf = arc_eviction_list;
1076 		arc_eviction_list = buf->b_next;
1077 		buf->b_hdr = NULL;
1078 		mutex_exit(&arc_eviction_mtx);
1079 
1080 		if (buf->b_efunc != NULL)
1081 			VERIFY(buf->b_efunc(buf) == 0);
1082 
1083 		buf->b_efunc = NULL;
1084 		buf->b_private = NULL;
1085 		kmem_cache_free(buf_cache, buf);
1086 		mutex_enter(&arc_eviction_mtx);
1087 	}
1088 	mutex_exit(&arc_eviction_mtx);
1089 }
1090 
1091 /*
1092  * Flush all *evictable* data from the cache.
1093  * NOTE: this will not touch "active" (i.e. referenced) data.
1094  */
1095 void
1096 arc_flush(void)
1097 {
1098 	while (arc_evict(arc.mru, -1));
1099 	while (arc_evict(arc.mfu, -1));
1100 
1101 	arc_evict_ghost(arc.mru_ghost, -1);
1102 	arc_evict_ghost(arc.mfu_ghost, -1);
1103 
1104 	mutex_enter(&arc_reclaim_thr_lock);
1105 	arc_do_user_evicts();
1106 	mutex_exit(&arc_reclaim_thr_lock);
1107 	ASSERT(arc_eviction_list == NULL);
1108 }
1109 
1110 void
1111 arc_kmem_reclaim(void)
1112 {
1113 	/* Remove 12.5% */
1114 	/*
1115 	 * We need arc_reclaim_lock because we don't want multiple
1116 	 * threads trying to reclaim concurrently.
1117 	 */
1118 
1119 	/*
1120 	 * umem calls the reclaim func when we destroy the buf cache,
1121 	 * which is after we do arc_fini().  So we set a flag to prevent
1122 	 * accessing the destroyed mutexes and lists.
1123 	 */
1124 	if (arc_dead)
1125 		return;
1126 
1127 	if (arc.c <= arc.c_min)
1128 		return;
1129 
1130 	mutex_enter(&arc_reclaim_lock);
1131 
1132 	atomic_add_64(&arc.c, -(arc.c >> 3));
1133 	atomic_add_64(&arc.p, -(arc.p >> 3));
1134 	if (arc.c > arc.size)
1135 		arc.c = arc.size;
1136 	if (arc.c < arc.c_min)
1137 		arc.c = arc.c_min;
1138 	if (arc.p > arc.c)
1139 		arc.p = (arc.c >> 1);
1140 	ASSERT((int64_t)arc.p >= 0);
1141 
1142 	arc_adjust();
1143 
1144 	mutex_exit(&arc_reclaim_lock);
1145 }
1146 
1147 static int
1148 arc_reclaim_needed(void)
1149 {
1150 	uint64_t extra;
1151 
1152 #ifdef _KERNEL
1153 	/*
1154 	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1155 	 */
1156 	extra = desfree;
1157 
1158 	/*
1159 	 * check that we're out of range of the pageout scanner.  It starts to
1160 	 * schedule paging if freemem is less than lotsfree and needfree.
1161 	 * lotsfree is the high-water mark for pageout, and needfree is the
1162 	 * number of needed free pages.  We add extra pages here to make sure
1163 	 * the scanner doesn't start up while we're freeing memory.
1164 	 */
1165 	if (freemem < lotsfree + needfree + extra)
1166 		return (1);
1167 
1168 	/*
1169 	 * check to make sure that swapfs has enough space so that anon
1170  * reservations can still succeed.  anon_resvmem() checks that the
1171 	 * availrmem is greater than swapfs_minfree, and the number of reserved
1172 	 * swap pages.  We also add a bit of extra here just to prevent
1173 	 * circumstances from getting really dire.
1174 	 */
1175 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1176 		return (1);
1177 
1178 #if defined(__i386)
1179 	/*
1180 	 * If we're on an i386 platform, it's possible that we'll exhaust the
1181 	 * kernel heap space before we ever run out of available physical
1182 	 * memory.  Most checks of the size of the heap_area compare against
1183 	 * tune.t_minarmem, which is the minimum available real memory that we
1184 	 * can have in the system.  However, this is generally fixed at 25 pages
1185 	 * which is so low that it's useless.  In this comparison, we seek to
1186 	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1187  * heap is allocated.  (Or, in the calculation, if less than 1/4th is
1188 	 * free)
1189 	 */
1190 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1191 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1192 		return (1);
1193 #endif
1194 
1195 #else
1196 	if (spa_get_random(100) == 0)
1197 		return (1);
1198 #endif
1199 	return (0);
1200 }
1201 
1202 static void
1203 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1204 {
1205 	size_t			i;
1206 	kmem_cache_t		*prev_cache = NULL;
1207 	extern kmem_cache_t	*zio_buf_cache[];
1208 
1209 #ifdef _KERNEL
1210 	/*
1211 	 * First purge some DNLC entries, in case the DNLC is using
1212 	 * up too much memory.
1213 	 */
1214 	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
1215 
1216 #if defined(__i386)
1217 	/*
1218 	 * Reclaim unused memory from all kmem caches.
1219 	 */
1220 	kmem_reap();
1221 #endif
1222 #endif
1223 
1224 	/*
1225  * An aggressive reclamation will shrink the cache size as well as
1226 	 * reap free buffers from the arc kmem caches.
1227 	 */
1228 	if (strat == ARC_RECLAIM_AGGR)
1229 		arc_kmem_reclaim();
1230 
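	/* reap any free memory held in the per-size zio buffer caches */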
1231 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1232 		if (zio_buf_cache[i] != prev_cache) {
1233 			prev_cache = zio_buf_cache[i];
1234 			kmem_cache_reap_now(zio_buf_cache[i]);
1235 		}
1236 	}
1237 	kmem_cache_reap_now(buf_cache);
1238 	kmem_cache_reap_now(hdr_cache);
1239 }
1240 
1241 static void
1242 arc_reclaim_thread(void)
1243 {
1244 	clock_t			growtime = 0;
1245 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
1246 	callb_cpr_t		cpr;
1247 
1248 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1249 
1250 	mutex_enter(&arc_reclaim_thr_lock);
1251 	while (arc_thread_exit == 0) {
1252 		if (arc_reclaim_needed()) {
1253 
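			/*
			 * If growth is already disabled, alternate between
			 * conservative and aggressive reclaim passes.
			 */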
1254 			if (arc.no_grow) {
1255 				if (last_reclaim == ARC_RECLAIM_CONS) {
1256 					last_reclaim = ARC_RECLAIM_AGGR;
1257 				} else {
1258 					last_reclaim = ARC_RECLAIM_CONS;
1259 				}
1260 			} else {
1261 				arc.no_grow = TRUE;
1262 				last_reclaim = ARC_RECLAIM_AGGR;
1263 				membar_producer();
1264 			}
1265 
1266 			/* reset the growth delay for every reclaim */
1267 			growtime = lbolt + (arc_grow_retry * hz);
1268 
1269 			arc_kmem_reap_now(last_reclaim);
1270 
1271 		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
1272 			arc.no_grow = FALSE;
1273 		}
1274 
1275 		if (arc_eviction_list != NULL)
1276 			arc_do_user_evicts();
1277 
1278 		/* block until needed, or one second, whichever is shorter */
1279 		CALLB_CPR_SAFE_BEGIN(&cpr);
1280 		(void) cv_timedwait(&arc_reclaim_thr_cv,
1281 		    &arc_reclaim_thr_lock, (lbolt + hz));
1282 		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1283 	}
1284 
1285 	arc_thread_exit = 0;
1286 	cv_broadcast(&arc_reclaim_thr_cv);
1287 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
1288 	thread_exit();
1289 }
1290 
1291 /*
1292  * Adapt arc info given the number of bytes we are trying to add and
1293  * the state that we are coming from.  This function is only called
1294  * when we are adding new content to the cache.
1295  */
1296 static void
1297 arc_adapt(int bytes, arc_state_t *state)
1298 {
1299 	int mult;
1300 
1301 	ASSERT(bytes > 0);
1302 	/*
1303 	 * Adapt the target size of the MRU list:
1304 	 *	- if we just hit in the MRU ghost list, then increase
1305 	 *	  the target size of the MRU list.
1306 	 *	- if we just hit in the MFU ghost list, then increase
1307 	 *	  the target size of the MFU list by decreasing the
1308 	 *	  target size of the MRU list.
1309 	 */
1310 	if (state == arc.mru_ghost) {
1311 		mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ?
1312 		    1 : (arc.mfu_ghost->size/arc.mru_ghost->size));
1313 
1314 		arc.p = MIN(arc.c, arc.p + bytes * mult);
1315 	} else if (state == arc.mfu_ghost) {
1316 		mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ?
1317 		    1 : (arc.mru_ghost->size/arc.mfu_ghost->size));
1318 
1319 		arc.p = MAX(0, (int64_t)arc.p - bytes * mult);
1320 	}
1321 	ASSERT((int64_t)arc.p >= 0);
1322 
1323 	if (arc_reclaim_needed()) {
1324 		cv_signal(&arc_reclaim_thr_cv);
1325 		return;
1326 	}
1327 
1328 	if (arc.no_grow)
1329 		return;
1330 
1331 	if (arc.c >= arc.c_max)
1332 		return;
1333 
1334 	/*
1335 	 * If we're within (2 * maxblocksize) bytes of the target
1336 	 * cache size, increment the target cache size
1337 	 */
1338 	if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) {
1339 		atomic_add_64(&arc.c, (int64_t)bytes);
1340 		if (arc.c > arc.c_max)
1341 			arc.c = arc.c_max;
1342 		else if (state == arc.anon)
1343 			atomic_add_64(&arc.p, (int64_t)bytes);
1344 		if (arc.p > arc.c)
1345 			arc.p = arc.c;
1346 	}
1347 	ASSERT((int64_t)arc.p >= 0);
1348 }
1349 
1350 /*
1351  * Check if the cache has reached its limits and eviction is required
1352  * prior to insert.
1353  */
1354 static int
1355 arc_evict_needed()
1356 {
1357 	if (arc_reclaim_needed())
1358 		return (1);
1359 
1360 	return (arc.size > arc.c);
1361 }
1362 
1363 /*
1364  * The state, supplied as the first argument, is going to have something
1365  * inserted on its behalf. So, determine which cache must be victimized to
1366  * satisfy an insertion for this state.  We have the following cases:
1367  *
1368  * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) ->
1369  * In this situation if we're out of space, but the resident size of the MFU is
1370  * under the limit, victimize the MFU cache to satisfy this insertion request.
1371  *
1372  * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) ->
1373  * Here, we've used up all of the available space for the MRU, so we need to
1374  * evict from our own cache instead.  Evict from the set of resident MRU
1375  * entries.
1376  *
1377  * 3. Insert for MFU (c - p) > sizeof(arc.mfu) ->
1378  * c minus p represents the MFU space in the cache, since p is the size of the
1379  * cache that is dedicated to the MRU.  In this situation there's still space on
1380  * the MFU side, so the MRU side needs to be victimized.
1381  *
1382  * 4. Insert for MFU (c - p) < sizeof(arc.mfu) ->
1383  * MFU's resident set is consuming more space than it has been allotted.  In
1384  * this situation, we must victimize our own cache, the MFU, for this insertion.
1385  */
1386 static void
1387 arc_evict_for_state(arc_state_t *state, uint64_t bytes)
1388 {
1389 	uint64_t	mru_used;
1390 	uint64_t	mfu_space;
1391 	uint64_t	evicted;
1392 
1393 	ASSERT(state == arc.mru || state == arc.mfu);
1394 
1395 	if (state == arc.mru) {
1396 		mru_used = arc.anon->size + arc.mru->size;
1397 		if (arc.p > mru_used) {
1398 			/* case 1 */
1399 			evicted = arc_evict(arc.mfu, bytes);
1400 			if (evicted < bytes) {
1401 				arc_adjust();
1402 			}
1403 		} else {
1404 			/* case 2 */
1405 			evicted = arc_evict(arc.mru, bytes);
1406 			if (evicted < bytes) {
1407 				arc_adjust();
1408 			}
1409 		}
1410 	} else {
1411 		/* MFU case */
1412 		mfu_space = arc.c - arc.p;
1413 		if (mfu_space > arc.mfu->size) {
1414 			/* case 3 */
1415 			evicted = arc_evict(arc.mru, bytes);
1416 			if (evicted < bytes) {
1417 				arc_adjust();
1418 			}
1419 		} else {
1420 			/* case 4 */
1421 			evicted = arc_evict(arc.mfu, bytes);
1422 			if (evicted < bytes) {
1423 				arc_adjust();
1424 			}
1425 		}
1426 	}
1427 }
1428 
1429 /*
1430  * This routine is called whenever a buffer is accessed.
1431  * NOTE: the hash lock is dropped in this function.
1432  */
1433 static void
1434 arc_access_and_exit(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
1435 {
1436 	arc_state_t	*evict_state = NULL;
1437 	int		blksz;
1438 
1439 	ASSERT(MUTEX_HELD(hash_lock));
1440 
1441 	blksz = buf->b_size;
1442 
1443 	if (buf->b_state == arc.anon) {
1444 		/*
1445 		 * This buffer is not in the cache, and does not
1446 		 * appear in our "ghost" list.  Add the new buffer
1447 		 * to the MRU state.
1448 		 */
1449 
1450 		arc_adapt(blksz, arc.anon);
1451 		if (arc_evict_needed())
1452 			evict_state = arc.mru;
1453 
1454 		ASSERT(buf->b_arc_access == 0);
1455 		buf->b_arc_access = lbolt;
1456 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1457 		arc_change_state(arc.mru, buf, hash_lock);
1458 
1459 	} else if (buf->b_state == arc.mru) {
1460 		/*
1461 		 * If this buffer is in the MRU-top state and has the prefetch
1462 		 * flag, the first read was actually part of a prefetch.  In
1463 		 * this situation, we simply want to clear the flag and return.
1464 		 * A subsequent access should bump this into the MFU state.
1465 		 */
1466 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
1467 			buf->b_flags &= ~ARC_PREFETCH;
1468 			atomic_add_64(&arc.mru->hits, 1);
1469 			mutex_exit(hash_lock);
1470 			return;
1471 		}
1472 
1473 		/*
1474 		 * This buffer has been "accessed" only once so far,
1475 		 * but it is still in the cache. Move it to the MFU
1476 		 * state.
1477 		 */
1478 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
1479 			/*
1480 			 * More than 125ms have passed since we
1481 			 * instantiated this buffer.  Move it to the
1482 			 * most frequently used state.
1483 			 */
1484 			buf->b_arc_access = lbolt;
1485 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1486 			arc_change_state(arc.mfu, buf, hash_lock);
1487 		}
1488 		atomic_add_64(&arc.mru->hits, 1);
1489 	} else if (buf->b_state == arc.mru_ghost) {
1490 		arc_state_t	*new_state;
1491 		/*
1492 		 * This buffer has been "accessed" recently, but
1493 		 * was evicted from the cache.  Move it to the
1494 		 * MFU state.
1495 		 */
1496 
1497 		if (buf->b_flags & ARC_PREFETCH) {
1498 			new_state = arc.mru;
1499 			buf->b_flags &= ~ARC_PREFETCH;
1500 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1501 		} else {
1502 			new_state = arc.mfu;
1503 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1504 		}
1505 
1506 		arc_adapt(blksz, arc.mru_ghost);
1507 		if (arc_evict_needed())
1508 			evict_state = new_state;
1509 
1510 		buf->b_arc_access = lbolt;
1511 		arc_change_state(new_state, buf, hash_lock);
1512 
1513 		atomic_add_64(&arc.mru_ghost->hits, 1);
1514 	} else if (buf->b_state == arc.mfu) {
1515 		/*
1516 		 * This buffer has been accessed more than once and is
1517 		 * still in the cache.  Keep it in the MFU state.
1518 		 *
1519 		 * NOTE: the add_reference() that occurred when we did
1520 		 * the arc_read() should have kicked this off the list,
1521 		 * so even if it was a prefetch, it will be put back at
1522 		 * the head of the list when we remove_reference().
1523 		 */
1524 		atomic_add_64(&arc.mfu->hits, 1);
1525 	} else if (buf->b_state == arc.mfu_ghost) {
1526 		/*
1527 		 * This buffer has been accessed more than once but has
1528 		 * been evicted from the cache.  Move it back to the
1529 		 * MFU state.
1530 		 */
1531 
1532 		arc_adapt(blksz, arc.mfu_ghost);
1533 		if (arc_evict_needed())
1534 			evict_state = arc.mfu;
1535 
1536 		buf->b_arc_access = lbolt;
1537 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1538 		arc_change_state(arc.mfu, buf, hash_lock);
1539 
1540 		atomic_add_64(&arc.mfu_ghost->hits, 1);
1541 	} else {
1542 		ASSERT(!"invalid arc state");
1543 	}
1544 
1545 	mutex_exit(hash_lock);
1546 	if (evict_state)
1547 		arc_evict_for_state(evict_state, blksz);
1548 }
1549 
1550 /* a generic arc_done_func_t which you can use */
1551 /* ARGSUSED */
1552 void
1553 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
1554 {
1555 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
1556 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1557 }
1558 
1559 /* a generic arc_done_func_t which you can use */
1560 void
1561 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
1562 {
1563 	arc_buf_t **bufp = arg;
1564 	if (zio && zio->io_error) {
1565 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1566 		*bufp = NULL;
1567 	} else {
1568 		*bufp = buf;
1569 	}
1570 }
1571 
1572 static void
1573 arc_read_done(zio_t *zio)
1574 {
1575 	arc_buf_hdr_t	*hdr, *found;
1576 	arc_buf_t	*buf;
1577 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
1578 	kmutex_t	*hash_lock;
1579 	arc_callback_t	*callback_list, *acb;
1580 	int		freeable = FALSE;
1581 
1582 	buf = zio->io_private;
1583 	hdr = buf->b_hdr;
1584 
1585 	/*
1586 	 * The hdr was inserted into hash-table and removed from lists
1587 	 * prior to starting I/O.  We should find this header, since
1588 	 * it's in the hash table, and it should be legit since it's
1589 	 * not possible to evict it during the I/O.  The only possible
1590 	 * reason for it not to be found is if we were freed during the
1591 	 * read.
1592 	 */
1593 	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
1594 		    &hash_lock);
1595 
1596 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
1597 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));
1598 
1599 	/* byteswap if necessary */
1600 	callback_list = hdr->b_acb;
1601 	ASSERT(callback_list != NULL);
1602 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1603 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1604 
1605 	/* create copies of the data buffer for the callers */
1606 	abuf = buf;
1607 	for (acb = callback_list; acb; acb = acb->acb_next) {
1608 		if (acb->acb_done) {
1609 			if (abuf == NULL) {
1610 				abuf = kmem_cache_alloc(buf_cache, KM_SLEEP);
1611 				abuf->b_data = arc_data_copy(hdr, buf->b_data);
1612 				abuf->b_hdr = hdr;
1613 				abuf->b_efunc = NULL;
1614 				abuf->b_private = NULL;
1615 				abuf->b_next = hdr->b_buf;
1616 				hdr->b_buf = abuf;
1617 				hdr->b_datacnt += 1;
1618 			}
1619 			acb->acb_buf = abuf;
1620 			abuf = NULL;
1621 		} else {
1622 			/*
1623 			 * The caller did not provide a callback function.
1624 			 * In this case, we should just remove the reference.
1625 			 */
1626 			if (HDR_FREED_IN_READ(hdr)) {
1627 				ASSERT3P(hdr->b_state, ==, arc.anon);
1628 				(void) refcount_remove(&hdr->b_refcnt,
1629 				    acb->acb_private);
1630 			} else {
1631 				(void) remove_reference(hdr, hash_lock,
1632 				    acb->acb_private);
1633 			}
1634 		}
1635 	}
1636 	hdr->b_acb = NULL;
1637 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
1638 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
1639 	if (abuf == buf)
1640 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1641 
1642 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1643 
1644 	if (zio->io_error != 0) {
1645 		hdr->b_flags |= ARC_IO_ERROR;
1646 		if (hdr->b_state != arc.anon)
1647 			arc_change_state(arc.anon, hdr, hash_lock);
1648 		if (HDR_IN_HASH_TABLE(hdr))
1649 			buf_hash_remove(hdr);
1650 		freeable = refcount_is_zero(&hdr->b_refcnt);
1651 		/* translate checksum errors into IO errors */
1652 		if (zio->io_error == ECKSUM)
1653 			zio->io_error = EIO;
1654 	}
1655 
1656 	/*
1657 	 * Broadcast before we drop the hash_lock.  This is less efficient,
1658 	 * but avoids the possibility that the hdr (and hence the cv) might
1659 	 * be freed before we get to the cv_broadcast().
1660 	 */
1661 	cv_broadcast(&hdr->b_cv);
1662 
1663 	if (hash_lock) {
1664 		/*
1665 		 * Only call arc_access on anonymous buffers.  This is because
1666 		 * if we've issued an I/O for an evicted buffer, we've already
1667 		 * called arc_access (to prevent any simultaneous readers from
1668 		 * getting confused).
1669 		 */
1670 		if (zio->io_error == 0 && hdr->b_state == arc.anon)
1671 			arc_access_and_exit(hdr, hash_lock);
1672 		else
1673 			mutex_exit(hash_lock);
1674 	} else {
1675 		/*
1676 		 * This block was freed while we waited for the read to
1677 		 * complete.  It has been removed from the hash table and
1678 		 * moved to the anonymous state (so that it won't show up
1679 		 * in the cache).
1680 		 */
1681 		ASSERT3P(hdr->b_state, ==, arc.anon);
1682 		freeable = refcount_is_zero(&hdr->b_refcnt);
1683 	}
1684 
1685 	/* execute each callback and free its structure */
1686 	while ((acb = callback_list) != NULL) {
1687 		if (acb->acb_done)
1688 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1689 
1690 		if (acb->acb_zio_dummy != NULL) {
1691 			acb->acb_zio_dummy->io_error = zio->io_error;
1692 			zio_nowait(acb->acb_zio_dummy);
1693 		}
1694 
1695 		callback_list = acb->acb_next;
1696 		kmem_free(acb, sizeof (arc_callback_t));
1697 	}
1698 
1699 	if (freeable)
1700 		arc_hdr_destroy(hdr);
1701 }
1702 
1703 /*
1704  * "Read" the block block at the specified DVA (in bp) via the
1705  * cache.  If the block is found in the cache, invoke the provided
1706  * callback immediately and return.  Note that the `zio' parameter
1707  * in the callback will be NULL in this case, since no IO was
1708  * required.  If the block is not in the cache pass the read request
1709  * on to the spa with a substitute callback function, so that the
1710  * requested block will be added to the cache.
1711  *
1712  * If a read request arrives for a block that has a read in-progress,
1713  * either wait for the in-progress read to complete (and return the
1714  * results); or, if this is a read with a "done" func, add a record
1715  * to the read to invoke the "done" func when the read completes,
1716  * and return; or just return.
1717  *
1718  * arc_read_done() will invoke all the requested "done" functions
1719  * for readers of this block.
1720  */
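/*
 * For example (an illustrative sketch, not a prescribed interface), a
 * blocking cached read that hands the buffer back through
 * arc_getbuf_func() might look like:
 *
 *	arc_buf_t *abuf = NULL;
 *	(void) arc_read(NULL, spa, bp, byteswap, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, ARC_WAIT, &zb);
 *
 * where `byteswap' and `zb' are the caller's byteswap function and
 * zbookmark, respectively.
 */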
1721 int
1722 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
1723     arc_done_func_t *done, void *private, int priority, int flags,
1724     uint32_t arc_flags, zbookmark_t *zb)
1725 {
1726 	arc_buf_hdr_t *hdr;
1727 	arc_buf_t *buf;
1728 	kmutex_t *hash_lock;
1729 	zio_t	*rzio;
1730 
1731 top:
1732 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
1733 	if (hdr && hdr->b_datacnt > 0) {
1734 
1735 		if (HDR_IO_IN_PROGRESS(hdr)) {
1736 			if ((arc_flags & ARC_NOWAIT) && done) {
1737 				arc_callback_t	*acb = NULL;
1738 
1739 				acb = kmem_zalloc(sizeof (arc_callback_t),
1740 				    KM_SLEEP);
1741 				acb->acb_done = done;
1742 				acb->acb_private = private;
1743 				acb->acb_byteswap = swap;
1744 				if (pio != NULL)
1745 					acb->acb_zio_dummy = zio_null(pio,
1746 					    spa, NULL, NULL, flags);
1747 
1748 				ASSERT(acb->acb_done != NULL);
1749 				acb->acb_next = hdr->b_acb;
1750 				hdr->b_acb = acb;
1751 				add_reference(hdr, hash_lock, private);
1752 				mutex_exit(hash_lock);
1753 				return (0);
1754 			} else if (arc_flags & ARC_WAIT) {
1755 				cv_wait(&hdr->b_cv, hash_lock);
1756 				mutex_exit(hash_lock);
1757 				goto top;
1758 			}
1759 			mutex_exit(hash_lock);
1760 			return (0);
1761 		}
1762 
1763 		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
1764 
1765 		if (done) {
1766 			/*
1767 			 * If this block is already in use, create a new
1768 			 * copy of the data so that we will be guaranteed
1769 			 * that arc_release() will always succeed.
1770 			 */
			buf = hdr->b_buf;
			ASSERT(buf);
			ASSERT(buf->b_data);
			if (!HDR_BUF_AVAILABLE(hdr)) {
				void *data = arc_data_copy(hdr, buf->b_data);
				buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
				buf->b_hdr = hdr;
				buf->b_data = data;
				buf->b_efunc = NULL;
				buf->b_private = NULL;
				buf->b_next = hdr->b_buf;
				hdr->b_buf = buf;
				hdr->b_datacnt += 1;
			} else {
				ASSERT(buf->b_efunc == NULL);
				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
			}
			add_reference(hdr, hash_lock, private);
		}
		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
		arc_access_and_exit(hdr, hash_lock);
		atomic_add_64(&arc.hits, 1);
		if (done)
			done(NULL, buf, private);
	} else {
		uint64_t size = BP_GET_LSIZE(bp);
		arc_callback_t	*acb;

		if (hdr == NULL) {
			/* this block is not in the cache */
			arc_buf_hdr_t	*exists;

			buf = arc_buf_alloc(spa, size, private);
			hdr = buf->b_hdr;
			hdr->b_dva = *BP_IDENTITY(bp);
			hdr->b_birth = bp->blk_birth;
			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
			exists = buf_hash_insert(hdr, &hash_lock);
			if (exists) {
				/* somebody beat us to the hash insert */
				mutex_exit(hash_lock);
				bzero(&hdr->b_dva, sizeof (dva_t));
				hdr->b_birth = 0;
				hdr->b_cksum0 = 0;
				(void) arc_buf_remove_ref(buf, private);
				goto top; /* restart the IO request */
			}

		} else {
			/* this block is in the ghost cache */
			ASSERT(GHOST_STATE(hdr->b_state));
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			add_reference(hdr, hash_lock, private);
			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);

			ASSERT(hdr->b_buf == NULL);
			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
			buf->b_hdr = hdr;
			buf->b_efunc = NULL;
			buf->b_private = NULL;
			buf->b_next = NULL;
			hdr->b_buf = buf;
			buf->b_data = zio_buf_alloc(hdr->b_size);
			atomic_add_64(&arc.size, hdr->b_size);
			ASSERT(hdr->b_datacnt == 0);
			hdr->b_datacnt = 1;
		}

		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
		acb->acb_done = done;
		acb->acb_private = private;
		acb->acb_byteswap = swap;

		ASSERT(hdr->b_acb == NULL);
		hdr->b_acb = acb;

		/*
		 * If this DVA is part of a prefetch, mark the buf
		 * header with the prefetch flag.
		 */
		if (arc_flags & ARC_PREFETCH)
			hdr->b_flags |= ARC_PREFETCH;
		hdr->b_flags |= ARC_IO_IN_PROGRESS;

		/*
		 * If the buffer has been evicted, migrate it to a present state
		 * before issuing the I/O.  Once we drop the hash-table lock,
		 * the header will be marked as I/O in progress and have an
		 * attached buffer.  At this point, anybody who finds this
		 * buffer ought to notice that it's legit but has a pending I/O.
		 */

		if (GHOST_STATE(hdr->b_state))
			arc_access_and_exit(hdr, hash_lock);
		else
			mutex_exit(hash_lock);

		ASSERT3U(hdr->b_size, ==, size);
		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
		    zbookmark_t *, zb);
		atomic_add_64(&arc.misses, 1);

		rzio = zio_read(pio, spa, bp, buf->b_data, size,
		    arc_read_done, buf, priority, flags, zb);

		if (arc_flags & ARC_WAIT)
			return (zio_wait(rzio));

		ASSERT(arc_flags & ARC_NOWAIT);
		zio_nowait(rzio);
	}
	return (0);
}

/*
 * arc_read() variant to support pool traversal.  If the block is already
 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
 * The idea is that we don't want pool traversal filling up memory, but
 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
 */
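/*
 * Illustrative traversal-side pattern (a sketch, not code from this
 * file): try the cache first, and fall back to explicit I/O when
 * arc_tryread() returns ENOENT.  "my_read_block" is a hypothetical
 * stand-in for the caller's own read path.
 *
 *	if (arc_tryread(spa, bp, data) == ENOENT)
 *		error = my_read_block(spa, bp, data);
 */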
int
arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_mtx;
	int rc = 0;

	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);

	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
		arc_buf_t *buf = hdr->b_buf;

		ASSERT(buf);
		while (buf->b_data == NULL) {
			buf = buf->b_next;
			ASSERT(buf);
		}
		bcopy(buf->b_data, data, hdr->b_size);
	} else {
		rc = ENOENT;
	}

	if (hash_mtx)
		mutex_exit(hash_mtx);

	return (rc);
}

void
arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
{
	ASSERT(buf->b_hdr != NULL);
	ASSERT(buf->b_hdr->b_state != arc.anon);
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
	buf->b_efunc = func;
	buf->b_private = private;
}
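
/*
 * Hypothetical registration sketch (not code from this file).  The
 * eviction function receives the arc_buf_t and must return 0, as the
 * VERIFY() calls in arc_buf_evict() below expect; "my_evict" and
 * "my_state" are assumed names, and the private state would normally
 * be recovered from buf->b_private.
 *
 *	static int
 *	my_evict(arc_buf_t *buf)
 *	{
 *		(tear down whatever still references buf)
 *		return (0);
 *	}
 *
 *	arc_set_callback(buf, my_evict, my_state);
 */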

/*
 * This is used by the DMU to let the ARC know that a buffer is
 * being evicted, so the ARC should clean up.  If this arc buf
 * is not yet in the evicted state, it will be put there.
 */
int
arc_buf_evict(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	arc_buf_t **bufp;

	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts().
		 * NOTE: We can't be in arc_buf_add_ref() because
		 * that would violate the interface rules.
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&arc_eviction_mtx);
		return (0);
	} else if (buf->b_data == NULL) {
		arc_buf_t copy = *buf; /* structure assignment */
		/*
		 * We are on the eviction list.  Process this buffer
		 * now but let arc_do_user_evicts() do the reaping.
		 */
		buf->b_efunc = NULL;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);
		VERIFY(copy.b_efunc(&copy) == 0);
		return (1);
	} else {
		/*
		 * Prevent a race with arc_evict()
		 */
		ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
		buf->b_hdr = NULL;
	}
	mutex_exit(&arc_eviction_mtx);

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);

	/*
	 * Pull this buffer off of the hdr
	 */
	bufp = &hdr->b_buf;
	while (*bufp != buf)
		bufp = &(*bufp)->b_next;
	*bufp = buf->b_next;

	ASSERT(buf->b_data != NULL);
	buf->b_hdr = hdr;
	arc_buf_destroy(buf, FALSE);

	if (hdr->b_datacnt == 0) {
		arc_state_t *old_state = hdr->b_state;
		arc_state_t *evicted_state;

		ASSERT(refcount_is_zero(&hdr->b_refcnt));

		evicted_state =
		    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;

		mutex_enter(&old_state->mtx);
		mutex_enter(&evicted_state->mtx);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		hdr->b_flags = ARC_IN_HASH_TABLE;

		mutex_exit(&evicted_state->mtx);
		mutex_exit(&old_state->mtx);
	}
	mutex_exit(hash_lock);

	VERIFY(buf->b_efunc(buf) == 0);
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
	return (1);
}

/*
 * Release this buffer from the cache.  This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);

	/* this buffer is not on any list */
	ASSERT(refcount_count(&hdr->b_refcnt) > 0);

	if (hdr->b_state == arc.anon) {
		/* this buffer is already released */
		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
		ASSERT(BUF_EMPTY(hdr));
		ASSERT(buf->b_efunc == NULL);
		return;
	}

	mutex_enter(hash_lock);

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_buf != buf || buf->b_next != NULL) {
		arc_buf_hdr_t *nhdr;
		arc_buf_t **bufp;
		uint64_t blksz = hdr->b_size;
		spa_t *spa = hdr->b_spa;

		ASSERT(hdr->b_datacnt > 1);
		/*
		 * Pull the data off of this buf and attach it to
		 * a new anonymous buf.
		 */
		(void) remove_reference(hdr, hash_lock, tag);
		bufp = &hdr->b_buf;
		while (*bufp != buf)
			bufp = &(*bufp)->b_next;
		*bufp = (*bufp)->b_next;

		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
		if (refcount_is_zero(&hdr->b_refcnt)) {
			ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
			atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
		}
		hdr->b_datacnt -= 1;

		mutex_exit(hash_lock);

		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
		nhdr->b_size = blksz;
		nhdr->b_spa = spa;
		nhdr->b_buf = buf;
		nhdr->b_state = arc.anon;
		nhdr->b_arc_access = 0;
		nhdr->b_flags = 0;
		nhdr->b_datacnt = 1;
		buf->b_hdr = nhdr;
		buf->b_next = NULL;
		(void) refcount_add(&nhdr->b_refcnt, tag);
		atomic_add_64(&arc.anon->size, blksz);

		hdr = nhdr;
	} else {
		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
		ASSERT(!list_link_active(&hdr->b_arc_node));
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		arc_change_state(arc.anon, hdr, hash_lock);
		hdr->b_arc_access = 0;
		mutex_exit(hash_lock);
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	buf->b_efunc = NULL;
	buf->b_private = NULL;
}

int
arc_released(arc_buf_t *buf)
{
	return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
}

int
arc_has_callback(arc_buf_t *buf)
{
	return (buf->b_efunc != NULL);
}

#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
	return (refcount_count(&buf->b_hdr->b_refcnt));
}
#endif

static void
arc_write_done(zio_t *zio)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr;
	arc_callback_t *acb;

	buf = zio->io_private;
	hdr = buf->b_hdr;
	acb = hdr->b_acb;
	hdr->b_acb = NULL;
	ASSERT(acb != NULL);

	/* this buffer is on no lists and is not in the hash table */
	ASSERT3P(hdr->b_state, ==, arc.anon);

	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
	hdr->b_birth = zio->io_bp->blk_birth;
	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
	/*
	 * If the block to be written was all-zero, we may have
	 * compressed it away.  In this case no write was performed
	 * so there will be no dva/birth-date/checksum.  The buffer
	 * must therefore remain anonymous (and uncached).
	 */
	if (!BUF_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
			    BP_IDENTITY(zio->io_bp)));
			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
			    zio->io_bp->blk_birth);

			ASSERT(refcount_is_zero(&exists->b_refcnt));
			arc_change_state(arc.anon, exists, hash_lock);
			mutex_exit(hash_lock);
			arc_hdr_destroy(exists);
			exists = buf_hash_insert(hdr, &hash_lock);
			ASSERT3P(exists, ==, NULL);
		}
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		arc_access_and_exit(hdr, hash_lock);
	} else if (acb->acb_done == NULL) {
		int destroy_hdr;
		/*
		 * This is an anonymous buffer with no user callback;
		 * destroy it if there are no active references.
		 */
		mutex_enter(&arc_eviction_mtx);
		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	}

	if (acb->acb_done) {
		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
		acb->acb_done(zio, buf, acb->acb_private);
	}

	kmem_free(acb, sizeof (arc_callback_t));
}

int
arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
    uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
    arc_done_func_t *done, void *private, int priority, int flags,
    uint32_t arc_flags, zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_callback_t	*acb;
	zio_t	*rzio;

	/* this is a private buffer - no locking required */
	ASSERT3P(hdr->b_state, ==, arc.anon);
	ASSERT(BUF_EMPTY(hdr));
	ASSERT(!HDR_IO_ERROR(hdr));
	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
	acb->acb_done = done;
	acb->acb_private = private;
	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
	hdr->b_acb = acb;
	hdr->b_flags |= ARC_IO_IN_PROGRESS;
	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);

	if (arc_flags & ARC_WAIT)
		return (zio_wait(rzio));

	ASSERT(arc_flags & ARC_NOWAIT);
	zio_nowait(rzio);

	return (0);
}
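
/*
 * Illustrative write-path sketch (hypothetical): a cached buffer must
 * be arc_release()d before its contents are modified, which makes it
 * anonymous and thus acceptable to the ASSERTs above.  The checksum,
 * compress and ncopies values, "my_write_done", "my_arg" and the
 * bookmark are all placeholders chosen by the caller.
 *
 *	arc_release(buf, tag);
 *	(modify buf->b_data as needed)
 *	error = arc_write(pio, spa, checksum, compress, ncopies, txg,
 *	    bp, buf, my_write_done, my_arg, ZIO_PRIORITY_ASYNC_WRITE,
 *	    ZIO_FLAG_CANFAIL, ARC_NOWAIT, &zb);
 */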

int
arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
	arc_buf_hdr_t *ab;
	kmutex_t *hash_lock;
	zio_t	*zio;

	/*
	 * If this buffer is in the cache, release it so that it
	 * can be reused.
	 */
	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (ab != NULL) {
		/*
		 * The checksum of blocks to free is not always
		 * preserved (e.g. on the deadlist).  However, if it is
		 * nonzero, it should match what we have in the cache.
		 */
		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
		if (ab->b_state != arc.anon)
			arc_change_state(arc.anon, ab, hash_lock);
		if (refcount_is_zero(&ab->b_refcnt)) {
			mutex_exit(hash_lock);
			arc_hdr_destroy(ab);
			atomic_add_64(&arc.deleted, 1);
		} else {
			/*
			 * We could have an outstanding read on this
			 * block, so multiple active references are
			 * possible.  But we should only have a single
			 * data buffer associated at this point.
			 */
			ASSERT3U(ab->b_datacnt, ==, 1);
			if (HDR_IO_IN_PROGRESS(ab))
				ab->b_flags |= ARC_FREED_IN_READ;
			if (HDR_IN_HASH_TABLE(ab))
				buf_hash_remove(ab);
			ab->b_arc_access = 0;
			bzero(&ab->b_dva, sizeof (dva_t));
			ab->b_birth = 0;
			ab->b_cksum0 = 0;
			ab->b_buf->b_efunc = NULL;
			ab->b_buf->b_private = NULL;
			mutex_exit(hash_lock);
		}
	}

	zio = zio_free(pio, spa, txg, bp, done, private);

	if (arc_flags & ARC_WAIT)
		return (zio_wait(zio));

	ASSERT(arc_flags & ARC_NOWAIT);
	zio_nowait(zio);

	return (0);
}

void
arc_tempreserve_clear(uint64_t tempreserve)
{
	atomic_add_64(&arc_tempreserve, -tempreserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t tempreserve)
{
#ifdef ZFS_DEBUG
	/*
	 * Once in a while, fail for no reason.  Everything should cope.
	 */
	if (spa_get_random(10000) == 0) {
		dprintf("forcing random failure\n");
		return (ERESTART);
	}
#endif
	if (tempreserve > arc.c/4 && !arc.no_grow)
		arc.c = MIN(arc.c_max, tempreserve * 4);
	if (tempreserve > arc.c)
		return (ENOMEM);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
	 *
	 * XXX The limit should be adjusted dynamically to keep the time
	 * to sync a dataset fixed (around 1-5 seconds?).
	 */

	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
	    arc_tempreserve + arc.anon->size > arc.c / 4) {
		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
		    "tempreserve=%lluK arc.c=%lluK\n",
		    arc_tempreserve>>10, arc.anon->lsize>>10,
		    tempreserve>>10, arc.c>>10);
		return (ERESTART);
	}
	atomic_add_64(&arc_tempreserve, tempreserve);
	return (0);
}
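
/*
 * Illustrative reserve/clear pairing (a sketch; the surrounding
 * transaction logic is hypothetical).  Callers are expected to retry
 * on ERESTART:
 *
 *	error = arc_tempreserve_space(space);
 *	if (error != 0)
 *		return (error);
 *	(dirty the data)
 *	arc_tempreserve_clear(space);
 */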

void
arc_init(void)
{
	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);

	/* Start out with 1/8 of all memory */
	arc.c = physmem * PAGESIZE / 8;

#ifdef _KERNEL
	/*
	 * On architectures where the physical memory can be larger
	 * than the addressable space (e.g. Intel in 32-bit mode), we
	 * may need to limit the cache to 1/8 of VM size.
	 */
	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
#endif

	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
	arc.c_min = MAX(arc.c / 4, 64<<20);
	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
	if (arc.c * 8 >= 1<<30)
		arc.c_max = (arc.c * 8) - (1<<30);
	else
		arc.c_max = arc.c_min;
	arc.c_max = MAX(arc.c * 6, arc.c_max);
	arc.c = arc.c_max;
	arc.p = (arc.c >> 1);
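
	/*
	 * Worked example (illustration only): with 4GB of physical
	 * memory, arc.c starts at 512MB, so arc.c_min is
	 * MAX(128MB, 64MB) = 128MB and arc.c_max is
	 * MAX(6 * 512MB, 4GB - 1GB) = 3GB; arc.c is then raised
	 * to 3GB and arc.p starts at half of that, 1.5GB.
	 */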

	/* if kmem_flags are set, let's try to use less memory */
	if (kmem_debugging())
		arc.c = arc.c / 2;
	if (arc.c < arc.c_min)
		arc.c = arc.c_min;

	arc.anon = &ARC_anon;
	arc.mru = &ARC_mru;
	arc.mru_ghost = &ARC_mru_ghost;
	arc.mfu = &ARC_mfu;
	arc.mfu_ghost = &ARC_mfu_ghost;
	arc.size = 0;

	list_create(&arc.mru->list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));

	buf_init();

	arc_thread_exit = 0;
	arc_eviction_list = NULL;
	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);

	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
arc_fini(void)
{
	mutex_enter(&arc_reclaim_thr_lock);
	arc_thread_exit = 1;
	while (arc_thread_exit != 0)
		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
	mutex_exit(&arc_reclaim_thr_lock);

	arc_flush();

	arc_dead = TRUE;

	mutex_destroy(&arc_eviction_mtx);
	mutex_destroy(&arc_reclaim_lock);
	mutex_destroy(&arc_reclaim_thr_lock);
	cv_destroy(&arc_reclaim_thr_cv);

	list_destroy(&arc.mru->list);
	list_destroy(&arc.mru_ghost->list);
	list_destroy(&arc.mfu->list);
	list_destroy(&arc.mfu_ghost->list);

	buf_fini();
}