/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory.  This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about.  Our cache is not so simple.  At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them.  Blocks are only evictable
 *    when there are no external references active.  This makes
 *    eviction far more problematic:  we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space.  In these circumstances we are unable to adjust the cache
 *    size.  To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss.  Our model has a variable sized cache.  It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 *    elements of the cache are therefore exactly the same size.  So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict.  In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes).  We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
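
/*
 * For example, a thread that already holds an arc list lock and needs
 * the hash lock for a buffer must take it non-blocking (an illustrative
 * sketch of the pattern only; `ab', `state' and `hash_lock' are
 * hypothetical locals):
 *
 *	ASSERT(MUTEX_HELD(&state->arcs_mtx));
 *	hash_lock = HDR_LOCK(ab);
 *	if (mutex_tryenter(hash_lock)) {
 *		... safe to examine or evict *ab ...
 *		mutex_exit(hash_lock);
 *	} else {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *	}
 */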

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
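
/*
 * The usual path of a buffer through these states (a sketch only; see
 * arc_access() for the exact rules): a buffer is born in ARC_anon,
 * moves to ARC_mru once it is written and hashed, is promoted to
 * ARC_mfu when it is accessed again, and when finally evicted leaves
 * only its header behind on the corresponding ghost list.
 */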

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}
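
/*
 * ARCSTAT_MAX() maintains a running maximum without a lock: it keeps
 * retrying the compare-and-swap until either the stored value is
 * already >= val or the swap succeeds.  For example, buf_hash_insert()
 * below records the longest hash chain observed with:
 *
 *	ARCSTAT_MAX(arcstat_hash_chain_max, i);
 */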

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {						\
		if (cond2) {					\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {					\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}						\
	} else {						\
		if (cond2) {					\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {					\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}						\
	}
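
/*
 * For example, arc_buf_add_ref() below classifies a hit in one shot:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * which bumps exactly one of the four arcstat_*_hits counters above.
 */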

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_DONT_L2CACHE	(1 << 16)	/* originated by prefetch */
#define	ARC_L2_READING		(1 << 17)	/* L2ARC read in progress */
#define	ARC_L2_WRITING		(1 << 18)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 19)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 20)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_DONT_L2CACHE(hdr)	((hdr)->b_flags & ARC_DONT_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_L2_READING)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
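
/*
 * For example, finding a buffer and taking the lock that protects it
 * is a single hash-and-index step (this is the pattern buf_hash_find()
 * below implements; `idx' is a hypothetical local):
 *
 *	idx = BUF_HASH_INDEX(spa, dva, birth);
 *	mutex_enter(BUF_HASH_LOCK(idx));
 *	... walk buf_hash_table.ht_table[idx] ...
 */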

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		4		/* num of writes */
#define	L2ARC_FEED_DELAY	180		/* starting grace */
#define	L2ARC_FEED_SECS		1		/* caching interval */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
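
/*
 * Taken together (illustrative arithmetic only): with the defaults
 * above, the feed thread runs every l2arc_feed_secs (1 second) and
 * issues at most l2arc_write_max (8 Mbytes) of writes per interval,
 * so a cache device warms up at no more than roughly 8 Mbytes/sec.
 */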

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	daddr_t		b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
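
/*
 * The non-NULL return from buf_hash_insert() doubles as race detection:
 * two threads reading the same block may both construct a header, but
 * only one wins the insert (a sketch of the caller-side pattern;
 * `exists' is a hypothetical local):
 *
 *	exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... someone beat us to it; use *exists instead ...
 *	}
 */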

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);

	ARCSTAT_INCR(arcstat_hdr_size, sizeof (arc_buf_hdr_t));
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);

	ARCSTAT_INCR(arcstat_hdr_size, -sizeof (arc_buf_hdr_t));
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
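	/*
	 * E.g. on a 4 Gbyte machine the loop above settles on
	 * hsize = 65536 buckets (65536 * 64K == 4G), i.e. a 512 Kbyte
	 * table of 8-byte pointers -- the 128KB-per-GB figure quoted
	 * in the comment above.
	 */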
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
}
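
/*
 * The debug-only freeze/thaw protocol, from a consumer's point of view
 * (a sketch; the checks fire only when zfs_flags & ZFS_DEBUG_MODIFY is
 * set):
 *
 *	arc_buf_thaw(buf);		-- discard the frozen checksum
 *	... modify buf->b_data ...
 *	arc_buf_freeze(buf);		-- recompute it
 *
 * A modification made while a checksum is held will trip the
 * "buffer modified while frozen!" panic in arc_cksum_verify().
 */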

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space)
{
	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space)
{
	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}
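
/*
 * These calls come in balanced pairs.  E.g. a consumer that charges a
 * data buffer to the ARC for its lifetime (a sketch):
 *
 *	buf = arc_data_buf_alloc(size);
 *	... use the buffer; arc_size is charged for it ...
 *	arc_data_buf_free(buf, size);
 *
 * arc_space_consume()/arc_space_return() do the same accounting for
 * metadata allocated outside the ARC proper.
 */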

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}
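
/*
 * Example lifetime of an anonymous buffer (a sketch; `tag' is any
 * pointer that uniquely identifies the holder and must match on
 * release):
 *
 *	buf = arc_buf_alloc(spa, size, tag, ARC_BUFC_DATA);
 *	... fill buf->b_data ...
 *	arc_buf_free(buf, tag);
 */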

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
buf->b_hdr->b_datacnt -= 1; 1230ea8dc4b6Seschrock } 1231ea8dc4b6Seschrock 1232ea8dc4b6Seschrock /* only remove the buf if requested */ 1233ea8dc4b6Seschrock if (!all) 1234ea8dc4b6Seschrock return; 1235ea8dc4b6Seschrock 1236ea8dc4b6Seschrock /* remove the buf from the hdr list */ 1237ea8dc4b6Seschrock for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1238ea8dc4b6Seschrock continue; 1239ea8dc4b6Seschrock *bufp = buf->b_next; 1240ea8dc4b6Seschrock 1241ea8dc4b6Seschrock ASSERT(buf->b_efunc == NULL); 1242ea8dc4b6Seschrock 1243ea8dc4b6Seschrock /* clean up the buf */ 1244ea8dc4b6Seschrock buf->b_hdr = NULL; 1245ea8dc4b6Seschrock kmem_cache_free(buf_cache, buf); 1246ea8dc4b6Seschrock } 1247ea8dc4b6Seschrock 1248fa9e4066Sahrens static void 1249ea8dc4b6Seschrock arc_hdr_destroy(arc_buf_hdr_t *hdr) 1250fa9e4066Sahrens { 1251fa9e4066Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt)); 125244cb6abcSbmc ASSERT3P(hdr->b_state, ==, arc_anon); 1253ea8dc4b6Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1254fa9e4066Sahrens 1255*fa94a07fSbrendan if (hdr->b_l2hdr != NULL) { 1256*fa94a07fSbrendan if (!MUTEX_HELD(&l2arc_buflist_mtx)) { 1257*fa94a07fSbrendan /* 1258*fa94a07fSbrendan * To prevent arc_free() and l2arc_evict() from 1259*fa94a07fSbrendan * attempting to free the same buffer at the same time, 1260*fa94a07fSbrendan * a FREE_IN_PROGRESS flag is given to arc_free() to 1261*fa94a07fSbrendan * give it priority. l2arc_evict() can't destroy this 1262*fa94a07fSbrendan * header while we are waiting on l2arc_buflist_mtx. 1263*fa94a07fSbrendan */ 1264*fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 1265*fa94a07fSbrendan ASSERT(hdr->b_l2hdr != NULL); 1266*fa94a07fSbrendan 1267*fa94a07fSbrendan list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 1268*fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 1269*fa94a07fSbrendan } else { 1270*fa94a07fSbrendan list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 1271*fa94a07fSbrendan } 1272*fa94a07fSbrendan ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1273*fa94a07fSbrendan kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t)); 1274*fa94a07fSbrendan if (hdr->b_state == arc_l2c_only) 1275*fa94a07fSbrendan l2arc_hdr_stat_remove(); 1276*fa94a07fSbrendan hdr->b_l2hdr = NULL; 1277*fa94a07fSbrendan } 1278*fa94a07fSbrendan 1279fa9e4066Sahrens if (!BUF_EMPTY(hdr)) { 1280ea8dc4b6Seschrock ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1281fa9e4066Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 1282fa9e4066Sahrens hdr->b_birth = 0; 1283fa9e4066Sahrens hdr->b_cksum0 = 0; 1284fa9e4066Sahrens } 1285ea8dc4b6Seschrock while (hdr->b_buf) { 1286fa9e4066Sahrens arc_buf_t *buf = hdr->b_buf; 1287fa9e4066Sahrens 1288ea8dc4b6Seschrock if (buf->b_efunc) { 1289ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 1290ea8dc4b6Seschrock ASSERT(buf->b_hdr != NULL); 129144eda4d7Smaybee arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1292ea8dc4b6Seschrock hdr->b_buf = buf->b_next; 129340d7d650Smaybee buf->b_hdr = &arc_eviction_hdr; 1294ea8dc4b6Seschrock buf->b_next = arc_eviction_list; 1295ea8dc4b6Seschrock arc_eviction_list = buf; 1296ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 1297ea8dc4b6Seschrock } else { 129844eda4d7Smaybee arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1299ea8dc4b6Seschrock } 1300fa9e4066Sahrens } 13016b4acc8bSahrens if (hdr->b_freeze_cksum != NULL) { 13026b4acc8bSahrens kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 13036b4acc8bSahrens hdr->b_freeze_cksum = NULL; 13046b4acc8bSahrens } 1305ea8dc4b6Seschrock 1306fa9e4066Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 
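	/*
	 * By this point the header must be fully disconnected: off the
	 * state lists, with no hash chain successor and no pending I/O
	 * callbacks, so it is safe to return it to the kmem cache.
	 */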
1307fa9e4066Sahrens ASSERT3P(hdr->b_hash_next, ==, NULL); 1308fa9e4066Sahrens ASSERT3P(hdr->b_acb, ==, NULL); 1309fa9e4066Sahrens kmem_cache_free(hdr_cache, hdr); 1310fa9e4066Sahrens } 1311fa9e4066Sahrens 1312fa9e4066Sahrens void 1313fa9e4066Sahrens arc_buf_free(arc_buf_t *buf, void *tag) 1314fa9e4066Sahrens { 1315fa9e4066Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 131644cb6abcSbmc int hashed = hdr->b_state != arc_anon; 1317fa9e4066Sahrens 1318ea8dc4b6Seschrock ASSERT(buf->b_efunc == NULL); 1319ea8dc4b6Seschrock ASSERT(buf->b_data != NULL); 1320ea8dc4b6Seschrock 1321ea8dc4b6Seschrock if (hashed) { 1322ea8dc4b6Seschrock kmutex_t *hash_lock = HDR_LOCK(hdr); 1323ea8dc4b6Seschrock 1324ea8dc4b6Seschrock mutex_enter(hash_lock); 1325ea8dc4b6Seschrock (void) remove_reference(hdr, hash_lock, tag); 1326ea8dc4b6Seschrock if (hdr->b_datacnt > 1) 132744eda4d7Smaybee arc_buf_destroy(buf, FALSE, TRUE); 1328ea8dc4b6Seschrock else 1329ea8dc4b6Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 1330fa9e4066Sahrens mutex_exit(hash_lock); 1331ea8dc4b6Seschrock } else if (HDR_IO_IN_PROGRESS(hdr)) { 1332ea8dc4b6Seschrock int destroy_hdr; 1333ea8dc4b6Seschrock /* 1334ea8dc4b6Seschrock * We are in the middle of an async write. Don't destroy 1335ea8dc4b6Seschrock * this buffer unless the write completes before we finish 1336ea8dc4b6Seschrock * decrementing the reference count. 1337ea8dc4b6Seschrock */ 1338ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 1339ea8dc4b6Seschrock (void) remove_reference(hdr, NULL, tag); 1340ea8dc4b6Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1341ea8dc4b6Seschrock destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1342ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 1343ea8dc4b6Seschrock if (destroy_hdr) 1344ea8dc4b6Seschrock arc_hdr_destroy(hdr); 1345ea8dc4b6Seschrock } else { 1346ea8dc4b6Seschrock if (remove_reference(hdr, NULL, tag) > 0) { 1347ea8dc4b6Seschrock ASSERT(HDR_IO_ERROR(hdr)); 134844eda4d7Smaybee arc_buf_destroy(buf, FALSE, TRUE); 1349ea8dc4b6Seschrock } else { 1350ea8dc4b6Seschrock arc_hdr_destroy(hdr); 1351ea8dc4b6Seschrock } 1352fa9e4066Sahrens } 1353ea8dc4b6Seschrock } 1354fa9e4066Sahrens 1355ea8dc4b6Seschrock int 1356ea8dc4b6Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1357ea8dc4b6Seschrock { 1358ea8dc4b6Seschrock arc_buf_hdr_t *hdr = buf->b_hdr; 1359ea8dc4b6Seschrock kmutex_t *hash_lock = HDR_LOCK(hdr); 1360ea8dc4b6Seschrock int no_callback = (buf->b_efunc == NULL); 1361fa9e4066Sahrens 136244cb6abcSbmc if (hdr->b_state == arc_anon) { 1363ea8dc4b6Seschrock arc_buf_free(buf, tag); 1364ea8dc4b6Seschrock return (no_callback); 1365ea8dc4b6Seschrock } 1366ea8dc4b6Seschrock 1367ea8dc4b6Seschrock mutex_enter(hash_lock); 136844cb6abcSbmc ASSERT(hdr->b_state != arc_anon); 1369ea8dc4b6Seschrock ASSERT(buf->b_data != NULL); 1370ea8dc4b6Seschrock 1371ea8dc4b6Seschrock (void) remove_reference(hdr, hash_lock, tag); 1372ea8dc4b6Seschrock if (hdr->b_datacnt > 1) { 1373ea8dc4b6Seschrock if (no_callback) 137444eda4d7Smaybee arc_buf_destroy(buf, FALSE, TRUE); 1375ea8dc4b6Seschrock } else if (no_callback) { 1376ea8dc4b6Seschrock ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1377ea8dc4b6Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 1378ea8dc4b6Seschrock } 1379ea8dc4b6Seschrock ASSERT(no_callback || hdr->b_datacnt > 1 || 1380ea8dc4b6Seschrock refcount_is_zero(&hdr->b_refcnt)); 1381ea8dc4b6Seschrock mutex_exit(hash_lock); 1382ea8dc4b6Seschrock return (no_callback); 1383fa9e4066Sahrens } 1384fa9e4066Sahrens 1385fa9e4066Sahrens int 1386fa9e4066Sahrens arc_buf_size(arc_buf_t *buf) 
1387fa9e4066Sahrens { 1388fa9e4066Sahrens return (buf->b_hdr->b_size); 1389fa9e4066Sahrens } 1390fa9e4066Sahrens 1391fa9e4066Sahrens /* 1392fa9e4066Sahrens * Evict buffers from list until we've removed the specified number of 1393fa9e4066Sahrens * bytes. Move the removed buffers to the appropriate evict state. 139444eda4d7Smaybee * If the recycle flag is set, then attempt to "recycle" a buffer: 139544eda4d7Smaybee * - look for a buffer to evict that is `bytes' long. 139644eda4d7Smaybee * - return the data block from this buffer rather than freeing it. 139744eda4d7Smaybee * This flag is used by callers that are trying to make space for a 139844eda4d7Smaybee * new buffer in a full arc cache. 1399fa9e4066Sahrens */ 140044eda4d7Smaybee static void * 1401ad23a2dbSjohansen arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle, 1402ad23a2dbSjohansen arc_buf_contents_t type) 1403fa9e4066Sahrens { 1404fa9e4066Sahrens arc_state_t *evicted_state; 140544eda4d7Smaybee uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 14063fa51506Smaybee arc_buf_hdr_t *ab, *ab_prev = NULL; 14070e8c6158Smaybee list_t *list = &state->arcs_list[type]; 1408fa9e4066Sahrens kmutex_t *hash_lock; 140944eda4d7Smaybee boolean_t have_lock; 14103fa51506Smaybee void *stolen = NULL; 1411fa9e4066Sahrens 141244cb6abcSbmc ASSERT(state == arc_mru || state == arc_mfu); 1413fa9e4066Sahrens 141444cb6abcSbmc evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 1415fa9e4066Sahrens 141644cb6abcSbmc mutex_enter(&state->arcs_mtx); 141744cb6abcSbmc mutex_enter(&evicted_state->arcs_mtx); 1418fa9e4066Sahrens 14190e8c6158Smaybee for (ab = list_tail(list); ab; ab = ab_prev) { 14200e8c6158Smaybee ab_prev = list_prev(list, ab); 142113506d1eSmaybee /* prefetch buffers have a minimum lifespan */ 142244eda4d7Smaybee if (HDR_IO_IN_PROGRESS(ab) || 142344eda4d7Smaybee (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 142444eda4d7Smaybee lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { 142513506d1eSmaybee skipped++; 142613506d1eSmaybee continue; 142713506d1eSmaybee } 14283fa51506Smaybee /* "lookahead" for better eviction candidate */ 14293fa51506Smaybee if (recycle && ab->b_size != bytes && 14303fa51506Smaybee ab_prev && ab_prev->b_size == bytes) 143144eda4d7Smaybee continue; 1432fa9e4066Sahrens hash_lock = HDR_LOCK(ab); 143344eda4d7Smaybee have_lock = MUTEX_HELD(hash_lock); 143444eda4d7Smaybee if (have_lock || mutex_tryenter(hash_lock)) { 1435fa9e4066Sahrens ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 1436ea8dc4b6Seschrock ASSERT(ab->b_datacnt > 0); 1437ea8dc4b6Seschrock while (ab->b_buf) { 1438ea8dc4b6Seschrock arc_buf_t *buf = ab->b_buf; 143944eda4d7Smaybee if (buf->b_data) { 1440ea8dc4b6Seschrock bytes_evicted += ab->b_size; 1441ad23a2dbSjohansen if (recycle && ab->b_type == type && 1442*fa94a07fSbrendan ab->b_size == bytes && 1443*fa94a07fSbrendan !HDR_L2_WRITING(ab)) { 14443fa51506Smaybee stolen = buf->b_data; 14453fa51506Smaybee recycle = FALSE; 14463fa51506Smaybee } 144744eda4d7Smaybee } 1448ea8dc4b6Seschrock if (buf->b_efunc) { 1449ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 14503fa51506Smaybee arc_buf_destroy(buf, 14513fa51506Smaybee buf->b_data == stolen, FALSE); 1452ea8dc4b6Seschrock ab->b_buf = buf->b_next; 145340d7d650Smaybee buf->b_hdr = &arc_eviction_hdr; 1454ea8dc4b6Seschrock buf->b_next = arc_eviction_list; 1455ea8dc4b6Seschrock arc_eviction_list = buf; 1456ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 1457ea8dc4b6Seschrock } else { 14583fa51506Smaybee arc_buf_destroy(buf, 14593fa51506Smaybee 
buf->b_data == stolen, TRUE); 1460ea8dc4b6Seschrock } 1461ea8dc4b6Seschrock } 1462ea8dc4b6Seschrock ASSERT(ab->b_datacnt == 0); 1463fa9e4066Sahrens arc_change_state(evicted_state, ab, hash_lock); 1464ea8dc4b6Seschrock ASSERT(HDR_IN_HASH_TABLE(ab)); 1465*fa94a07fSbrendan ab->b_flags |= ARC_IN_HASH_TABLE; 1466*fa94a07fSbrendan ab->b_flags &= ~ARC_BUF_AVAILABLE; 1467fa9e4066Sahrens DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 146844eda4d7Smaybee if (!have_lock) 146944eda4d7Smaybee mutex_exit(hash_lock); 1470ea8dc4b6Seschrock if (bytes >= 0 && bytes_evicted >= bytes) 1471fa9e4066Sahrens break; 1472fa9e4066Sahrens } else { 147344eda4d7Smaybee missed += 1; 1474fa9e4066Sahrens } 1475fa9e4066Sahrens } 147644cb6abcSbmc 147744cb6abcSbmc mutex_exit(&evicted_state->arcs_mtx); 147844cb6abcSbmc mutex_exit(&state->arcs_mtx); 1479fa9e4066Sahrens 1480fa9e4066Sahrens if (bytes_evicted < bytes) 1481fa9e4066Sahrens dprintf("only evicted %lld bytes from %x", 1482fa9e4066Sahrens (longlong_t)bytes_evicted, state); 1483fa9e4066Sahrens 148444eda4d7Smaybee if (skipped) 148544cb6abcSbmc ARCSTAT_INCR(arcstat_evict_skip, skipped); 148644cb6abcSbmc 148744eda4d7Smaybee if (missed) 148844cb6abcSbmc ARCSTAT_INCR(arcstat_mutex_miss, missed); 1489f4d2e9e6Smaybee 1490f4d2e9e6Smaybee /* 1491f4d2e9e6Smaybee * We have just evicted some data into the ghost state; make 1492f4d2e9e6Smaybee * sure we also adjust the ghost state size if necessary. 1493f4d2e9e6Smaybee */ 1494f4d2e9e6Smaybee if (arc_no_grow && 1495f4d2e9e6Smaybee arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1496f4d2e9e6Smaybee int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1497f4d2e9e6Smaybee arc_mru_ghost->arcs_size - arc_c; 1498f4d2e9e6Smaybee 1499f4d2e9e6Smaybee if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1500f4d2e9e6Smaybee int64_t todelete = 1501f4d2e9e6Smaybee MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1502f4d2e9e6Smaybee arc_evict_ghost(arc_mru_ghost, todelete); 1503f4d2e9e6Smaybee } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1504f4d2e9e6Smaybee int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1505f4d2e9e6Smaybee arc_mru_ghost->arcs_size + 1506f4d2e9e6Smaybee arc_mfu_ghost->arcs_size - arc_c); 1507f4d2e9e6Smaybee arc_evict_ghost(arc_mfu_ghost, todelete); 1508f4d2e9e6Smaybee } 1509f4d2e9e6Smaybee } 151044cb6abcSbmc 15113fa51506Smaybee return (stolen); 1512fa9e4066Sahrens } 1513fa9e4066Sahrens 1514fa9e4066Sahrens /* 1515fa9e4066Sahrens * Remove buffers from list until we've removed the specified number of 1516fa9e4066Sahrens * bytes. Destroy the buffers that are removed.
1517fa9e4066Sahrens */ 1518fa9e4066Sahrens static void 1519ea8dc4b6Seschrock arc_evict_ghost(arc_state_t *state, int64_t bytes) 1520fa9e4066Sahrens { 1521fa9e4066Sahrens arc_buf_hdr_t *ab, *ab_prev; 15220e8c6158Smaybee list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1523fa9e4066Sahrens kmutex_t *hash_lock; 1524ea8dc4b6Seschrock uint64_t bytes_deleted = 0; 1525c0a81264Sek uint64_t bufs_skipped = 0; 1526fa9e4066Sahrens 1527ea8dc4b6Seschrock ASSERT(GHOST_STATE(state)); 1528fa9e4066Sahrens top: 152944cb6abcSbmc mutex_enter(&state->arcs_mtx); 15300e8c6158Smaybee for (ab = list_tail(list); ab; ab = ab_prev) { 15310e8c6158Smaybee ab_prev = list_prev(list, ab); 1532fa9e4066Sahrens hash_lock = HDR_LOCK(ab); 1533fa9e4066Sahrens if (mutex_tryenter(hash_lock)) { 153413506d1eSmaybee ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1535ea8dc4b6Seschrock ASSERT(ab->b_buf == NULL); 153644cb6abcSbmc ARCSTAT_BUMP(arcstat_deleted); 1537fa9e4066Sahrens bytes_deleted += ab->b_size; 1538*fa94a07fSbrendan 1539*fa94a07fSbrendan if (ab->b_l2hdr != NULL) { 1540*fa94a07fSbrendan /* 1541*fa94a07fSbrendan * This buffer is cached on the 2nd Level ARC; 1542*fa94a07fSbrendan * don't destroy the header. 1543*fa94a07fSbrendan */ 1544*fa94a07fSbrendan arc_change_state(arc_l2c_only, ab, hash_lock); 1545*fa94a07fSbrendan mutex_exit(hash_lock); 1546*fa94a07fSbrendan } else { 1547*fa94a07fSbrendan arc_change_state(arc_anon, ab, hash_lock); 1548*fa94a07fSbrendan mutex_exit(hash_lock); 1549*fa94a07fSbrendan arc_hdr_destroy(ab); 1550*fa94a07fSbrendan } 1551*fa94a07fSbrendan 1552ea8dc4b6Seschrock DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1553fa9e4066Sahrens if (bytes >= 0 && bytes_deleted >= bytes) 1554fa9e4066Sahrens break; 1555fa9e4066Sahrens } else { 1556fa9e4066Sahrens if (bytes < 0) { 155744cb6abcSbmc mutex_exit(&state->arcs_mtx); 1558fa9e4066Sahrens mutex_enter(hash_lock); 1559fa9e4066Sahrens mutex_exit(hash_lock); 1560fa9e4066Sahrens goto top; 1561fa9e4066Sahrens } 1562fa9e4066Sahrens bufs_skipped += 1; 1563fa9e4066Sahrens } 1564fa9e4066Sahrens } 156544cb6abcSbmc mutex_exit(&state->arcs_mtx); 1566fa9e4066Sahrens 15670e8c6158Smaybee if (list == &state->arcs_list[ARC_BUFC_DATA] && 15680e8c6158Smaybee (bytes < 0 || bytes_deleted < bytes)) { 15690e8c6158Smaybee list = &state->arcs_list[ARC_BUFC_METADATA]; 15700e8c6158Smaybee goto top; 15710e8c6158Smaybee } 15720e8c6158Smaybee 1573fa9e4066Sahrens if (bufs_skipped) { 157444cb6abcSbmc ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1575fa9e4066Sahrens ASSERT(bytes >= 0); 1576fa9e4066Sahrens } 1577fa9e4066Sahrens 1578fa9e4066Sahrens if (bytes_deleted < bytes) 1579fa9e4066Sahrens dprintf("only deleted %lld bytes from %p", 1580fa9e4066Sahrens (longlong_t)bytes_deleted, state); 1581fa9e4066Sahrens } 1582fa9e4066Sahrens 1583fa9e4066Sahrens static void 1584fa9e4066Sahrens arc_adjust(void) 1585fa9e4066Sahrens { 158644cb6abcSbmc int64_t top_sz, mru_over, arc_over, todelete; 1587fa9e4066Sahrens 158844cb6abcSbmc top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1589fa9e4066Sahrens 15900e8c6158Smaybee if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 15910e8c6158Smaybee int64_t toevict = 15920e8c6158Smaybee MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); 15930e8c6158Smaybee (void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_DATA); 15940e8c6158Smaybee top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 15950e8c6158Smaybee } 15960e8c6158Smaybee 15970e8c6158Smaybee if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 15980e8c6158Smaybee int64_t toevict = 
15990e8c6158Smaybee MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); 16000e8c6158Smaybee (void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_METADATA); 160144cb6abcSbmc top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1602fa9e4066Sahrens } 1603fa9e4066Sahrens 160444cb6abcSbmc mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; 1605fa9e4066Sahrens 1606fa9e4066Sahrens if (mru_over > 0) { 16070e8c6158Smaybee if (arc_mru_ghost->arcs_size > 0) { 16080e8c6158Smaybee todelete = MIN(arc_mru_ghost->arcs_size, mru_over); 160944cb6abcSbmc arc_evict_ghost(arc_mru_ghost, todelete); 1610fa9e4066Sahrens } 1611fa9e4066Sahrens } 1612fa9e4066Sahrens 161344cb6abcSbmc if ((arc_over = arc_size - arc_c) > 0) { 1614ea8dc4b6Seschrock int64_t tbl_over; 1615fa9e4066Sahrens 16160e8c6158Smaybee if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 16170e8c6158Smaybee int64_t toevict = 16180e8c6158Smaybee MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); 161944cb6abcSbmc (void) arc_evict(arc_mfu, toevict, FALSE, 16200e8c6158Smaybee ARC_BUFC_DATA); 16210e8c6158Smaybee arc_over = arc_size - arc_c; 1622fa9e4066Sahrens } 1623fa9e4066Sahrens 16240e8c6158Smaybee if (arc_over > 0 && 16250e8c6158Smaybee arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 16260e8c6158Smaybee int64_t toevict = 16270e8c6158Smaybee MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], 16280e8c6158Smaybee arc_over); 16290e8c6158Smaybee (void) arc_evict(arc_mfu, toevict, FALSE, 16300e8c6158Smaybee ARC_BUFC_METADATA); 16310e8c6158Smaybee } 1632fa9e4066Sahrens 16330e8c6158Smaybee tbl_over = arc_size + arc_mru_ghost->arcs_size + 16340e8c6158Smaybee arc_mfu_ghost->arcs_size - arc_c * 2; 16350e8c6158Smaybee 16360e8c6158Smaybee if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) { 16370e8c6158Smaybee todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); 163844cb6abcSbmc arc_evict_ghost(arc_mfu_ghost, todelete); 1639fa9e4066Sahrens } 1640fa9e4066Sahrens } 1641fa9e4066Sahrens } 1642fa9e4066Sahrens 1643ea8dc4b6Seschrock static void 1644ea8dc4b6Seschrock arc_do_user_evicts(void) 1645ea8dc4b6Seschrock { 1646ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 1647ea8dc4b6Seschrock while (arc_eviction_list != NULL) { 1648ea8dc4b6Seschrock arc_buf_t *buf = arc_eviction_list; 1649ea8dc4b6Seschrock arc_eviction_list = buf->b_next; 1650ea8dc4b6Seschrock buf->b_hdr = NULL; 1651ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 1652ea8dc4b6Seschrock 1653dd6ef538Smaybee if (buf->b_efunc != NULL) 1654dd6ef538Smaybee VERIFY(buf->b_efunc(buf) == 0); 1655ea8dc4b6Seschrock 1656ea8dc4b6Seschrock buf->b_efunc = NULL; 1657ea8dc4b6Seschrock buf->b_private = NULL; 1658ea8dc4b6Seschrock kmem_cache_free(buf_cache, buf); 1659ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 1660ea8dc4b6Seschrock } 1661ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 1662ea8dc4b6Seschrock } 1663ea8dc4b6Seschrock 1664fa9e4066Sahrens /* 1665fa9e4066Sahrens * Flush all *evictable* data from the cache. 1666fa9e4066Sahrens * NOTE: this will not touch "active" (i.e. referenced) data. 
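 *
 * Illustrative sketch (a hypothetical caller, not taken from this file):
 *
 *	arc_flush();
 *
 * On return every unreferenced buffer has been destroyed, all pending
 * user eviction callbacks have run, and only "active" buffers remain.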
1667fa9e4066Sahrens */ 1668fa9e4066Sahrens void 1669fa9e4066Sahrens arc_flush(void) 1670fa9e4066Sahrens { 16710e8c6158Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) 16720e8c6158Smaybee (void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_DATA); 16730e8c6158Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) 16740e8c6158Smaybee (void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_METADATA); 16750e8c6158Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) 16760e8c6158Smaybee (void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_DATA); 16770e8c6158Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) 16780e8c6158Smaybee (void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_METADATA); 1679fa9e4066Sahrens 168044cb6abcSbmc arc_evict_ghost(arc_mru_ghost, -1); 168144cb6abcSbmc arc_evict_ghost(arc_mfu_ghost, -1); 1682ea8dc4b6Seschrock 1683ea8dc4b6Seschrock mutex_enter(&arc_reclaim_thr_lock); 1684ea8dc4b6Seschrock arc_do_user_evicts(); 1685ea8dc4b6Seschrock mutex_exit(&arc_reclaim_thr_lock); 1686ea8dc4b6Seschrock ASSERT(arc_eviction_list == NULL); 1687fa9e4066Sahrens } 1688fa9e4066Sahrens 168949e3519aSmaybee int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 169013506d1eSmaybee 1691fa9e4066Sahrens void 169249e3519aSmaybee arc_shrink(void) 1693fa9e4066Sahrens { 169444cb6abcSbmc if (arc_c > arc_c_min) { 169549e3519aSmaybee uint64_t to_free; 1696fa9e4066Sahrens 16973cff2f43Sstans #ifdef _KERNEL 169844cb6abcSbmc to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 16993cff2f43Sstans #else 170044cb6abcSbmc to_free = arc_c >> arc_shrink_shift; 17013cff2f43Sstans #endif 170244cb6abcSbmc if (arc_c > arc_c_min + to_free) 170344cb6abcSbmc atomic_add_64(&arc_c, -to_free); 170449e3519aSmaybee else 170544cb6abcSbmc arc_c = arc_c_min; 170644cb6abcSbmc 170744cb6abcSbmc atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 170844cb6abcSbmc if (arc_c > arc_size) 170944cb6abcSbmc arc_c = MAX(arc_size, arc_c_min); 171044cb6abcSbmc if (arc_p > arc_c) 171144cb6abcSbmc arc_p = (arc_c >> 1); 171244cb6abcSbmc ASSERT(arc_c >= arc_c_min); 171344cb6abcSbmc ASSERT((int64_t)arc_p >= 0); 171449e3519aSmaybee } 1715fa9e4066Sahrens 171644cb6abcSbmc if (arc_size > arc_c) 171749e3519aSmaybee arc_adjust(); 1718fa9e4066Sahrens } 1719fa9e4066Sahrens 1720fa9e4066Sahrens static int 1721fa9e4066Sahrens arc_reclaim_needed(void) 1722fa9e4066Sahrens { 1723fa9e4066Sahrens uint64_t extra; 1724fa9e4066Sahrens 1725fa9e4066Sahrens #ifdef _KERNEL 17263cff2f43Sstans 17273cff2f43Sstans if (needfree) 17283cff2f43Sstans return (1); 17293cff2f43Sstans 1730fa9e4066Sahrens /* 1731fa9e4066Sahrens * take 'desfree' extra pages, so we reclaim sooner, rather than later 1732fa9e4066Sahrens */ 1733fa9e4066Sahrens extra = desfree; 1734fa9e4066Sahrens 1735fa9e4066Sahrens /* 1736fa9e4066Sahrens * check that we're out of range of the pageout scanner. It starts to 1737fa9e4066Sahrens * schedule paging if freemem is less than lotsfree and needfree. 1738fa9e4066Sahrens * lotsfree is the high-water mark for pageout, and needfree is the 1739fa9e4066Sahrens * number of needed free pages. We add extra pages here to make sure 1740fa9e4066Sahrens * the scanner doesn't start up while we're freeing memory. 1741fa9e4066Sahrens */ 1742fa9e4066Sahrens if (freemem < lotsfree + needfree + extra) 1743fa9e4066Sahrens return (1); 1744fa9e4066Sahrens 1745fa9e4066Sahrens /* 1746fa9e4066Sahrens * check to make sure that swapfs has enough space so that anon 1747*fa94a07fSbrendan * reservations can still succeed. 
anon_resvmem() checks that the 1748fa9e4066Sahrens * availrmem is greater than swapfs_minfree, and the number of reserved 1749fa9e4066Sahrens * swap pages. We also add a bit of extra here just to prevent 1750fa9e4066Sahrens * circumstances from getting really dire. 1751fa9e4066Sahrens */ 1752fa9e4066Sahrens if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1753fa9e4066Sahrens return (1); 1754fa9e4066Sahrens 17555dc8af33Smaybee #if defined(__i386) 1756fa9e4066Sahrens /* 1757fa9e4066Sahrens * If we're on an i386 platform, it's possible that we'll exhaust the 1758fa9e4066Sahrens * kernel heap space before we ever run out of available physical 1759fa9e4066Sahrens * memory. Most checks of the size of the heap_area compare against 1760fa9e4066Sahrens * tune.t_minarmem, which is the minimum available real memory that we 1761fa9e4066Sahrens * can have in the system. However, this is generally fixed at 25 pages 1762fa9e4066Sahrens * which is so low that it's useless. In this comparison, we seek to 1763fa9e4066Sahrens * calculate the total heap-size, and reclaim if more than 3/4ths of the 1764*fa94a07fSbrendan * heap is allocated. (Or, in the calculation, if less than 1/4th is 1765fa9e4066Sahrens * free) 1766fa9e4066Sahrens */ 1767fa9e4066Sahrens if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1768fa9e4066Sahrens (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1769fa9e4066Sahrens return (1); 1770fa9e4066Sahrens #endif 1771fa9e4066Sahrens 1772fa9e4066Sahrens #else 1773fa9e4066Sahrens if (spa_get_random(100) == 0) 1774fa9e4066Sahrens return (1); 1775fa9e4066Sahrens #endif 1776fa9e4066Sahrens return (0); 1777fa9e4066Sahrens } 1778fa9e4066Sahrens 1779fa9e4066Sahrens static void 1780fa9e4066Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat) 1781fa9e4066Sahrens { 1782fa9e4066Sahrens size_t i; 1783fa9e4066Sahrens kmem_cache_t *prev_cache = NULL; 1784ad23a2dbSjohansen kmem_cache_t *prev_data_cache = NULL; 1785fa9e4066Sahrens extern kmem_cache_t *zio_buf_cache[]; 1786ad23a2dbSjohansen extern kmem_cache_t *zio_data_buf_cache[]; 1787fa9e4066Sahrens 1788033f9833Sek #ifdef _KERNEL 17890e8c6158Smaybee if (arc_meta_used >= arc_meta_limit) { 17900e8c6158Smaybee /* 17910e8c6158Smaybee * We are exceeding our meta-data cache limit. 17920e8c6158Smaybee * Purge some DNLC entries to release holds on meta-data. 17930e8c6158Smaybee */ 17940e8c6158Smaybee dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 17950e8c6158Smaybee } 17965dc8af33Smaybee #if defined(__i386) 17975dc8af33Smaybee /* 17985dc8af33Smaybee * Reclaim unused memory from all kmem caches. 17995dc8af33Smaybee */ 18005dc8af33Smaybee kmem_reap(); 18015dc8af33Smaybee #endif 1802033f9833Sek #endif 1803033f9833Sek 1804fa9e4066Sahrens /* 1805*fa94a07fSbrendan * An aggressive reclamation will shrink the cache size as well as 1806ea8dc4b6Seschrock * reap free buffers from the arc kmem caches. 
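 *
 * Note that several consecutive zio_buf_cache[] (and
 * zio_data_buf_cache[]) slots may point at the same underlying kmem
 * cache, since buffer sizes are rounded up to a cache's size class;
 * the reap loop below remembers the previously reaped cache and skips
 * such duplicates.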
1807fa9e4066Sahrens */ 1808fa9e4066Sahrens if (strat == ARC_RECLAIM_AGGR) 180949e3519aSmaybee arc_shrink(); 1810fa9e4066Sahrens 1811fa9e4066Sahrens for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1812fa9e4066Sahrens if (zio_buf_cache[i] != prev_cache) { 1813fa9e4066Sahrens prev_cache = zio_buf_cache[i]; 1814fa9e4066Sahrens kmem_cache_reap_now(zio_buf_cache[i]); 1815fa9e4066Sahrens } 1816ad23a2dbSjohansen if (zio_data_buf_cache[i] != prev_data_cache) { 1817ad23a2dbSjohansen prev_data_cache = zio_data_buf_cache[i]; 1818ad23a2dbSjohansen kmem_cache_reap_now(zio_data_buf_cache[i]); 1819ad23a2dbSjohansen } 1820fa9e4066Sahrens } 1821ea8dc4b6Seschrock kmem_cache_reap_now(buf_cache); 1822ea8dc4b6Seschrock kmem_cache_reap_now(hdr_cache); 1823fa9e4066Sahrens } 1824fa9e4066Sahrens 1825fa9e4066Sahrens static void 1826fa9e4066Sahrens arc_reclaim_thread(void) 1827fa9e4066Sahrens { 1828fa9e4066Sahrens clock_t growtime = 0; 1829fa9e4066Sahrens arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1830fa9e4066Sahrens callb_cpr_t cpr; 1831fa9e4066Sahrens 1832fa9e4066Sahrens CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1833fa9e4066Sahrens 1834fa9e4066Sahrens mutex_enter(&arc_reclaim_thr_lock); 1835fa9e4066Sahrens while (arc_thread_exit == 0) { 1836fa9e4066Sahrens if (arc_reclaim_needed()) { 1837fa9e4066Sahrens 183844cb6abcSbmc if (arc_no_grow) { 1839fa9e4066Sahrens if (last_reclaim == ARC_RECLAIM_CONS) { 1840fa9e4066Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1841fa9e4066Sahrens } else { 1842fa9e4066Sahrens last_reclaim = ARC_RECLAIM_CONS; 1843fa9e4066Sahrens } 1844fa9e4066Sahrens } else { 184544cb6abcSbmc arc_no_grow = TRUE; 1846fa9e4066Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1847fa9e4066Sahrens membar_producer(); 1848fa9e4066Sahrens } 1849fa9e4066Sahrens 1850fa9e4066Sahrens /* reset the growth delay for every reclaim */ 1851fa9e4066Sahrens growtime = lbolt + (arc_grow_retry * hz); 1852fa9e4066Sahrens 1853fa9e4066Sahrens arc_kmem_reap_now(last_reclaim); 1854fa9e4066Sahrens 18550e8c6158Smaybee } else if (arc_no_grow && lbolt >= growtime) { 185644cb6abcSbmc arc_no_grow = FALSE; 1857fa9e4066Sahrens } 1858fa9e4066Sahrens 185944cb6abcSbmc if (2 * arc_c < arc_size + 186044cb6abcSbmc arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) 1861641fbdaeSmaybee arc_adjust(); 1862641fbdaeSmaybee 1863ea8dc4b6Seschrock if (arc_eviction_list != NULL) 1864ea8dc4b6Seschrock arc_do_user_evicts(); 1865ea8dc4b6Seschrock 1866fa9e4066Sahrens /* block until needed, or one second, whichever is shorter */ 1867fa9e4066Sahrens CALLB_CPR_SAFE_BEGIN(&cpr); 1868fa9e4066Sahrens (void) cv_timedwait(&arc_reclaim_thr_cv, 1869fa9e4066Sahrens &arc_reclaim_thr_lock, (lbolt + hz)); 1870fa9e4066Sahrens CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1871fa9e4066Sahrens } 1872fa9e4066Sahrens 1873fa9e4066Sahrens arc_thread_exit = 0; 1874fa9e4066Sahrens cv_broadcast(&arc_reclaim_thr_cv); 1875fa9e4066Sahrens CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1876fa9e4066Sahrens thread_exit(); 1877fa9e4066Sahrens } 1878fa9e4066Sahrens 1879ea8dc4b6Seschrock /* 1880ea8dc4b6Seschrock * Adapt arc info given the number of bytes we are trying to add and 1881ea8dc4b6Seschrock * the state that we are coming from. This function is only called 1882ea8dc4b6Seschrock * when we are adding new content to the cache.
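 *
 * Worked example (illustrative numbers): if the MRU ghost list holds
 * 100MB and the MFU ghost list holds 300MB, a hit in the MRU ghost
 * list is weighted by mult = 300 / 100 = 3, so arc_p grows by
 * 3 * bytes; hits in the smaller ghost list thus adapt the target
 * more aggressively.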
1883ea8dc4b6Seschrock */ 1884fa9e4066Sahrens static void 1885ea8dc4b6Seschrock arc_adapt(int bytes, arc_state_t *state) 1886fa9e4066Sahrens { 1887ea8dc4b6Seschrock int mult; 1888ea8dc4b6Seschrock 1889*fa94a07fSbrendan if (state == arc_l2c_only) 1890*fa94a07fSbrendan return; 1891*fa94a07fSbrendan 1892ea8dc4b6Seschrock ASSERT(bytes > 0); 1893fa9e4066Sahrens /* 1894ea8dc4b6Seschrock * Adapt the target size of the MRU list: 1895ea8dc4b6Seschrock * - if we just hit in the MRU ghost list, then increase 1896ea8dc4b6Seschrock * the target size of the MRU list. 1897ea8dc4b6Seschrock * - if we just hit in the MFU ghost list, then increase 1898ea8dc4b6Seschrock * the target size of the MFU list by decreasing the 1899ea8dc4b6Seschrock * target size of the MRU list. 1900fa9e4066Sahrens */ 190144cb6abcSbmc if (state == arc_mru_ghost) { 190244cb6abcSbmc mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 190344cb6abcSbmc 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 1904ea8dc4b6Seschrock 190544cb6abcSbmc arc_p = MIN(arc_c, arc_p + bytes * mult); 190644cb6abcSbmc } else if (state == arc_mfu_ghost) { 190744cb6abcSbmc mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 190844cb6abcSbmc 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 1909ea8dc4b6Seschrock 191044cb6abcSbmc arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 1911ea8dc4b6Seschrock } 191244cb6abcSbmc ASSERT((int64_t)arc_p >= 0); 1913fa9e4066Sahrens 1914fa9e4066Sahrens if (arc_reclaim_needed()) { 1915fa9e4066Sahrens cv_signal(&arc_reclaim_thr_cv); 1916fa9e4066Sahrens return; 1917fa9e4066Sahrens } 1918fa9e4066Sahrens 191944cb6abcSbmc if (arc_no_grow) 1920fa9e4066Sahrens return; 1921fa9e4066Sahrens 192244cb6abcSbmc if (arc_c >= arc_c_max) 1923ea8dc4b6Seschrock return; 1924ea8dc4b6Seschrock 1925fa9e4066Sahrens /* 1926ea8dc4b6Seschrock * If we're within (2 * maxblocksize) bytes of the target 1927ea8dc4b6Seschrock * cache size, increment the target cache size 1928fa9e4066Sahrens */ 192944cb6abcSbmc if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 193044cb6abcSbmc atomic_add_64(&arc_c, (int64_t)bytes); 193144cb6abcSbmc if (arc_c > arc_c_max) 193244cb6abcSbmc arc_c = arc_c_max; 193344cb6abcSbmc else if (state == arc_anon) 193444cb6abcSbmc atomic_add_64(&arc_p, (int64_t)bytes); 193544cb6abcSbmc if (arc_p > arc_c) 193644cb6abcSbmc arc_p = arc_c; 1937fa9e4066Sahrens } 193844cb6abcSbmc ASSERT((int64_t)arc_p >= 0); 1939fa9e4066Sahrens } 1940fa9e4066Sahrens 1941fa9e4066Sahrens /* 1942ea8dc4b6Seschrock * Check if the cache has reached its limits and eviction is required 1943ea8dc4b6Seschrock * prior to insert. 1944fa9e4066Sahrens */ 1945fa9e4066Sahrens static int 19460e8c6158Smaybee arc_evict_needed(arc_buf_contents_t type) 1947fa9e4066Sahrens { 19480e8c6158Smaybee if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 19490e8c6158Smaybee return (1); 19500e8c6158Smaybee 19510e8c6158Smaybee #ifdef _KERNEL 19520e8c6158Smaybee /* 19530e8c6158Smaybee * If zio data pages are being allocated out of a separate heap segment, 19540e8c6158Smaybee * then enforce that the size of available vmem for this area remains 19550e8c6158Smaybee * above about 1/32nd free. 
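	 *
	 * Worked example (illustrative numbers): with 1GB currently
	 * allocated from zio_arena, eviction is signalled once its free
	 * space falls below 1GB >> 5 = 32MB.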
19560e8c6158Smaybee */ 19570e8c6158Smaybee if (type == ARC_BUFC_DATA && zio_arena != NULL && 19580e8c6158Smaybee vmem_size(zio_arena, VMEM_FREE) < 19590e8c6158Smaybee (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 19600e8c6158Smaybee return (1); 19610e8c6158Smaybee #endif 19620e8c6158Smaybee 1963fa9e4066Sahrens if (arc_reclaim_needed()) 1964fa9e4066Sahrens return (1); 1965fa9e4066Sahrens 196644cb6abcSbmc return (arc_size > arc_c); 1967fa9e4066Sahrens } 1968fa9e4066Sahrens 1969fa9e4066Sahrens /* 197044eda4d7Smaybee * The buffer, supplied as the first argument, needs a data block. 197144eda4d7Smaybee * So, if we are at cache max, determine which cache should be victimized. 197244eda4d7Smaybee * We have the following cases: 1973fa9e4066Sahrens * 197444cb6abcSbmc * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 1975fa9e4066Sahrens * In this situation if we're out of space, but the resident size of the MFU is 1976fa9e4066Sahrens * under the limit, victimize the MFU cache to satisfy this insertion request. 1977fa9e4066Sahrens * 197844cb6abcSbmc * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 1979fa9e4066Sahrens * Here, we've used up all of the available space for the MRU, so we need to 1980fa9e4066Sahrens * evict from our own cache instead. Evict from the set of resident MRU 1981fa9e4066Sahrens * entries. 1982fa9e4066Sahrens * 198344cb6abcSbmc * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 1984fa9e4066Sahrens * c minus p represents the MFU space in the cache, since p is the size of the 1985fa9e4066Sahrens * cache that is dedicated to the MRU. In this situation there's still space on 1986fa9e4066Sahrens * the MFU side, so the MRU side needs to be victimized. 1987fa9e4066Sahrens * 198844cb6abcSbmc * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 1989fa9e4066Sahrens * MFU's resident set is consuming more space than it has been allotted. In 1990fa9e4066Sahrens * this situation, we must victimize our own cache, the MFU, for this insertion. 1991fa9e4066Sahrens */ 1992fa9e4066Sahrens static void 199344eda4d7Smaybee arc_get_data_buf(arc_buf_t *buf) 1994fa9e4066Sahrens { 1995ad23a2dbSjohansen arc_state_t *state = buf->b_hdr->b_state; 1996ad23a2dbSjohansen uint64_t size = buf->b_hdr->b_size; 1997ad23a2dbSjohansen arc_buf_contents_t type = buf->b_hdr->b_type; 1998fa9e4066Sahrens 199944eda4d7Smaybee arc_adapt(size, state); 2000fa9e4066Sahrens 200144eda4d7Smaybee /* 200244eda4d7Smaybee * We have not yet reached cache maximum size, 200344eda4d7Smaybee * just allocate a new buffer. 200444eda4d7Smaybee */ 20050e8c6158Smaybee if (!arc_evict_needed(type)) { 2006ad23a2dbSjohansen if (type == ARC_BUFC_METADATA) { 2007ad23a2dbSjohansen buf->b_data = zio_buf_alloc(size); 20080e8c6158Smaybee arc_space_consume(size); 2009ad23a2dbSjohansen } else { 2010ad23a2dbSjohansen ASSERT(type == ARC_BUFC_DATA); 2011ad23a2dbSjohansen buf->b_data = zio_data_buf_alloc(size); 20120e8c6158Smaybee atomic_add_64(&arc_size, size); 2013ad23a2dbSjohansen } 201444eda4d7Smaybee goto out; 201544eda4d7Smaybee } 201644eda4d7Smaybee 201744eda4d7Smaybee /* 201844eda4d7Smaybee * If we are prefetching from the mfu ghost list, this buffer 201944eda4d7Smaybee * will end up on the mru list; so steal space from there. 202044eda4d7Smaybee */ 202144cb6abcSbmc if (state == arc_mfu_ghost) 202244cb6abcSbmc state = buf->b_hdr->b_flags & ARC_PREFETCH ? 
arc_mru : arc_mfu; 202344cb6abcSbmc else if (state == arc_mru_ghost) 202444cb6abcSbmc state = arc_mru; 202544cb6abcSbmc 202644cb6abcSbmc if (state == arc_mru || state == arc_anon) { 202744cb6abcSbmc uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 20280e8c6158Smaybee state = (arc_mfu->arcs_lsize[type] > 0 && 20290e8c6158Smaybee arc_p > mru_used) ? arc_mfu : arc_mru; 2030fa9e4066Sahrens } else { 203144eda4d7Smaybee /* MFU cases */ 203244cb6abcSbmc uint64_t mfu_space = arc_c - arc_p; 20330e8c6158Smaybee state = (arc_mru->arcs_lsize[type] > 0 && 20340e8c6158Smaybee mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 203544eda4d7Smaybee } 2036ad23a2dbSjohansen if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) { 2037ad23a2dbSjohansen if (type == ARC_BUFC_METADATA) { 2038ad23a2dbSjohansen buf->b_data = zio_buf_alloc(size); 20390e8c6158Smaybee arc_space_consume(size); 2040ad23a2dbSjohansen } else { 2041ad23a2dbSjohansen ASSERT(type == ARC_BUFC_DATA); 2042ad23a2dbSjohansen buf->b_data = zio_data_buf_alloc(size); 20430e8c6158Smaybee atomic_add_64(&arc_size, size); 2044ad23a2dbSjohansen } 204544cb6abcSbmc ARCSTAT_BUMP(arcstat_recycle_miss); 204644eda4d7Smaybee } 204744eda4d7Smaybee ASSERT(buf->b_data != NULL); 204844eda4d7Smaybee out: 204944eda4d7Smaybee /* 205044eda4d7Smaybee * Update the state size. Note that ghost states have a 205144eda4d7Smaybee * "ghost size" and so don't need to be updated. 205244eda4d7Smaybee */ 205344eda4d7Smaybee if (!GHOST_STATE(buf->b_hdr->b_state)) { 205444eda4d7Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 205544eda4d7Smaybee 205644cb6abcSbmc atomic_add_64(&hdr->b_state->arcs_size, size); 205744eda4d7Smaybee if (list_link_active(&hdr->b_arc_node)) { 205844eda4d7Smaybee ASSERT(refcount_is_zero(&hdr->b_refcnt)); 20590e8c6158Smaybee atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2060fa9e4066Sahrens } 2061641fbdaeSmaybee /* 2062641fbdaeSmaybee * If we are growing the cache, and we are adding anonymous 206344cb6abcSbmc * data, and we have outgrown arc_p, update arc_p 2064641fbdaeSmaybee */ 206544cb6abcSbmc if (arc_size < arc_c && hdr->b_state == arc_anon && 206644cb6abcSbmc arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 206744cb6abcSbmc arc_p = MIN(arc_c, arc_p + size); 2068fa9e4066Sahrens } 2069fa9e4066Sahrens } 2070fa9e4066Sahrens 2071fa9e4066Sahrens /* 2072fa9e4066Sahrens * This routine is called whenever a buffer is accessed. 2073ea8dc4b6Seschrock * NOTE: the hash lock is dropped in this function. 2074fa9e4066Sahrens */ 2075fa9e4066Sahrens static void 207644eda4d7Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2077fa9e4066Sahrens { 2078fa9e4066Sahrens ASSERT(MUTEX_HELD(hash_lock)); 2079fa9e4066Sahrens 208044cb6abcSbmc if (buf->b_state == arc_anon) { 2081fa9e4066Sahrens /* 2082fa9e4066Sahrens * This buffer is not in the cache, and does not 2083fa9e4066Sahrens * appear in our "ghost" list. Add the new buffer 2084fa9e4066Sahrens * to the MRU state. 
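		 *
		 * Stamping b_arc_access with lbolt below also starts the
		 * ARC_MINTIME clock: the buffer must be referenced again
		 * after that interval before a later access can promote
		 * it to the MFU state.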
2085fa9e4066Sahrens */ 2086fa9e4066Sahrens 2087fa9e4066Sahrens ASSERT(buf->b_arc_access == 0); 2088fa9e4066Sahrens buf->b_arc_access = lbolt; 2089ea8dc4b6Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 209044cb6abcSbmc arc_change_state(arc_mru, buf, hash_lock); 2091fa9e4066Sahrens 209244cb6abcSbmc } else if (buf->b_state == arc_mru) { 2093fa9e4066Sahrens /* 209413506d1eSmaybee * If this buffer is here because of a prefetch, then either: 209513506d1eSmaybee * - clear the flag if this is a "referencing" read 209613506d1eSmaybee * (any subsequent access will bump this into the MFU state). 209713506d1eSmaybee * or 209813506d1eSmaybee * - move the buffer to the head of the list if this is 209913506d1eSmaybee * another prefetch (to make it less likely to be evicted). 2100fa9e4066Sahrens */ 2101fa9e4066Sahrens if ((buf->b_flags & ARC_PREFETCH) != 0) { 210213506d1eSmaybee if (refcount_count(&buf->b_refcnt) == 0) { 210313506d1eSmaybee ASSERT(list_link_active(&buf->b_arc_node)); 210413506d1eSmaybee } else { 210513506d1eSmaybee buf->b_flags &= ~ARC_PREFETCH; 210644cb6abcSbmc ARCSTAT_BUMP(arcstat_mru_hits); 210713506d1eSmaybee } 210813506d1eSmaybee buf->b_arc_access = lbolt; 2109fa9e4066Sahrens return; 2110fa9e4066Sahrens } 2111fa9e4066Sahrens 2112fa9e4066Sahrens /* 2113fa9e4066Sahrens * This buffer has been "accessed" only once so far, 2114fa9e4066Sahrens * but it is still in the cache. Move it to the MFU 2115fa9e4066Sahrens * state. 2116fa9e4066Sahrens */ 2117fa9e4066Sahrens if (lbolt > buf->b_arc_access + ARC_MINTIME) { 2118fa9e4066Sahrens /* 2119fa9e4066Sahrens * More than 125ms have passed since we 2120fa9e4066Sahrens * instantiated this buffer. Move it to the 2121fa9e4066Sahrens * most frequently used state. 2122fa9e4066Sahrens */ 2123fa9e4066Sahrens buf->b_arc_access = lbolt; 2124ea8dc4b6Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 212544cb6abcSbmc arc_change_state(arc_mfu, buf, hash_lock); 2126fa9e4066Sahrens } 212744cb6abcSbmc ARCSTAT_BUMP(arcstat_mru_hits); 212844cb6abcSbmc } else if (buf->b_state == arc_mru_ghost) { 2129fa9e4066Sahrens arc_state_t *new_state; 2130fa9e4066Sahrens /* 2131fa9e4066Sahrens * This buffer has been "accessed" recently, but 2132fa9e4066Sahrens * was evicted from the cache. Move it to the 2133fa9e4066Sahrens * MFU state. 2134fa9e4066Sahrens */ 2135fa9e4066Sahrens 2136fa9e4066Sahrens if (buf->b_flags & ARC_PREFETCH) { 213744cb6abcSbmc new_state = arc_mru; 213813506d1eSmaybee if (refcount_count(&buf->b_refcnt) > 0) 213913506d1eSmaybee buf->b_flags &= ~ARC_PREFETCH; 2140ea8dc4b6Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2141fa9e4066Sahrens } else { 214244cb6abcSbmc new_state = arc_mfu; 2143ea8dc4b6Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2144fa9e4066Sahrens } 2145fa9e4066Sahrens 2146fa9e4066Sahrens buf->b_arc_access = lbolt; 2147fa9e4066Sahrens arc_change_state(new_state, buf, hash_lock); 2148fa9e4066Sahrens 214944cb6abcSbmc ARCSTAT_BUMP(arcstat_mru_ghost_hits); 215044cb6abcSbmc } else if (buf->b_state == arc_mfu) { 2151fa9e4066Sahrens /* 2152fa9e4066Sahrens * This buffer has been accessed more than once and is 2153fa9e4066Sahrens * still in the cache. Keep it in the MFU state. 2154fa9e4066Sahrens * 215513506d1eSmaybee * NOTE: an add_reference() that occurred when we did 215613506d1eSmaybee * the arc_read() will have kicked this off the list. 215713506d1eSmaybee * If it was a prefetch, we will explicitly move it to 215813506d1eSmaybee * the head of the list now. 
2159fa9e4066Sahrens */ 216013506d1eSmaybee if ((buf->b_flags & ARC_PREFETCH) != 0) { 216113506d1eSmaybee ASSERT(refcount_count(&buf->b_refcnt) == 0); 216213506d1eSmaybee ASSERT(list_link_active(&buf->b_arc_node)); 216313506d1eSmaybee } 216444cb6abcSbmc ARCSTAT_BUMP(arcstat_mfu_hits); 216513506d1eSmaybee buf->b_arc_access = lbolt; 216644cb6abcSbmc } else if (buf->b_state == arc_mfu_ghost) { 216744cb6abcSbmc arc_state_t *new_state = arc_mfu; 2168fa9e4066Sahrens /* 2169fa9e4066Sahrens * This buffer has been accessed more than once but has 2170fa9e4066Sahrens * been evicted from the cache. Move it back to the 2171fa9e4066Sahrens * MFU state. 2172fa9e4066Sahrens */ 2173fa9e4066Sahrens 217413506d1eSmaybee if (buf->b_flags & ARC_PREFETCH) { 217513506d1eSmaybee /* 217613506d1eSmaybee * This is a prefetch access... 217713506d1eSmaybee * move this block back to the MRU state. 217813506d1eSmaybee */ 217913506d1eSmaybee ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 218044cb6abcSbmc new_state = arc_mru; 218113506d1eSmaybee } 218213506d1eSmaybee 2183fa9e4066Sahrens buf->b_arc_access = lbolt; 2184ea8dc4b6Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 218513506d1eSmaybee arc_change_state(new_state, buf, hash_lock); 2186fa9e4066Sahrens 218744cb6abcSbmc ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2188*fa94a07fSbrendan } else if (buf->b_state == arc_l2c_only) { 2189*fa94a07fSbrendan /* 2190*fa94a07fSbrendan * This buffer is on the 2nd Level ARC. 2191*fa94a07fSbrendan */ 2192*fa94a07fSbrendan 2193*fa94a07fSbrendan buf->b_arc_access = lbolt; 2194*fa94a07fSbrendan DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2195*fa94a07fSbrendan arc_change_state(arc_mfu, buf, hash_lock); 2196fa9e4066Sahrens } else { 2197fa9e4066Sahrens ASSERT(!"invalid arc state"); 2198fa9e4066Sahrens } 2199fa9e4066Sahrens } 2200fa9e4066Sahrens 2201fa9e4066Sahrens /* a generic arc_done_func_t which you can use */ 2202fa9e4066Sahrens /* ARGSUSED */ 2203fa9e4066Sahrens void 2204fa9e4066Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2205fa9e4066Sahrens { 2206fa9e4066Sahrens bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2207ea8dc4b6Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2208fa9e4066Sahrens } 2209fa9e4066Sahrens 22100e8c6158Smaybee /* a generic arc_done_func_t */ 2211fa9e4066Sahrens void 2212fa9e4066Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2213fa9e4066Sahrens { 2214fa9e4066Sahrens arc_buf_t **bufp = arg; 2215fa9e4066Sahrens if (zio && zio->io_error) { 2216ea8dc4b6Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2217fa9e4066Sahrens *bufp = NULL; 2218fa9e4066Sahrens } else { 2219fa9e4066Sahrens *bufp = buf; 2220fa9e4066Sahrens } 2221fa9e4066Sahrens } 2222fa9e4066Sahrens 2223fa9e4066Sahrens static void 2224fa9e4066Sahrens arc_read_done(zio_t *zio) 2225fa9e4066Sahrens { 2226bbf4a8dfSmaybee arc_buf_hdr_t *hdr, *found; 2227fa9e4066Sahrens arc_buf_t *buf; 2228fa9e4066Sahrens arc_buf_t *abuf; /* buffer we're assigning to callback */ 2229fa9e4066Sahrens kmutex_t *hash_lock; 2230fa9e4066Sahrens arc_callback_t *callback_list, *acb; 2231fa9e4066Sahrens int freeable = FALSE; 2232fa9e4066Sahrens 2233fa9e4066Sahrens buf = zio->io_private; 2234fa9e4066Sahrens hdr = buf->b_hdr; 2235fa9e4066Sahrens 2236bbf4a8dfSmaybee /* 2237bbf4a8dfSmaybee * The hdr was inserted into hash-table and removed from lists 2238bbf4a8dfSmaybee * prior to starting I/O. 
We should find this header, since 2239bbf4a8dfSmaybee * it's in the hash table, and it should be legit since it's 2240bbf4a8dfSmaybee * not possible to evict it during the I/O. The only possible 2241bbf4a8dfSmaybee * reason for it not to be found is if we were freed during the 2242bbf4a8dfSmaybee * read. 2243bbf4a8dfSmaybee */ 2244bbf4a8dfSmaybee found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 22456b4acc8bSahrens &hash_lock); 2246fa9e4066Sahrens 2247bbf4a8dfSmaybee ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2248*fa94a07fSbrendan (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2249*fa94a07fSbrendan (found == hdr && HDR_L2_READING(hdr))); 2250*fa94a07fSbrendan 2251*fa94a07fSbrendan hdr->b_flags &= ~(ARC_L2_READING|ARC_L2_EVICTED); 2252*fa94a07fSbrendan if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2253*fa94a07fSbrendan hdr->b_flags |= ARC_DONT_L2CACHE; 2254fa9e4066Sahrens 2255fa9e4066Sahrens /* byteswap if necessary */ 2256fa9e4066Sahrens callback_list = hdr->b_acb; 2257fa9e4066Sahrens ASSERT(callback_list != NULL); 2258fa9e4066Sahrens if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 2259fa9e4066Sahrens callback_list->acb_byteswap(buf->b_data, hdr->b_size); 2260fa9e4066Sahrens 2261*fa94a07fSbrendan arc_cksum_compute(buf, B_FALSE); 22626b4acc8bSahrens 2263fa9e4066Sahrens /* create copies of the data buffer for the callers */ 2264fa9e4066Sahrens abuf = buf; 2265fa9e4066Sahrens for (acb = callback_list; acb; acb = acb->acb_next) { 2266fa9e4066Sahrens if (acb->acb_done) { 226744eda4d7Smaybee if (abuf == NULL) 226844eda4d7Smaybee abuf = arc_buf_clone(buf); 2269fa9e4066Sahrens acb->acb_buf = abuf; 2270fa9e4066Sahrens abuf = NULL; 2271fa9e4066Sahrens } 2272fa9e4066Sahrens } 2273fa9e4066Sahrens hdr->b_acb = NULL; 2274fa9e4066Sahrens hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2275ea8dc4b6Seschrock ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2276ea8dc4b6Seschrock if (abuf == buf) 2277ea8dc4b6Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 2278fa9e4066Sahrens 2279fa9e4066Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2280fa9e4066Sahrens 2281fa9e4066Sahrens if (zio->io_error != 0) { 2282fa9e4066Sahrens hdr->b_flags |= ARC_IO_ERROR; 228344cb6abcSbmc if (hdr->b_state != arc_anon) 228444cb6abcSbmc arc_change_state(arc_anon, hdr, hash_lock); 2285ea8dc4b6Seschrock if (HDR_IN_HASH_TABLE(hdr)) 2286ea8dc4b6Seschrock buf_hash_remove(hdr); 2287fa9e4066Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 228813506d1eSmaybee /* convert checksum errors into IO errors */ 2289ea8dc4b6Seschrock if (zio->io_error == ECKSUM) 2290ea8dc4b6Seschrock zio->io_error = EIO; 2291fa9e4066Sahrens } 2292fa9e4066Sahrens 2293ea8dc4b6Seschrock /* 229413506d1eSmaybee * Broadcast before we drop the hash_lock to avoid the possibility 229513506d1eSmaybee * that the hdr (and hence the cv) might be freed before we get to 229613506d1eSmaybee * the cv_broadcast(). 2297ea8dc4b6Seschrock */ 2298ea8dc4b6Seschrock cv_broadcast(&hdr->b_cv); 2299ea8dc4b6Seschrock 2300bbf4a8dfSmaybee if (hash_lock) { 2301fa9e4066Sahrens /* 2302fa9e4066Sahrens * Only call arc_access on anonymous buffers. This is because 2303fa9e4066Sahrens * if we've issued an I/O for an evicted buffer, we've already 2304fa9e4066Sahrens * called arc_access (to prevent any simultaneous readers from 2305fa9e4066Sahrens * getting confused). 
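		 *
		 * (A buffer resurrected from a ghost state already had
		 * arc_access() called by arc_read() before the I/O was
		 * issued; see the GHOST_STATE() check there.)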
2306fa9e4066Sahrens */ 230744cb6abcSbmc if (zio->io_error == 0 && hdr->b_state == arc_anon) 230844eda4d7Smaybee arc_access(hdr, hash_lock); 230944eda4d7Smaybee mutex_exit(hash_lock); 2310fa9e4066Sahrens } else { 2311fa9e4066Sahrens /* 2312fa9e4066Sahrens * This block was freed while we waited for the read to 2313fa9e4066Sahrens * complete. It has been removed from the hash table and 2314fa9e4066Sahrens * moved to the anonymous state (so that it won't show up 2315fa9e4066Sahrens * in the cache). 2316fa9e4066Sahrens */ 231744cb6abcSbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2318fa9e4066Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 2319fa9e4066Sahrens } 2320fa9e4066Sahrens 2321fa9e4066Sahrens /* execute each callback and free its structure */ 2322fa9e4066Sahrens while ((acb = callback_list) != NULL) { 2323fa9e4066Sahrens if (acb->acb_done) 2324fa9e4066Sahrens acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2325fa9e4066Sahrens 2326fa9e4066Sahrens if (acb->acb_zio_dummy != NULL) { 2327fa9e4066Sahrens acb->acb_zio_dummy->io_error = zio->io_error; 2328fa9e4066Sahrens zio_nowait(acb->acb_zio_dummy); 2329fa9e4066Sahrens } 2330fa9e4066Sahrens 2331fa9e4066Sahrens callback_list = acb->acb_next; 2332fa9e4066Sahrens kmem_free(acb, sizeof (arc_callback_t)); 2333fa9e4066Sahrens } 2334fa9e4066Sahrens 2335fa9e4066Sahrens if (freeable) 2336ea8dc4b6Seschrock arc_hdr_destroy(hdr); 2337fa9e4066Sahrens } 2338fa9e4066Sahrens 2339fa9e4066Sahrens /* 2340fa9e4066Sahrens * "Read" the block at the specified DVA (in bp) via the 2341fa9e4066Sahrens * cache. If the block is found in the cache, invoke the provided 2342fa9e4066Sahrens * callback immediately and return. Note that the `zio' parameter 2343fa9e4066Sahrens * in the callback will be NULL in this case, since no IO was 2344fa9e4066Sahrens * required. If the block is not in the cache, pass the read request 2345fa9e4066Sahrens * on to the spa with a substitute callback function, so that the 2346fa9e4066Sahrens * requested block will be added to the cache. 2347fa9e4066Sahrens * 2348fa9e4066Sahrens * If a read request arrives for a block that has a read in-progress, 2349fa9e4066Sahrens * either wait for the in-progress read to complete (and return the 2350fa9e4066Sahrens * results); or, if this is a read with a "done" func, add a record 2351fa9e4066Sahrens * to the read to invoke the "done" func when the read completes, 2352fa9e4066Sahrens * and return; or just return. 2353fa9e4066Sahrens * 2354fa9e4066Sahrens * arc_read_done() will invoke all the requested "done" functions 2355fa9e4066Sahrens * for readers of this block.
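 *
 * Illustrative usage sketch (a hypothetical synchronous caller; the
 * surrounding variables are assumptions, not taken from this file):
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	(void) arc_read(NULL, spa, bp, byteswap, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
 *
 * On return abuf is NULL if the read failed; otherwise it holds a
 * referenced copy that must eventually be released with
 * arc_buf_remove_ref(abuf, &abuf).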
2356fa9e4066Sahrens */ 2357fa9e4066Sahrens int 2358fa9e4066Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 2359fa9e4066Sahrens arc_done_func_t *done, void *private, int priority, int flags, 236013506d1eSmaybee uint32_t *arc_flags, zbookmark_t *zb) 2361fa9e4066Sahrens { 2362fa9e4066Sahrens arc_buf_hdr_t *hdr; 2363fa9e4066Sahrens arc_buf_t *buf; 2364fa9e4066Sahrens kmutex_t *hash_lock; 2365*fa94a07fSbrendan zio_t *rzio; 2366fa9e4066Sahrens 2367fa9e4066Sahrens top: 2368fa9e4066Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2369ea8dc4b6Seschrock if (hdr && hdr->b_datacnt > 0) { 2370fa9e4066Sahrens 237113506d1eSmaybee *arc_flags |= ARC_CACHED; 237213506d1eSmaybee 2373fa9e4066Sahrens if (HDR_IO_IN_PROGRESS(hdr)) { 237413506d1eSmaybee 237513506d1eSmaybee if (*arc_flags & ARC_WAIT) { 237613506d1eSmaybee cv_wait(&hdr->b_cv, hash_lock); 237713506d1eSmaybee mutex_exit(hash_lock); 237813506d1eSmaybee goto top; 237913506d1eSmaybee } 238013506d1eSmaybee ASSERT(*arc_flags & ARC_NOWAIT); 238113506d1eSmaybee 238213506d1eSmaybee if (done) { 2383fa9e4066Sahrens arc_callback_t *acb = NULL; 2384fa9e4066Sahrens 2385fa9e4066Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), 2386fa9e4066Sahrens KM_SLEEP); 2387fa9e4066Sahrens acb->acb_done = done; 2388fa9e4066Sahrens acb->acb_private = private; 2389fa9e4066Sahrens acb->acb_byteswap = swap; 2390fa9e4066Sahrens if (pio != NULL) 2391fa9e4066Sahrens acb->acb_zio_dummy = zio_null(pio, 2392fa9e4066Sahrens spa, NULL, NULL, flags); 2393fa9e4066Sahrens 2394fa9e4066Sahrens ASSERT(acb->acb_done != NULL); 2395fa9e4066Sahrens acb->acb_next = hdr->b_acb; 2396fa9e4066Sahrens hdr->b_acb = acb; 2397fa9e4066Sahrens add_reference(hdr, hash_lock, private); 2398fa9e4066Sahrens mutex_exit(hash_lock); 2399fa9e4066Sahrens return (0); 2400fa9e4066Sahrens } 2401fa9e4066Sahrens mutex_exit(hash_lock); 2402fa9e4066Sahrens return (0); 2403fa9e4066Sahrens } 2404fa9e4066Sahrens 240544cb6abcSbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2406fa9e4066Sahrens 2407ea8dc4b6Seschrock if (done) { 240844eda4d7Smaybee add_reference(hdr, hash_lock, private); 2409ea8dc4b6Seschrock /* 2410ea8dc4b6Seschrock * If this block is already in use, create a new 2411ea8dc4b6Seschrock * copy of the data so that we will be guaranteed 2412ea8dc4b6Seschrock * that arc_release() will always succeed. 
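			 *
			 * (A clone gives this caller a private arc_buf_t
			 * and private data copy, so readers sharing the
			 * same hdr can never prevent a later
			 * arc_release() from succeeding.)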
2413ea8dc4b6Seschrock */ 2414fa9e4066Sahrens buf = hdr->b_buf; 2415ea8dc4b6Seschrock ASSERT(buf); 2416ea8dc4b6Seschrock ASSERT(buf->b_data); 241744eda4d7Smaybee if (HDR_BUF_AVAILABLE(hdr)) { 2418ea8dc4b6Seschrock ASSERT(buf->b_efunc == NULL); 2419ea8dc4b6Seschrock hdr->b_flags &= ~ARC_BUF_AVAILABLE; 242044eda4d7Smaybee } else { 242144eda4d7Smaybee buf = arc_buf_clone(buf); 2422ea8dc4b6Seschrock } 242313506d1eSmaybee } else if (*arc_flags & ARC_PREFETCH && 242413506d1eSmaybee refcount_count(&hdr->b_refcnt) == 0) { 242513506d1eSmaybee hdr->b_flags |= ARC_PREFETCH; 2426fa9e4066Sahrens } 2427fa9e4066Sahrens DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 242844eda4d7Smaybee arc_access(hdr, hash_lock); 242944eda4d7Smaybee mutex_exit(hash_lock); 243044cb6abcSbmc ARCSTAT_BUMP(arcstat_hits); 243144cb6abcSbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 243244cb6abcSbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 243344cb6abcSbmc data, metadata, hits); 243444cb6abcSbmc 2435fa9e4066Sahrens if (done) 2436fa9e4066Sahrens done(NULL, buf, private); 2437fa9e4066Sahrens } else { 2438fa9e4066Sahrens uint64_t size = BP_GET_LSIZE(bp); 2439fa9e4066Sahrens arc_callback_t *acb; 2440fa9e4066Sahrens 2441fa9e4066Sahrens if (hdr == NULL) { 2442fa9e4066Sahrens /* this block is not in the cache */ 2443fa9e4066Sahrens arc_buf_hdr_t *exists; 2444ad23a2dbSjohansen arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2445ad23a2dbSjohansen buf = arc_buf_alloc(spa, size, private, type); 2446fa9e4066Sahrens hdr = buf->b_hdr; 2447fa9e4066Sahrens hdr->b_dva = *BP_IDENTITY(bp); 2448fa9e4066Sahrens hdr->b_birth = bp->blk_birth; 2449fa9e4066Sahrens hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2450fa9e4066Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2451fa9e4066Sahrens if (exists) { 2452fa9e4066Sahrens /* somebody beat us to the hash insert */ 2453fa9e4066Sahrens mutex_exit(hash_lock); 2454fa9e4066Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2455fa9e4066Sahrens hdr->b_birth = 0; 2456fa9e4066Sahrens hdr->b_cksum0 = 0; 2457ea8dc4b6Seschrock (void) arc_buf_remove_ref(buf, private); 2458fa9e4066Sahrens goto top; /* restart the IO request */ 2459fa9e4066Sahrens } 246013506d1eSmaybee /* if this is a prefetch, we don't have a reference */ 246113506d1eSmaybee if (*arc_flags & ARC_PREFETCH) { 246213506d1eSmaybee (void) remove_reference(hdr, hash_lock, 246313506d1eSmaybee private); 246413506d1eSmaybee hdr->b_flags |= ARC_PREFETCH; 246513506d1eSmaybee } 246613506d1eSmaybee if (BP_GET_LEVEL(bp) > 0) 246713506d1eSmaybee hdr->b_flags |= ARC_INDIRECT; 2468fa9e4066Sahrens } else { 2469fa9e4066Sahrens /* this block is in the ghost cache */ 2470ea8dc4b6Seschrock ASSERT(GHOST_STATE(hdr->b_state)); 2471ea8dc4b6Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 247213506d1eSmaybee ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2473ea8dc4b6Seschrock ASSERT(hdr->b_buf == NULL); 247413506d1eSmaybee 247513506d1eSmaybee /* if this is a prefetch, we don't have a reference */ 247613506d1eSmaybee if (*arc_flags & ARC_PREFETCH) 247713506d1eSmaybee hdr->b_flags |= ARC_PREFETCH; 247813506d1eSmaybee else 247913506d1eSmaybee add_reference(hdr, hash_lock, private); 2480fa9e4066Sahrens buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 2481fa9e4066Sahrens buf->b_hdr = hdr; 248244eda4d7Smaybee buf->b_data = NULL; 2483ea8dc4b6Seschrock buf->b_efunc = NULL; 2484ea8dc4b6Seschrock buf->b_private = NULL; 2485fa9e4066Sahrens buf->b_next = NULL; 2486fa9e4066Sahrens hdr->b_buf = buf; 248744eda4d7Smaybee arc_get_data_buf(buf); 2488ea8dc4b6Seschrock ASSERT(hdr->b_datacnt == 0); 
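			/* ghost hdrs carry no data; this one does now */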
2489ea8dc4b6Seschrock hdr->b_datacnt = 1; 249013506d1eSmaybee 2491fa9e4066Sahrens } 2492fa9e4066Sahrens 2493fa9e4066Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2494fa9e4066Sahrens acb->acb_done = done; 2495fa9e4066Sahrens acb->acb_private = private; 2496fa9e4066Sahrens acb->acb_byteswap = swap; 2497fa9e4066Sahrens 2498fa9e4066Sahrens ASSERT(hdr->b_acb == NULL); 2499fa9e4066Sahrens hdr->b_acb = acb; 2500fa9e4066Sahrens hdr->b_flags |= ARC_IO_IN_PROGRESS; 2501fa9e4066Sahrens 2502fa9e4066Sahrens /* 2503fa9e4066Sahrens * If the buffer has been evicted, migrate it to a present state 2504fa9e4066Sahrens * before issuing the I/O. Once we drop the hash-table lock, 2505fa9e4066Sahrens * the header will be marked as I/O in progress and have an 2506fa9e4066Sahrens * attached buffer. At this point, anybody who finds this 2507fa9e4066Sahrens * buffer ought to notice that it's legit but has a pending I/O. 2508fa9e4066Sahrens */ 2509fa9e4066Sahrens 2510ea8dc4b6Seschrock if (GHOST_STATE(hdr->b_state)) 251144eda4d7Smaybee arc_access(hdr, hash_lock); 2512fa9e4066Sahrens 2513fa9e4066Sahrens ASSERT3U(hdr->b_size, ==, size); 2514c543ec06Sahrens DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 2515c543ec06Sahrens zbookmark_t *, zb); 251644cb6abcSbmc ARCSTAT_BUMP(arcstat_misses); 251744cb6abcSbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 251844cb6abcSbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 251944cb6abcSbmc data, metadata, misses); 2520ea8dc4b6Seschrock 2521*fa94a07fSbrendan if (l2arc_ndev != 0) { 2522*fa94a07fSbrendan /* 2523*fa94a07fSbrendan * Read from the L2ARC if the following are true: 2524*fa94a07fSbrendan * 1. This buffer has L2ARC metadata. 2525*fa94a07fSbrendan * 2. This buffer isn't currently writing to the L2ARC. 2526*fa94a07fSbrendan */ 2527*fa94a07fSbrendan if (hdr->b_l2hdr != NULL && !HDR_L2_WRITING(hdr)) { 2528*fa94a07fSbrendan vdev_t *vd = hdr->b_l2hdr->b_dev->l2ad_vdev; 2529*fa94a07fSbrendan daddr_t addr = hdr->b_l2hdr->b_daddr; 2530*fa94a07fSbrendan l2arc_read_callback_t *cb; 2531*fa94a07fSbrendan 2532*fa94a07fSbrendan DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2533*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_hits); 2534*fa94a07fSbrendan 2535*fa94a07fSbrendan hdr->b_flags |= ARC_L2_READING; 2536*fa94a07fSbrendan mutex_exit(hash_lock); 2537*fa94a07fSbrendan 2538*fa94a07fSbrendan cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2539*fa94a07fSbrendan KM_SLEEP); 2540*fa94a07fSbrendan cb->l2rcb_buf = buf; 2541*fa94a07fSbrendan cb->l2rcb_spa = spa; 2542*fa94a07fSbrendan cb->l2rcb_bp = *bp; 2543*fa94a07fSbrendan cb->l2rcb_zb = *zb; 2544*fa94a07fSbrendan cb->l2rcb_flags = flags; 2545*fa94a07fSbrendan 2546*fa94a07fSbrendan /* 2547*fa94a07fSbrendan * l2arc read. 
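 * Issue a physical read directly against the cache device.
 * l2arc_read_done() verifies the checksum and, if the buffer
 * did not survive the trip, reissues the read against the
 * original pool storage.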
2548*fa94a07fSbrendan */ 2549*fa94a07fSbrendan rzio = zio_read_phys(pio, vd, addr, size, 2550*fa94a07fSbrendan buf->b_data, ZIO_CHECKSUM_OFF, 2551*fa94a07fSbrendan l2arc_read_done, cb, priority, 2552*fa94a07fSbrendan flags | ZIO_FLAG_DONT_CACHE, B_FALSE); 2553*fa94a07fSbrendan DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2554*fa94a07fSbrendan zio_t *, rzio); 2555*fa94a07fSbrendan 2556*fa94a07fSbrendan if (*arc_flags & ARC_WAIT) 2557*fa94a07fSbrendan return (zio_wait(rzio)); 2558*fa94a07fSbrendan 2559*fa94a07fSbrendan ASSERT(*arc_flags & ARC_NOWAIT); 2560*fa94a07fSbrendan zio_nowait(rzio); 2561*fa94a07fSbrendan return (0); 2562*fa94a07fSbrendan } else { 2563*fa94a07fSbrendan DTRACE_PROBE1(l2arc__miss, 2564*fa94a07fSbrendan arc_buf_hdr_t *, hdr); 2565*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_misses); 2566*fa94a07fSbrendan if (HDR_L2_WRITING(hdr)) 2567*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_rw_clash); 2568*fa94a07fSbrendan } 2569*fa94a07fSbrendan } 2570*fa94a07fSbrendan mutex_exit(hash_lock); 2571*fa94a07fSbrendan 2572fa9e4066Sahrens rzio = zio_read(pio, spa, bp, buf->b_data, size, 2573ea8dc4b6Seschrock arc_read_done, buf, priority, flags, zb); 2574fa9e4066Sahrens 257513506d1eSmaybee if (*arc_flags & ARC_WAIT) 2576fa9e4066Sahrens return (zio_wait(rzio)); 2577fa9e4066Sahrens 257813506d1eSmaybee ASSERT(*arc_flags & ARC_NOWAIT); 2579fa9e4066Sahrens zio_nowait(rzio); 2580fa9e4066Sahrens } 2581fa9e4066Sahrens return (0); 2582fa9e4066Sahrens } 2583fa9e4066Sahrens 2584fa9e4066Sahrens /* 2585fa9e4066Sahrens * arc_read() variant to support pool traversal. If the block is already 2586fa9e4066Sahrens * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2587fa9e4066Sahrens * The idea is that we don't want pool traversal filling up memory, but 2588fa9e4066Sahrens * if the ARC already has the data anyway, we shouldn't pay for the I/O. 
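 *
 * A minimal usage sketch (hypothetical caller; the scratch buffer
 * must hold at least BP_GET_LSIZE(bp) bytes):
 *
 *	void *data = kmem_alloc(BP_GET_LSIZE(bp), KM_SLEEP);
 *	if (arc_tryread(spa, bp, data) == ENOENT)
 *		... not cached; the caller issues its own read ...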
2589fa9e4066Sahrens */ 2590fa9e4066Sahrens int 2591fa9e4066Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2592fa9e4066Sahrens { 2593fa9e4066Sahrens arc_buf_hdr_t *hdr; 2594fa9e4066Sahrens kmutex_t *hash_mtx; 2595fa9e4066Sahrens int rc = 0; 2596fa9e4066Sahrens 2597fa9e4066Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2598fa9e4066Sahrens 2599ea8dc4b6Seschrock if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 2600ea8dc4b6Seschrock arc_buf_t *buf = hdr->b_buf; 2601ea8dc4b6Seschrock 2602ea8dc4b6Seschrock ASSERT(buf); 2603ea8dc4b6Seschrock while (buf->b_data == NULL) { 2604ea8dc4b6Seschrock buf = buf->b_next; 2605ea8dc4b6Seschrock ASSERT(buf); 2606ea8dc4b6Seschrock } 2607ea8dc4b6Seschrock bcopy(buf->b_data, data, hdr->b_size); 2608ea8dc4b6Seschrock } else { 2609fa9e4066Sahrens rc = ENOENT; 2610ea8dc4b6Seschrock } 2611fa9e4066Sahrens 2612fa9e4066Sahrens if (hash_mtx) 2613fa9e4066Sahrens mutex_exit(hash_mtx); 2614fa9e4066Sahrens 2615fa9e4066Sahrens return (rc); 2616fa9e4066Sahrens } 2617fa9e4066Sahrens 2618ea8dc4b6Seschrock void 2619ea8dc4b6Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 2620ea8dc4b6Seschrock { 2621ea8dc4b6Seschrock ASSERT(buf->b_hdr != NULL); 262244cb6abcSbmc ASSERT(buf->b_hdr->b_state != arc_anon); 2623ea8dc4b6Seschrock ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 2624ea8dc4b6Seschrock buf->b_efunc = func; 2625ea8dc4b6Seschrock buf->b_private = private; 2626ea8dc4b6Seschrock } 2627ea8dc4b6Seschrock 2628ea8dc4b6Seschrock /* 2629ea8dc4b6Seschrock * This is used by the DMU to let the ARC know that a buffer is 2630ea8dc4b6Seschrock * being evicted, so the ARC should clean up. If this arc buf 2631ea8dc4b6Seschrock * is not yet in the evicted state, it will be put there. 2632ea8dc4b6Seschrock */ 2633ea8dc4b6Seschrock int 2634ea8dc4b6Seschrock arc_buf_evict(arc_buf_t *buf) 2635ea8dc4b6Seschrock { 263640d7d650Smaybee arc_buf_hdr_t *hdr; 2637ea8dc4b6Seschrock kmutex_t *hash_lock; 2638ea8dc4b6Seschrock arc_buf_t **bufp; 2639ea8dc4b6Seschrock 264040d7d650Smaybee mutex_enter(&arc_eviction_mtx); 264140d7d650Smaybee hdr = buf->b_hdr; 2642ea8dc4b6Seschrock if (hdr == NULL) { 2643ea8dc4b6Seschrock /* 2644ea8dc4b6Seschrock * We are in arc_do_user_evicts(). 2645ea8dc4b6Seschrock */ 2646ea8dc4b6Seschrock ASSERT(buf->b_data == NULL); 264740d7d650Smaybee mutex_exit(&arc_eviction_mtx); 2648ea8dc4b6Seschrock return (0); 2649ea8dc4b6Seschrock } 2650ea8dc4b6Seschrock hash_lock = HDR_LOCK(hdr); 265140d7d650Smaybee mutex_exit(&arc_eviction_mtx); 265240d7d650Smaybee 2653ea8dc4b6Seschrock mutex_enter(hash_lock); 2654ea8dc4b6Seschrock 26559b23f181Smaybee if (buf->b_data == NULL) { 26569b23f181Smaybee /* 26579b23f181Smaybee * We are on the eviction list. 26589b23f181Smaybee */ 26599b23f181Smaybee mutex_exit(hash_lock); 26609b23f181Smaybee mutex_enter(&arc_eviction_mtx); 26619b23f181Smaybee if (buf->b_hdr == NULL) { 26629b23f181Smaybee /* 26639b23f181Smaybee * We are already in arc_do_user_evicts(). 26649b23f181Smaybee */ 26659b23f181Smaybee mutex_exit(&arc_eviction_mtx); 26669b23f181Smaybee return (0); 26679b23f181Smaybee } else { 26689b23f181Smaybee arc_buf_t copy = *buf; /* structure assignment */ 26699b23f181Smaybee /* 26709b23f181Smaybee * Process this buffer now 26719b23f181Smaybee * but let arc_do_user_evicts() do the reaping. 
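 * The structure assignment above gives us a stable private copy
 * of the buf: once arc_eviction_mtx is dropped, *buf itself may
 * be freed by arc_do_user_evicts() before the callback runs.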
26729b23f181Smaybee */
26739b23f181Smaybee buf->b_efunc = NULL;
26749b23f181Smaybee mutex_exit(&arc_eviction_mtx);
26759b23f181Smaybee VERIFY(copy.b_efunc(&copy) == 0);
26769b23f181Smaybee return (1);
26779b23f181Smaybee }
26789b23f181Smaybee }
26799b23f181Smaybee 
26809b23f181Smaybee ASSERT(buf->b_hdr == hdr);
26819b23f181Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
268244cb6abcSbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2683ea8dc4b6Seschrock 
2684ea8dc4b6Seschrock /*
2685ea8dc4b6Seschrock * Pull this buffer off of the hdr
2686ea8dc4b6Seschrock */
2687ea8dc4b6Seschrock bufp = &hdr->b_buf;
2688ea8dc4b6Seschrock while (*bufp != buf)
2689ea8dc4b6Seschrock bufp = &(*bufp)->b_next;
2690ea8dc4b6Seschrock *bufp = buf->b_next;
2691ea8dc4b6Seschrock 
2692ea8dc4b6Seschrock ASSERT(buf->b_data != NULL);
269344eda4d7Smaybee arc_buf_destroy(buf, FALSE, FALSE);
2694ea8dc4b6Seschrock 
2695ea8dc4b6Seschrock if (hdr->b_datacnt == 0) {
2696ea8dc4b6Seschrock arc_state_t *old_state = hdr->b_state;
2697ea8dc4b6Seschrock arc_state_t *evicted_state;
2698ea8dc4b6Seschrock 
2699ea8dc4b6Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt));
2700ea8dc4b6Seschrock 
2701ea8dc4b6Seschrock evicted_state =
270244cb6abcSbmc (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2703ea8dc4b6Seschrock 
270444cb6abcSbmc mutex_enter(&old_state->arcs_mtx);
270544cb6abcSbmc mutex_enter(&evicted_state->arcs_mtx);
2706ea8dc4b6Seschrock 
2707ea8dc4b6Seschrock arc_change_state(evicted_state, hdr, hash_lock);
2708ea8dc4b6Seschrock ASSERT(HDR_IN_HASH_TABLE(hdr));
2709*fa94a07fSbrendan hdr->b_flags |= ARC_IN_HASH_TABLE;
2710*fa94a07fSbrendan hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2711ea8dc4b6Seschrock 
271244cb6abcSbmc mutex_exit(&evicted_state->arcs_mtx);
271344cb6abcSbmc mutex_exit(&old_state->arcs_mtx);
2714ea8dc4b6Seschrock }
2715ea8dc4b6Seschrock mutex_exit(hash_lock);
2716dd6ef538Smaybee 
2717ea8dc4b6Seschrock VERIFY(buf->b_efunc(buf) == 0);
2718ea8dc4b6Seschrock buf->b_efunc = NULL;
2719ea8dc4b6Seschrock buf->b_private = NULL;
2720ea8dc4b6Seschrock buf->b_hdr = NULL;
2721ea8dc4b6Seschrock kmem_cache_free(buf_cache, buf);
2722ea8dc4b6Seschrock return (1);
2723ea8dc4b6Seschrock }
2724ea8dc4b6Seschrock 
2725fa9e4066Sahrens /*
2726fa9e4066Sahrens * Release this buffer from the cache. This must be done
2727fa9e4066Sahrens * after a read and prior to modifying the buffer contents.
2728fa9e4066Sahrens * If the buffer has more than one reference, we must
2729fa9e4066Sahrens * make a new hdr for the buffer.
2730fa9e4066Sahrens */
2731fa9e4066Sahrens void
2732fa9e4066Sahrens arc_release(arc_buf_t *buf, void *tag)
2733fa9e4066Sahrens {
2734fa9e4066Sahrens arc_buf_hdr_t *hdr = buf->b_hdr;
2735fa9e4066Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr);
2736*fa94a07fSbrendan l2arc_buf_hdr_t *l2hdr = NULL;
2737*fa94a07fSbrendan uint64_t buf_size;
2738fa9e4066Sahrens 
2739fa9e4066Sahrens /* this buffer is not on any list */
2740fa9e4066Sahrens ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2741fa9e4066Sahrens 
274244cb6abcSbmc if (hdr->b_state == arc_anon) {
2743fa9e4066Sahrens /* this buffer is already released */
2744fa9e4066Sahrens ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2745fa9e4066Sahrens ASSERT(BUF_EMPTY(hdr));
2746ea8dc4b6Seschrock ASSERT(buf->b_efunc == NULL);
27476b4acc8bSahrens arc_buf_thaw(buf);
2748fa9e4066Sahrens return;
2749fa9e4066Sahrens }
2750fa9e4066Sahrens 
2751fa9e4066Sahrens mutex_enter(hash_lock);
2752fa9e4066Sahrens 
2753ea8dc4b6Seschrock /*
2754ea8dc4b6Seschrock * Do we have more than one buf?
2755ea8dc4b6Seschrock */ 2756ea8dc4b6Seschrock if (hdr->b_buf != buf || buf->b_next != NULL) { 2757fa9e4066Sahrens arc_buf_hdr_t *nhdr; 2758fa9e4066Sahrens arc_buf_t **bufp; 2759fa9e4066Sahrens uint64_t blksz = hdr->b_size; 2760fa9e4066Sahrens spa_t *spa = hdr->b_spa; 2761ad23a2dbSjohansen arc_buf_contents_t type = hdr->b_type; 2762*fa94a07fSbrendan uint32_t flags = hdr->b_flags; 2763fa9e4066Sahrens 2764ea8dc4b6Seschrock ASSERT(hdr->b_datacnt > 1); 2765fa9e4066Sahrens /* 2766fa9e4066Sahrens * Pull the data off of this buf and attach it to 2767fa9e4066Sahrens * a new anonymous buf. 2768fa9e4066Sahrens */ 2769ea8dc4b6Seschrock (void) remove_reference(hdr, hash_lock, tag); 2770fa9e4066Sahrens bufp = &hdr->b_buf; 2771ea8dc4b6Seschrock while (*bufp != buf) 2772fa9e4066Sahrens bufp = &(*bufp)->b_next; 2773fa9e4066Sahrens *bufp = (*bufp)->b_next; 2774af2c4821Smaybee buf->b_next = NULL; 2775ea8dc4b6Seschrock 277644cb6abcSbmc ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 277744cb6abcSbmc atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 2778ea8dc4b6Seschrock if (refcount_is_zero(&hdr->b_refcnt)) { 27790e8c6158Smaybee uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 27800e8c6158Smaybee ASSERT3U(*size, >=, hdr->b_size); 27810e8c6158Smaybee atomic_add_64(size, -hdr->b_size); 2782ea8dc4b6Seschrock } 2783ea8dc4b6Seschrock hdr->b_datacnt -= 1; 2784*fa94a07fSbrendan if (hdr->b_l2hdr != NULL) { 2785*fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 2786*fa94a07fSbrendan l2hdr = hdr->b_l2hdr; 2787*fa94a07fSbrendan hdr->b_l2hdr = NULL; 2788*fa94a07fSbrendan buf_size = hdr->b_size; 2789*fa94a07fSbrendan } 2790c717a561Smaybee arc_cksum_verify(buf); 2791ea8dc4b6Seschrock 2792fa9e4066Sahrens mutex_exit(hash_lock); 2793fa9e4066Sahrens 2794fa9e4066Sahrens nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 2795fa9e4066Sahrens nhdr->b_size = blksz; 2796fa9e4066Sahrens nhdr->b_spa = spa; 2797ad23a2dbSjohansen nhdr->b_type = type; 2798fa9e4066Sahrens nhdr->b_buf = buf; 279944cb6abcSbmc nhdr->b_state = arc_anon; 2800fa9e4066Sahrens nhdr->b_arc_access = 0; 2801*fa94a07fSbrendan nhdr->b_flags = flags & ARC_L2_WRITING; 2802*fa94a07fSbrendan nhdr->b_l2hdr = NULL; 2803ea8dc4b6Seschrock nhdr->b_datacnt = 1; 2804c717a561Smaybee nhdr->b_freeze_cksum = NULL; 2805fa9e4066Sahrens (void) refcount_add(&nhdr->b_refcnt, tag); 2806af2c4821Smaybee buf->b_hdr = nhdr; 280744cb6abcSbmc atomic_add_64(&arc_anon->arcs_size, blksz); 2808fa9e4066Sahrens } else { 2809ea8dc4b6Seschrock ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2810fa9e4066Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 2811fa9e4066Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 281244cb6abcSbmc arc_change_state(arc_anon, hdr, hash_lock); 2813fa9e4066Sahrens hdr->b_arc_access = 0; 2814*fa94a07fSbrendan if (hdr->b_l2hdr != NULL) { 2815*fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 2816*fa94a07fSbrendan l2hdr = hdr->b_l2hdr; 2817*fa94a07fSbrendan hdr->b_l2hdr = NULL; 2818*fa94a07fSbrendan buf_size = hdr->b_size; 2819*fa94a07fSbrendan } 2820fa9e4066Sahrens mutex_exit(hash_lock); 2821*fa94a07fSbrendan 2822fa9e4066Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2823fa9e4066Sahrens hdr->b_birth = 0; 2824fa9e4066Sahrens hdr->b_cksum0 = 0; 2825c717a561Smaybee arc_buf_thaw(buf); 2826fa9e4066Sahrens } 2827ea8dc4b6Seschrock buf->b_efunc = NULL; 2828ea8dc4b6Seschrock buf->b_private = NULL; 2829*fa94a07fSbrendan 2830*fa94a07fSbrendan if (l2hdr) { 2831*fa94a07fSbrendan list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 2832*fa94a07fSbrendan kmem_free(l2hdr, sizeof 
(l2arc_buf_hdr_t)); 2833*fa94a07fSbrendan ARCSTAT_INCR(arcstat_l2_size, -buf_size); 2834*fa94a07fSbrendan } 2835*fa94a07fSbrendan if (MUTEX_HELD(&l2arc_buflist_mtx)) 2836*fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 2837fa9e4066Sahrens } 2838fa9e4066Sahrens 2839fa9e4066Sahrens int 2840fa9e4066Sahrens arc_released(arc_buf_t *buf) 2841fa9e4066Sahrens { 284244cb6abcSbmc return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 2843ea8dc4b6Seschrock } 2844ea8dc4b6Seschrock 2845ea8dc4b6Seschrock int 2846ea8dc4b6Seschrock arc_has_callback(arc_buf_t *buf) 2847ea8dc4b6Seschrock { 2848ea8dc4b6Seschrock return (buf->b_efunc != NULL); 2849fa9e4066Sahrens } 2850fa9e4066Sahrens 2851ea8dc4b6Seschrock #ifdef ZFS_DEBUG 2852ea8dc4b6Seschrock int 2853ea8dc4b6Seschrock arc_referenced(arc_buf_t *buf) 2854ea8dc4b6Seschrock { 2855ea8dc4b6Seschrock return (refcount_count(&buf->b_hdr->b_refcnt)); 2856ea8dc4b6Seschrock } 2857ea8dc4b6Seschrock #endif 2858ea8dc4b6Seschrock 2859c717a561Smaybee static void 2860c717a561Smaybee arc_write_ready(zio_t *zio) 2861c717a561Smaybee { 2862c717a561Smaybee arc_write_callback_t *callback = zio->io_private; 2863c717a561Smaybee arc_buf_t *buf = callback->awcb_buf; 28640a4e9518Sgw arc_buf_hdr_t *hdr = buf->b_hdr; 2865c717a561Smaybee 28660a4e9518Sgw if (zio->io_error == 0 && callback->awcb_ready) { 2867c717a561Smaybee ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 2868c717a561Smaybee callback->awcb_ready(zio, buf, callback->awcb_private); 2869c717a561Smaybee } 28700a4e9518Sgw /* 28710a4e9518Sgw * If the IO is already in progress, then this is a re-write 28720a4e9518Sgw * attempt, so we need to thaw and re-compute the cksum. It is 28730a4e9518Sgw * the responsibility of the callback to handle the freeing 28740a4e9518Sgw * and accounting for any re-write attempt. If we don't have a 28750a4e9518Sgw * callback registered then simply free the block here. 28760a4e9518Sgw */ 28770a4e9518Sgw if (HDR_IO_IN_PROGRESS(hdr)) { 28780a4e9518Sgw if (!BP_IS_HOLE(&zio->io_bp_orig) && 28790a4e9518Sgw callback->awcb_ready == NULL) { 28800a4e9518Sgw zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg, 28810a4e9518Sgw &zio->io_bp_orig, NULL, NULL)); 28820a4e9518Sgw } 28830a4e9518Sgw mutex_enter(&hdr->b_freeze_lock); 28840a4e9518Sgw if (hdr->b_freeze_cksum != NULL) { 28850a4e9518Sgw kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 28860a4e9518Sgw hdr->b_freeze_cksum = NULL; 28870a4e9518Sgw } 28880a4e9518Sgw mutex_exit(&hdr->b_freeze_lock); 28890a4e9518Sgw } 2890*fa94a07fSbrendan arc_cksum_compute(buf, B_FALSE); 28910a4e9518Sgw hdr->b_flags |= ARC_IO_IN_PROGRESS; 2892c717a561Smaybee } 2893c717a561Smaybee 2894fa9e4066Sahrens static void 2895fa9e4066Sahrens arc_write_done(zio_t *zio) 2896fa9e4066Sahrens { 2897c717a561Smaybee arc_write_callback_t *callback = zio->io_private; 2898c717a561Smaybee arc_buf_t *buf = callback->awcb_buf; 2899c717a561Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 2900fa9e4066Sahrens 2901fa9e4066Sahrens hdr->b_acb = NULL; 2902fa9e4066Sahrens 2903fa9e4066Sahrens /* this buffer is on no lists and is not in the hash table */ 290444cb6abcSbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2905fa9e4066Sahrens 2906fa9e4066Sahrens hdr->b_dva = *BP_IDENTITY(zio->io_bp); 2907fa9e4066Sahrens hdr->b_birth = zio->io_bp->blk_birth; 2908fa9e4066Sahrens hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 2909ea8dc4b6Seschrock /* 2910ea8dc4b6Seschrock * If the block to be written was all-zero, we may have 2911ea8dc4b6Seschrock * compressed it away. 
In this case no write was performed
2912ea8dc4b6Seschrock * so there will be no dva/birth-date/checksum. The buffer
2913ea8dc4b6Seschrock * must therefore remain anonymous (and uncached).
2914ea8dc4b6Seschrock */
2915fa9e4066Sahrens if (!BUF_EMPTY(hdr)) {
2916fa9e4066Sahrens arc_buf_hdr_t *exists;
2917fa9e4066Sahrens kmutex_t *hash_lock;
2918fa9e4066Sahrens 
29196b4acc8bSahrens arc_cksum_verify(buf);
29206b4acc8bSahrens 
2921fa9e4066Sahrens exists = buf_hash_insert(hdr, &hash_lock);
2922fa9e4066Sahrens if (exists) {
2923fa9e4066Sahrens /*
2924fa9e4066Sahrens * This can only happen if we overwrite for
2925fa9e4066Sahrens * sync-to-convergence, because we remove
2926fa9e4066Sahrens * buffers from the hash table when we arc_free().
2927fa9e4066Sahrens */
2928fa9e4066Sahrens ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2929fa9e4066Sahrens BP_IDENTITY(zio->io_bp)));
2930fa9e4066Sahrens ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2931fa9e4066Sahrens zio->io_bp->blk_birth);
2932fa9e4066Sahrens 
2933fa9e4066Sahrens ASSERT(refcount_is_zero(&exists->b_refcnt));
293444cb6abcSbmc arc_change_state(arc_anon, exists, hash_lock);
2935fa9e4066Sahrens mutex_exit(hash_lock);
2936ea8dc4b6Seschrock arc_hdr_destroy(exists);
2937fa9e4066Sahrens exists = buf_hash_insert(hdr, &hash_lock);
2938fa9e4066Sahrens ASSERT3P(exists, ==, NULL);
2939fa9e4066Sahrens }
2940ea8dc4b6Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
294144eda4d7Smaybee arc_access(hdr, hash_lock);
294244eda4d7Smaybee mutex_exit(hash_lock);
2943c717a561Smaybee } else if (callback->awcb_done == NULL) {
2944ea8dc4b6Seschrock int destroy_hdr;
2945ea8dc4b6Seschrock /*
2946ea8dc4b6Seschrock * This is an anonymous buffer with no user callback,
2947ea8dc4b6Seschrock * destroy it if there are no active references.
2948ea8dc4b6Seschrock */
2949ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx);
2950ea8dc4b6Seschrock destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
2951ea8dc4b6Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2952ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx);
2953ea8dc4b6Seschrock if (destroy_hdr)
2954ea8dc4b6Seschrock arc_hdr_destroy(hdr);
2955ea8dc4b6Seschrock } else {
2956ea8dc4b6Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2957fa9e4066Sahrens }
2958ea8dc4b6Seschrock 
2959c717a561Smaybee if (callback->awcb_done) {
2960fa9e4066Sahrens ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2961c717a561Smaybee callback->awcb_done(zio, buf, callback->awcb_private);
2962fa9e4066Sahrens }
2963fa9e4066Sahrens 
2964c717a561Smaybee kmem_free(callback, sizeof (arc_write_callback_t));
2965fa9e4066Sahrens }
2966fa9e4066Sahrens 
2967c717a561Smaybee zio_t *
296844cd46caSbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2969fa9e4066Sahrens uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2970c717a561Smaybee arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority,
2971c717a561Smaybee int flags, zbookmark_t *zb)
2972fa9e4066Sahrens {
2973fa9e4066Sahrens arc_buf_hdr_t *hdr = buf->b_hdr;
2974c717a561Smaybee arc_write_callback_t *callback;
2975c717a561Smaybee zio_t *zio;
2976fa9e4066Sahrens 
2977fa9e4066Sahrens /* this is a private buffer - no locking required */
297844cb6abcSbmc ASSERT3P(hdr->b_state, ==, arc_anon);
2979fa9e4066Sahrens ASSERT(BUF_EMPTY(hdr));
2980fa9e4066Sahrens ASSERT(!HDR_IO_ERROR(hdr));
2981c5c6ffa0Smaybee ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
2982c5c6ffa0Smaybee ASSERT(hdr->b_acb == 0);
2983c717a561Smaybee callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
2984c717a561Smaybee 
callback->awcb_ready = ready;
2985c717a561Smaybee callback->awcb_done = done;
2986c717a561Smaybee callback->awcb_private = private;
2987c717a561Smaybee callback->awcb_buf = buf;
2988c717a561Smaybee zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
2989c717a561Smaybee buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback,
2990c717a561Smaybee priority, flags, zb);
2991fa9e4066Sahrens 
2992c717a561Smaybee return (zio);
2993fa9e4066Sahrens }
2994fa9e4066Sahrens 
2995fa9e4066Sahrens int
2996fa9e4066Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2997fa9e4066Sahrens zio_done_func_t *done, void *private, uint32_t arc_flags)
2998fa9e4066Sahrens {
2999fa9e4066Sahrens arc_buf_hdr_t *ab;
3000fa9e4066Sahrens kmutex_t *hash_lock;
3001fa9e4066Sahrens zio_t *zio;
3002fa9e4066Sahrens 
3003fa9e4066Sahrens /*
3004fa9e4066Sahrens * If this buffer is in the cache, release it, so it
3005fa9e4066Sahrens * can be re-used.
3006fa9e4066Sahrens */
3007fa9e4066Sahrens ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
3008fa9e4066Sahrens if (ab != NULL) {
3009fa9e4066Sahrens /*
3010fa9e4066Sahrens * The checksum of blocks to free is not always
3011fa9e4066Sahrens * preserved (e.g. on the deadlist). However, if it is
3012fa9e4066Sahrens * nonzero, it should match what we have in the cache.
3013fa9e4066Sahrens */
3014fa9e4066Sahrens ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
3015fa9e4066Sahrens ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
301644cb6abcSbmc if (ab->b_state != arc_anon)
301744cb6abcSbmc arc_change_state(arc_anon, ab, hash_lock);
301813506d1eSmaybee if (HDR_IO_IN_PROGRESS(ab)) {
301913506d1eSmaybee /*
302013506d1eSmaybee * This should only happen when we prefetch.
302113506d1eSmaybee */
302213506d1eSmaybee ASSERT(ab->b_flags & ARC_PREFETCH);
302313506d1eSmaybee ASSERT3U(ab->b_datacnt, ==, 1);
302413506d1eSmaybee ab->b_flags |= ARC_FREED_IN_READ;
302513506d1eSmaybee if (HDR_IN_HASH_TABLE(ab))
302613506d1eSmaybee buf_hash_remove(ab);
302713506d1eSmaybee ab->b_arc_access = 0;
302813506d1eSmaybee bzero(&ab->b_dva, sizeof (dva_t));
302913506d1eSmaybee ab->b_birth = 0;
303013506d1eSmaybee ab->b_cksum0 = 0;
303113506d1eSmaybee ab->b_buf->b_efunc = NULL;
303213506d1eSmaybee ab->b_buf->b_private = NULL;
303313506d1eSmaybee mutex_exit(hash_lock);
303413506d1eSmaybee } else if (refcount_is_zero(&ab->b_refcnt)) {
3035*fa94a07fSbrendan ab->b_flags |= ARC_FREE_IN_PROGRESS;
3036fa9e4066Sahrens mutex_exit(hash_lock);
3037ea8dc4b6Seschrock arc_hdr_destroy(ab);
303844cb6abcSbmc ARCSTAT_BUMP(arcstat_deleted);
3039fa9e4066Sahrens } else {
3040bbf4a8dfSmaybee /*
304113506d1eSmaybee * We still have an active reference on this
304213506d1eSmaybee * buffer. This can happen, e.g., from
304313506d1eSmaybee * dbuf_unoverride().
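 * In that case just clear the buffer's identity below; the
 * header will be reclaimed once the last reference is dropped.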
3044bbf4a8dfSmaybee */ 304513506d1eSmaybee ASSERT(!HDR_IN_HASH_TABLE(ab)); 3046fa9e4066Sahrens ab->b_arc_access = 0; 3047fa9e4066Sahrens bzero(&ab->b_dva, sizeof (dva_t)); 3048fa9e4066Sahrens ab->b_birth = 0; 3049fa9e4066Sahrens ab->b_cksum0 = 0; 3050ea8dc4b6Seschrock ab->b_buf->b_efunc = NULL; 3051ea8dc4b6Seschrock ab->b_buf->b_private = NULL; 3052fa9e4066Sahrens mutex_exit(hash_lock); 3053fa9e4066Sahrens } 3054fa9e4066Sahrens } 3055fa9e4066Sahrens 3056fa9e4066Sahrens zio = zio_free(pio, spa, txg, bp, done, private); 3057fa9e4066Sahrens 3058fa9e4066Sahrens if (arc_flags & ARC_WAIT) 3059fa9e4066Sahrens return (zio_wait(zio)); 3060fa9e4066Sahrens 3061fa9e4066Sahrens ASSERT(arc_flags & ARC_NOWAIT); 3062fa9e4066Sahrens zio_nowait(zio); 3063fa9e4066Sahrens 3064fa9e4066Sahrens return (0); 3065fa9e4066Sahrens } 3066fa9e4066Sahrens 3067fa9e4066Sahrens void 3068fa9e4066Sahrens arc_tempreserve_clear(uint64_t tempreserve) 3069fa9e4066Sahrens { 3070fa9e4066Sahrens atomic_add_64(&arc_tempreserve, -tempreserve); 3071fa9e4066Sahrens ASSERT((int64_t)arc_tempreserve >= 0); 3072fa9e4066Sahrens } 3073fa9e4066Sahrens 3074fa9e4066Sahrens int 3075fa9e4066Sahrens arc_tempreserve_space(uint64_t tempreserve) 3076fa9e4066Sahrens { 3077fa9e4066Sahrens #ifdef ZFS_DEBUG 3078fa9e4066Sahrens /* 3079fa9e4066Sahrens * Once in a while, fail for no reason. Everything should cope. 3080fa9e4066Sahrens */ 3081fa9e4066Sahrens if (spa_get_random(10000) == 0) { 3082fa9e4066Sahrens dprintf("forcing random failure\n"); 3083fa9e4066Sahrens return (ERESTART); 3084fa9e4066Sahrens } 3085fa9e4066Sahrens #endif 308644cb6abcSbmc if (tempreserve > arc_c/4 && !arc_no_grow) 308744cb6abcSbmc arc_c = MIN(arc_c_max, tempreserve * 4); 308844cb6abcSbmc if (tempreserve > arc_c) 3089112fe045Smaybee return (ENOMEM); 3090112fe045Smaybee 3091fa9e4066Sahrens /* 3092112fe045Smaybee * Throttle writes when the amount of dirty data in the cache 3093112fe045Smaybee * gets too large. We try to keep the cache less than half full 3094112fe045Smaybee * of dirty blocks so that our sync times don't grow too large. 3095112fe045Smaybee * Note: if two requests come in concurrently, we might let them 3096112fe045Smaybee * both succeed, when one of them should fail. Not a huge deal. 3097112fe045Smaybee * 3098112fe045Smaybee * XXX The limit should be adjusted dynamically to keep the time 3099112fe045Smaybee * to sync a dataset fixed (around 1-5 seconds?). 
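 *
 * Illustrative numbers (hypothetical): with arc_c at 1GB, a
 * reservation fails with ERESTART when it would push total
 * in-flight dirty data above 512MB while the amount already
 * outstanding exceeds 256MB.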
3100fa9e4066Sahrens */
3101fa9e4066Sahrens 
310244cb6abcSbmc if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
310344cb6abcSbmc arc_tempreserve + arc_anon->arcs_size > arc_c / 4) {
31040e8c6158Smaybee dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
31050e8c6158Smaybee "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
31060e8c6158Smaybee arc_tempreserve>>10,
31070e8c6158Smaybee arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
31080e8c6158Smaybee arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
310944cb6abcSbmc tempreserve>>10, arc_c>>10);
3110fa9e4066Sahrens return (ERESTART);
3111fa9e4066Sahrens }
3112fa9e4066Sahrens atomic_add_64(&arc_tempreserve, tempreserve);
3113fa9e4066Sahrens return (0);
3114fa9e4066Sahrens }
3115fa9e4066Sahrens 
3116fa9e4066Sahrens void
3117fa9e4066Sahrens arc_init(void)
3118fa9e4066Sahrens {
3119fa9e4066Sahrens mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3120fa9e4066Sahrens cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3121fa9e4066Sahrens 
312213506d1eSmaybee /* Convert seconds to clock ticks */
3123b19a79ecSperrin arc_min_prefetch_lifespan = 1 * hz;
312413506d1eSmaybee 
3125fa9e4066Sahrens /* Start out with 1/8 of all memory */
312644cb6abcSbmc arc_c = physmem * PAGESIZE / 8;
3127fa9e4066Sahrens 
3128fa9e4066Sahrens #ifdef _KERNEL
3129fa9e4066Sahrens /*
3130fa9e4066Sahrens * On architectures where the physical memory can be larger
3131fa9e4066Sahrens * than the addressable space (intel in 32-bit mode), we may
3132fa9e4066Sahrens * need to limit the cache to 1/8 of VM size.
3133fa9e4066Sahrens */
313444cb6abcSbmc arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3135fa9e4066Sahrens #endif
3136fa9e4066Sahrens 
3137112fe045Smaybee /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
313844cb6abcSbmc arc_c_min = MAX(arc_c / 4, 64<<20);
3139112fe045Smaybee /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
314044cb6abcSbmc if (arc_c * 8 >= 1<<30)
314144cb6abcSbmc arc_c_max = (arc_c * 8) - (1<<30);
3142fa9e4066Sahrens else
314344cb6abcSbmc arc_c_max = arc_c_min;
314444cb6abcSbmc arc_c_max = MAX(arc_c * 6, arc_c_max);
3145a2eea2e1Sahrens 
3146a2eea2e1Sahrens /*
3147a2eea2e1Sahrens * Allow the tunables to override our calculations if they are
3148a2eea2e1Sahrens * reasonable (i.e.
over 64MB)
3149a2eea2e1Sahrens */
3150a2eea2e1Sahrens if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
315144cb6abcSbmc arc_c_max = zfs_arc_max;
315244cb6abcSbmc if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
315344cb6abcSbmc arc_c_min = zfs_arc_min;
3154a2eea2e1Sahrens 
315544cb6abcSbmc arc_c = arc_c_max;
315644cb6abcSbmc arc_p = (arc_c >> 1);
3157fa9e4066Sahrens 
31580e8c6158Smaybee /* limit meta-data to 1/4 of the arc capacity */
31590e8c6158Smaybee arc_meta_limit = arc_c_max / 4;
31601116048bSek 
31611116048bSek /* Allow the tunable to override if it is reasonable */
31621116048bSek if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
31631116048bSek arc_meta_limit = zfs_arc_meta_limit;
31641116048bSek 
31650e8c6158Smaybee if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
31660e8c6158Smaybee arc_c_min = arc_meta_limit / 2;
31670e8c6158Smaybee 
3168fa9e4066Sahrens /* if kmem_flags are set, let's try to use less memory */
3169fa9e4066Sahrens if (kmem_debugging())
317044cb6abcSbmc arc_c = arc_c / 2;
317144cb6abcSbmc if (arc_c < arc_c_min)
317244cb6abcSbmc arc_c = arc_c_min;
317344cb6abcSbmc 
317444cb6abcSbmc arc_anon = &ARC_anon;
317544cb6abcSbmc arc_mru = &ARC_mru;
317644cb6abcSbmc arc_mru_ghost = &ARC_mru_ghost;
317744cb6abcSbmc arc_mfu = &ARC_mfu;
317844cb6abcSbmc arc_mfu_ghost = &ARC_mfu_ghost;
3179*fa94a07fSbrendan arc_l2c_only = &ARC_l2c_only;
318044cb6abcSbmc arc_size = 0;
318144cb6abcSbmc 
318244cb6abcSbmc mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
318344cb6abcSbmc mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
318444cb6abcSbmc mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
318544cb6abcSbmc mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
318644cb6abcSbmc mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3187*fa94a07fSbrendan mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
318844cb6abcSbmc 
31890e8c6158Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
31900e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31910e8c6158Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
31920e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31930e8c6158Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
31940e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31950e8c6158Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
31960e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31970e8c6158Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
31980e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31990e8c6158Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
32000e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
32010e8c6158Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
32020e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
32030e8c6158Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
32040e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3205*fa94a07fSbrendan list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3206*fa94a07fSbrendan sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3207*fa94a07fSbrendan list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3208*fa94a07fSbrendan sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3209fa9e4066Sahrens 
3210fa9e4066Sahrens 
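/*
 * Set up the buf_hdr hash table and the hdr/buf kmem caches.
 */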
buf_init(); 3211fa9e4066Sahrens 3212fa9e4066Sahrens arc_thread_exit = 0; 3213ea8dc4b6Seschrock arc_eviction_list = NULL; 3214ea8dc4b6Seschrock mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 321540d7d650Smaybee bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3216fa9e4066Sahrens 321744cb6abcSbmc arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 321844cb6abcSbmc sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 321944cb6abcSbmc 322044cb6abcSbmc if (arc_ksp != NULL) { 322144cb6abcSbmc arc_ksp->ks_data = &arc_stats; 322244cb6abcSbmc kstat_install(arc_ksp); 322344cb6abcSbmc } 322444cb6abcSbmc 3225fa9e4066Sahrens (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3226fa9e4066Sahrens TS_RUN, minclsyspri); 322749e3519aSmaybee 322849e3519aSmaybee arc_dead = FALSE; 3229fa9e4066Sahrens } 3230fa9e4066Sahrens 3231fa9e4066Sahrens void 3232fa9e4066Sahrens arc_fini(void) 3233fa9e4066Sahrens { 3234fa9e4066Sahrens mutex_enter(&arc_reclaim_thr_lock); 3235fa9e4066Sahrens arc_thread_exit = 1; 3236fa9e4066Sahrens while (arc_thread_exit != 0) 3237fa9e4066Sahrens cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3238fa9e4066Sahrens mutex_exit(&arc_reclaim_thr_lock); 3239fa9e4066Sahrens 3240fa9e4066Sahrens arc_flush(); 3241fa9e4066Sahrens 3242fa9e4066Sahrens arc_dead = TRUE; 3243fa9e4066Sahrens 324444cb6abcSbmc if (arc_ksp != NULL) { 324544cb6abcSbmc kstat_delete(arc_ksp); 324644cb6abcSbmc arc_ksp = NULL; 324744cb6abcSbmc } 324844cb6abcSbmc 3249ea8dc4b6Seschrock mutex_destroy(&arc_eviction_mtx); 3250fa9e4066Sahrens mutex_destroy(&arc_reclaim_thr_lock); 3251fa9e4066Sahrens cv_destroy(&arc_reclaim_thr_cv); 3252fa9e4066Sahrens 32530e8c6158Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 32540e8c6158Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 32550e8c6158Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 32560e8c6158Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 32570e8c6158Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 32580e8c6158Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 32590e8c6158Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 32600e8c6158Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3261fa9e4066Sahrens 326244cb6abcSbmc mutex_destroy(&arc_anon->arcs_mtx); 326344cb6abcSbmc mutex_destroy(&arc_mru->arcs_mtx); 326444cb6abcSbmc mutex_destroy(&arc_mru_ghost->arcs_mtx); 326544cb6abcSbmc mutex_destroy(&arc_mfu->arcs_mtx); 326644cb6abcSbmc mutex_destroy(&arc_mfu_ghost->arcs_mtx); 32675ad82045Snd 3268fa9e4066Sahrens buf_fini(); 3269fa9e4066Sahrens } 3270*fa94a07fSbrendan 3271*fa94a07fSbrendan /* 3272*fa94a07fSbrendan * Level 2 ARC 3273*fa94a07fSbrendan * 3274*fa94a07fSbrendan * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3275*fa94a07fSbrendan * It uses dedicated storage devices to hold cached data, which are populated 3276*fa94a07fSbrendan * using large infrequent writes. The main role of this cache is to boost 3277*fa94a07fSbrendan * the performance of random read workloads. The intended L2ARC devices 3278*fa94a07fSbrendan * include short-stroked disks, solid state disks, and other media with 3279*fa94a07fSbrendan * substantially faster read latency than disk. 
3280*fa94a07fSbrendan *
3281*fa94a07fSbrendan * +-----------------------+
3282*fa94a07fSbrendan * | ARC |
3283*fa94a07fSbrendan * +-----------------------+
3284*fa94a07fSbrendan * | ^ ^
3285*fa94a07fSbrendan * | | |
3286*fa94a07fSbrendan * l2arc_feed_thread() arc_read()
3287*fa94a07fSbrendan * | | |
3288*fa94a07fSbrendan * | l2arc read |
3289*fa94a07fSbrendan * V | |
3290*fa94a07fSbrendan * +---------------+ |
3291*fa94a07fSbrendan * | L2ARC | |
3292*fa94a07fSbrendan * +---------------+ |
3293*fa94a07fSbrendan * | ^ |
3294*fa94a07fSbrendan * l2arc_write() | |
3295*fa94a07fSbrendan * | | |
3296*fa94a07fSbrendan * V | |
3297*fa94a07fSbrendan * +-------+ +-------+
3298*fa94a07fSbrendan * | vdev | | vdev |
3299*fa94a07fSbrendan * | cache | | cache |
3300*fa94a07fSbrendan * +-------+ +-------+
3301*fa94a07fSbrendan * +=========+ .-----.
3302*fa94a07fSbrendan * : L2ARC : |-_____-|
3303*fa94a07fSbrendan * : devices : | Disks |
3304*fa94a07fSbrendan * +=========+ `-_____-'
3305*fa94a07fSbrendan *
3306*fa94a07fSbrendan * Read requests are satisfied from the following sources, in order:
3307*fa94a07fSbrendan *
3308*fa94a07fSbrendan * 1) ARC
3309*fa94a07fSbrendan * 2) vdev cache of L2ARC devices
3310*fa94a07fSbrendan * 3) L2ARC devices
3311*fa94a07fSbrendan * 4) vdev cache of disks
3312*fa94a07fSbrendan * 5) disks
3313*fa94a07fSbrendan *
3314*fa94a07fSbrendan * Some L2ARC device types exhibit extremely slow write performance.
3315*fa94a07fSbrendan * To accommodate this there are some significant differences between
3316*fa94a07fSbrendan * the L2ARC and traditional cache design:
3317*fa94a07fSbrendan *
3318*fa94a07fSbrendan * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3319*fa94a07fSbrendan * the ARC behave as usual, freeing buffers and placing headers on ghost
3320*fa94a07fSbrendan * lists. The ARC does not send buffers to the L2ARC during eviction as
3321*fa94a07fSbrendan * this would add inflated write latencies for all ARC memory pressure.
3322*fa94a07fSbrendan *
3323*fa94a07fSbrendan * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3324*fa94a07fSbrendan * It does this by periodically scanning buffers from the eviction-end of
3325*fa94a07fSbrendan * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3326*fa94a07fSbrendan * not already there. It scans until a headroom of buffers is satisfied,
3327*fa94a07fSbrendan * which itself acts as a cushion against ARC eviction. The thread that does this is
3328*fa94a07fSbrendan * l2arc_feed_thread(), illustrated below; example sizes are included to
3329*fa94a07fSbrendan * provide a better sense of ratio than this diagram:
3330*fa94a07fSbrendan *
3331*fa94a07fSbrendan * head --> tail
3332*fa94a07fSbrendan * +---------------------+----------+
3333*fa94a07fSbrendan * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3334*fa94a07fSbrendan * +---------------------+----------+ | o L2ARC eligible
3335*fa94a07fSbrendan * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3336*fa94a07fSbrendan * +---------------------+----------+ |
3337*fa94a07fSbrendan * 15.9 Gbytes ^ 32 Mbytes |
3338*fa94a07fSbrendan * headroom |
3339*fa94a07fSbrendan * l2arc_feed_thread()
3340*fa94a07fSbrendan * |
3341*fa94a07fSbrendan * l2arc write hand <--[oooo]--'
3342*fa94a07fSbrendan * | 8 Mbyte
3343*fa94a07fSbrendan * | write max
3344*fa94a07fSbrendan * V
3345*fa94a07fSbrendan * +==============================+
3346*fa94a07fSbrendan * L2ARC dev |####|#|###|###| |####| ...
| 3347*fa94a07fSbrendan * +==============================+ 3348*fa94a07fSbrendan * 32 Gbytes 3349*fa94a07fSbrendan * 3350*fa94a07fSbrendan * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 3351*fa94a07fSbrendan * evicted, then the L2ARC has cached a buffer much sooner than it probably 3352*fa94a07fSbrendan * needed to, potentially wasting L2ARC device bandwidth and storage. It is 3353*fa94a07fSbrendan * safe to say that this is an uncommon case, since buffers at the end of 3354*fa94a07fSbrendan * the ARC lists have moved there due to inactivity. 3355*fa94a07fSbrendan * 3356*fa94a07fSbrendan * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 3357*fa94a07fSbrendan * then the L2ARC simply misses copying some buffers. This serves as a 3358*fa94a07fSbrendan * pressure valve to prevent heavy read workloads from both stalling the ARC 3359*fa94a07fSbrendan * with waits and clogging the L2ARC with writes. This also helps prevent 3360*fa94a07fSbrendan * the potential for the L2ARC to churn if it attempts to cache content too 3361*fa94a07fSbrendan * quickly, such as during backups of the entire pool. 3362*fa94a07fSbrendan * 3363*fa94a07fSbrendan * 5. Writes to the L2ARC devices are grouped and sent in-sequence, so that 3364*fa94a07fSbrendan * the vdev queue can aggregate them into larger and fewer writes. Each 3365*fa94a07fSbrendan * device is written to in a rotor fashion, sweeping writes through 3366*fa94a07fSbrendan * available space then repeating. 3367*fa94a07fSbrendan * 3368*fa94a07fSbrendan * 6. The L2ARC does not store dirty content. It never needs to flush 3369*fa94a07fSbrendan * write buffers back to disk based storage. 3370*fa94a07fSbrendan * 3371*fa94a07fSbrendan * 7. If an ARC buffer is written (and dirtied) which also exists in the 3372*fa94a07fSbrendan * L2ARC, the now stale L2ARC buffer is immediately dropped. 3373*fa94a07fSbrendan * 3374*fa94a07fSbrendan * The performance of the L2ARC can be tweaked by a number of tunables, which 3375*fa94a07fSbrendan * may be necessary for different workloads: 3376*fa94a07fSbrendan * 3377*fa94a07fSbrendan * l2arc_write_max max write bytes per interval 3378*fa94a07fSbrendan * l2arc_noprefetch skip caching prefetched buffers 3379*fa94a07fSbrendan * l2arc_headroom number of max device writes to precache 3380*fa94a07fSbrendan * l2arc_feed_secs seconds between L2ARC writing 3381*fa94a07fSbrendan * 3382*fa94a07fSbrendan * Tunables may be removed or added as future performance improvements are 3383*fa94a07fSbrendan * integrated, and also may become zpool properties. 3384*fa94a07fSbrendan */ 3385*fa94a07fSbrendan 3386*fa94a07fSbrendan static void 3387*fa94a07fSbrendan l2arc_hdr_stat_add(void) 3388*fa94a07fSbrendan { 3389*fa94a07fSbrendan ARCSTAT_INCR(arcstat_l2_hdr_size, sizeof (arc_buf_hdr_t) + 3390*fa94a07fSbrendan sizeof (l2arc_buf_hdr_t)); 3391*fa94a07fSbrendan ARCSTAT_INCR(arcstat_hdr_size, -sizeof (arc_buf_hdr_t)); 3392*fa94a07fSbrendan } 3393*fa94a07fSbrendan 3394*fa94a07fSbrendan static void 3395*fa94a07fSbrendan l2arc_hdr_stat_remove(void) 3396*fa94a07fSbrendan { 3397*fa94a07fSbrendan ARCSTAT_INCR(arcstat_l2_hdr_size, -sizeof (arc_buf_hdr_t) - 3398*fa94a07fSbrendan sizeof (l2arc_buf_hdr_t)); 3399*fa94a07fSbrendan ARCSTAT_INCR(arcstat_hdr_size, sizeof (arc_buf_hdr_t)); 3400*fa94a07fSbrendan } 3401*fa94a07fSbrendan 3402*fa94a07fSbrendan /* 3403*fa94a07fSbrendan * Cycle through L2ARC devices. This is how L2ARC load balances. 
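 * For example, with cache devices A, B and C, successive feed
 * cycles write to A, then B, then C, and then wrap back to A.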
3404*fa94a07fSbrendan * This is called with l2arc_dev_mtx held, which also locks out spa removal. 3405*fa94a07fSbrendan */ 3406*fa94a07fSbrendan static l2arc_dev_t * 3407*fa94a07fSbrendan l2arc_dev_get_next(void) 3408*fa94a07fSbrendan { 3409*fa94a07fSbrendan l2arc_dev_t *next; 3410*fa94a07fSbrendan 3411*fa94a07fSbrendan if (l2arc_dev_last == NULL) { 3412*fa94a07fSbrendan next = list_head(l2arc_dev_list); 3413*fa94a07fSbrendan } else { 3414*fa94a07fSbrendan next = list_next(l2arc_dev_list, l2arc_dev_last); 3415*fa94a07fSbrendan if (next == NULL) 3416*fa94a07fSbrendan next = list_head(l2arc_dev_list); 3417*fa94a07fSbrendan } 3418*fa94a07fSbrendan 3419*fa94a07fSbrendan l2arc_dev_last = next; 3420*fa94a07fSbrendan 3421*fa94a07fSbrendan return (next); 3422*fa94a07fSbrendan } 3423*fa94a07fSbrendan 3424*fa94a07fSbrendan /* 3425*fa94a07fSbrendan * A write to a cache device has completed. Update all headers to allow 3426*fa94a07fSbrendan * reads from these buffers to begin. 3427*fa94a07fSbrendan */ 3428*fa94a07fSbrendan static void 3429*fa94a07fSbrendan l2arc_write_done(zio_t *zio) 3430*fa94a07fSbrendan { 3431*fa94a07fSbrendan l2arc_write_callback_t *cb; 3432*fa94a07fSbrendan l2arc_dev_t *dev; 3433*fa94a07fSbrendan list_t *buflist; 3434*fa94a07fSbrendan l2arc_data_free_t *df, *df_prev; 3435*fa94a07fSbrendan arc_buf_hdr_t *head, *ab, *ab_prev; 3436*fa94a07fSbrendan kmutex_t *hash_lock; 3437*fa94a07fSbrendan 3438*fa94a07fSbrendan cb = zio->io_private; 3439*fa94a07fSbrendan ASSERT(cb != NULL); 3440*fa94a07fSbrendan dev = cb->l2wcb_dev; 3441*fa94a07fSbrendan ASSERT(dev != NULL); 3442*fa94a07fSbrendan head = cb->l2wcb_head; 3443*fa94a07fSbrendan ASSERT(head != NULL); 3444*fa94a07fSbrendan buflist = dev->l2ad_buflist; 3445*fa94a07fSbrendan ASSERT(buflist != NULL); 3446*fa94a07fSbrendan DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 3447*fa94a07fSbrendan l2arc_write_callback_t *, cb); 3448*fa94a07fSbrendan 3449*fa94a07fSbrendan if (zio->io_error != 0) 3450*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_writes_error); 3451*fa94a07fSbrendan 3452*fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 3453*fa94a07fSbrendan 3454*fa94a07fSbrendan /* 3455*fa94a07fSbrendan * All writes completed, or an error was hit. 3456*fa94a07fSbrendan */ 3457*fa94a07fSbrendan for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 3458*fa94a07fSbrendan ab_prev = list_prev(buflist, ab); 3459*fa94a07fSbrendan 3460*fa94a07fSbrendan hash_lock = HDR_LOCK(ab); 3461*fa94a07fSbrendan if (!mutex_tryenter(hash_lock)) { 3462*fa94a07fSbrendan /* 3463*fa94a07fSbrendan * This buffer misses out. It may be in a stage 3464*fa94a07fSbrendan * of eviction. Its ARC_L2_WRITING flag will be 3465*fa94a07fSbrendan * left set, denying reads to this buffer. 3466*fa94a07fSbrendan */ 3467*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 3468*fa94a07fSbrendan continue; 3469*fa94a07fSbrendan } 3470*fa94a07fSbrendan 3471*fa94a07fSbrendan if (zio->io_error != 0) { 3472*fa94a07fSbrendan /* 3473*fa94a07fSbrendan * Error - invalidate L2ARC entry. 3474*fa94a07fSbrendan */ 3475*fa94a07fSbrendan ab->b_l2hdr = NULL; 3476*fa94a07fSbrendan } 3477*fa94a07fSbrendan 3478*fa94a07fSbrendan /* 3479*fa94a07fSbrendan * Allow ARC to begin reads to this L2ARC entry. 
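 * (arc_read() will not route a read to the L2ARC while the
 * header's ARC_L2_WRITING flag remains set.)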
3480*fa94a07fSbrendan */ 3481*fa94a07fSbrendan ab->b_flags &= ~ARC_L2_WRITING; 3482*fa94a07fSbrendan 3483*fa94a07fSbrendan mutex_exit(hash_lock); 3484*fa94a07fSbrendan } 3485*fa94a07fSbrendan 3486*fa94a07fSbrendan atomic_inc_64(&l2arc_writes_done); 3487*fa94a07fSbrendan list_remove(buflist, head); 3488*fa94a07fSbrendan kmem_cache_free(hdr_cache, head); 3489*fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 3490*fa94a07fSbrendan 3491*fa94a07fSbrendan /* 3492*fa94a07fSbrendan * Free buffers that were tagged for destruction. 3493*fa94a07fSbrendan */ 3494*fa94a07fSbrendan mutex_enter(&l2arc_free_on_write_mtx); 3495*fa94a07fSbrendan buflist = l2arc_free_on_write; 3496*fa94a07fSbrendan for (df = list_tail(buflist); df; df = df_prev) { 3497*fa94a07fSbrendan df_prev = list_prev(buflist, df); 3498*fa94a07fSbrendan ASSERT(df->l2df_data != NULL); 3499*fa94a07fSbrendan ASSERT(df->l2df_func != NULL); 3500*fa94a07fSbrendan df->l2df_func(df->l2df_data, df->l2df_size); 3501*fa94a07fSbrendan list_remove(buflist, df); 3502*fa94a07fSbrendan kmem_free(df, sizeof (l2arc_data_free_t)); 3503*fa94a07fSbrendan } 3504*fa94a07fSbrendan mutex_exit(&l2arc_free_on_write_mtx); 3505*fa94a07fSbrendan 3506*fa94a07fSbrendan kmem_free(cb, sizeof (l2arc_write_callback_t)); 3507*fa94a07fSbrendan } 3508*fa94a07fSbrendan 3509*fa94a07fSbrendan /* 3510*fa94a07fSbrendan * A read to a cache device completed. Validate buffer contents before 3511*fa94a07fSbrendan * handing over to the regular ARC routines. 3512*fa94a07fSbrendan */ 3513*fa94a07fSbrendan static void 3514*fa94a07fSbrendan l2arc_read_done(zio_t *zio) 3515*fa94a07fSbrendan { 3516*fa94a07fSbrendan l2arc_read_callback_t *cb; 3517*fa94a07fSbrendan arc_buf_hdr_t *hdr; 3518*fa94a07fSbrendan arc_buf_t *buf; 3519*fa94a07fSbrendan zio_t *rzio; 3520*fa94a07fSbrendan kmutex_t *hash_lock; 3521*fa94a07fSbrendan int equal, err = 0; 3522*fa94a07fSbrendan 3523*fa94a07fSbrendan cb = zio->io_private; 3524*fa94a07fSbrendan ASSERT(cb != NULL); 3525*fa94a07fSbrendan buf = cb->l2rcb_buf; 3526*fa94a07fSbrendan ASSERT(buf != NULL); 3527*fa94a07fSbrendan hdr = buf->b_hdr; 3528*fa94a07fSbrendan ASSERT(hdr != NULL); 3529*fa94a07fSbrendan 3530*fa94a07fSbrendan hash_lock = HDR_LOCK(hdr); 3531*fa94a07fSbrendan mutex_enter(hash_lock); 3532*fa94a07fSbrendan 3533*fa94a07fSbrendan /* 3534*fa94a07fSbrendan * Check this survived the L2ARC journey. 3535*fa94a07fSbrendan */ 3536*fa94a07fSbrendan equal = arc_cksum_equal(buf); 3537*fa94a07fSbrendan if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 3538*fa94a07fSbrendan mutex_exit(hash_lock); 3539*fa94a07fSbrendan zio->io_private = buf; 3540*fa94a07fSbrendan arc_read_done(zio); 3541*fa94a07fSbrendan } else { 3542*fa94a07fSbrendan mutex_exit(hash_lock); 3543*fa94a07fSbrendan /* 3544*fa94a07fSbrendan * Buffer didn't survive caching. Increment stats and 3545*fa94a07fSbrendan * reissue to the original storage device. 
3546*fa94a07fSbrendan */
3547*fa94a07fSbrendan if (zio->io_error != 0)
3548*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_io_error);
3549*fa94a07fSbrendan if (!equal)
3550*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_cksum_bad);
3551*fa94a07fSbrendan 
3552*fa94a07fSbrendan zio->io_flags &= ~ZIO_FLAG_DONT_CACHE;
3553*fa94a07fSbrendan rzio = zio_read(NULL, cb->l2rcb_spa, &cb->l2rcb_bp,
3554*fa94a07fSbrendan buf->b_data, zio->io_size, arc_read_done, buf,
3555*fa94a07fSbrendan zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb);
3556*fa94a07fSbrendan 
3557*fa94a07fSbrendan /*
3558*fa94a07fSbrendan * Since this is a separate thread, we can wait on this
3559*fa94a07fSbrendan * I/O whether there is an io_waiter or not.
3560*fa94a07fSbrendan */
3561*fa94a07fSbrendan err = zio_wait(rzio);
3562*fa94a07fSbrendan 
3563*fa94a07fSbrendan /*
3564*fa94a07fSbrendan * Let the reissued I/O call arc_read_done() instead.
3565*fa94a07fSbrendan * io_error is set to the reissued I/O error status.
3566*fa94a07fSbrendan */
3567*fa94a07fSbrendan zio->io_done = NULL;
3568*fa94a07fSbrendan zio->io_waiter = NULL;
3569*fa94a07fSbrendan zio->io_error = err;
3570*fa94a07fSbrendan }
3571*fa94a07fSbrendan 
3572*fa94a07fSbrendan kmem_free(cb, sizeof (l2arc_read_callback_t));
3573*fa94a07fSbrendan }
3574*fa94a07fSbrendan 
3575*fa94a07fSbrendan /*
3576*fa94a07fSbrendan * This is the list priority from which the L2ARC will search for pages to
3577*fa94a07fSbrendan * cache. This is used within loops (0..3) to cycle through lists in the
3578*fa94a07fSbrendan * desired order. This order can have a significant effect on cache
3579*fa94a07fSbrendan * performance.
3580*fa94a07fSbrendan *
3581*fa94a07fSbrendan * Currently the metadata lists are hit first, MFU then MRU, followed by
3582*fa94a07fSbrendan * the data lists. This function returns a locked list, and also returns
3583*fa94a07fSbrendan * the lock pointer.
3584*fa94a07fSbrendan */
3585*fa94a07fSbrendan static list_t *
3586*fa94a07fSbrendan l2arc_list_locked(int list_num, kmutex_t **lock)
3587*fa94a07fSbrendan {
3588*fa94a07fSbrendan list_t *list;
3589*fa94a07fSbrendan 
3590*fa94a07fSbrendan ASSERT(list_num >= 0 && list_num <= 3);
3591*fa94a07fSbrendan 
3592*fa94a07fSbrendan switch (list_num) {
3593*fa94a07fSbrendan case 0:
3594*fa94a07fSbrendan list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
3595*fa94a07fSbrendan *lock = &arc_mfu->arcs_mtx;
3596*fa94a07fSbrendan break;
3597*fa94a07fSbrendan case 1:
3598*fa94a07fSbrendan list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
3599*fa94a07fSbrendan *lock = &arc_mru->arcs_mtx;
3600*fa94a07fSbrendan break;
3601*fa94a07fSbrendan case 2:
3602*fa94a07fSbrendan list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
3603*fa94a07fSbrendan *lock = &arc_mfu->arcs_mtx;
3604*fa94a07fSbrendan break;
3605*fa94a07fSbrendan case 3:
3606*fa94a07fSbrendan list = &arc_mru->arcs_list[ARC_BUFC_DATA];
3607*fa94a07fSbrendan *lock = &arc_mru->arcs_mtx;
3608*fa94a07fSbrendan break;
3609*fa94a07fSbrendan }
3610*fa94a07fSbrendan 
3611*fa94a07fSbrendan ASSERT(!(MUTEX_HELD(*lock)));
3612*fa94a07fSbrendan mutex_enter(*lock);
3613*fa94a07fSbrendan return (list);
3614*fa94a07fSbrendan }
3615*fa94a07fSbrendan 
3616*fa94a07fSbrendan /*
3617*fa94a07fSbrendan * Evict buffers from the device write hand to the distance specified in
3618*fa94a07fSbrendan * bytes. This distance may span populated buffers, or it may span nothing.
3619*fa94a07fSbrendan * This clears a region on the L2ARC device, making it ready for writing.
3620*fa94a07fSbrendan * If the 'all' boolean is set, every buffer is evicted.
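 * The target address is normally the write hand plus the given
 * distance; when the hand nears the end of the device, eviction
 * instead runs to l2ad_end so the hand can wrap cleanly.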
3621*fa94a07fSbrendan */ 3622*fa94a07fSbrendan static void 3623*fa94a07fSbrendan l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 3624*fa94a07fSbrendan { 3625*fa94a07fSbrendan list_t *buflist; 3626*fa94a07fSbrendan l2arc_buf_hdr_t *abl2; 3627*fa94a07fSbrendan arc_buf_hdr_t *ab, *ab_prev; 3628*fa94a07fSbrendan kmutex_t *hash_lock; 3629*fa94a07fSbrendan uint64_t taddr; 3630*fa94a07fSbrendan 3631*fa94a07fSbrendan ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); 3632*fa94a07fSbrendan 3633*fa94a07fSbrendan buflist = dev->l2ad_buflist; 3634*fa94a07fSbrendan 3635*fa94a07fSbrendan if (buflist == NULL) 3636*fa94a07fSbrendan return; 3637*fa94a07fSbrendan 3638*fa94a07fSbrendan if (!all && dev->l2ad_first) { 3639*fa94a07fSbrendan /* 3640*fa94a07fSbrendan * This is the first sweep through the device. There is 3641*fa94a07fSbrendan * nothing to evict. 3642*fa94a07fSbrendan */ 3643*fa94a07fSbrendan return; 3644*fa94a07fSbrendan } 3645*fa94a07fSbrendan 3646*fa94a07fSbrendan if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write))) { 3647*fa94a07fSbrendan /* 3648*fa94a07fSbrendan * When nearing the end of the device, evict to the end 3649*fa94a07fSbrendan * before the device write hand jumps to the start. 3650*fa94a07fSbrendan */ 3651*fa94a07fSbrendan taddr = dev->l2ad_end; 3652*fa94a07fSbrendan } else { 3653*fa94a07fSbrendan taddr = dev->l2ad_hand + distance; 3654*fa94a07fSbrendan } 3655*fa94a07fSbrendan DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 3656*fa94a07fSbrendan uint64_t, taddr, boolean_t, all); 3657*fa94a07fSbrendan 3658*fa94a07fSbrendan top: 3659*fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 3660*fa94a07fSbrendan for (ab = list_tail(buflist); ab; ab = ab_prev) { 3661*fa94a07fSbrendan ab_prev = list_prev(buflist, ab); 3662*fa94a07fSbrendan 3663*fa94a07fSbrendan hash_lock = HDR_LOCK(ab); 3664*fa94a07fSbrendan if (!mutex_tryenter(hash_lock)) { 3665*fa94a07fSbrendan /* 3666*fa94a07fSbrendan * Missed the hash lock. Retry. 3667*fa94a07fSbrendan */ 3668*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 3669*fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 3670*fa94a07fSbrendan mutex_enter(hash_lock); 3671*fa94a07fSbrendan mutex_exit(hash_lock); 3672*fa94a07fSbrendan goto top; 3673*fa94a07fSbrendan } 3674*fa94a07fSbrendan 3675*fa94a07fSbrendan if (HDR_L2_WRITE_HEAD(ab)) { 3676*fa94a07fSbrendan /* 3677*fa94a07fSbrendan * We hit a write head node. Leave it for 3678*fa94a07fSbrendan * l2arc_write_done(). 3679*fa94a07fSbrendan */ 3680*fa94a07fSbrendan list_remove(buflist, ab); 3681*fa94a07fSbrendan mutex_exit(hash_lock); 3682*fa94a07fSbrendan continue; 3683*fa94a07fSbrendan } 3684*fa94a07fSbrendan 3685*fa94a07fSbrendan if (!all && ab->b_l2hdr != NULL && 3686*fa94a07fSbrendan (ab->b_l2hdr->b_daddr > taddr || 3687*fa94a07fSbrendan ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 3688*fa94a07fSbrendan /* 3689*fa94a07fSbrendan * We've evicted to the target address, 3690*fa94a07fSbrendan * or the end of the device. 3691*fa94a07fSbrendan */ 3692*fa94a07fSbrendan mutex_exit(hash_lock); 3693*fa94a07fSbrendan break; 3694*fa94a07fSbrendan } 3695*fa94a07fSbrendan 3696*fa94a07fSbrendan if (HDR_FREE_IN_PROGRESS(ab)) { 3697*fa94a07fSbrendan /* 3698*fa94a07fSbrendan * Already on the path to destruction. 
3699*fa94a07fSbrendan */ 3700*fa94a07fSbrendan mutex_exit(hash_lock); 3701*fa94a07fSbrendan continue; 3702*fa94a07fSbrendan } 3703*fa94a07fSbrendan 3704*fa94a07fSbrendan if (ab->b_state == arc_l2c_only) { 3705*fa94a07fSbrendan ASSERT(!HDR_L2_READING(ab)); 3706*fa94a07fSbrendan /* 3707*fa94a07fSbrendan * This doesn't exist in the ARC. Destroy. 3708*fa94a07fSbrendan * arc_hdr_destroy() will call list_remove() 3709*fa94a07fSbrendan * and decrement arcstat_l2_size. 3710*fa94a07fSbrendan */ 3711*fa94a07fSbrendan arc_change_state(arc_anon, ab, hash_lock); 3712*fa94a07fSbrendan arc_hdr_destroy(ab); 3713*fa94a07fSbrendan } else { 3714*fa94a07fSbrendan /* 3715*fa94a07fSbrendan * Tell ARC this no longer exists in L2ARC. 3716*fa94a07fSbrendan */ 3717*fa94a07fSbrendan if (ab->b_l2hdr != NULL) { 3718*fa94a07fSbrendan abl2 = ab->b_l2hdr; 3719*fa94a07fSbrendan ab->b_l2hdr = NULL; 3720*fa94a07fSbrendan kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 3721*fa94a07fSbrendan ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 3722*fa94a07fSbrendan } 3723*fa94a07fSbrendan list_remove(buflist, ab); 3724*fa94a07fSbrendan 3725*fa94a07fSbrendan /* 3726*fa94a07fSbrendan * This may have been leftover after a 3727*fa94a07fSbrendan * failed write. 3728*fa94a07fSbrendan */ 3729*fa94a07fSbrendan ab->b_flags &= ~ARC_L2_WRITING; 3730*fa94a07fSbrendan 3731*fa94a07fSbrendan /* 3732*fa94a07fSbrendan * Invalidate issued or about to be issued 3733*fa94a07fSbrendan * reads, since we may be about to write 3734*fa94a07fSbrendan * over this location. 3735*fa94a07fSbrendan */ 3736*fa94a07fSbrendan if (HDR_L2_READING(ab)) { 3737*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_evict_reading); 3738*fa94a07fSbrendan ab->b_flags |= ARC_L2_EVICTED; 3739*fa94a07fSbrendan } 3740*fa94a07fSbrendan } 3741*fa94a07fSbrendan mutex_exit(hash_lock); 3742*fa94a07fSbrendan } 3743*fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 3744*fa94a07fSbrendan 3745*fa94a07fSbrendan spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 3746*fa94a07fSbrendan dev->l2ad_evict = taddr; 3747*fa94a07fSbrendan } 3748*fa94a07fSbrendan 3749*fa94a07fSbrendan /* 3750*fa94a07fSbrendan * Find and write ARC buffers to the L2ARC device. 3751*fa94a07fSbrendan * 3752*fa94a07fSbrendan * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 3753*fa94a07fSbrendan * for reading until they have completed writing. 
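 * Candidate lists are scanned from the tail (the eviction end);
 * each list is searched through at most l2ad_write * l2arc_headroom
 * bytes, and at most l2ad_write bytes in total are copied per pass.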
3754*fa94a07fSbrendan */ 3755*fa94a07fSbrendan static void 3756*fa94a07fSbrendan l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev) 3757*fa94a07fSbrendan { 3758*fa94a07fSbrendan arc_buf_hdr_t *ab, *ab_prev, *head; 3759*fa94a07fSbrendan l2arc_buf_hdr_t *hdrl2; 3760*fa94a07fSbrendan list_t *list; 3761*fa94a07fSbrendan uint64_t passed_sz, write_sz, buf_sz; 3762*fa94a07fSbrendan uint64_t target_sz = dev->l2ad_write; 3763*fa94a07fSbrendan uint64_t headroom = dev->l2ad_write * l2arc_headroom; 3764*fa94a07fSbrendan void *buf_data; 3765*fa94a07fSbrendan kmutex_t *hash_lock, *list_lock; 3766*fa94a07fSbrendan boolean_t have_lock, full; 3767*fa94a07fSbrendan l2arc_write_callback_t *cb; 3768*fa94a07fSbrendan zio_t *pio, *wzio; 3769*fa94a07fSbrendan 3770*fa94a07fSbrendan ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); 3771*fa94a07fSbrendan ASSERT(dev->l2ad_vdev != NULL); 3772*fa94a07fSbrendan 3773*fa94a07fSbrendan pio = NULL; 3774*fa94a07fSbrendan write_sz = 0; 3775*fa94a07fSbrendan full = B_FALSE; 3776*fa94a07fSbrendan head = kmem_cache_alloc(hdr_cache, KM_SLEEP); 3777*fa94a07fSbrendan head->b_flags |= ARC_L2_WRITE_HEAD; 3778*fa94a07fSbrendan 3779*fa94a07fSbrendan /* 3780*fa94a07fSbrendan * Copy buffers for L2ARC writing. 3781*fa94a07fSbrendan */ 3782*fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 3783*fa94a07fSbrendan for (int try = 0; try <= 3; try++) { 3784*fa94a07fSbrendan list = l2arc_list_locked(try, &list_lock); 3785*fa94a07fSbrendan passed_sz = 0; 3786*fa94a07fSbrendan 3787*fa94a07fSbrendan for (ab = list_tail(list); ab; ab = ab_prev) { 3788*fa94a07fSbrendan ab_prev = list_prev(list, ab); 3789*fa94a07fSbrendan 3790*fa94a07fSbrendan hash_lock = HDR_LOCK(ab); 3791*fa94a07fSbrendan have_lock = MUTEX_HELD(hash_lock); 3792*fa94a07fSbrendan if (!have_lock && !mutex_tryenter(hash_lock)) { 3793*fa94a07fSbrendan /* 3794*fa94a07fSbrendan * Skip this buffer rather than waiting. 3795*fa94a07fSbrendan */ 3796*fa94a07fSbrendan continue; 3797*fa94a07fSbrendan } 3798*fa94a07fSbrendan 3799*fa94a07fSbrendan passed_sz += ab->b_size; 3800*fa94a07fSbrendan if (passed_sz > headroom) { 3801*fa94a07fSbrendan /* 3802*fa94a07fSbrendan * Searched too far. 3803*fa94a07fSbrendan */ 3804*fa94a07fSbrendan mutex_exit(hash_lock); 3805*fa94a07fSbrendan break; 3806*fa94a07fSbrendan } 3807*fa94a07fSbrendan 3808*fa94a07fSbrendan if (ab->b_spa != spa) { 3809*fa94a07fSbrendan mutex_exit(hash_lock); 3810*fa94a07fSbrendan continue; 3811*fa94a07fSbrendan } 3812*fa94a07fSbrendan 3813*fa94a07fSbrendan if (ab->b_l2hdr != NULL) { 3814*fa94a07fSbrendan /* 3815*fa94a07fSbrendan * Already in L2ARC. 
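 * Writing it again would only consume device bandwidth for
 * data the device already holds.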
3816*fa94a07fSbrendan */ 3817*fa94a07fSbrendan mutex_exit(hash_lock); 3818*fa94a07fSbrendan continue; 3819*fa94a07fSbrendan } 3820*fa94a07fSbrendan 3821*fa94a07fSbrendan if (HDR_IO_IN_PROGRESS(ab) || HDR_DONT_L2CACHE(ab)) { 3822*fa94a07fSbrendan mutex_exit(hash_lock); 3823*fa94a07fSbrendan continue; 3824*fa94a07fSbrendan } 3825*fa94a07fSbrendan 3826*fa94a07fSbrendan if ((write_sz + ab->b_size) > target_sz) { 3827*fa94a07fSbrendan full = B_TRUE; 3828*fa94a07fSbrendan mutex_exit(hash_lock); 3829*fa94a07fSbrendan break; 3830*fa94a07fSbrendan } 3831*fa94a07fSbrendan 3832*fa94a07fSbrendan if (ab->b_buf == NULL) { 3833*fa94a07fSbrendan DTRACE_PROBE1(l2arc__buf__null, void *, ab); 3834*fa94a07fSbrendan mutex_exit(hash_lock); 3835*fa94a07fSbrendan continue; 3836*fa94a07fSbrendan } 3837*fa94a07fSbrendan 3838*fa94a07fSbrendan if (pio == NULL) { 3839*fa94a07fSbrendan /* 3840*fa94a07fSbrendan * Insert a dummy header on the buflist so 3841*fa94a07fSbrendan * l2arc_write_done() can find where the 3842*fa94a07fSbrendan * write buffers begin without searching. 3843*fa94a07fSbrendan */ 3844*fa94a07fSbrendan list_insert_head(dev->l2ad_buflist, head); 3845*fa94a07fSbrendan 3846*fa94a07fSbrendan cb = kmem_alloc( 3847*fa94a07fSbrendan sizeof (l2arc_write_callback_t), KM_SLEEP); 3848*fa94a07fSbrendan cb->l2wcb_dev = dev; 3849*fa94a07fSbrendan cb->l2wcb_head = head; 3850*fa94a07fSbrendan pio = zio_root(spa, l2arc_write_done, cb, 3851*fa94a07fSbrendan ZIO_FLAG_CANFAIL); 3852*fa94a07fSbrendan } 3853*fa94a07fSbrendan 3854*fa94a07fSbrendan /* 3855*fa94a07fSbrendan * Create and add a new L2ARC header. 3856*fa94a07fSbrendan */ 3857*fa94a07fSbrendan hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 3858*fa94a07fSbrendan hdrl2->b_dev = dev; 3859*fa94a07fSbrendan hdrl2->b_daddr = dev->l2ad_hand; 3860*fa94a07fSbrendan 3861*fa94a07fSbrendan ab->b_flags |= ARC_L2_WRITING; 3862*fa94a07fSbrendan ab->b_l2hdr = hdrl2; 3863*fa94a07fSbrendan list_insert_head(dev->l2ad_buflist, ab); 3864*fa94a07fSbrendan buf_data = ab->b_buf->b_data; 3865*fa94a07fSbrendan buf_sz = ab->b_size; 3866*fa94a07fSbrendan 3867*fa94a07fSbrendan /* 3868*fa94a07fSbrendan * Compute and store the buffer cksum before 3869*fa94a07fSbrendan * writing. On debug the cksum is verified first. 
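 * (The physical write below is issued with ZIO_CHECKSUM_OFF, so
 * this in-core cksum is presumably what validates the data when
 * it is later read back from the device.)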
3870*fa94a07fSbrendan */ 3871*fa94a07fSbrendan arc_cksum_verify(ab->b_buf); 3872*fa94a07fSbrendan arc_cksum_compute(ab->b_buf, B_TRUE); 3873*fa94a07fSbrendan 3874*fa94a07fSbrendan mutex_exit(hash_lock); 3875*fa94a07fSbrendan 3876*fa94a07fSbrendan wzio = zio_write_phys(pio, dev->l2ad_vdev, 3877*fa94a07fSbrendan dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 3878*fa94a07fSbrendan NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 3879*fa94a07fSbrendan ZIO_FLAG_CANFAIL, B_FALSE); 3880*fa94a07fSbrendan 3881*fa94a07fSbrendan DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 3882*fa94a07fSbrendan zio_t *, wzio); 3883*fa94a07fSbrendan (void) zio_nowait(wzio); 3884*fa94a07fSbrendan 3885*fa94a07fSbrendan write_sz += buf_sz; 3886*fa94a07fSbrendan dev->l2ad_hand += buf_sz; 3887*fa94a07fSbrendan } 3888*fa94a07fSbrendan 3889*fa94a07fSbrendan mutex_exit(list_lock); 3890*fa94a07fSbrendan 3891*fa94a07fSbrendan if (full == B_TRUE) 3892*fa94a07fSbrendan break; 3893*fa94a07fSbrendan } 3894*fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 3895*fa94a07fSbrendan 3896*fa94a07fSbrendan if (pio == NULL) { 3897*fa94a07fSbrendan ASSERT3U(write_sz, ==, 0); 3898*fa94a07fSbrendan kmem_cache_free(hdr_cache, head); 3899*fa94a07fSbrendan return; 3900*fa94a07fSbrendan } 3901*fa94a07fSbrendan 3902*fa94a07fSbrendan ASSERT3U(write_sz, <=, target_sz); 3903*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_writes_sent); 3904*fa94a07fSbrendan ARCSTAT_INCR(arcstat_l2_size, write_sz); 3905*fa94a07fSbrendan spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); 3906*fa94a07fSbrendan 3907*fa94a07fSbrendan /* 3908*fa94a07fSbrendan * Bump device hand to the device start if it is approaching the end. 3909*fa94a07fSbrendan * l2arc_evict() will already have evicted ahead for this case. 3910*fa94a07fSbrendan */ 3911*fa94a07fSbrendan if (dev->l2ad_hand >= (dev->l2ad_end - dev->l2ad_write)) { 3912*fa94a07fSbrendan spa_l2cache_space_update(dev->l2ad_vdev, 0, 3913*fa94a07fSbrendan dev->l2ad_end - dev->l2ad_hand); 3914*fa94a07fSbrendan dev->l2ad_hand = dev->l2ad_start; 3915*fa94a07fSbrendan dev->l2ad_evict = dev->l2ad_start; 3916*fa94a07fSbrendan dev->l2ad_first = B_FALSE; 3917*fa94a07fSbrendan } 3918*fa94a07fSbrendan 3919*fa94a07fSbrendan (void) zio_wait(pio); 3920*fa94a07fSbrendan } 3921*fa94a07fSbrendan 3922*fa94a07fSbrendan /* 3923*fa94a07fSbrendan * This thread feeds the L2ARC at regular intervals. This is the beating 3924*fa94a07fSbrendan * heart of the L2ARC. 3925*fa94a07fSbrendan */ 3926*fa94a07fSbrendan static void 3927*fa94a07fSbrendan l2arc_feed_thread(void) 3928*fa94a07fSbrendan { 3929*fa94a07fSbrendan callb_cpr_t cpr; 3930*fa94a07fSbrendan l2arc_dev_t *dev; 3931*fa94a07fSbrendan spa_t *spa; 3932*fa94a07fSbrendan int interval; 3933*fa94a07fSbrendan boolean_t startup = B_TRUE; 3934*fa94a07fSbrendan 3935*fa94a07fSbrendan CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 3936*fa94a07fSbrendan 3937*fa94a07fSbrendan mutex_enter(&l2arc_feed_thr_lock); 3938*fa94a07fSbrendan 3939*fa94a07fSbrendan while (l2arc_thread_exit == 0) { 3940*fa94a07fSbrendan /* 3941*fa94a07fSbrendan * Initially pause for L2ARC_FEED_DELAY seconds as a grace 3942*fa94a07fSbrendan * interval during boot, followed by l2arc_feed_secs seconds 3943*fa94a07fSbrendan * thereafter. 
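 * The wait may end early if l2arc_fini() signals
 * l2arc_feed_thr_cv to shut the thread down.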
3944*fa94a07fSbrendan */ 3945*fa94a07fSbrendan CALLB_CPR_SAFE_BEGIN(&cpr); 3946*fa94a07fSbrendan if (startup) { 3947*fa94a07fSbrendan interval = L2ARC_FEED_DELAY; 3948*fa94a07fSbrendan startup = B_FALSE; 3949*fa94a07fSbrendan } else { 3950*fa94a07fSbrendan interval = l2arc_feed_secs; 3951*fa94a07fSbrendan } 3952*fa94a07fSbrendan (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 3953*fa94a07fSbrendan lbolt + (hz * interval)); 3954*fa94a07fSbrendan CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 3955*fa94a07fSbrendan 3956*fa94a07fSbrendan /* 3957*fa94a07fSbrendan * Do nothing until L2ARC devices exist. 3958*fa94a07fSbrendan */ 3959*fa94a07fSbrendan mutex_enter(&l2arc_dev_mtx); 3960*fa94a07fSbrendan if (l2arc_ndev == 0) { 3961*fa94a07fSbrendan mutex_exit(&l2arc_dev_mtx); 3962*fa94a07fSbrendan continue; 3963*fa94a07fSbrendan } 3964*fa94a07fSbrendan 3965*fa94a07fSbrendan /* 3966*fa94a07fSbrendan * Avoid contributing to memory pressure. 3967*fa94a07fSbrendan */ 3968*fa94a07fSbrendan if (arc_reclaim_needed()) { 3969*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 3970*fa94a07fSbrendan mutex_exit(&l2arc_dev_mtx); 3971*fa94a07fSbrendan continue; 3972*fa94a07fSbrendan } 3973*fa94a07fSbrendan 3974*fa94a07fSbrendan /* 3975*fa94a07fSbrendan * This selects the next l2arc device to write to, and in 3976*fa94a07fSbrendan * doing so the next spa to feed from: dev->l2ad_spa. 3977*fa94a07fSbrendan */ 3978*fa94a07fSbrendan if ((dev = l2arc_dev_get_next()) == NULL) { 3979*fa94a07fSbrendan mutex_exit(&l2arc_dev_mtx); 3980*fa94a07fSbrendan continue; 3981*fa94a07fSbrendan } 3982*fa94a07fSbrendan spa = dev->l2ad_spa; 3983*fa94a07fSbrendan ASSERT(spa != NULL); 3984*fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_feeds); 3985*fa94a07fSbrendan 3986*fa94a07fSbrendan /* 3987*fa94a07fSbrendan * Evict L2ARC buffers that will be overwritten. 3988*fa94a07fSbrendan */ 3989*fa94a07fSbrendan l2arc_evict(dev, dev->l2ad_write, B_FALSE); 3990*fa94a07fSbrendan 3991*fa94a07fSbrendan /* 3992*fa94a07fSbrendan * Write ARC buffers. 3993*fa94a07fSbrendan */ 3994*fa94a07fSbrendan l2arc_write_buffers(spa, dev); 3995*fa94a07fSbrendan mutex_exit(&l2arc_dev_mtx); 3996*fa94a07fSbrendan } 3997*fa94a07fSbrendan 3998*fa94a07fSbrendan l2arc_thread_exit = 0; 3999*fa94a07fSbrendan cv_broadcast(&l2arc_feed_thr_cv); 4000*fa94a07fSbrendan CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4001*fa94a07fSbrendan thread_exit(); 4002*fa94a07fSbrendan } 4003*fa94a07fSbrendan 4004*fa94a07fSbrendan /* 4005*fa94a07fSbrendan * Add a vdev for use by the L2ARC. By this point the spa has already 4006*fa94a07fSbrendan * validated the vdev and opened it. 4007*fa94a07fSbrendan */ 4008*fa94a07fSbrendan void 4009*fa94a07fSbrendan l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) 4010*fa94a07fSbrendan { 4011*fa94a07fSbrendan l2arc_dev_t *adddev; 4012*fa94a07fSbrendan 4013*fa94a07fSbrendan /* 4014*fa94a07fSbrendan * Create a new l2arc device entry. 
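 * The entry is zeroed on allocation, so any field not
 * assigned below starts out as 0/NULL.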
4015*fa94a07fSbrendan */ 4016*fa94a07fSbrendan adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4017*fa94a07fSbrendan adddev->l2ad_spa = spa; 4018*fa94a07fSbrendan adddev->l2ad_vdev = vd; 4019*fa94a07fSbrendan adddev->l2ad_write = l2arc_write_max; 4020*fa94a07fSbrendan adddev->l2ad_start = start; 4021*fa94a07fSbrendan adddev->l2ad_end = end; 4022*fa94a07fSbrendan adddev->l2ad_hand = adddev->l2ad_start; 4023*fa94a07fSbrendan adddev->l2ad_evict = adddev->l2ad_start; 4024*fa94a07fSbrendan adddev->l2ad_first = B_TRUE; 4025*fa94a07fSbrendan ASSERT3U(adddev->l2ad_write, >, 0); 4026*fa94a07fSbrendan 4027*fa94a07fSbrendan /* 4028*fa94a07fSbrendan * This is a list of all ARC buffers that are still valid on the 4029*fa94a07fSbrendan * device. 4030*fa94a07fSbrendan */ 4031*fa94a07fSbrendan adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4032*fa94a07fSbrendan list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4033*fa94a07fSbrendan offsetof(arc_buf_hdr_t, b_l2node)); 4034*fa94a07fSbrendan 4035*fa94a07fSbrendan spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); 4036*fa94a07fSbrendan 4037*fa94a07fSbrendan /* 4038*fa94a07fSbrendan * Add the device to the global list. 4039*fa94a07fSbrendan */ 4040*fa94a07fSbrendan mutex_enter(&l2arc_dev_mtx); 4041*fa94a07fSbrendan list_insert_head(l2arc_dev_list, adddev); 4042*fa94a07fSbrendan atomic_inc_64(&l2arc_ndev); 4043*fa94a07fSbrendan mutex_exit(&l2arc_dev_mtx); 4044*fa94a07fSbrendan } 4045*fa94a07fSbrendan 4046*fa94a07fSbrendan /* 4047*fa94a07fSbrendan * Remove a vdev from the L2ARC. 4048*fa94a07fSbrendan */ 4049*fa94a07fSbrendan void 4050*fa94a07fSbrendan l2arc_remove_vdev(vdev_t *vd) 4051*fa94a07fSbrendan { 4052*fa94a07fSbrendan l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4053*fa94a07fSbrendan 4054*fa94a07fSbrendan /* 4055*fa94a07fSbrendan * We can only grab the spa config lock when cache device writes 4056*fa94a07fSbrendan * complete. 4057*fa94a07fSbrendan */ 4058*fa94a07fSbrendan ASSERT3U(l2arc_writes_sent, ==, l2arc_writes_done); 4059*fa94a07fSbrendan 4060*fa94a07fSbrendan /* 4061*fa94a07fSbrendan * Find the device entry for this vdev. 4062*fa94a07fSbrendan */ 4063*fa94a07fSbrendan mutex_enter(&l2arc_dev_mtx); 4064*fa94a07fSbrendan for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4065*fa94a07fSbrendan nextdev = list_next(l2arc_dev_list, dev); 4066*fa94a07fSbrendan if (vd == dev->l2ad_vdev) { 4067*fa94a07fSbrendan remdev = dev; 4068*fa94a07fSbrendan break; 4069*fa94a07fSbrendan } 4070*fa94a07fSbrendan } 4071*fa94a07fSbrendan ASSERT(remdev != NULL); 4072*fa94a07fSbrendan 4073*fa94a07fSbrendan /* 4074*fa94a07fSbrendan * Remove the device from the global list. 4075*fa94a07fSbrendan */ 4076*fa94a07fSbrendan list_remove(l2arc_dev_list, remdev); 4077*fa94a07fSbrendan l2arc_dev_last = NULL; /* may have been invalidated */ 4078*fa94a07fSbrendan 4079*fa94a07fSbrendan /* 4080*fa94a07fSbrendan * Clear all buflists and ARC references; this flushes the L2ARC device.
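 * Passing all == B_TRUE makes l2arc_evict() sweep every buffer
 * on the device, regardless of its address.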
4081*fa94a07fSbrendan */ 4082*fa94a07fSbrendan l2arc_evict(remdev, 0, B_TRUE); 4083*fa94a07fSbrendan list_destroy(remdev->l2ad_buflist); 4084*fa94a07fSbrendan kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 4085*fa94a07fSbrendan kmem_free(remdev, sizeof (l2arc_dev_t)); 4086*fa94a07fSbrendan 4087*fa94a07fSbrendan atomic_dec_64(&l2arc_ndev); 4088*fa94a07fSbrendan mutex_exit(&l2arc_dev_mtx); 4089*fa94a07fSbrendan } 4090*fa94a07fSbrendan 4091*fa94a07fSbrendan void 4092*fa94a07fSbrendan l2arc_init() 4093*fa94a07fSbrendan { 4094*fa94a07fSbrendan l2arc_thread_exit = 0; 4095*fa94a07fSbrendan l2arc_ndev = 0; 4096*fa94a07fSbrendan l2arc_writes_sent = 0; 4097*fa94a07fSbrendan l2arc_writes_done = 0; 4098*fa94a07fSbrendan 4099*fa94a07fSbrendan mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4100*fa94a07fSbrendan cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 4101*fa94a07fSbrendan mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 4102*fa94a07fSbrendan mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 4103*fa94a07fSbrendan mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 4104*fa94a07fSbrendan 4105*fa94a07fSbrendan l2arc_dev_list = &L2ARC_dev_list; 4106*fa94a07fSbrendan l2arc_free_on_write = &L2ARC_free_on_write; 4107*fa94a07fSbrendan list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 4108*fa94a07fSbrendan offsetof(l2arc_dev_t, l2ad_node)); 4109*fa94a07fSbrendan list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 4110*fa94a07fSbrendan offsetof(l2arc_data_free_t, l2df_list_node)); 4111*fa94a07fSbrendan 4112*fa94a07fSbrendan (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 4113*fa94a07fSbrendan TS_RUN, minclsyspri); 4114*fa94a07fSbrendan } 4115*fa94a07fSbrendan 4116*fa94a07fSbrendan void 4117*fa94a07fSbrendan l2arc_fini() 4118*fa94a07fSbrendan { 4119*fa94a07fSbrendan mutex_enter(&l2arc_feed_thr_lock); 4120*fa94a07fSbrendan cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 4121*fa94a07fSbrendan l2arc_thread_exit = 1; 4122*fa94a07fSbrendan while (l2arc_thread_exit != 0) 4123*fa94a07fSbrendan cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 4124*fa94a07fSbrendan mutex_exit(&l2arc_feed_thr_lock); 4125*fa94a07fSbrendan 4126*fa94a07fSbrendan mutex_destroy(&l2arc_feed_thr_lock); 4127*fa94a07fSbrendan cv_destroy(&l2arc_feed_thr_cv); 4128*fa94a07fSbrendan mutex_destroy(&l2arc_dev_mtx); 4129*fa94a07fSbrendan mutex_destroy(&l2arc_buflist_mtx); 4130*fa94a07fSbrendan mutex_destroy(&l2arc_free_on_write_mtx); 4131*fa94a07fSbrendan 4132*fa94a07fSbrendan list_destroy(l2arc_dev_list); 4133*fa94a07fSbrendan list_destroy(l2arc_free_on_write); 4134*fa94a07fSbrendan } 4135
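/*
 * Editor's note: a minimal lifecycle sketch, not part of the build,
 * assuming a hypothetical cache vdev "cvd" whose usable space spans
 * [start, end):
 *
 *	l2arc_init();				- once, at module load
 *	l2arc_add_vdev(spa, cvd, start, end);	- pool adds a cache device
 *	(l2arc_feed_thread() then feeds the device every l2arc_feed_secs)
 *	l2arc_remove_vdev(cvd);			- after cache writes quiesce
 *	l2arc_fini();				- once, at module unload
 */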