1fa9e4066Sahrens /*
2fa9e4066Sahrens * CDDL HEADER START
3fa9e4066Sahrens *
4fa9e4066Sahrens * The contents of this file are subject to the terms of the
5033f9833Sek * Common Development and Distribution License (the "License").
6033f9833Sek * You may not use this file except in compliance with the License.
7fa9e4066Sahrens *
8fa9e4066Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens * See the License for the specific language governing permissions
11fa9e4066Sahrens * and limitations under the License.
12fa9e4066Sahrens *
13fa9e4066Sahrens * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens *
19fa9e4066Sahrens * CDDL HEADER END
20fa9e4066Sahrens */
21fa9e4066Sahrens /*
223f9d6ad7SLin Ling * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23cf746768SBryan Cantrill * Copyright (c) 2012, Joyent, Inc. All rights reserved.
247802d7bfSMatthew Ahrens * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
2571cb1b74SSaso Kiselkov * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
263038a2b4SSaso Kiselkov * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
27fa9e4066Sahrens */
28fa9e4066Sahrens
29fa9e4066Sahrens /*
3044cb6abcSbmc * DVA-based Adjustable Replacement Cache
31fa9e4066Sahrens *
32ea8dc4b6Seschrock * While much of the theory of operation used here is
33ea8dc4b6Seschrock * based on the self-tuning, low overhead replacement cache
34fa9e4066Sahrens * presented by Megiddo and Modha at FAST 2003, there are some
35fa9e4066Sahrens * significant differences:
36fa9e4066Sahrens *
37fa9e4066Sahrens * 1. The Megiddo and Modha model assumes any page is evictable.
38fa9e4066Sahrens * Pages in its cache cannot be "locked" into memory. This makes
39fa9e4066Sahrens * the eviction algorithm simple: evict the last page in the list.
40fa9e4066Sahrens * This also makes the performance characteristics easy to reason
41fa9e4066Sahrens * about. Our cache is not so simple. At any given moment, some
42fa9e4066Sahrens * subset of the blocks in the cache are un-evictable because we
43fa9e4066Sahrens * have handed out a reference to them. Blocks are only evictable
44fa9e4066Sahrens * when there are no external references active. This makes
45fa9e4066Sahrens * eviction far more problematic: we choose to evict the evictable
46fa9e4066Sahrens * blocks that are the "lowest" in the list.
47fa9e4066Sahrens *
48fa9e4066Sahrens * There are times when it is not possible to evict the requested
49fa9e4066Sahrens * space. In these circumstances we are unable to adjust the cache
50fa9e4066Sahrens * size. To prevent the cache growing unbounded at these times we
51fa94a07fSbrendan * implement a "cache throttle" that slows the flow of new data
52fa94a07fSbrendan * into the cache until we can make space available.
53fa9e4066Sahrens *
54fa9e4066Sahrens * 2. The Megiddo and Modha model assumes a fixed cache size.
55fa9e4066Sahrens * Pages are evicted when the cache is full and there is a cache
56fa9e4066Sahrens * miss. Our model has a variable sized cache. It grows with
57fa94a07fSbrendan * high use, but also tries to react to memory pressure from the
58fa9e4066Sahrens * operating system: decreasing its size when system memory is
59fa9e4066Sahrens * tight.
60fa9e4066Sahrens *
61fa9e4066Sahrens * 3. The Megiddo and Modha model assumes a fixed page size. All
62f7170741SWill Andrews * elements of the cache are therefore exactly the same size. So
63fa9e4066Sahrens * when adjusting the cache size following a cache miss, it's simply
64fa9e4066Sahrens * a matter of choosing a single page to evict. In our model, we
65fa9e4066Sahrens * have variable sized cache blocks (ranging from 512 bytes to
66f7170741SWill Andrews * 128K bytes). We therefore choose a set of blocks to evict to make
67fa9e4066Sahrens * space for a cache miss that approximates as closely as possible
68fa9e4066Sahrens * the space used by the new block.
69fa9e4066Sahrens *
70fa9e4066Sahrens * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
71fa9e4066Sahrens * by N. Megiddo & D. Modha, FAST 2003
72fa9e4066Sahrens */
73fa9e4066Sahrens
74fa9e4066Sahrens /*
75fa9e4066Sahrens * The locking model:
76fa9e4066Sahrens *
77fa9e4066Sahrens * A new reference to a cache buffer can be obtained in two
78fa9e4066Sahrens * ways: 1) via a hash table lookup using the DVA as a key,
79fa94a07fSbrendan * or 2) via one of the ARC lists. The arc_read() interface
80fa9e4066Sahrens * uses method 1, while the internal arc algorithms for
81f7170741SWill Andrews * adjusting the cache use method 2. We therefore provide two
82fa9e4066Sahrens * types of locks: 1) the hash table lock array, and 2) the
83fa9e4066Sahrens * arc list locks.
84fa9e4066Sahrens *
85fc98fea5SBart Coddens * Buffers do not have their own mutexes, rather they rely on the
86fc98fea5SBart Coddens * hash table mutexes for the bulk of their protection (i.e. most
87fc98fea5SBart Coddens * fields in the arc_buf_hdr_t are protected by these mutexes).
88fa9e4066Sahrens *
89fa9e4066Sahrens * buf_hash_find() returns the appropriate mutex (held) when it
90fa9e4066Sahrens * locates the requested buffer in the hash table. It returns
91fa9e4066Sahrens * NULL for the mutex if the buffer was not in the table.
92fa9e4066Sahrens *
93fa9e4066Sahrens * buf_hash_remove() expects the appropriate hash mutex to be
94fa9e4066Sahrens * already held before it is invoked.
95fa9e4066Sahrens *
96fa9e4066Sahrens * Each arc state also has a mutex which is used to protect the
97fa9e4066Sahrens * buffer list associated with the state. When attempting to
98fa9e4066Sahrens * obtain a hash table lock while holding an arc list lock you
99fa9e4066Sahrens * must use mutex_tryenter() to avoid deadlock. Also note that
10044eda4d7Smaybee * the active state mutex must be held before the ghost state mutex.
101fa9e4066Sahrens *
102ea8dc4b6Seschrock * Arc buffers may have an associated eviction callback function.
103ea8dc4b6Seschrock * This function will be invoked prior to removing the buffer (e.g.
104ea8dc4b6Seschrock * in arc_do_user_evicts()). Note however that the data associated
105ea8dc4b6Seschrock * with the buffer may be evicted prior to the callback. The callback
106ea8dc4b6Seschrock * must be made with *no locks held* (to prevent deadlock). Additionally,
107ea8dc4b6Seschrock * the users of callbacks must ensure that their private data is
108bbfa8ea8SMatthew Ahrens * protected from simultaneous callbacks from arc_clear_callback()
109ea8dc4b6Seschrock * and arc_do_user_evicts().
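 *
 * As a rough editorial sketch (an illustration, not code in this
 * file), the method-1 lookup pattern described above looks like:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		... hash_lock is returned held; hdr is now stable ...
 *		mutex_exit(hash_lock);
 *	}
 *
 * where "guid" is assumed to be the pool's load guid and "bp" the
 * block pointer being looked up. A thread that already holds an arc
 * list lock must instead probe the hash lock with mutex_tryenter()
 * and back off on failure, per the ordering rules above.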
110ea8dc4b6Seschrock * 111fa9e4066Sahrens * Note that the majority of the performance stats are manipulated 112fa9e4066Sahrens * with atomic operations. 113fa94a07fSbrendan * 114fa94a07fSbrendan * The L2ARC uses the l2arc_buflist_mtx global mutex for the following: 115fa94a07fSbrendan * 116fa94a07fSbrendan * - L2ARC buflist creation 117fa94a07fSbrendan * - L2ARC buflist eviction 118fa94a07fSbrendan * - L2ARC write completion, which walks L2ARC buflists 119fa94a07fSbrendan * - ARC header destruction, as it removes from L2ARC buflists 120fa94a07fSbrendan * - ARC header release, as it removes from L2ARC buflists 121fa9e4066Sahrens */ 122fa9e4066Sahrens 123fa9e4066Sahrens #include <sys/spa.h> 124fa9e4066Sahrens #include <sys/zio.h> 125aad02571SSaso Kiselkov #include <sys/zio_compress.h> 126fa9e4066Sahrens #include <sys/zfs_context.h> 127fa9e4066Sahrens #include <sys/arc.h> 128fa9e4066Sahrens #include <sys/refcount.h> 129c5904d13Seschrock #include <sys/vdev.h> 130573ca77eSGeorge Wilson #include <sys/vdev_impl.h> 13169962b56SMatthew Ahrens #include <sys/dsl_pool.h> 132fa9e4066Sahrens #ifdef _KERNEL 133fa9e4066Sahrens #include <sys/vmsystm.h> 134fa9e4066Sahrens #include <vm/anon.h> 135fa9e4066Sahrens #include <sys/fs/swapnode.h> 136033f9833Sek #include <sys/dnlc.h> 137fa9e4066Sahrens #endif 138fa9e4066Sahrens #include <sys/callb.h> 13944cb6abcSbmc #include <sys/kstat.h> 140b24ab676SJeff Bonwick #include <zfs_fletcher.h> 141fa9e4066Sahrens 142cd1c8b85SMatthew Ahrens #ifndef _KERNEL 143cd1c8b85SMatthew Ahrens /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */ 144cd1c8b85SMatthew Ahrens boolean_t arc_watch = B_FALSE; 145cd1c8b85SMatthew Ahrens int arc_procfd; 146cd1c8b85SMatthew Ahrens #endif 147cd1c8b85SMatthew Ahrens 148fa9e4066Sahrens static kmutex_t arc_reclaim_thr_lock; 149fa9e4066Sahrens static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */ 150fa9e4066Sahrens static uint8_t arc_thread_exit; 151fa9e4066Sahrens 152033f9833Sek #define ARC_REDUCE_DNLC_PERCENT 3 153033f9833Sek uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT; 154033f9833Sek 155fa9e4066Sahrens typedef enum arc_reclaim_strategy { 156fa9e4066Sahrens ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */ 157fa9e4066Sahrens ARC_RECLAIM_CONS /* Conservative reclaim strategy */ 158fa9e4066Sahrens } arc_reclaim_strategy_t; 159fa9e4066Sahrens 16069962b56SMatthew Ahrens /* 16169962b56SMatthew Ahrens * The number of iterations through arc_evict_*() before we 16269962b56SMatthew Ahrens * drop & reacquire the lock. 
16369962b56SMatthew Ahrens */
16469962b56SMatthew Ahrens int arc_evict_iterations = 100;
16569962b56SMatthew Ahrens
166fa9e4066Sahrens /* number of seconds before growing cache again */
167fa9e4066Sahrens static int arc_grow_retry = 60;
168fa9e4066Sahrens
1695a98e54bSBrendan Gregg - Sun Microsystems /* shift of arc_c for calculating both min and max arc_p */
1705a98e54bSBrendan Gregg - Sun Microsystems static int arc_p_min_shift = 4;
1715a98e54bSBrendan Gregg - Sun Microsystems
1725a98e54bSBrendan Gregg - Sun Microsystems /* log2(fraction of arc to reclaim) */
1735a98e54bSBrendan Gregg - Sun Microsystems static int arc_shrink_shift = 5;
1745a98e54bSBrendan Gregg - Sun Microsystems
17513506d1eSmaybee /*
176b19a79ecSperrin * minimum lifespan of a prefetch block in clock ticks
177b19a79ecSperrin * (initialized in arc_init())
17813506d1eSmaybee */
179b19a79ecSperrin static int arc_min_prefetch_lifespan;
18013506d1eSmaybee
18169962b56SMatthew Ahrens /*
18269962b56SMatthew Ahrens * If this percent of memory is free, don't throttle.
18369962b56SMatthew Ahrens */
18469962b56SMatthew Ahrens int arc_lotsfree_percent = 10;
18569962b56SMatthew Ahrens
186fa9e4066Sahrens static int arc_dead;
187fa9e4066Sahrens
1883a737e0dSbrendan /*
1893a737e0dSbrendan * The arc has filled available memory and has now warmed up.
1903a737e0dSbrendan */
1913a737e0dSbrendan static boolean_t arc_warm;
1923a737e0dSbrendan
193a2eea2e1Sahrens /*
194a2eea2e1Sahrens * These tunables are for performance analysis.
195a2eea2e1Sahrens */
196a2eea2e1Sahrens uint64_t zfs_arc_max;
197a2eea2e1Sahrens uint64_t zfs_arc_min;
1981116048bSek uint64_t zfs_arc_meta_limit = 0;
1993a5286a1SMatthew Ahrens uint64_t zfs_arc_meta_min = 0;
2005a98e54bSBrendan Gregg - Sun Microsystems int zfs_arc_grow_retry = 0;
2015a98e54bSBrendan Gregg - Sun Microsystems int zfs_arc_shrink_shift = 0;
2025a98e54bSBrendan Gregg - Sun Microsystems int zfs_arc_p_min_shift = 0;
2039253d63dSGeorge Wilson int zfs_disable_dup_eviction = 0;
20463e911b6SMatthew Ahrens int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
205a2eea2e1Sahrens
206fa9e4066Sahrens /*
207fa94a07fSbrendan * Note that buffers can be in one of 6 states:
208fa9e4066Sahrens * ARC_anon - anonymous (discussed below)
209ea8dc4b6Seschrock * ARC_mru - recently used, currently cached
210ea8dc4b6Seschrock * ARC_mru_ghost - recently used, no longer in cache
211ea8dc4b6Seschrock * ARC_mfu - frequently used, currently cached
212ea8dc4b6Seschrock * ARC_mfu_ghost - frequently used, no longer in cache
213fa94a07fSbrendan * ARC_l2c_only - exists in L2ARC but not other states
2140e8c6158Smaybee * When there are no active references to the buffer, they are
2150e8c6158Smaybee * linked onto a list in one of these arc states. These are
2160e8c6158Smaybee * the only buffers that can be evicted or deleted. Within each
2170e8c6158Smaybee * state there are multiple lists, one for meta-data and one for
2180e8c6158Smaybee * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
2190e8c6158Smaybee * etc.) is tracked separately so that it can be managed more
220fa94a07fSbrendan * explicitly: favored over data, limited explicitly.
221fa9e4066Sahrens *
222fa9e4066Sahrens * Anonymous buffers are buffers that are not associated with
223fa9e4066Sahrens * a DVA. These are buffers that hold dirty block copies
224fa9e4066Sahrens * before they are written to stable storage. By definition,
225ea8dc4b6Seschrock * they are "ref'd" and are considered part of arc_mru
226fa9e4066Sahrens * that cannot be freed. Generally, they will acquire a DVA
227ea8dc4b6Seschrock * as they are written and migrate onto the arc_mru list.
228fa94a07fSbrendan *
229fa94a07fSbrendan * The ARC_l2c_only state is for buffers that are in the second
230fa94a07fSbrendan * level ARC but no longer in any of the ARC_m* lists. The second
231fa94a07fSbrendan * level ARC itself may also contain buffers that are in any of
232fa94a07fSbrendan * the ARC_m* states - meaning that a buffer can exist in two
233fa94a07fSbrendan * places. The reason for the ARC_l2c_only state is to keep the
234fa94a07fSbrendan * buffer header in the hash table, so that reads that hit the
235fa94a07fSbrendan * second level ARC benefit from these fast lookups.
236fa9e4066Sahrens */
237fa9e4066Sahrens
238fa9e4066Sahrens typedef struct arc_state {
2390e8c6158Smaybee list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */
2400e8c6158Smaybee uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
2410e8c6158Smaybee uint64_t arcs_size; /* total amount of data in this state */
24244cb6abcSbmc kmutex_t arcs_mtx;
243fa9e4066Sahrens } arc_state_t;
244fa9e4066Sahrens
245fa94a07fSbrendan /* The 6 states: */
246fa9e4066Sahrens static arc_state_t ARC_anon;
247ea8dc4b6Seschrock static arc_state_t ARC_mru;
248ea8dc4b6Seschrock static arc_state_t ARC_mru_ghost;
249ea8dc4b6Seschrock static arc_state_t ARC_mfu;
250ea8dc4b6Seschrock static arc_state_t ARC_mfu_ghost;
251fa94a07fSbrendan static arc_state_t ARC_l2c_only;
252fa9e4066Sahrens
25344cb6abcSbmc typedef struct arc_stats {
25444cb6abcSbmc kstat_named_t arcstat_hits;
25544cb6abcSbmc kstat_named_t arcstat_misses;
25644cb6abcSbmc kstat_named_t arcstat_demand_data_hits;
25744cb6abcSbmc kstat_named_t arcstat_demand_data_misses;
25844cb6abcSbmc kstat_named_t arcstat_demand_metadata_hits;
25944cb6abcSbmc kstat_named_t arcstat_demand_metadata_misses;
26044cb6abcSbmc kstat_named_t arcstat_prefetch_data_hits;
26144cb6abcSbmc kstat_named_t arcstat_prefetch_data_misses;
26244cb6abcSbmc kstat_named_t arcstat_prefetch_metadata_hits;
26344cb6abcSbmc kstat_named_t arcstat_prefetch_metadata_misses;
26444cb6abcSbmc kstat_named_t arcstat_mru_hits;
26544cb6abcSbmc kstat_named_t arcstat_mru_ghost_hits;
26644cb6abcSbmc kstat_named_t arcstat_mfu_hits;
26744cb6abcSbmc kstat_named_t arcstat_mfu_ghost_hits;
26844cb6abcSbmc kstat_named_t arcstat_deleted;
26944cb6abcSbmc kstat_named_t arcstat_recycle_miss;
2703e30c24aSWill Andrews /*
2713e30c24aSWill Andrews * Number of buffers that could not be evicted because the hash lock
2723e30c24aSWill Andrews * was held by another thread. The lock may not necessarily be held
2733e30c24aSWill Andrews * by something using the same buffer, since hash locks are shared
2743e30c24aSWill Andrews * by multiple buffers.
2753e30c24aSWill Andrews */
27644cb6abcSbmc kstat_named_t arcstat_mutex_miss;
2773e30c24aSWill Andrews /*
2783e30c24aSWill Andrews * Number of buffers skipped because they have I/O in progress, are
2793e30c24aSWill Andrews * indirect prefetch buffers that have not lived long enough, or are
2803e30c24aSWill Andrews * not from the spa we're trying to evict from.
2813e30c24aSWill Andrews */ 28244cb6abcSbmc kstat_named_t arcstat_evict_skip; 2835ea40c06SBrendan Gregg - Sun Microsystems kstat_named_t arcstat_evict_l2_cached; 2845ea40c06SBrendan Gregg - Sun Microsystems kstat_named_t arcstat_evict_l2_eligible; 2855ea40c06SBrendan Gregg - Sun Microsystems kstat_named_t arcstat_evict_l2_ineligible; 28644cb6abcSbmc kstat_named_t arcstat_hash_elements; 28744cb6abcSbmc kstat_named_t arcstat_hash_elements_max; 28844cb6abcSbmc kstat_named_t arcstat_hash_collisions; 28944cb6abcSbmc kstat_named_t arcstat_hash_chains; 29044cb6abcSbmc kstat_named_t arcstat_hash_chain_max; 29144cb6abcSbmc kstat_named_t arcstat_p; 29244cb6abcSbmc kstat_named_t arcstat_c; 29344cb6abcSbmc kstat_named_t arcstat_c_min; 29444cb6abcSbmc kstat_named_t arcstat_c_max; 29544cb6abcSbmc kstat_named_t arcstat_size; 296fa94a07fSbrendan kstat_named_t arcstat_hdr_size; 2975a98e54bSBrendan Gregg - Sun Microsystems kstat_named_t arcstat_data_size; 2985a98e54bSBrendan Gregg - Sun Microsystems kstat_named_t arcstat_other_size; 299fa94a07fSbrendan kstat_named_t arcstat_l2_hits; 300fa94a07fSbrendan kstat_named_t arcstat_l2_misses; 301fa94a07fSbrendan kstat_named_t arcstat_l2_feeds; 302fa94a07fSbrendan kstat_named_t arcstat_l2_rw_clash; 3035a98e54bSBrendan Gregg - Sun Microsystems kstat_named_t arcstat_l2_read_bytes; 3045a98e54bSBrendan Gregg - Sun Microsystems kstat_named_t arcstat_l2_write_bytes; 305fa94a07fSbrendan kstat_named_t arcstat_l2_writes_sent; 306fa94a07fSbrendan kstat_named_t arcstat_l2_writes_done; 307fa94a07fSbrendan kstat_named_t arcstat_l2_writes_error; 308fa94a07fSbrendan kstat_named_t arcstat_l2_writes_hdr_miss; 309fa94a07fSbrendan kstat_named_t arcstat_l2_evict_lock_retry; 310fa94a07fSbrendan kstat_named_t arcstat_l2_evict_reading; 311fa94a07fSbrendan kstat_named_t arcstat_l2_free_on_write; 312fa94a07fSbrendan kstat_named_t arcstat_l2_abort_lowmem; 313fa94a07fSbrendan kstat_named_t arcstat_l2_cksum_bad; 314fa94a07fSbrendan kstat_named_t arcstat_l2_io_error; 315fa94a07fSbrendan kstat_named_t arcstat_l2_size; 316aad02571SSaso Kiselkov kstat_named_t arcstat_l2_asize; 317fa94a07fSbrendan kstat_named_t arcstat_l2_hdr_size; 318aad02571SSaso Kiselkov kstat_named_t arcstat_l2_compress_successes; 319aad02571SSaso Kiselkov kstat_named_t arcstat_l2_compress_zeros; 320aad02571SSaso Kiselkov kstat_named_t arcstat_l2_compress_failures; 3211ab7f2deSmaybee kstat_named_t arcstat_memory_throttle_count; 3229253d63dSGeorge Wilson kstat_named_t arcstat_duplicate_buffers; 3239253d63dSGeorge Wilson kstat_named_t arcstat_duplicate_buffers_size; 3249253d63dSGeorge Wilson kstat_named_t arcstat_duplicate_reads; 32520128a08SGeorge Wilson kstat_named_t arcstat_meta_used; 32620128a08SGeorge Wilson kstat_named_t arcstat_meta_limit; 32720128a08SGeorge Wilson kstat_named_t arcstat_meta_max; 3283a5286a1SMatthew Ahrens kstat_named_t arcstat_meta_min; 32944cb6abcSbmc } arc_stats_t; 33044cb6abcSbmc 33144cb6abcSbmc static arc_stats_t arc_stats = { 33244cb6abcSbmc { "hits", KSTAT_DATA_UINT64 }, 33344cb6abcSbmc { "misses", KSTAT_DATA_UINT64 }, 33444cb6abcSbmc { "demand_data_hits", KSTAT_DATA_UINT64 }, 33544cb6abcSbmc { "demand_data_misses", KSTAT_DATA_UINT64 }, 33644cb6abcSbmc { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 33744cb6abcSbmc { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 33844cb6abcSbmc { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 33944cb6abcSbmc { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 34044cb6abcSbmc { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 34144cb6abcSbmc { "prefetch_metadata_misses", 
KSTAT_DATA_UINT64 }, 34244cb6abcSbmc { "mru_hits", KSTAT_DATA_UINT64 }, 34344cb6abcSbmc { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 34444cb6abcSbmc { "mfu_hits", KSTAT_DATA_UINT64 }, 34544cb6abcSbmc { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 34644cb6abcSbmc { "deleted", KSTAT_DATA_UINT64 }, 34744cb6abcSbmc { "recycle_miss", KSTAT_DATA_UINT64 }, 34844cb6abcSbmc { "mutex_miss", KSTAT_DATA_UINT64 }, 34944cb6abcSbmc { "evict_skip", KSTAT_DATA_UINT64 }, 3505ea40c06SBrendan Gregg - Sun Microsystems { "evict_l2_cached", KSTAT_DATA_UINT64 }, 3515ea40c06SBrendan Gregg - Sun Microsystems { "evict_l2_eligible", KSTAT_DATA_UINT64 }, 3525ea40c06SBrendan Gregg - Sun Microsystems { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, 35344cb6abcSbmc { "hash_elements", KSTAT_DATA_UINT64 }, 35444cb6abcSbmc { "hash_elements_max", KSTAT_DATA_UINT64 }, 35544cb6abcSbmc { "hash_collisions", KSTAT_DATA_UINT64 }, 35644cb6abcSbmc { "hash_chains", KSTAT_DATA_UINT64 }, 35744cb6abcSbmc { "hash_chain_max", KSTAT_DATA_UINT64 }, 35844cb6abcSbmc { "p", KSTAT_DATA_UINT64 }, 35944cb6abcSbmc { "c", KSTAT_DATA_UINT64 }, 36044cb6abcSbmc { "c_min", KSTAT_DATA_UINT64 }, 36144cb6abcSbmc { "c_max", KSTAT_DATA_UINT64 }, 362fa94a07fSbrendan { "size", KSTAT_DATA_UINT64 }, 363fa94a07fSbrendan { "hdr_size", KSTAT_DATA_UINT64 }, 3645a98e54bSBrendan Gregg - Sun Microsystems { "data_size", KSTAT_DATA_UINT64 }, 3655a98e54bSBrendan Gregg - Sun Microsystems { "other_size", KSTAT_DATA_UINT64 }, 366fa94a07fSbrendan { "l2_hits", KSTAT_DATA_UINT64 }, 367fa94a07fSbrendan { "l2_misses", KSTAT_DATA_UINT64 }, 368fa94a07fSbrendan { "l2_feeds", KSTAT_DATA_UINT64 }, 369fa94a07fSbrendan { "l2_rw_clash", KSTAT_DATA_UINT64 }, 3705a98e54bSBrendan Gregg - Sun Microsystems { "l2_read_bytes", KSTAT_DATA_UINT64 }, 3715a98e54bSBrendan Gregg - Sun Microsystems { "l2_write_bytes", KSTAT_DATA_UINT64 }, 372fa94a07fSbrendan { "l2_writes_sent", KSTAT_DATA_UINT64 }, 373fa94a07fSbrendan { "l2_writes_done", KSTAT_DATA_UINT64 }, 374fa94a07fSbrendan { "l2_writes_error", KSTAT_DATA_UINT64 }, 375fa94a07fSbrendan { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 376fa94a07fSbrendan { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 377fa94a07fSbrendan { "l2_evict_reading", KSTAT_DATA_UINT64 }, 378fa94a07fSbrendan { "l2_free_on_write", KSTAT_DATA_UINT64 }, 379fa94a07fSbrendan { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 380fa94a07fSbrendan { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 381fa94a07fSbrendan { "l2_io_error", KSTAT_DATA_UINT64 }, 382fa94a07fSbrendan { "l2_size", KSTAT_DATA_UINT64 }, 383aad02571SSaso Kiselkov { "l2_asize", KSTAT_DATA_UINT64 }, 3841ab7f2deSmaybee { "l2_hdr_size", KSTAT_DATA_UINT64 }, 385aad02571SSaso Kiselkov { "l2_compress_successes", KSTAT_DATA_UINT64 }, 386aad02571SSaso Kiselkov { "l2_compress_zeros", KSTAT_DATA_UINT64 }, 387aad02571SSaso Kiselkov { "l2_compress_failures", KSTAT_DATA_UINT64 }, 3889253d63dSGeorge Wilson { "memory_throttle_count", KSTAT_DATA_UINT64 }, 3899253d63dSGeorge Wilson { "duplicate_buffers", KSTAT_DATA_UINT64 }, 3909253d63dSGeorge Wilson { "duplicate_buffers_size", KSTAT_DATA_UINT64 }, 39120128a08SGeorge Wilson { "duplicate_reads", KSTAT_DATA_UINT64 }, 39220128a08SGeorge Wilson { "arc_meta_used", KSTAT_DATA_UINT64 }, 39320128a08SGeorge Wilson { "arc_meta_limit", KSTAT_DATA_UINT64 }, 3943a5286a1SMatthew Ahrens { "arc_meta_max", KSTAT_DATA_UINT64 }, 3953a5286a1SMatthew Ahrens { "arc_meta_min", KSTAT_DATA_UINT64 } 39644cb6abcSbmc }; 39744cb6abcSbmc 39844cb6abcSbmc #define ARCSTAT(stat) (arc_stats.stat.value.ui64) 39944cb6abcSbmc 40044cb6abcSbmc #define 
ARCSTAT_INCR(stat, val) \ 401f7170741SWill Andrews atomic_add_64(&arc_stats.stat.value.ui64, (val)) 40244cb6abcSbmc 403b24ab676SJeff Bonwick #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 40444cb6abcSbmc #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 40544cb6abcSbmc 40644cb6abcSbmc #define ARCSTAT_MAX(stat, val) { \ 40744cb6abcSbmc uint64_t m; \ 40844cb6abcSbmc while ((val) > (m = arc_stats.stat.value.ui64) && \ 40944cb6abcSbmc (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 41044cb6abcSbmc continue; \ 41144cb6abcSbmc } 41244cb6abcSbmc 41344cb6abcSbmc #define ARCSTAT_MAXSTAT(stat) \ 41444cb6abcSbmc ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 41544cb6abcSbmc 41644cb6abcSbmc /* 41744cb6abcSbmc * We define a macro to allow ARC hits/misses to be easily broken down by 41844cb6abcSbmc * two separate conditions, giving a total of four different subtypes for 41944cb6abcSbmc * each of hits and misses (so eight statistics total). 42044cb6abcSbmc */ 42144cb6abcSbmc #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 42244cb6abcSbmc if (cond1) { \ 42344cb6abcSbmc if (cond2) { \ 42444cb6abcSbmc ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 42544cb6abcSbmc } else { \ 42644cb6abcSbmc ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 42744cb6abcSbmc } \ 42844cb6abcSbmc } else { \ 42944cb6abcSbmc if (cond2) { \ 43044cb6abcSbmc ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 43144cb6abcSbmc } else { \ 43244cb6abcSbmc ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 43344cb6abcSbmc } \ 43444cb6abcSbmc } 43544cb6abcSbmc 43644cb6abcSbmc kstat_t *arc_ksp; 437b24ab676SJeff Bonwick static arc_state_t *arc_anon; 43844cb6abcSbmc static arc_state_t *arc_mru; 43944cb6abcSbmc static arc_state_t *arc_mru_ghost; 44044cb6abcSbmc static arc_state_t *arc_mfu; 44144cb6abcSbmc static arc_state_t *arc_mfu_ghost; 442fa94a07fSbrendan static arc_state_t *arc_l2c_only; 44344cb6abcSbmc 44444cb6abcSbmc /* 44544cb6abcSbmc * There are several ARC variables that are critical to export as kstats -- 44644cb6abcSbmc * but we don't want to have to grovel around in the kstat whenever we wish to 44744cb6abcSbmc * manipulate them. For these variables, we therefore define them to be in 44844cb6abcSbmc * terms of the statistic variable. This assures that we are not introducing 44944cb6abcSbmc * the possibility of inconsistency by having shadow copies of the variables, 45044cb6abcSbmc * while still allowing the code to be readable. 
45144cb6abcSbmc */ 45244cb6abcSbmc #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 45344cb6abcSbmc #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 45444cb6abcSbmc #define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 45544cb6abcSbmc #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 45644cb6abcSbmc #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 45720128a08SGeorge Wilson #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */ 4583a5286a1SMatthew Ahrens #define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */ 45920128a08SGeorge Wilson #define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */ 46020128a08SGeorge Wilson #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */ 46144cb6abcSbmc 462aad02571SSaso Kiselkov #define L2ARC_IS_VALID_COMPRESS(_c_) \ 463aad02571SSaso Kiselkov ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY) 464aad02571SSaso Kiselkov 46544cb6abcSbmc static int arc_no_grow; /* Don't try to grow cache size */ 46644cb6abcSbmc static uint64_t arc_tempreserve; 4672fdbea25SAleksandr Guzovskiy static uint64_t arc_loaned_bytes; 468fa9e4066Sahrens 469fa94a07fSbrendan typedef struct l2arc_buf_hdr l2arc_buf_hdr_t; 470fa94a07fSbrendan 471fa9e4066Sahrens typedef struct arc_callback arc_callback_t; 472fa9e4066Sahrens 473fa9e4066Sahrens struct arc_callback { 474fa9e4066Sahrens void *acb_private; 475c717a561Smaybee arc_done_func_t *acb_done; 476fa9e4066Sahrens arc_buf_t *acb_buf; 477fa9e4066Sahrens zio_t *acb_zio_dummy; 478fa9e4066Sahrens arc_callback_t *acb_next; 479fa9e4066Sahrens }; 480fa9e4066Sahrens 481c717a561Smaybee typedef struct arc_write_callback arc_write_callback_t; 482c717a561Smaybee 483c717a561Smaybee struct arc_write_callback { 484c717a561Smaybee void *awcb_private; 485c717a561Smaybee arc_done_func_t *awcb_ready; 48669962b56SMatthew Ahrens arc_done_func_t *awcb_physdone; 487c717a561Smaybee arc_done_func_t *awcb_done; 488c717a561Smaybee arc_buf_t *awcb_buf; 489c717a561Smaybee }; 490c717a561Smaybee 491fa9e4066Sahrens struct arc_buf_hdr { 492fa9e4066Sahrens /* protected by hash lock */ 493fa9e4066Sahrens dva_t b_dva; 494fa9e4066Sahrens uint64_t b_birth; 495fa9e4066Sahrens uint64_t b_cksum0; 496fa9e4066Sahrens 4976b4acc8bSahrens kmutex_t b_freeze_lock; 4986b4acc8bSahrens zio_cksum_t *b_freeze_cksum; 4993f9d6ad7SLin Ling void *b_thawed; 5006b4acc8bSahrens 501fa9e4066Sahrens arc_buf_hdr_t *b_hash_next; 502fa9e4066Sahrens arc_buf_t *b_buf; 503*7adb730bSGeorge Wilson arc_flags_t b_flags; 504ea8dc4b6Seschrock uint32_t b_datacnt; 505fa9e4066Sahrens 506fa9e4066Sahrens arc_callback_t *b_acb; 507ad23a2dbSjohansen kcondvar_t b_cv; 508ad23a2dbSjohansen 509ad23a2dbSjohansen /* immutable */ 510ad23a2dbSjohansen arc_buf_contents_t b_type; 511ad23a2dbSjohansen uint64_t b_size; 512ac05c741SMark Maybee uint64_t b_spa; 513fa9e4066Sahrens 514fa9e4066Sahrens /* protected by arc state mutex */ 515fa9e4066Sahrens arc_state_t *b_state; 516fa9e4066Sahrens list_node_t b_arc_node; 517fa9e4066Sahrens 518fa9e4066Sahrens /* updated atomically */ 519fa9e4066Sahrens clock_t b_arc_access; 520fa9e4066Sahrens 521fa9e4066Sahrens /* self protecting */ 522fa9e4066Sahrens refcount_t b_refcnt; 523fa94a07fSbrendan 524fa94a07fSbrendan l2arc_buf_hdr_t *b_l2hdr; 525fa94a07fSbrendan list_node_t b_l2node; 526fa9e4066Sahrens }; 527fa9e4066Sahrens 528ea8dc4b6Seschrock static arc_buf_t *arc_eviction_list; 529ea8dc4b6Seschrock static kmutex_t arc_eviction_mtx; 
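/*
 * Illustrative use of the ARCSTAT_CONDSTAT macro defined above (a
 * sketch of a hit-accounting call, not a statement executed here):
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_FLAG_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * Exactly one of arcstat_demand_data_hits, arcstat_demand_metadata_hits,
 * arcstat_prefetch_data_hits or arcstat_prefetch_metadata_hits is
 * bumped, depending on which of the two conditions hold.
 */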
53040d7d650Smaybee static arc_buf_hdr_t arc_eviction_hdr; 5315ea40c06SBrendan Gregg - Sun Microsystems 532ea8dc4b6Seschrock #define GHOST_STATE(state) \ 533fa94a07fSbrendan ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 534fa94a07fSbrendan (state) == arc_l2c_only) 535ea8dc4b6Seschrock 536*7adb730bSGeorge Wilson #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE) 537*7adb730bSGeorge Wilson #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) 538*7adb730bSGeorge Wilson #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR) 539*7adb730bSGeorge Wilson #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH) 540*7adb730bSGeorge Wilson #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FLAG_FREED_IN_READ) 541*7adb730bSGeorge Wilson #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_FLAG_BUF_AVAILABLE) 542*7adb730bSGeorge Wilson #define HDR_FREE_IN_PROGRESS(hdr) \ 543*7adb730bSGeorge Wilson ((hdr)->b_flags & ARC_FLAG_FREE_IN_PROGRESS) 544*7adb730bSGeorge Wilson #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE) 545*7adb730bSGeorge Wilson #define HDR_L2_READING(hdr) \ 546*7adb730bSGeorge Wilson ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS && \ 547*7adb730bSGeorge Wilson (hdr)->b_l2hdr != NULL) 548*7adb730bSGeorge Wilson #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING) 549*7adb730bSGeorge Wilson #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED) 550*7adb730bSGeorge Wilson #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD) 551fa9e4066Sahrens 552e6c728e1Sbrendan /* 553e6c728e1Sbrendan * Other sizes 554e6c728e1Sbrendan */ 555e6c728e1Sbrendan 556e6c728e1Sbrendan #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 557e6c728e1Sbrendan #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 558e6c728e1Sbrendan 559fa9e4066Sahrens /* 560fa9e4066Sahrens * Hash table routines 561fa9e4066Sahrens */ 562fa9e4066Sahrens 563fa9e4066Sahrens #define HT_LOCK_PAD 64 564fa9e4066Sahrens 565fa9e4066Sahrens struct ht_lock { 566fa9e4066Sahrens kmutex_t ht_lock; 567fa9e4066Sahrens #ifdef _KERNEL 568fa9e4066Sahrens unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 569fa9e4066Sahrens #endif 570fa9e4066Sahrens }; 571fa9e4066Sahrens 572fa9e4066Sahrens #define BUF_LOCKS 256 573fa9e4066Sahrens typedef struct buf_hash_table { 574fa9e4066Sahrens uint64_t ht_mask; 575fa9e4066Sahrens arc_buf_hdr_t **ht_table; 576fa9e4066Sahrens struct ht_lock ht_locks[BUF_LOCKS]; 577fa9e4066Sahrens } buf_hash_table_t; 578fa9e4066Sahrens 579fa9e4066Sahrens static buf_hash_table_t buf_hash_table; 580fa9e4066Sahrens 581fa9e4066Sahrens #define BUF_HASH_INDEX(spa, dva, birth) \ 582fa9e4066Sahrens (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 583fa9e4066Sahrens #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 584fa9e4066Sahrens #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 5853f9d6ad7SLin Ling #define HDR_LOCK(hdr) \ 5863f9d6ad7SLin Ling (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth))) 587fa9e4066Sahrens 588fa9e4066Sahrens uint64_t zfs_crc64_table[256]; 589fa9e4066Sahrens 590fa94a07fSbrendan /* 591fa94a07fSbrendan * Level 2 ARC 592fa94a07fSbrendan */ 593fa94a07fSbrendan 594fa94a07fSbrendan #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 595aad02571SSaso Kiselkov #define L2ARC_HEADROOM 2 /* num of writes */ 596aad02571SSaso Kiselkov /* 597aad02571SSaso Kiselkov * If we discover during ARC scan any buffers to be compressed, we 
boost 598aad02571SSaso Kiselkov * our headroom for the next scanning cycle by this percentage multiple. 599aad02571SSaso Kiselkov */ 600aad02571SSaso Kiselkov #define L2ARC_HEADROOM_BOOST 200 6015a98e54bSBrendan Gregg - Sun Microsystems #define L2ARC_FEED_SECS 1 /* caching interval secs */ 6025a98e54bSBrendan Gregg - Sun Microsystems #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ 603fa94a07fSbrendan 604fa94a07fSbrendan #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 605fa94a07fSbrendan #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 606fa94a07fSbrendan 607f7170741SWill Andrews /* L2ARC Performance Tunables */ 608fa94a07fSbrendan uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 6093a737e0dSbrendan uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 610fa94a07fSbrendan uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 611aad02571SSaso Kiselkov uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST; 612fa94a07fSbrendan uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 6135a98e54bSBrendan Gregg - Sun Microsystems uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ 614fa94a07fSbrendan boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 6155a98e54bSBrendan Gregg - Sun Microsystems boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ 6165a98e54bSBrendan Gregg - Sun Microsystems boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ 617fa94a07fSbrendan 618fa94a07fSbrendan /* 619fa94a07fSbrendan * L2ARC Internals 620fa94a07fSbrendan */ 621fa94a07fSbrendan typedef struct l2arc_dev { 622fa94a07fSbrendan vdev_t *l2ad_vdev; /* vdev */ 623fa94a07fSbrendan spa_t *l2ad_spa; /* spa */ 624fa94a07fSbrendan uint64_t l2ad_hand; /* next write location */ 625fa94a07fSbrendan uint64_t l2ad_start; /* first addr on device */ 626fa94a07fSbrendan uint64_t l2ad_end; /* last addr on device */ 627fa94a07fSbrendan uint64_t l2ad_evict; /* last addr eviction reached */ 628fa94a07fSbrendan boolean_t l2ad_first; /* first sweep through */ 6295a98e54bSBrendan Gregg - Sun Microsystems boolean_t l2ad_writing; /* currently writing */ 630fa94a07fSbrendan list_t *l2ad_buflist; /* buffer list */ 631fa94a07fSbrendan list_node_t l2ad_node; /* device list node */ 632fa94a07fSbrendan } l2arc_dev_t; 633fa94a07fSbrendan 634fa94a07fSbrendan static list_t L2ARC_dev_list; /* device list */ 635fa94a07fSbrendan static list_t *l2arc_dev_list; /* device list pointer */ 636fa94a07fSbrendan static kmutex_t l2arc_dev_mtx; /* device list mutex */ 637fa94a07fSbrendan static l2arc_dev_t *l2arc_dev_last; /* last device used */ 638fa94a07fSbrendan static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 639fa94a07fSbrendan static list_t L2ARC_free_on_write; /* free after write buf list */ 640fa94a07fSbrendan static list_t *l2arc_free_on_write; /* free after write list ptr */ 641fa94a07fSbrendan static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 642fa94a07fSbrendan static uint64_t l2arc_ndev; /* number of devices */ 643fa94a07fSbrendan 644fa94a07fSbrendan typedef struct l2arc_read_callback { 645aad02571SSaso Kiselkov arc_buf_t *l2rcb_buf; /* read buffer */ 646aad02571SSaso Kiselkov spa_t *l2rcb_spa; /* spa */ 647aad02571SSaso Kiselkov blkptr_t l2rcb_bp; /* original blkptr */ 6487802d7bfSMatthew Ahrens zbookmark_phys_t l2rcb_zb; /* original bookmark */ 649aad02571SSaso Kiselkov int l2rcb_flags; /* original flags */ 650aad02571SSaso Kiselkov enum zio_compress 
l2rcb_compress; /* applied compress */ 651fa94a07fSbrendan } l2arc_read_callback_t; 652fa94a07fSbrendan 653fa94a07fSbrendan typedef struct l2arc_write_callback { 654fa94a07fSbrendan l2arc_dev_t *l2wcb_dev; /* device info */ 655fa94a07fSbrendan arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 656fa94a07fSbrendan } l2arc_write_callback_t; 657fa94a07fSbrendan 658fa94a07fSbrendan struct l2arc_buf_hdr { 659fa94a07fSbrendan /* protected by arc_buf_hdr mutex */ 660aad02571SSaso Kiselkov l2arc_dev_t *b_dev; /* L2ARC device */ 661aad02571SSaso Kiselkov uint64_t b_daddr; /* disk address, offset byte */ 662aad02571SSaso Kiselkov /* compression applied to buffer data */ 663aad02571SSaso Kiselkov enum zio_compress b_compress; 664aad02571SSaso Kiselkov /* real alloc'd buffer size depending on b_compress applied */ 665aad02571SSaso Kiselkov int b_asize; 666aad02571SSaso Kiselkov /* temporary buffer holder for in-flight compressed data */ 667aad02571SSaso Kiselkov void *b_tmp_cdata; 668fa94a07fSbrendan }; 669fa94a07fSbrendan 670fa94a07fSbrendan typedef struct l2arc_data_free { 671fa94a07fSbrendan /* protected by l2arc_free_on_write_mtx */ 672fa94a07fSbrendan void *l2df_data; 673fa94a07fSbrendan size_t l2df_size; 674fa94a07fSbrendan void (*l2df_func)(void *, size_t); 675fa94a07fSbrendan list_node_t l2df_list_node; 676fa94a07fSbrendan } l2arc_data_free_t; 677fa94a07fSbrendan 678fa94a07fSbrendan static kmutex_t l2arc_feed_thr_lock; 679fa94a07fSbrendan static kcondvar_t l2arc_feed_thr_cv; 680fa94a07fSbrendan static uint8_t l2arc_thread_exit; 681fa94a07fSbrendan 682*7adb730bSGeorge Wilson static void arc_get_data_buf(arc_buf_t *); 683*7adb730bSGeorge Wilson static void arc_access(arc_buf_hdr_t *, kmutex_t *); 684*7adb730bSGeorge Wilson static int arc_evict_needed(arc_buf_contents_t); 685*7adb730bSGeorge Wilson static void arc_evict_ghost(arc_state_t *, uint64_t, int64_t); 686*7adb730bSGeorge Wilson static void arc_buf_watch(arc_buf_t *); 687*7adb730bSGeorge Wilson 688*7adb730bSGeorge Wilson static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *); 689*7adb730bSGeorge Wilson static void l2arc_read_done(zio_t *); 690fa94a07fSbrendan static void l2arc_hdr_stat_add(void); 691fa94a07fSbrendan static void l2arc_hdr_stat_remove(void); 692fa94a07fSbrendan 693*7adb730bSGeorge Wilson static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *); 694*7adb730bSGeorge Wilson static void l2arc_decompress_zio(zio_t *, arc_buf_hdr_t *, enum zio_compress); 695*7adb730bSGeorge Wilson static void l2arc_release_cdata_buf(arc_buf_hdr_t *); 696aad02571SSaso Kiselkov 697fa9e4066Sahrens static uint64_t 698ac05c741SMark Maybee buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) 699fa9e4066Sahrens { 700fa9e4066Sahrens uint8_t *vdva = (uint8_t *)dva; 701fa9e4066Sahrens uint64_t crc = -1ULL; 702fa9e4066Sahrens int i; 703fa9e4066Sahrens 704fa9e4066Sahrens ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 705fa9e4066Sahrens 706fa9e4066Sahrens for (i = 0; i < sizeof (dva_t); i++) 707fa9e4066Sahrens crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 708fa9e4066Sahrens 709ac05c741SMark Maybee crc ^= (spa>>8) ^ birth; 710fa9e4066Sahrens 711fa9e4066Sahrens return (crc); 712fa9e4066Sahrens } 713fa9e4066Sahrens 714fa9e4066Sahrens #define BUF_EMPTY(buf) \ 715fa9e4066Sahrens ((buf)->b_dva.dva_word[0] == 0 && \ 716fa9e4066Sahrens (buf)->b_dva.dva_word[1] == 0 && \ 71743466aaeSMax Grossman (buf)->b_cksum0 == 0) 718fa9e4066Sahrens 719fa9e4066Sahrens #define BUF_EQUAL(spa, dva, birth, buf) \ 720fa9e4066Sahrens 
((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 721fa9e4066Sahrens ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 722fa9e4066Sahrens ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 723fa9e4066Sahrens 7243f9d6ad7SLin Ling static void 7253f9d6ad7SLin Ling buf_discard_identity(arc_buf_hdr_t *hdr) 7263f9d6ad7SLin Ling { 7273f9d6ad7SLin Ling hdr->b_dva.dva_word[0] = 0; 7283f9d6ad7SLin Ling hdr->b_dva.dva_word[1] = 0; 7293f9d6ad7SLin Ling hdr->b_birth = 0; 7303f9d6ad7SLin Ling hdr->b_cksum0 = 0; 7313f9d6ad7SLin Ling } 7323f9d6ad7SLin Ling 733fa9e4066Sahrens static arc_buf_hdr_t * 7345d7b4d43SMatthew Ahrens buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp) 735fa9e4066Sahrens { 7365d7b4d43SMatthew Ahrens const dva_t *dva = BP_IDENTITY(bp); 7375d7b4d43SMatthew Ahrens uint64_t birth = BP_PHYSICAL_BIRTH(bp); 738fa9e4066Sahrens uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 739fa9e4066Sahrens kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 740*7adb730bSGeorge Wilson arc_buf_hdr_t *hdr; 741fa9e4066Sahrens 742fa9e4066Sahrens mutex_enter(hash_lock); 743*7adb730bSGeorge Wilson for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL; 744*7adb730bSGeorge Wilson hdr = hdr->b_hash_next) { 745*7adb730bSGeorge Wilson if (BUF_EQUAL(spa, dva, birth, hdr)) { 746fa9e4066Sahrens *lockp = hash_lock; 747*7adb730bSGeorge Wilson return (hdr); 748fa9e4066Sahrens } 749fa9e4066Sahrens } 750fa9e4066Sahrens mutex_exit(hash_lock); 751fa9e4066Sahrens *lockp = NULL; 752fa9e4066Sahrens return (NULL); 753fa9e4066Sahrens } 754fa9e4066Sahrens 755fa9e4066Sahrens /* 756fa9e4066Sahrens * Insert an entry into the hash table. If there is already an element 757fa9e4066Sahrens * equal to elem in the hash table, then the already existing element 758fa9e4066Sahrens * will be returned and the new element will not be inserted. 759fa9e4066Sahrens * Otherwise returns NULL. 
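 *
 * A minimal caller-side sketch: a typical caller checks the return
 * value for a collision, e.g.
 *
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... another thread inserted an equal header first ...
 *	}
 *
 * In either case the hash lock is returned held through the lockp
 * argument, so the caller is responsible for dropping it.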
760fa9e4066Sahrens */ 761fa9e4066Sahrens static arc_buf_hdr_t * 762*7adb730bSGeorge Wilson buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp) 763fa9e4066Sahrens { 764*7adb730bSGeorge Wilson uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 765fa9e4066Sahrens kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 766*7adb730bSGeorge Wilson arc_buf_hdr_t *fhdr; 76744cb6abcSbmc uint32_t i; 768fa9e4066Sahrens 769*7adb730bSGeorge Wilson ASSERT(!DVA_IS_EMPTY(&hdr->b_dva)); 770*7adb730bSGeorge Wilson ASSERT(hdr->b_birth != 0); 771*7adb730bSGeorge Wilson ASSERT(!HDR_IN_HASH_TABLE(hdr)); 772fa9e4066Sahrens *lockp = hash_lock; 773fa9e4066Sahrens mutex_enter(hash_lock); 774*7adb730bSGeorge Wilson for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL; 775*7adb730bSGeorge Wilson fhdr = fhdr->b_hash_next, i++) { 776*7adb730bSGeorge Wilson if (BUF_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr)) 777*7adb730bSGeorge Wilson return (fhdr); 778fa9e4066Sahrens } 779fa9e4066Sahrens 780*7adb730bSGeorge Wilson hdr->b_hash_next = buf_hash_table.ht_table[idx]; 781*7adb730bSGeorge Wilson buf_hash_table.ht_table[idx] = hdr; 782*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE; 783fa9e4066Sahrens 784fa9e4066Sahrens /* collect some hash table performance data */ 785fa9e4066Sahrens if (i > 0) { 78644cb6abcSbmc ARCSTAT_BUMP(arcstat_hash_collisions); 787fa9e4066Sahrens if (i == 1) 78844cb6abcSbmc ARCSTAT_BUMP(arcstat_hash_chains); 78944cb6abcSbmc 79044cb6abcSbmc ARCSTAT_MAX(arcstat_hash_chain_max, i); 791fa9e4066Sahrens } 79244cb6abcSbmc 79344cb6abcSbmc ARCSTAT_BUMP(arcstat_hash_elements); 79444cb6abcSbmc ARCSTAT_MAXSTAT(arcstat_hash_elements); 795fa9e4066Sahrens 796fa9e4066Sahrens return (NULL); 797fa9e4066Sahrens } 798fa9e4066Sahrens 799fa9e4066Sahrens static void 800*7adb730bSGeorge Wilson buf_hash_remove(arc_buf_hdr_t *hdr) 801fa9e4066Sahrens { 802*7adb730bSGeorge Wilson arc_buf_hdr_t *fhdr, **hdrp; 803*7adb730bSGeorge Wilson uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 804fa9e4066Sahrens 805fa9e4066Sahrens ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 806*7adb730bSGeorge Wilson ASSERT(HDR_IN_HASH_TABLE(hdr)); 807fa9e4066Sahrens 808*7adb730bSGeorge Wilson hdrp = &buf_hash_table.ht_table[idx]; 809*7adb730bSGeorge Wilson while ((fhdr = *hdrp) != hdr) { 810*7adb730bSGeorge Wilson ASSERT(fhdr != NULL); 811*7adb730bSGeorge Wilson hdrp = &fhdr->b_hash_next; 812fa9e4066Sahrens } 813*7adb730bSGeorge Wilson *hdrp = hdr->b_hash_next; 814*7adb730bSGeorge Wilson hdr->b_hash_next = NULL; 815*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_IN_HASH_TABLE; 816fa9e4066Sahrens 817fa9e4066Sahrens /* collect some hash table performance data */ 81844cb6abcSbmc ARCSTAT_BUMPDOWN(arcstat_hash_elements); 81944cb6abcSbmc 820fa9e4066Sahrens if (buf_hash_table.ht_table[idx] && 821fa9e4066Sahrens buf_hash_table.ht_table[idx]->b_hash_next == NULL) 82244cb6abcSbmc ARCSTAT_BUMPDOWN(arcstat_hash_chains); 823fa9e4066Sahrens } 824fa9e4066Sahrens 825fa9e4066Sahrens /* 826fa9e4066Sahrens * Global data structures and functions for the buf kmem cache. 
827fa9e4066Sahrens */ 828fa9e4066Sahrens static kmem_cache_t *hdr_cache; 829fa9e4066Sahrens static kmem_cache_t *buf_cache; 830fa9e4066Sahrens 831fa9e4066Sahrens static void 832fa9e4066Sahrens buf_fini(void) 833fa9e4066Sahrens { 834fa9e4066Sahrens int i; 835fa9e4066Sahrens 836fa9e4066Sahrens kmem_free(buf_hash_table.ht_table, 837fa9e4066Sahrens (buf_hash_table.ht_mask + 1) * sizeof (void *)); 838fa9e4066Sahrens for (i = 0; i < BUF_LOCKS; i++) 839fa9e4066Sahrens mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 840fa9e4066Sahrens kmem_cache_destroy(hdr_cache); 841fa9e4066Sahrens kmem_cache_destroy(buf_cache); 842fa9e4066Sahrens } 843fa9e4066Sahrens 844fa9e4066Sahrens /* 845fa9e4066Sahrens * Constructor callback - called when the cache is empty 846fa9e4066Sahrens * and a new buf is requested. 847fa9e4066Sahrens */ 848fa9e4066Sahrens /* ARGSUSED */ 849fa9e4066Sahrens static int 850fa9e4066Sahrens hdr_cons(void *vbuf, void *unused, int kmflag) 851fa9e4066Sahrens { 852*7adb730bSGeorge Wilson arc_buf_hdr_t *hdr = vbuf; 853fa9e4066Sahrens 854*7adb730bSGeorge Wilson bzero(hdr, sizeof (arc_buf_hdr_t)); 855*7adb730bSGeorge Wilson refcount_create(&hdr->b_refcnt); 856*7adb730bSGeorge Wilson cv_init(&hdr->b_cv, NULL, CV_DEFAULT, NULL); 857*7adb730bSGeorge Wilson mutex_init(&hdr->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 8585a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 859fa94a07fSbrendan 860fa9e4066Sahrens return (0); 861fa9e4066Sahrens } 862fa9e4066Sahrens 8636f83844dSMark Maybee /* ARGSUSED */ 8646f83844dSMark Maybee static int 8656f83844dSMark Maybee buf_cons(void *vbuf, void *unused, int kmflag) 8666f83844dSMark Maybee { 8676f83844dSMark Maybee arc_buf_t *buf = vbuf; 8686f83844dSMark Maybee 8696f83844dSMark Maybee bzero(buf, sizeof (arc_buf_t)); 8703f9d6ad7SLin Ling mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); 8715a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); 8725a98e54bSBrendan Gregg - Sun Microsystems 8736f83844dSMark Maybee return (0); 8746f83844dSMark Maybee } 8756f83844dSMark Maybee 876fa9e4066Sahrens /* 877fa9e4066Sahrens * Destructor callback - called when a cached buf is 878fa9e4066Sahrens * no longer required. 879fa9e4066Sahrens */ 880fa9e4066Sahrens /* ARGSUSED */ 881fa9e4066Sahrens static void 882fa9e4066Sahrens hdr_dest(void *vbuf, void *unused) 883fa9e4066Sahrens { 884*7adb730bSGeorge Wilson arc_buf_hdr_t *hdr = vbuf; 885fa9e4066Sahrens 886*7adb730bSGeorge Wilson ASSERT(BUF_EMPTY(hdr)); 887*7adb730bSGeorge Wilson refcount_destroy(&hdr->b_refcnt); 888*7adb730bSGeorge Wilson cv_destroy(&hdr->b_cv); 889*7adb730bSGeorge Wilson mutex_destroy(&hdr->b_freeze_lock); 8905a98e54bSBrendan Gregg - Sun Microsystems arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 891fa9e4066Sahrens } 892fa9e4066Sahrens 8936f83844dSMark Maybee /* ARGSUSED */ 8946f83844dSMark Maybee static void 8956f83844dSMark Maybee buf_dest(void *vbuf, void *unused) 8966f83844dSMark Maybee { 8976f83844dSMark Maybee arc_buf_t *buf = vbuf; 8986f83844dSMark Maybee 8993f9d6ad7SLin Ling mutex_destroy(&buf->b_evict_lock); 9005a98e54bSBrendan Gregg - Sun Microsystems arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 9016f83844dSMark Maybee } 9026f83844dSMark Maybee 903fa9e4066Sahrens /* 904fa9e4066Sahrens * Reclaim callback -- invoked when memory is low. 
905fa9e4066Sahrens */ 906fa9e4066Sahrens /* ARGSUSED */ 907fa9e4066Sahrens static void 908fa9e4066Sahrens hdr_recl(void *unused) 909fa9e4066Sahrens { 910fa9e4066Sahrens dprintf("hdr_recl called\n"); 91149e3519aSmaybee /* 91249e3519aSmaybee * umem calls the reclaim func when we destroy the buf cache, 91349e3519aSmaybee * which is after we do arc_fini(). 91449e3519aSmaybee */ 91549e3519aSmaybee if (!arc_dead) 91649e3519aSmaybee cv_signal(&arc_reclaim_thr_cv); 917fa9e4066Sahrens } 918fa9e4066Sahrens 919fa9e4066Sahrens static void 920fa9e4066Sahrens buf_init(void) 921fa9e4066Sahrens { 922fa9e4066Sahrens uint64_t *ct; 923ea8dc4b6Seschrock uint64_t hsize = 1ULL << 12; 924fa9e4066Sahrens int i, j; 925fa9e4066Sahrens 926fa9e4066Sahrens /* 927fa9e4066Sahrens * The hash table is big enough to fill all of physical memory 92863e911b6SMatthew Ahrens * with an average block size of zfs_arc_average_blocksize (default 8K). 92963e911b6SMatthew Ahrens * By default, the table will take up 93063e911b6SMatthew Ahrens * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers). 931fa9e4066Sahrens */ 93263e911b6SMatthew Ahrens while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE) 933fa9e4066Sahrens hsize <<= 1; 934ea8dc4b6Seschrock retry: 935fa9e4066Sahrens buf_hash_table.ht_mask = hsize - 1; 936ea8dc4b6Seschrock buf_hash_table.ht_table = 937ea8dc4b6Seschrock kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 938ea8dc4b6Seschrock if (buf_hash_table.ht_table == NULL) { 939ea8dc4b6Seschrock ASSERT(hsize > (1ULL << 8)); 940ea8dc4b6Seschrock hsize >>= 1; 941ea8dc4b6Seschrock goto retry; 942ea8dc4b6Seschrock } 943fa9e4066Sahrens 944fa9e4066Sahrens hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 945fa9e4066Sahrens 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 946fa9e4066Sahrens buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 9476f83844dSMark Maybee 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 948fa9e4066Sahrens 949fa9e4066Sahrens for (i = 0; i < 256; i++) 950fa9e4066Sahrens for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 951fa9e4066Sahrens *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 952fa9e4066Sahrens 953fa9e4066Sahrens for (i = 0; i < BUF_LOCKS; i++) { 954fa9e4066Sahrens mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 955fa9e4066Sahrens NULL, MUTEX_DEFAULT, NULL); 956fa9e4066Sahrens } 957fa9e4066Sahrens } 958fa9e4066Sahrens 959fa9e4066Sahrens #define ARC_MINTIME (hz>>4) /* 62 ms */ 960fa9e4066Sahrens 9616b4acc8bSahrens static void 9626b4acc8bSahrens arc_cksum_verify(arc_buf_t *buf) 9636b4acc8bSahrens { 9646b4acc8bSahrens zio_cksum_t zc; 9656b4acc8bSahrens 966cc60fd72Sahrens if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 9676b4acc8bSahrens return; 9686b4acc8bSahrens 9696b4acc8bSahrens mutex_enter(&buf->b_hdr->b_freeze_lock); 9703ccfa83cSahrens if (buf->b_hdr->b_freeze_cksum == NULL || 971*7adb730bSGeorge Wilson (buf->b_hdr->b_flags & ARC_FLAG_IO_ERROR)) { 9726b4acc8bSahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 9736b4acc8bSahrens return; 9746b4acc8bSahrens } 9756b4acc8bSahrens fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 9766b4acc8bSahrens if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 9776b4acc8bSahrens panic("buffer modified while frozen!"); 9786b4acc8bSahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 9796b4acc8bSahrens } 9806b4acc8bSahrens 981fa94a07fSbrendan static int 982fa94a07fSbrendan arc_cksum_equal(arc_buf_t *buf) 983fa94a07fSbrendan { 984fa94a07fSbrendan zio_cksum_t zc; 985fa94a07fSbrendan int equal; 986fa94a07fSbrendan 
987fa94a07fSbrendan mutex_enter(&buf->b_hdr->b_freeze_lock); 988fa94a07fSbrendan fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 989fa94a07fSbrendan equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 990fa94a07fSbrendan mutex_exit(&buf->b_hdr->b_freeze_lock); 991fa94a07fSbrendan 992fa94a07fSbrendan return (equal); 993fa94a07fSbrendan } 994fa94a07fSbrendan 9956b4acc8bSahrens static void 996fa94a07fSbrendan arc_cksum_compute(arc_buf_t *buf, boolean_t force) 9976b4acc8bSahrens { 998fa94a07fSbrendan if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 9996b4acc8bSahrens return; 10006b4acc8bSahrens 10016b4acc8bSahrens mutex_enter(&buf->b_hdr->b_freeze_lock); 10026b4acc8bSahrens if (buf->b_hdr->b_freeze_cksum != NULL) { 10036b4acc8bSahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 10046b4acc8bSahrens return; 10056b4acc8bSahrens } 10066b4acc8bSahrens buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 10076b4acc8bSahrens fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 10086b4acc8bSahrens buf->b_hdr->b_freeze_cksum); 10096b4acc8bSahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 1010cd1c8b85SMatthew Ahrens arc_buf_watch(buf); 1011cd1c8b85SMatthew Ahrens } 1012cd1c8b85SMatthew Ahrens 1013cd1c8b85SMatthew Ahrens #ifndef _KERNEL 1014cd1c8b85SMatthew Ahrens typedef struct procctl { 1015cd1c8b85SMatthew Ahrens long cmd; 1016cd1c8b85SMatthew Ahrens prwatch_t prwatch; 1017cd1c8b85SMatthew Ahrens } procctl_t; 1018cd1c8b85SMatthew Ahrens #endif 1019cd1c8b85SMatthew Ahrens 1020cd1c8b85SMatthew Ahrens /* ARGSUSED */ 1021cd1c8b85SMatthew Ahrens static void 1022cd1c8b85SMatthew Ahrens arc_buf_unwatch(arc_buf_t *buf) 1023cd1c8b85SMatthew Ahrens { 1024cd1c8b85SMatthew Ahrens #ifndef _KERNEL 1025cd1c8b85SMatthew Ahrens if (arc_watch) { 1026cd1c8b85SMatthew Ahrens int result; 1027cd1c8b85SMatthew Ahrens procctl_t ctl; 1028cd1c8b85SMatthew Ahrens ctl.cmd = PCWATCH; 1029cd1c8b85SMatthew Ahrens ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1030cd1c8b85SMatthew Ahrens ctl.prwatch.pr_size = 0; 1031cd1c8b85SMatthew Ahrens ctl.prwatch.pr_wflags = 0; 1032cd1c8b85SMatthew Ahrens result = write(arc_procfd, &ctl, sizeof (ctl)); 1033cd1c8b85SMatthew Ahrens ASSERT3U(result, ==, sizeof (ctl)); 1034cd1c8b85SMatthew Ahrens } 1035cd1c8b85SMatthew Ahrens #endif 1036cd1c8b85SMatthew Ahrens } 1037cd1c8b85SMatthew Ahrens 1038cd1c8b85SMatthew Ahrens /* ARGSUSED */ 1039cd1c8b85SMatthew Ahrens static void 1040cd1c8b85SMatthew Ahrens arc_buf_watch(arc_buf_t *buf) 1041cd1c8b85SMatthew Ahrens { 1042cd1c8b85SMatthew Ahrens #ifndef _KERNEL 1043cd1c8b85SMatthew Ahrens if (arc_watch) { 1044cd1c8b85SMatthew Ahrens int result; 1045cd1c8b85SMatthew Ahrens procctl_t ctl; 1046cd1c8b85SMatthew Ahrens ctl.cmd = PCWATCH; 1047cd1c8b85SMatthew Ahrens ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1048cd1c8b85SMatthew Ahrens ctl.prwatch.pr_size = buf->b_hdr->b_size; 1049cd1c8b85SMatthew Ahrens ctl.prwatch.pr_wflags = WA_WRITE; 1050cd1c8b85SMatthew Ahrens result = write(arc_procfd, &ctl, sizeof (ctl)); 1051cd1c8b85SMatthew Ahrens ASSERT3U(result, ==, sizeof (ctl)); 1052cd1c8b85SMatthew Ahrens } 1053cd1c8b85SMatthew Ahrens #endif 10546b4acc8bSahrens } 10556b4acc8bSahrens 10566b4acc8bSahrens void 10576b4acc8bSahrens arc_buf_thaw(arc_buf_t *buf) 10586b4acc8bSahrens { 1059fa94a07fSbrendan if (zfs_flags & ZFS_DEBUG_MODIFY) { 1060fa94a07fSbrendan if (buf->b_hdr->b_state != arc_anon) 1061fa94a07fSbrendan panic("modifying non-anon buffer!"); 1062*7adb730bSGeorge Wilson if (buf->b_hdr->b_flags & ARC_FLAG_IO_IN_PROGRESS) 
1063fa94a07fSbrendan panic("modifying buffer while i/o in progress!"); 1064fa94a07fSbrendan arc_cksum_verify(buf); 1065fa94a07fSbrendan } 10666b4acc8bSahrens 10676b4acc8bSahrens mutex_enter(&buf->b_hdr->b_freeze_lock); 10686b4acc8bSahrens if (buf->b_hdr->b_freeze_cksum != NULL) { 10696b4acc8bSahrens kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 10706b4acc8bSahrens buf->b_hdr->b_freeze_cksum = NULL; 10716b4acc8bSahrens } 10723f9d6ad7SLin Ling 10733f9d6ad7SLin Ling if (zfs_flags & ZFS_DEBUG_MODIFY) { 10743f9d6ad7SLin Ling if (buf->b_hdr->b_thawed) 10753f9d6ad7SLin Ling kmem_free(buf->b_hdr->b_thawed, 1); 10763f9d6ad7SLin Ling buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP); 10773f9d6ad7SLin Ling } 10783f9d6ad7SLin Ling 10796b4acc8bSahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 1080cd1c8b85SMatthew Ahrens 1081cd1c8b85SMatthew Ahrens arc_buf_unwatch(buf); 10826b4acc8bSahrens } 10836b4acc8bSahrens 10846b4acc8bSahrens void 10856b4acc8bSahrens arc_buf_freeze(arc_buf_t *buf) 10866b4acc8bSahrens { 10873f9d6ad7SLin Ling kmutex_t *hash_lock; 10883f9d6ad7SLin Ling 1089cc60fd72Sahrens if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1090cc60fd72Sahrens return; 1091cc60fd72Sahrens 10923f9d6ad7SLin Ling hash_lock = HDR_LOCK(buf->b_hdr); 10933f9d6ad7SLin Ling mutex_enter(hash_lock); 10943f9d6ad7SLin Ling 10956b4acc8bSahrens ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 109644cb6abcSbmc buf->b_hdr->b_state == arc_anon); 1097fa94a07fSbrendan arc_cksum_compute(buf, B_FALSE); 10983f9d6ad7SLin Ling mutex_exit(hash_lock); 1099cd1c8b85SMatthew Ahrens 11006b4acc8bSahrens } 11016b4acc8bSahrens 1102fa9e4066Sahrens static void 1103*7adb730bSGeorge Wilson add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) 1104fa9e4066Sahrens { 1105fa9e4066Sahrens ASSERT(MUTEX_HELD(hash_lock)); 1106fa9e4066Sahrens 1107*7adb730bSGeorge Wilson if ((refcount_add(&hdr->b_refcnt, tag) == 1) && 1108*7adb730bSGeorge Wilson (hdr->b_state != arc_anon)) { 1109*7adb730bSGeorge Wilson uint64_t delta = hdr->b_size * hdr->b_datacnt; 1110*7adb730bSGeorge Wilson list_t *list = &hdr->b_state->arcs_list[hdr->b_type]; 1111*7adb730bSGeorge Wilson uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 1112*7adb730bSGeorge Wilson 1113*7adb730bSGeorge Wilson ASSERT(!MUTEX_HELD(&hdr->b_state->arcs_mtx)); 1114*7adb730bSGeorge Wilson mutex_enter(&hdr->b_state->arcs_mtx); 1115*7adb730bSGeorge Wilson ASSERT(list_link_active(&hdr->b_arc_node)); 1116*7adb730bSGeorge Wilson list_remove(list, hdr); 1117*7adb730bSGeorge Wilson if (GHOST_STATE(hdr->b_state)) { 1118*7adb730bSGeorge Wilson ASSERT0(hdr->b_datacnt); 1119*7adb730bSGeorge Wilson ASSERT3P(hdr->b_buf, ==, NULL); 1120*7adb730bSGeorge Wilson delta = hdr->b_size; 1121ea8dc4b6Seschrock } 1122ea8dc4b6Seschrock ASSERT(delta > 0); 11230e8c6158Smaybee ASSERT3U(*size, >=, delta); 11240e8c6158Smaybee atomic_add_64(size, -delta); 1125*7adb730bSGeorge Wilson mutex_exit(&hdr->b_state->arcs_mtx); 1126088f3894Sahrens /* remove the prefetch flag if we get a reference */ 1127*7adb730bSGeorge Wilson if (hdr->b_flags & ARC_FLAG_PREFETCH) 1128*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_PREFETCH; 1129fa9e4066Sahrens } 1130fa9e4066Sahrens } 1131fa9e4066Sahrens 1132fa9e4066Sahrens static int 1133*7adb730bSGeorge Wilson remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) 1134fa9e4066Sahrens { 1135fa9e4066Sahrens int cnt; 1136*7adb730bSGeorge Wilson arc_state_t *state = hdr->b_state; 1137fa9e4066Sahrens 113844cb6abcSbmc ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 113944cb6abcSbmc 
ASSERT(!GHOST_STATE(state)); 1140fa9e4066Sahrens 1141*7adb730bSGeorge Wilson if (((cnt = refcount_remove(&hdr->b_refcnt, tag)) == 0) && 114244cb6abcSbmc (state != arc_anon)) { 1143*7adb730bSGeorge Wilson uint64_t *size = &state->arcs_lsize[hdr->b_type]; 11440e8c6158Smaybee 114544cb6abcSbmc ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 114644cb6abcSbmc mutex_enter(&state->arcs_mtx); 1147*7adb730bSGeorge Wilson ASSERT(!list_link_active(&hdr->b_arc_node)); 1148*7adb730bSGeorge Wilson list_insert_head(&state->arcs_list[hdr->b_type], hdr); 1149*7adb730bSGeorge Wilson ASSERT(hdr->b_datacnt > 0); 1150*7adb730bSGeorge Wilson atomic_add_64(size, hdr->b_size * hdr->b_datacnt); 115144cb6abcSbmc mutex_exit(&state->arcs_mtx); 1152fa9e4066Sahrens } 1153fa9e4066Sahrens return (cnt); 1154fa9e4066Sahrens } 1155fa9e4066Sahrens 1156fa9e4066Sahrens /* 1157fa9e4066Sahrens * Move the supplied buffer to the indicated state. The mutex 1158fa9e4066Sahrens * for the buffer must be held by the caller. 1159fa9e4066Sahrens */ 1160fa9e4066Sahrens static void 1161*7adb730bSGeorge Wilson arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr, 1162*7adb730bSGeorge Wilson kmutex_t *hash_lock) 1163fa9e4066Sahrens { 1164*7adb730bSGeorge Wilson arc_state_t *old_state = hdr->b_state; 1165*7adb730bSGeorge Wilson int64_t refcnt = refcount_count(&hdr->b_refcnt); 1166c0a81264Sek uint64_t from_delta, to_delta; 1167fa9e4066Sahrens 1168fa9e4066Sahrens ASSERT(MUTEX_HELD(hash_lock)); 116969962b56SMatthew Ahrens ASSERT3P(new_state, !=, old_state); 1170*7adb730bSGeorge Wilson ASSERT(refcnt == 0 || hdr->b_datacnt > 0); 1171*7adb730bSGeorge Wilson ASSERT(hdr->b_datacnt == 0 || !GHOST_STATE(new_state)); 1172*7adb730bSGeorge Wilson ASSERT(hdr->b_datacnt <= 1 || old_state != arc_anon); 1173ea8dc4b6Seschrock 1174*7adb730bSGeorge Wilson from_delta = to_delta = hdr->b_datacnt * hdr->b_size; 1175fa9e4066Sahrens 1176fa9e4066Sahrens /* 1177fa9e4066Sahrens * If this buffer is evictable, transfer it from the 1178fa9e4066Sahrens * old state list to the new state list. 1179fa9e4066Sahrens */ 1180ea8dc4b6Seschrock if (refcnt == 0) { 118144cb6abcSbmc if (old_state != arc_anon) { 118244cb6abcSbmc int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); 1183*7adb730bSGeorge Wilson uint64_t *size = &old_state->arcs_lsize[hdr->b_type]; 1184ea8dc4b6Seschrock 1185ea8dc4b6Seschrock if (use_mutex) 118644cb6abcSbmc mutex_enter(&old_state->arcs_mtx); 1187fa9e4066Sahrens 1188*7adb730bSGeorge Wilson ASSERT(list_link_active(&hdr->b_arc_node)); 1189*7adb730bSGeorge Wilson list_remove(&old_state->arcs_list[hdr->b_type], hdr); 1190ea8dc4b6Seschrock 119113506d1eSmaybee /* 119213506d1eSmaybee * If prefetching out of the ghost cache, 11933f9d6ad7SLin Ling * we will have a non-zero datacnt. 
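         * A true ghost element (b_datacnt == 0) holds no buffers, so
         * the amount removed from the old list below is its logical
         * b_size rather than b_datacnt * b_size.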
119413506d1eSmaybee */ 1195*7adb730bSGeorge Wilson if (GHOST_STATE(old_state) && hdr->b_datacnt == 0) { 119613506d1eSmaybee /* ghost elements have a ghost size */ 1197*7adb730bSGeorge Wilson ASSERT(hdr->b_buf == NULL); 1198*7adb730bSGeorge Wilson from_delta = hdr->b_size; 1199ea8dc4b6Seschrock } 12000e8c6158Smaybee ASSERT3U(*size, >=, from_delta); 12010e8c6158Smaybee atomic_add_64(size, -from_delta); 1202ea8dc4b6Seschrock 1203ea8dc4b6Seschrock if (use_mutex) 120444cb6abcSbmc mutex_exit(&old_state->arcs_mtx); 1205fa9e4066Sahrens } 120644cb6abcSbmc if (new_state != arc_anon) { 120744cb6abcSbmc int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 1208*7adb730bSGeorge Wilson uint64_t *size = &new_state->arcs_lsize[hdr->b_type]; 1209fa9e4066Sahrens 1210ea8dc4b6Seschrock if (use_mutex) 121144cb6abcSbmc mutex_enter(&new_state->arcs_mtx); 1212ea8dc4b6Seschrock 1213*7adb730bSGeorge Wilson list_insert_head(&new_state->arcs_list[hdr->b_type], 1214*7adb730bSGeorge Wilson hdr); 1215ea8dc4b6Seschrock 1216ea8dc4b6Seschrock /* ghost elements have a ghost size */ 1217ea8dc4b6Seschrock if (GHOST_STATE(new_state)) { 1218*7adb730bSGeorge Wilson ASSERT(hdr->b_datacnt == 0); 1219*7adb730bSGeorge Wilson ASSERT(hdr->b_buf == NULL); 1220*7adb730bSGeorge Wilson to_delta = hdr->b_size; 1221ea8dc4b6Seschrock } 12220e8c6158Smaybee atomic_add_64(size, to_delta); 1223ea8dc4b6Seschrock 1224ea8dc4b6Seschrock if (use_mutex) 122544cb6abcSbmc mutex_exit(&new_state->arcs_mtx); 1226fa9e4066Sahrens } 1227fa9e4066Sahrens } 1228fa9e4066Sahrens 1229*7adb730bSGeorge Wilson ASSERT(!BUF_EMPTY(hdr)); 1230*7adb730bSGeorge Wilson if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr)) 1231*7adb730bSGeorge Wilson buf_hash_remove(hdr); 1232fa9e4066Sahrens 1233ea8dc4b6Seschrock /* adjust state sizes */ 1234ea8dc4b6Seschrock if (to_delta) 123544cb6abcSbmc atomic_add_64(&new_state->arcs_size, to_delta); 1236ea8dc4b6Seschrock if (from_delta) { 123744cb6abcSbmc ASSERT3U(old_state->arcs_size, >=, from_delta); 123844cb6abcSbmc atomic_add_64(&old_state->arcs_size, -from_delta); 1239fa9e4066Sahrens } 1240*7adb730bSGeorge Wilson hdr->b_state = new_state; 1241fa94a07fSbrendan 1242fa94a07fSbrendan /* adjust l2arc hdr stats */ 1243fa94a07fSbrendan if (new_state == arc_l2c_only) 1244fa94a07fSbrendan l2arc_hdr_stat_add(); 1245fa94a07fSbrendan else if (old_state == arc_l2c_only) 1246fa94a07fSbrendan l2arc_hdr_stat_remove(); 1247fa9e4066Sahrens } 1248fa9e4066Sahrens 12490e8c6158Smaybee void 12505a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(uint64_t space, arc_space_type_t type) 12510e8c6158Smaybee { 12525a98e54bSBrendan Gregg - Sun Microsystems ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 12535a98e54bSBrendan Gregg - Sun Microsystems 12545a98e54bSBrendan Gregg - Sun Microsystems switch (type) { 12555a98e54bSBrendan Gregg - Sun Microsystems case ARC_SPACE_DATA: 12565a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_data_size, space); 12575a98e54bSBrendan Gregg - Sun Microsystems break; 12585a98e54bSBrendan Gregg - Sun Microsystems case ARC_SPACE_OTHER: 12595a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_other_size, space); 12605a98e54bSBrendan Gregg - Sun Microsystems break; 12615a98e54bSBrendan Gregg - Sun Microsystems case ARC_SPACE_HDRS: 12625a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_hdr_size, space); 12635a98e54bSBrendan Gregg - Sun Microsystems break; 12645a98e54bSBrendan Gregg - Sun Microsystems case ARC_SPACE_L2HDRS: 12655a98e54bSBrendan Gregg - Sun Microsystems 
ARCSTAT_INCR(arcstat_l2_hdr_size, space); 12665a98e54bSBrendan Gregg - Sun Microsystems break; 12675a98e54bSBrendan Gregg - Sun Microsystems } 12685a98e54bSBrendan Gregg - Sun Microsystems 126920128a08SGeorge Wilson ARCSTAT_INCR(arcstat_meta_used, space); 12700e8c6158Smaybee atomic_add_64(&arc_size, space); 12710e8c6158Smaybee } 12720e8c6158Smaybee 12730e8c6158Smaybee void 12745a98e54bSBrendan Gregg - Sun Microsystems arc_space_return(uint64_t space, arc_space_type_t type) 12750e8c6158Smaybee { 12765a98e54bSBrendan Gregg - Sun Microsystems ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 12775a98e54bSBrendan Gregg - Sun Microsystems 12785a98e54bSBrendan Gregg - Sun Microsystems switch (type) { 12795a98e54bSBrendan Gregg - Sun Microsystems case ARC_SPACE_DATA: 12805a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_data_size, -space); 12815a98e54bSBrendan Gregg - Sun Microsystems break; 12825a98e54bSBrendan Gregg - Sun Microsystems case ARC_SPACE_OTHER: 12835a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_other_size, -space); 12845a98e54bSBrendan Gregg - Sun Microsystems break; 12855a98e54bSBrendan Gregg - Sun Microsystems case ARC_SPACE_HDRS: 12865a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_hdr_size, -space); 12875a98e54bSBrendan Gregg - Sun Microsystems break; 12885a98e54bSBrendan Gregg - Sun Microsystems case ARC_SPACE_L2HDRS: 12895a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_l2_hdr_size, -space); 12905a98e54bSBrendan Gregg - Sun Microsystems break; 12915a98e54bSBrendan Gregg - Sun Microsystems } 12925a98e54bSBrendan Gregg - Sun Microsystems 12930e8c6158Smaybee ASSERT(arc_meta_used >= space); 12940e8c6158Smaybee if (arc_meta_max < arc_meta_used) 12950e8c6158Smaybee arc_meta_max = arc_meta_used; 129620128a08SGeorge Wilson ARCSTAT_INCR(arcstat_meta_used, -space); 12970e8c6158Smaybee ASSERT(arc_size >= space); 12980e8c6158Smaybee atomic_add_64(&arc_size, -space); 12990e8c6158Smaybee } 13000e8c6158Smaybee 1301fa9e4066Sahrens arc_buf_t * 1302ad23a2dbSjohansen arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1303fa9e4066Sahrens { 1304fa9e4066Sahrens arc_buf_hdr_t *hdr; 1305fa9e4066Sahrens arc_buf_t *buf; 1306fa9e4066Sahrens 1307fa9e4066Sahrens ASSERT3U(size, >, 0); 13081ab7f2deSmaybee hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1309fa9e4066Sahrens ASSERT(BUF_EMPTY(hdr)); 1310fa9e4066Sahrens hdr->b_size = size; 1311ad23a2dbSjohansen hdr->b_type = type; 1312e9103aaeSGarrett D'Amore hdr->b_spa = spa_load_guid(spa); 131344cb6abcSbmc hdr->b_state = arc_anon; 1314fa9e4066Sahrens hdr->b_arc_access = 0; 13151ab7f2deSmaybee buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1316fa9e4066Sahrens buf->b_hdr = hdr; 131744eda4d7Smaybee buf->b_data = NULL; 1318ea8dc4b6Seschrock buf->b_efunc = NULL; 1319ea8dc4b6Seschrock buf->b_private = NULL; 1320fa9e4066Sahrens buf->b_next = NULL; 1321fa9e4066Sahrens hdr->b_buf = buf; 132244eda4d7Smaybee arc_get_data_buf(buf); 1323ea8dc4b6Seschrock hdr->b_datacnt = 1; 1324fa9e4066Sahrens hdr->b_flags = 0; 1325fa9e4066Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1326fa9e4066Sahrens (void) refcount_add(&hdr->b_refcnt, tag); 1327fa9e4066Sahrens 1328fa9e4066Sahrens return (buf); 1329fa9e4066Sahrens } 1330fa9e4066Sahrens 13312fdbea25SAleksandr Guzovskiy static char *arc_onloan_tag = "onloan"; 13322fdbea25SAleksandr Guzovskiy 13332fdbea25SAleksandr Guzovskiy /* 13342fdbea25SAleksandr Guzovskiy * Loan out an anonymous arc buffer. 
Loaned buffers are not counted as in 13352fdbea25SAleksandr Guzovskiy * flight data by arc_tempreserve_space() until they are "returned". Loaned 13362fdbea25SAleksandr Guzovskiy * buffers must be returned to the arc before they can be used by the DMU or 13372fdbea25SAleksandr Guzovskiy * freed. 13382fdbea25SAleksandr Guzovskiy */ 13392fdbea25SAleksandr Guzovskiy arc_buf_t * 13402fdbea25SAleksandr Guzovskiy arc_loan_buf(spa_t *spa, int size) 13412fdbea25SAleksandr Guzovskiy { 13422fdbea25SAleksandr Guzovskiy arc_buf_t *buf; 13432fdbea25SAleksandr Guzovskiy 13442fdbea25SAleksandr Guzovskiy buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA); 13452fdbea25SAleksandr Guzovskiy 13462fdbea25SAleksandr Guzovskiy atomic_add_64(&arc_loaned_bytes, size); 13472fdbea25SAleksandr Guzovskiy return (buf); 13482fdbea25SAleksandr Guzovskiy } 13492fdbea25SAleksandr Guzovskiy 13502fdbea25SAleksandr Guzovskiy /* 13512fdbea25SAleksandr Guzovskiy * Return a loaned arc buffer to the arc. 13522fdbea25SAleksandr Guzovskiy */ 13532fdbea25SAleksandr Guzovskiy void 13542fdbea25SAleksandr Guzovskiy arc_return_buf(arc_buf_t *buf, void *tag) 13552fdbea25SAleksandr Guzovskiy { 13562fdbea25SAleksandr Guzovskiy arc_buf_hdr_t *hdr = buf->b_hdr; 13572fdbea25SAleksandr Guzovskiy 13582fdbea25SAleksandr Guzovskiy ASSERT(buf->b_data != NULL); 1359c242f9a0Schunli zhang - Sun Microsystems - Irvine United States (void) refcount_add(&hdr->b_refcnt, tag); 1360c242f9a0Schunli zhang - Sun Microsystems - Irvine United States (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag); 13612fdbea25SAleksandr Guzovskiy 13622fdbea25SAleksandr Guzovskiy atomic_add_64(&arc_loaned_bytes, -hdr->b_size); 13632fdbea25SAleksandr Guzovskiy } 13642fdbea25SAleksandr Guzovskiy 1365c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /* Detach an arc_buf from a dbuf (tag) */ 1366c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 1367c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_loan_inuse_buf(arc_buf_t *buf, void *tag) 1368c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1369c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_hdr_t *hdr; 1370c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1371c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(buf->b_data != NULL); 1372c242f9a0Schunli zhang - Sun Microsystems - Irvine United States hdr = buf->b_hdr; 1373c242f9a0Schunli zhang - Sun Microsystems - Irvine United States (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag); 1374c242f9a0Schunli zhang - Sun Microsystems - Irvine United States (void) refcount_remove(&hdr->b_refcnt, tag); 1375c242f9a0Schunli zhang - Sun Microsystems - Irvine United States buf->b_efunc = NULL; 1376c242f9a0Schunli zhang - Sun Microsystems - Irvine United States buf->b_private = NULL; 1377c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1378c242f9a0Schunli zhang - Sun Microsystems - Irvine United States atomic_add_64(&arc_loaned_bytes, hdr->b_size); 1379c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1380c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 138144eda4d7Smaybee static arc_buf_t * 138244eda4d7Smaybee arc_buf_clone(arc_buf_t *from) 1383ea8dc4b6Seschrock { 138444eda4d7Smaybee arc_buf_t *buf; 138544eda4d7Smaybee arc_buf_hdr_t *hdr = from->b_hdr; 138644eda4d7Smaybee uint64_t size = hdr->b_size; 1387ea8dc4b6Seschrock 1388b24ab676SJeff Bonwick ASSERT(hdr->b_state != arc_anon); 1389b24ab676SJeff Bonwick 
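        /*
         * The clone shares the existing header (identity, checksum and
         * ARC state) but is given its own copy of the data below;
         * b_datacnt tracks how many arc_buf_t's reference this header.
         */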
13901ab7f2deSmaybee buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 139144eda4d7Smaybee buf->b_hdr = hdr; 139244eda4d7Smaybee buf->b_data = NULL; 139344eda4d7Smaybee buf->b_efunc = NULL; 139444eda4d7Smaybee buf->b_private = NULL; 139544eda4d7Smaybee buf->b_next = hdr->b_buf; 139644eda4d7Smaybee hdr->b_buf = buf; 139744eda4d7Smaybee arc_get_data_buf(buf); 139844eda4d7Smaybee bcopy(from->b_data, buf->b_data, size); 13999253d63dSGeorge Wilson 14009253d63dSGeorge Wilson /* 14019253d63dSGeorge Wilson * This buffer already exists in the arc so create a duplicate 14029253d63dSGeorge Wilson * copy for the caller. If the buffer is associated with user data 14039253d63dSGeorge Wilson * then track the size and number of duplicates. These stats will be 14049253d63dSGeorge Wilson * updated as duplicate buffers are created and destroyed. 14059253d63dSGeorge Wilson */ 14069253d63dSGeorge Wilson if (hdr->b_type == ARC_BUFC_DATA) { 14079253d63dSGeorge Wilson ARCSTAT_BUMP(arcstat_duplicate_buffers); 14089253d63dSGeorge Wilson ARCSTAT_INCR(arcstat_duplicate_buffers_size, size); 14099253d63dSGeorge Wilson } 141044eda4d7Smaybee hdr->b_datacnt += 1; 141144eda4d7Smaybee return (buf); 1412ea8dc4b6Seschrock } 1413ea8dc4b6Seschrock 1414ea8dc4b6Seschrock void 1415ea8dc4b6Seschrock arc_buf_add_ref(arc_buf_t *buf, void* tag) 1416ea8dc4b6Seschrock { 141740d7d650Smaybee arc_buf_hdr_t *hdr; 1418ea8dc4b6Seschrock kmutex_t *hash_lock; 1419ea8dc4b6Seschrock 14209b23f181Smaybee /* 14216f83844dSMark Maybee * Check to see if this buffer is evicted. Callers 14226f83844dSMark Maybee * must verify b_data != NULL to know if the add_ref 14236f83844dSMark Maybee * was successful. 14249b23f181Smaybee */ 14253f9d6ad7SLin Ling mutex_enter(&buf->b_evict_lock); 14266f83844dSMark Maybee if (buf->b_data == NULL) { 14273f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 14289b23f181Smaybee return; 142940d7d650Smaybee } 14303f9d6ad7SLin Ling hash_lock = HDR_LOCK(buf->b_hdr); 14319b23f181Smaybee mutex_enter(hash_lock); 14323f9d6ad7SLin Ling hdr = buf->b_hdr; 14333f9d6ad7SLin Ling ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 14343f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 1435ea8dc4b6Seschrock 143644cb6abcSbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1437ea8dc4b6Seschrock add_reference(hdr, hash_lock, tag); 14385a98e54bSBrendan Gregg - Sun Microsystems DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 143944eda4d7Smaybee arc_access(hdr, hash_lock); 144044eda4d7Smaybee mutex_exit(hash_lock); 144144cb6abcSbmc ARCSTAT_BUMP(arcstat_hits); 1442*7adb730bSGeorge Wilson ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_FLAG_PREFETCH), 144344cb6abcSbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 144444cb6abcSbmc data, metadata, hits); 1445ea8dc4b6Seschrock } 1446ea8dc4b6Seschrock 1447fa94a07fSbrendan /* 1448fa94a07fSbrendan * Free the arc data buffer. If it is an l2arc write in progress, 1449fa94a07fSbrendan * the buffer is placed on l2arc_free_on_write to be freed later. 
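 * The deferred entry records the data pointer, its size and the matching
 * free function (l2df_data, l2df_size, l2df_func) and is only freed once
 * the outstanding L2ARC write finishes, so the device never copies from
 * memory that has already been returned to the kmem caches.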
1450fa94a07fSbrendan */ 1451fa94a07fSbrendan static void 1452cd1c8b85SMatthew Ahrens arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t)) 1453fa94a07fSbrendan { 1454cd1c8b85SMatthew Ahrens arc_buf_hdr_t *hdr = buf->b_hdr; 1455cd1c8b85SMatthew Ahrens 1456fa94a07fSbrendan if (HDR_L2_WRITING(hdr)) { 1457fa94a07fSbrendan l2arc_data_free_t *df; 1458fa94a07fSbrendan df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 1459cd1c8b85SMatthew Ahrens df->l2df_data = buf->b_data; 1460cd1c8b85SMatthew Ahrens df->l2df_size = hdr->b_size; 1461fa94a07fSbrendan df->l2df_func = free_func; 1462fa94a07fSbrendan mutex_enter(&l2arc_free_on_write_mtx); 1463fa94a07fSbrendan list_insert_head(l2arc_free_on_write, df); 1464fa94a07fSbrendan mutex_exit(&l2arc_free_on_write_mtx); 1465fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_free_on_write); 1466fa94a07fSbrendan } else { 1467cd1c8b85SMatthew Ahrens free_func(buf->b_data, hdr->b_size); 1468fa94a07fSbrendan } 1469fa94a07fSbrendan } 1470fa94a07fSbrendan 1471bbfa8ea8SMatthew Ahrens /* 1472bbfa8ea8SMatthew Ahrens * Free up buf->b_data and if 'remove' is set, then pull the 1473bbfa8ea8SMatthew Ahrens * arc_buf_t off of the the arc_buf_hdr_t's list and free it. 1474bbfa8ea8SMatthew Ahrens */ 1475ea8dc4b6Seschrock static void 1476bbfa8ea8SMatthew Ahrens arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t remove) 1477ea8dc4b6Seschrock { 1478ea8dc4b6Seschrock arc_buf_t **bufp; 1479ea8dc4b6Seschrock 1480ea8dc4b6Seschrock /* free up data associated with the buf */ 1481ea8dc4b6Seschrock if (buf->b_data) { 1482ea8dc4b6Seschrock arc_state_t *state = buf->b_hdr->b_state; 1483ea8dc4b6Seschrock uint64_t size = buf->b_hdr->b_size; 1484ad23a2dbSjohansen arc_buf_contents_t type = buf->b_hdr->b_type; 1485ea8dc4b6Seschrock 14866b4acc8bSahrens arc_cksum_verify(buf); 1487cd1c8b85SMatthew Ahrens arc_buf_unwatch(buf); 1488b24ab676SJeff Bonwick 148944eda4d7Smaybee if (!recycle) { 1490ad23a2dbSjohansen if (type == ARC_BUFC_METADATA) { 1491cd1c8b85SMatthew Ahrens arc_buf_data_free(buf, zio_buf_free); 14925a98e54bSBrendan Gregg - Sun Microsystems arc_space_return(size, ARC_SPACE_DATA); 1493ad23a2dbSjohansen } else { 1494ad23a2dbSjohansen ASSERT(type == ARC_BUFC_DATA); 1495cd1c8b85SMatthew Ahrens arc_buf_data_free(buf, zio_data_buf_free); 14965a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_data_size, -size); 14970e8c6158Smaybee atomic_add_64(&arc_size, -size); 1498ad23a2dbSjohansen } 149944eda4d7Smaybee } 1500ea8dc4b6Seschrock if (list_link_active(&buf->b_hdr->b_arc_node)) { 15010e8c6158Smaybee uint64_t *cnt = &state->arcs_lsize[type]; 15020e8c6158Smaybee 1503ea8dc4b6Seschrock ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 150444cb6abcSbmc ASSERT(state != arc_anon); 15050e8c6158Smaybee 15060e8c6158Smaybee ASSERT3U(*cnt, >=, size); 15070e8c6158Smaybee atomic_add_64(cnt, -size); 1508ea8dc4b6Seschrock } 150944cb6abcSbmc ASSERT3U(state->arcs_size, >=, size); 151044cb6abcSbmc atomic_add_64(&state->arcs_size, -size); 1511ea8dc4b6Seschrock buf->b_data = NULL; 15129253d63dSGeorge Wilson 15139253d63dSGeorge Wilson /* 15149253d63dSGeorge Wilson * If we're destroying a duplicate buffer make sure 15159253d63dSGeorge Wilson * that the appropriate statistics are updated. 
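         * (This mirrors the increment in arc_buf_clone(): only
         * ARC_BUFC_DATA buffers whose header still has more than one
         * arc_buf_t attached count as duplicates.)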
15169253d63dSGeorge Wilson */ 15179253d63dSGeorge Wilson if (buf->b_hdr->b_datacnt > 1 && 15189253d63dSGeorge Wilson buf->b_hdr->b_type == ARC_BUFC_DATA) { 15199253d63dSGeorge Wilson ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); 15209253d63dSGeorge Wilson ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size); 15219253d63dSGeorge Wilson } 1522ea8dc4b6Seschrock ASSERT(buf->b_hdr->b_datacnt > 0); 1523ea8dc4b6Seschrock buf->b_hdr->b_datacnt -= 1; 1524ea8dc4b6Seschrock } 1525ea8dc4b6Seschrock 1526ea8dc4b6Seschrock /* only remove the buf if requested */ 1527bbfa8ea8SMatthew Ahrens if (!remove) 1528ea8dc4b6Seschrock return; 1529ea8dc4b6Seschrock 1530ea8dc4b6Seschrock /* remove the buf from the hdr list */ 1531ea8dc4b6Seschrock for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1532ea8dc4b6Seschrock continue; 1533ea8dc4b6Seschrock *bufp = buf->b_next; 15343f9d6ad7SLin Ling buf->b_next = NULL; 1535ea8dc4b6Seschrock 1536ea8dc4b6Seschrock ASSERT(buf->b_efunc == NULL); 1537ea8dc4b6Seschrock 1538ea8dc4b6Seschrock /* clean up the buf */ 1539ea8dc4b6Seschrock buf->b_hdr = NULL; 1540ea8dc4b6Seschrock kmem_cache_free(buf_cache, buf); 1541ea8dc4b6Seschrock } 1542ea8dc4b6Seschrock 1543fa9e4066Sahrens static void 1544ea8dc4b6Seschrock arc_hdr_destroy(arc_buf_hdr_t *hdr) 1545fa9e4066Sahrens { 1546fa9e4066Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt)); 154744cb6abcSbmc ASSERT3P(hdr->b_state, ==, arc_anon); 1548ea8dc4b6Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1549b24ab676SJeff Bonwick l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr; 1550fa9e4066Sahrens 1551b24ab676SJeff Bonwick if (l2hdr != NULL) { 1552b24ab676SJeff Bonwick boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx); 1553b24ab676SJeff Bonwick /* 1554b24ab676SJeff Bonwick * To prevent arc_free() and l2arc_evict() from 1555b24ab676SJeff Bonwick * attempting to free the same buffer at the same time, 1556b24ab676SJeff Bonwick * a FREE_IN_PROGRESS flag is given to arc_free() to 1557b24ab676SJeff Bonwick * give it priority. l2arc_evict() can't destroy this 1558b24ab676SJeff Bonwick * header while we are waiting on l2arc_buflist_mtx. 1559b24ab676SJeff Bonwick * 1560b24ab676SJeff Bonwick * The hdr may be removed from l2ad_buflist before we 1561b24ab676SJeff Bonwick * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 
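         * This is a check, lock, re-check pattern: the first read of
         * b_l2hdr is only a hint, and the value reloaded while holding
         * l2arc_buflist_mtx is the one acted upon.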
1562b24ab676SJeff Bonwick */ 1563b24ab676SJeff Bonwick if (!buflist_held) { 1564fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 1565b24ab676SJeff Bonwick l2hdr = hdr->b_l2hdr; 1566fa94a07fSbrendan } 1567b24ab676SJeff Bonwick 1568b24ab676SJeff Bonwick if (l2hdr != NULL) { 1569b24ab676SJeff Bonwick list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 1570b24ab676SJeff Bonwick ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1571aad02571SSaso Kiselkov ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize); 15723038a2b4SSaso Kiselkov vdev_space_update(l2hdr->b_dev->l2ad_vdev, 15733038a2b4SSaso Kiselkov -l2hdr->b_asize, 0, 0); 1574b24ab676SJeff Bonwick kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 1575b24ab676SJeff Bonwick if (hdr->b_state == arc_l2c_only) 1576b24ab676SJeff Bonwick l2arc_hdr_stat_remove(); 1577b24ab676SJeff Bonwick hdr->b_l2hdr = NULL; 1578b24ab676SJeff Bonwick } 1579b24ab676SJeff Bonwick 1580b24ab676SJeff Bonwick if (!buflist_held) 1581b24ab676SJeff Bonwick mutex_exit(&l2arc_buflist_mtx); 1582fa94a07fSbrendan } 1583fa94a07fSbrendan 1584fa9e4066Sahrens if (!BUF_EMPTY(hdr)) { 1585ea8dc4b6Seschrock ASSERT(!HDR_IN_HASH_TABLE(hdr)); 15863f9d6ad7SLin Ling buf_discard_identity(hdr); 1587fa9e4066Sahrens } 1588ea8dc4b6Seschrock while (hdr->b_buf) { 1589fa9e4066Sahrens arc_buf_t *buf = hdr->b_buf; 1590fa9e4066Sahrens 1591ea8dc4b6Seschrock if (buf->b_efunc) { 1592ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 15933f9d6ad7SLin Ling mutex_enter(&buf->b_evict_lock); 1594ea8dc4b6Seschrock ASSERT(buf->b_hdr != NULL); 159544eda4d7Smaybee arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1596ea8dc4b6Seschrock hdr->b_buf = buf->b_next; 159740d7d650Smaybee buf->b_hdr = &arc_eviction_hdr; 1598ea8dc4b6Seschrock buf->b_next = arc_eviction_list; 1599ea8dc4b6Seschrock arc_eviction_list = buf; 16003f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 1601ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 1602ea8dc4b6Seschrock } else { 160344eda4d7Smaybee arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1604ea8dc4b6Seschrock } 1605fa9e4066Sahrens } 16066b4acc8bSahrens if (hdr->b_freeze_cksum != NULL) { 16076b4acc8bSahrens kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 16086b4acc8bSahrens hdr->b_freeze_cksum = NULL; 16096b4acc8bSahrens } 16103f9d6ad7SLin Ling if (hdr->b_thawed) { 16113f9d6ad7SLin Ling kmem_free(hdr->b_thawed, 1); 16123f9d6ad7SLin Ling hdr->b_thawed = NULL; 16133f9d6ad7SLin Ling } 1614ea8dc4b6Seschrock 1615fa9e4066Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 1616fa9e4066Sahrens ASSERT3P(hdr->b_hash_next, ==, NULL); 1617fa9e4066Sahrens ASSERT3P(hdr->b_acb, ==, NULL); 1618fa9e4066Sahrens kmem_cache_free(hdr_cache, hdr); 1619fa9e4066Sahrens } 1620fa9e4066Sahrens 1621fa9e4066Sahrens void 1622fa9e4066Sahrens arc_buf_free(arc_buf_t *buf, void *tag) 1623fa9e4066Sahrens { 1624fa9e4066Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 162544cb6abcSbmc int hashed = hdr->b_state != arc_anon; 1626fa9e4066Sahrens 1627ea8dc4b6Seschrock ASSERT(buf->b_efunc == NULL); 1628ea8dc4b6Seschrock ASSERT(buf->b_data != NULL); 1629ea8dc4b6Seschrock 1630ea8dc4b6Seschrock if (hashed) { 1631ea8dc4b6Seschrock kmutex_t *hash_lock = HDR_LOCK(hdr); 1632ea8dc4b6Seschrock 1633ea8dc4b6Seschrock mutex_enter(hash_lock); 16343f9d6ad7SLin Ling hdr = buf->b_hdr; 16353f9d6ad7SLin Ling ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 16363f9d6ad7SLin Ling 1637ea8dc4b6Seschrock (void) remove_reference(hdr, hash_lock, tag); 1638b24ab676SJeff Bonwick if (hdr->b_datacnt > 1) { 163944eda4d7Smaybee arc_buf_destroy(buf, FALSE, TRUE); 1640b24ab676SJeff Bonwick } else { 
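            /*
             * Last reference on a hashed header with a single buffer:
             * keep the data cached and mark it BUF_AVAILABLE so that a
             * subsequent hash-table hit can hand this arc_buf_t out
             * again instead of cloning a new one.
             */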
1641b24ab676SJeff Bonwick ASSERT(buf == hdr->b_buf); 1642b24ab676SJeff Bonwick ASSERT(buf->b_efunc == NULL); 1643*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; 1644b24ab676SJeff Bonwick } 1645fa9e4066Sahrens mutex_exit(hash_lock); 1646ea8dc4b6Seschrock } else if (HDR_IO_IN_PROGRESS(hdr)) { 1647ea8dc4b6Seschrock int destroy_hdr; 1648ea8dc4b6Seschrock /* 1649ea8dc4b6Seschrock * We are in the middle of an async write. Don't destroy 1650ea8dc4b6Seschrock * this buffer unless the write completes before we finish 1651ea8dc4b6Seschrock * decrementing the reference count. 1652ea8dc4b6Seschrock */ 1653ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 1654ea8dc4b6Seschrock (void) remove_reference(hdr, NULL, tag); 1655ea8dc4b6Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1656ea8dc4b6Seschrock destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1657ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 1658ea8dc4b6Seschrock if (destroy_hdr) 1659ea8dc4b6Seschrock arc_hdr_destroy(hdr); 1660ea8dc4b6Seschrock } else { 16613f9d6ad7SLin Ling if (remove_reference(hdr, NULL, tag) > 0) 166244eda4d7Smaybee arc_buf_destroy(buf, FALSE, TRUE); 16633f9d6ad7SLin Ling else 1664ea8dc4b6Seschrock arc_hdr_destroy(hdr); 1665fa9e4066Sahrens } 1666ea8dc4b6Seschrock } 1667fa9e4066Sahrens 16683b2aab18SMatthew Ahrens boolean_t 1669ea8dc4b6Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1670ea8dc4b6Seschrock { 1671ea8dc4b6Seschrock arc_buf_hdr_t *hdr = buf->b_hdr; 1672ea8dc4b6Seschrock kmutex_t *hash_lock = HDR_LOCK(hdr); 16733b2aab18SMatthew Ahrens boolean_t no_callback = (buf->b_efunc == NULL); 1674fa9e4066Sahrens 167544cb6abcSbmc if (hdr->b_state == arc_anon) { 1676b24ab676SJeff Bonwick ASSERT(hdr->b_datacnt == 1); 1677ea8dc4b6Seschrock arc_buf_free(buf, tag); 1678ea8dc4b6Seschrock return (no_callback); 1679ea8dc4b6Seschrock } 1680ea8dc4b6Seschrock 1681ea8dc4b6Seschrock mutex_enter(hash_lock); 16823f9d6ad7SLin Ling hdr = buf->b_hdr; 16833f9d6ad7SLin Ling ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 168444cb6abcSbmc ASSERT(hdr->b_state != arc_anon); 1685ea8dc4b6Seschrock ASSERT(buf->b_data != NULL); 1686ea8dc4b6Seschrock 1687ea8dc4b6Seschrock (void) remove_reference(hdr, hash_lock, tag); 1688ea8dc4b6Seschrock if (hdr->b_datacnt > 1) { 1689ea8dc4b6Seschrock if (no_callback) 169044eda4d7Smaybee arc_buf_destroy(buf, FALSE, TRUE); 1691ea8dc4b6Seschrock } else if (no_callback) { 1692ea8dc4b6Seschrock ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1693b24ab676SJeff Bonwick ASSERT(buf->b_efunc == NULL); 1694*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; 1695ea8dc4b6Seschrock } 1696ea8dc4b6Seschrock ASSERT(no_callback || hdr->b_datacnt > 1 || 1697ea8dc4b6Seschrock refcount_is_zero(&hdr->b_refcnt)); 1698ea8dc4b6Seschrock mutex_exit(hash_lock); 1699ea8dc4b6Seschrock return (no_callback); 1700fa9e4066Sahrens } 1701fa9e4066Sahrens 1702fa9e4066Sahrens int 1703fa9e4066Sahrens arc_buf_size(arc_buf_t *buf) 1704fa9e4066Sahrens { 1705fa9e4066Sahrens return (buf->b_hdr->b_size); 1706fa9e4066Sahrens } 1707fa9e4066Sahrens 17089253d63dSGeorge Wilson /* 17099253d63dSGeorge Wilson * Called from the DMU to determine if the current buffer should be 17109253d63dSGeorge Wilson * evicted. In order to ensure proper locking, the eviction must be initiated 17119253d63dSGeorge Wilson * from the DMU. Return true if the buffer is associated with user data and 17129253d63dSGeorge Wilson * duplicate buffers still exist. 
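 * (If the zfs_disable_dup_eviction tunable is set this always returns
 * B_FALSE and duplicates are left to ordinary ARC eviction.)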
17139253d63dSGeorge Wilson */ 17149253d63dSGeorge Wilson boolean_t 17159253d63dSGeorge Wilson arc_buf_eviction_needed(arc_buf_t *buf) 17169253d63dSGeorge Wilson { 17179253d63dSGeorge Wilson arc_buf_hdr_t *hdr; 17189253d63dSGeorge Wilson boolean_t evict_needed = B_FALSE; 17199253d63dSGeorge Wilson 17209253d63dSGeorge Wilson if (zfs_disable_dup_eviction) 17219253d63dSGeorge Wilson return (B_FALSE); 17229253d63dSGeorge Wilson 17239253d63dSGeorge Wilson mutex_enter(&buf->b_evict_lock); 17249253d63dSGeorge Wilson hdr = buf->b_hdr; 17259253d63dSGeorge Wilson if (hdr == NULL) { 17269253d63dSGeorge Wilson /* 17279253d63dSGeorge Wilson * We are in arc_do_user_evicts(); let that function 17289253d63dSGeorge Wilson * perform the eviction. 17299253d63dSGeorge Wilson */ 17309253d63dSGeorge Wilson ASSERT(buf->b_data == NULL); 17319253d63dSGeorge Wilson mutex_exit(&buf->b_evict_lock); 17329253d63dSGeorge Wilson return (B_FALSE); 17339253d63dSGeorge Wilson } else if (buf->b_data == NULL) { 17349253d63dSGeorge Wilson /* 17359253d63dSGeorge Wilson * We have already been added to the arc eviction list; 17369253d63dSGeorge Wilson * recommend eviction. 17379253d63dSGeorge Wilson */ 17389253d63dSGeorge Wilson ASSERT3P(hdr, ==, &arc_eviction_hdr); 17399253d63dSGeorge Wilson mutex_exit(&buf->b_evict_lock); 17409253d63dSGeorge Wilson return (B_TRUE); 17419253d63dSGeorge Wilson } 17429253d63dSGeorge Wilson 17439253d63dSGeorge Wilson if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA) 17449253d63dSGeorge Wilson evict_needed = B_TRUE; 17459253d63dSGeorge Wilson 17469253d63dSGeorge Wilson mutex_exit(&buf->b_evict_lock); 17479253d63dSGeorge Wilson return (evict_needed); 17489253d63dSGeorge Wilson } 17499253d63dSGeorge Wilson 1750fa9e4066Sahrens /* 1751fa9e4066Sahrens * Evict buffers from list until we've removed the specified number of 1752fa9e4066Sahrens * bytes. Move the removed buffers to the appropriate evict state. 175344eda4d7Smaybee * If the recycle flag is set, then attempt to "recycle" a buffer: 175444eda4d7Smaybee * - look for a buffer to evict that is `bytes' long. 175544eda4d7Smaybee * - return the data block from this buffer rather than freeing it. 175644eda4d7Smaybee * This flag is used by callers that are trying to make space for a 175744eda4d7Smaybee * new buffer in a full arc cache. 1758874395d5Smaybee * 1759874395d5Smaybee * This function makes a "best effort". It skips over any buffers 1760874395d5Smaybee * it can't get a hash_lock on, and so may not catch all candidates. 1761874395d5Smaybee * It may also return without evicting as much space as requested. 1762fa9e4066Sahrens */ 176344eda4d7Smaybee static void * 1764ac05c741SMark Maybee arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, 1765ad23a2dbSjohansen arc_buf_contents_t type) 1766fa9e4066Sahrens { 1767fa9e4066Sahrens arc_state_t *evicted_state; 176844eda4d7Smaybee uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1769*7adb730bSGeorge Wilson arc_buf_hdr_t *hdr, *hdr_prev = NULL; 1770fa9e4066Sahrens kmutex_t *hash_lock; 177144eda4d7Smaybee boolean_t have_lock; 17723fa51506Smaybee void *stolen = NULL; 177369962b56SMatthew Ahrens arc_buf_hdr_t marker = { 0 }; 177469962b56SMatthew Ahrens int count = 0; 1775fa9e4066Sahrens 177644cb6abcSbmc ASSERT(state == arc_mru || state == arc_mfu); 1777fa9e4066Sahrens 177844cb6abcSbmc evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1779fa9e4066Sahrens 178044cb6abcSbmc mutex_enter(&state->arcs_mtx); 178144cb6abcSbmc mutex_enter(&evicted_state->arcs_mtx); 1782fa9e4066Sahrens 17833a5286a1SMatthew Ahrens /* 17843a5286a1SMatthew Ahrens * Decide which "type" (data vs metadata) to recycle from. 17853a5286a1SMatthew Ahrens * 17863a5286a1SMatthew Ahrens * If we are over the metadata limit, recycle from metadata. 17873a5286a1SMatthew Ahrens * If we are under the metadata minimum, recycle from data. 17883a5286a1SMatthew Ahrens * Otherwise, recycle from whichever type has the oldest (least 17893a5286a1SMatthew Ahrens * recently accessed) header. 17903a5286a1SMatthew Ahrens */ 17913a5286a1SMatthew Ahrens if (recycle) { 17923a5286a1SMatthew Ahrens arc_buf_hdr_t *data_hdr = 17933a5286a1SMatthew Ahrens list_tail(&state->arcs_list[ARC_BUFC_DATA]); 17943a5286a1SMatthew Ahrens arc_buf_hdr_t *metadata_hdr = 17953a5286a1SMatthew Ahrens list_tail(&state->arcs_list[ARC_BUFC_METADATA]); 17963a5286a1SMatthew Ahrens arc_buf_contents_t realtype; 17973a5286a1SMatthew Ahrens if (data_hdr == NULL) { 17983a5286a1SMatthew Ahrens realtype = ARC_BUFC_METADATA; 17993a5286a1SMatthew Ahrens } else if (metadata_hdr == NULL) { 18003a5286a1SMatthew Ahrens realtype = ARC_BUFC_DATA; 18013a5286a1SMatthew Ahrens } else if (arc_meta_used >= arc_meta_limit) { 18023a5286a1SMatthew Ahrens realtype = ARC_BUFC_METADATA; 18033a5286a1SMatthew Ahrens } else if (arc_meta_used <= arc_meta_min) { 18043a5286a1SMatthew Ahrens realtype = ARC_BUFC_DATA; 18053a5286a1SMatthew Ahrens } else { 18063a5286a1SMatthew Ahrens if (data_hdr->b_arc_access < 18073a5286a1SMatthew Ahrens metadata_hdr->b_arc_access) { 18083a5286a1SMatthew Ahrens realtype = ARC_BUFC_DATA; 18093a5286a1SMatthew Ahrens } else { 18103a5286a1SMatthew Ahrens realtype = ARC_BUFC_METADATA; 18113a5286a1SMatthew Ahrens } 18123a5286a1SMatthew Ahrens } 18133a5286a1SMatthew Ahrens if (realtype != type) { 18143a5286a1SMatthew Ahrens /* 18153a5286a1SMatthew Ahrens * If we want to evict from a different list, 18163a5286a1SMatthew Ahrens * we can not recycle, because DATA vs METADATA 18173a5286a1SMatthew Ahrens * buffers are segregated into different kmem 18183a5286a1SMatthew Ahrens * caches (and vmem arenas). 
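             * Recycling only pays off when the victim's data block can
             * be handed straight back to the caller, which requires an
             * exact size match from the same cache; otherwise fall back
             * to a normal eviction.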
18193a5286a1SMatthew Ahrens */ 18203a5286a1SMatthew Ahrens type = realtype; 18213a5286a1SMatthew Ahrens recycle = B_FALSE; 18223a5286a1SMatthew Ahrens } 18233a5286a1SMatthew Ahrens } 18243a5286a1SMatthew Ahrens 18253a5286a1SMatthew Ahrens list_t *list = &state->arcs_list[type]; 18263a5286a1SMatthew Ahrens 1827*7adb730bSGeorge Wilson for (hdr = list_tail(list); hdr; hdr = hdr_prev) { 1828*7adb730bSGeorge Wilson hdr_prev = list_prev(list, hdr); 182913506d1eSmaybee /* prefetch buffers have a minimum lifespan */ 1830*7adb730bSGeorge Wilson if (HDR_IO_IN_PROGRESS(hdr) || 1831*7adb730bSGeorge Wilson (spa && hdr->b_spa != spa) || 1832*7adb730bSGeorge Wilson (hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT) && 1833*7adb730bSGeorge Wilson ddi_get_lbolt() - hdr->b_arc_access < 1834d3d50737SRafael Vanoni arc_min_prefetch_lifespan)) { 183513506d1eSmaybee skipped++; 183613506d1eSmaybee continue; 183713506d1eSmaybee } 18383fa51506Smaybee /* "lookahead" for better eviction candidate */ 1839*7adb730bSGeorge Wilson if (recycle && hdr->b_size != bytes && 1840*7adb730bSGeorge Wilson hdr_prev && hdr_prev->b_size == bytes) 184144eda4d7Smaybee continue; 184269962b56SMatthew Ahrens 184369962b56SMatthew Ahrens /* ignore markers */ 1844*7adb730bSGeorge Wilson if (hdr->b_spa == 0) 184569962b56SMatthew Ahrens continue; 184669962b56SMatthew Ahrens 184769962b56SMatthew Ahrens /* 184869962b56SMatthew Ahrens * It may take a long time to evict all the bufs requested. 184969962b56SMatthew Ahrens * To avoid blocking all arc activity, periodically drop 185069962b56SMatthew Ahrens * the arcs_mtx and give other threads a chance to run 185169962b56SMatthew Ahrens * before reacquiring the lock. 185269962b56SMatthew Ahrens * 185369962b56SMatthew Ahrens * If we are looking for a buffer to recycle, we are in 185469962b56SMatthew Ahrens * the hot code path, so don't sleep. 
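         * The marker is a zeroed header (b_spa == 0); list walkers,
         * including this one, skip such entries, so it safely records
         * our position while arcs_mtx is dropped.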
185569962b56SMatthew Ahrens */ 185669962b56SMatthew Ahrens if (!recycle && count++ > arc_evict_iterations) { 1857*7adb730bSGeorge Wilson list_insert_after(list, hdr, &marker); 185869962b56SMatthew Ahrens mutex_exit(&evicted_state->arcs_mtx); 185969962b56SMatthew Ahrens mutex_exit(&state->arcs_mtx); 186069962b56SMatthew Ahrens kpreempt(KPREEMPT_SYNC); 186169962b56SMatthew Ahrens mutex_enter(&state->arcs_mtx); 186269962b56SMatthew Ahrens mutex_enter(&evicted_state->arcs_mtx); 1863*7adb730bSGeorge Wilson hdr_prev = list_prev(list, &marker); 186469962b56SMatthew Ahrens list_remove(list, &marker); 186569962b56SMatthew Ahrens count = 0; 186669962b56SMatthew Ahrens continue; 186769962b56SMatthew Ahrens } 186869962b56SMatthew Ahrens 1869*7adb730bSGeorge Wilson hash_lock = HDR_LOCK(hdr); 187044eda4d7Smaybee have_lock = MUTEX_HELD(hash_lock); 187144eda4d7Smaybee if (have_lock || mutex_tryenter(hash_lock)) { 1872*7adb730bSGeorge Wilson ASSERT0(refcount_count(&hdr->b_refcnt)); 1873*7adb730bSGeorge Wilson ASSERT(hdr->b_datacnt > 0); 1874*7adb730bSGeorge Wilson while (hdr->b_buf) { 1875*7adb730bSGeorge Wilson arc_buf_t *buf = hdr->b_buf; 18763f9d6ad7SLin Ling if (!mutex_tryenter(&buf->b_evict_lock)) { 18776f83844dSMark Maybee missed += 1; 18786f83844dSMark Maybee break; 18796f83844dSMark Maybee } 188044eda4d7Smaybee if (buf->b_data) { 1881*7adb730bSGeorge Wilson bytes_evicted += hdr->b_size; 1882*7adb730bSGeorge Wilson if (recycle && hdr->b_type == type && 1883*7adb730bSGeorge Wilson hdr->b_size == bytes && 1884*7adb730bSGeorge Wilson !HDR_L2_WRITING(hdr)) { 18853fa51506Smaybee stolen = buf->b_data; 18863fa51506Smaybee recycle = FALSE; 18873fa51506Smaybee } 188844eda4d7Smaybee } 1889ea8dc4b6Seschrock if (buf->b_efunc) { 1890ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 18913fa51506Smaybee arc_buf_destroy(buf, 18923fa51506Smaybee buf->b_data == stolen, FALSE); 1893*7adb730bSGeorge Wilson hdr->b_buf = buf->b_next; 189440d7d650Smaybee buf->b_hdr = &arc_eviction_hdr; 1895ea8dc4b6Seschrock buf->b_next = arc_eviction_list; 1896ea8dc4b6Seschrock arc_eviction_list = buf; 1897ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 18983f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 1899ea8dc4b6Seschrock } else { 19003f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 19013fa51506Smaybee arc_buf_destroy(buf, 19023fa51506Smaybee buf->b_data == stolen, TRUE); 1903ea8dc4b6Seschrock } 1904ea8dc4b6Seschrock } 19055ea40c06SBrendan Gregg - Sun Microsystems 1906*7adb730bSGeorge Wilson if (hdr->b_l2hdr) { 19075ea40c06SBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_evict_l2_cached, 1908*7adb730bSGeorge Wilson hdr->b_size); 19095ea40c06SBrendan Gregg - Sun Microsystems } else { 1910*7adb730bSGeorge Wilson if (l2arc_write_eligible(hdr->b_spa, hdr)) { 19115ea40c06SBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_evict_l2_eligible, 1912*7adb730bSGeorge Wilson hdr->b_size); 19135ea40c06SBrendan Gregg - Sun Microsystems } else { 19145ea40c06SBrendan Gregg - Sun Microsystems ARCSTAT_INCR( 19155ea40c06SBrendan Gregg - Sun Microsystems arcstat_evict_l2_ineligible, 1916*7adb730bSGeorge Wilson hdr->b_size); 19175ea40c06SBrendan Gregg - Sun Microsystems } 19185ea40c06SBrendan Gregg - Sun Microsystems } 19195ea40c06SBrendan Gregg - Sun Microsystems 1920*7adb730bSGeorge Wilson if (hdr->b_datacnt == 0) { 1921*7adb730bSGeorge Wilson arc_change_state(evicted_state, hdr, hash_lock); 1922*7adb730bSGeorge Wilson ASSERT(HDR_IN_HASH_TABLE(hdr)); 1923*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE; 1924*7adb730bSGeorge 
Wilson hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE; 1925*7adb730bSGeorge Wilson DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr); 19266f83844dSMark Maybee } 192744eda4d7Smaybee if (!have_lock) 192844eda4d7Smaybee mutex_exit(hash_lock); 1929ea8dc4b6Seschrock if (bytes >= 0 && bytes_evicted >= bytes) 1930fa9e4066Sahrens break; 1931fa9e4066Sahrens } else { 193244eda4d7Smaybee missed += 1; 1933fa9e4066Sahrens } 1934fa9e4066Sahrens } 193544cb6abcSbmc 193644cb6abcSbmc mutex_exit(&evicted_state->arcs_mtx); 193744cb6abcSbmc mutex_exit(&state->arcs_mtx); 1938fa9e4066Sahrens 1939fa9e4066Sahrens if (bytes_evicted < bytes) 1940fa9e4066Sahrens dprintf("only evicted %lld bytes from %x", 1941fa9e4066Sahrens (longlong_t)bytes_evicted, state); 1942fa9e4066Sahrens 194344eda4d7Smaybee if (skipped) 194444cb6abcSbmc ARCSTAT_INCR(arcstat_evict_skip, skipped); 194544cb6abcSbmc 194644eda4d7Smaybee if (missed) 194744cb6abcSbmc ARCSTAT_INCR(arcstat_mutex_miss, missed); 1948f4d2e9e6Smaybee 1949f4d2e9e6Smaybee /* 195069962b56SMatthew Ahrens * Note: we have just evicted some data into the ghost state, 195169962b56SMatthew Ahrens * potentially putting the ghost size over the desired size. Rather 195269962b56SMatthew Ahrens * that evicting from the ghost list in this hot code path, leave 195369962b56SMatthew Ahrens * this chore to the arc_reclaim_thread(). 1954f4d2e9e6Smaybee */ 195544cb6abcSbmc 19563fa51506Smaybee return (stolen); 1957fa9e4066Sahrens } 1958fa9e4066Sahrens 1959fa9e4066Sahrens /* 1960fa9e4066Sahrens * Remove buffers from list until we've removed the specified number of 1961fa9e4066Sahrens * bytes. Destroy the buffers that are removed. 1962fa9e4066Sahrens */ 1963fa9e4066Sahrens static void 1964ac05c741SMark Maybee arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes) 1965fa9e4066Sahrens { 1966*7adb730bSGeorge Wilson arc_buf_hdr_t *hdr, *hdr_prev; 1967b802aa8cSSanjeev Bagewadi arc_buf_hdr_t marker = { 0 }; 19680e8c6158Smaybee list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1969fa9e4066Sahrens kmutex_t *hash_lock; 1970ea8dc4b6Seschrock uint64_t bytes_deleted = 0; 1971c0a81264Sek uint64_t bufs_skipped = 0; 197269962b56SMatthew Ahrens int count = 0; 1973fa9e4066Sahrens 1974ea8dc4b6Seschrock ASSERT(GHOST_STATE(state)); 1975fa9e4066Sahrens top: 197644cb6abcSbmc mutex_enter(&state->arcs_mtx); 1977*7adb730bSGeorge Wilson for (hdr = list_tail(list); hdr; hdr = hdr_prev) { 1978*7adb730bSGeorge Wilson hdr_prev = list_prev(list, hdr); 1979*7adb730bSGeorge Wilson if (hdr->b_type > ARC_BUFC_NUMTYPES) 1980*7adb730bSGeorge Wilson panic("invalid hdr=%p", (void *)hdr); 1981*7adb730bSGeorge Wilson if (spa && hdr->b_spa != spa) 1982874395d5Smaybee continue; 1983b802aa8cSSanjeev Bagewadi 1984b802aa8cSSanjeev Bagewadi /* ignore markers */ 1985*7adb730bSGeorge Wilson if (hdr->b_spa == 0) 1986b802aa8cSSanjeev Bagewadi continue; 1987b802aa8cSSanjeev Bagewadi 1988*7adb730bSGeorge Wilson hash_lock = HDR_LOCK(hdr); 19897e453561SWilliam Gorrell /* caller may be trying to modify this buffer, skip it */ 19907e453561SWilliam Gorrell if (MUTEX_HELD(hash_lock)) 19917e453561SWilliam Gorrell continue; 199269962b56SMatthew Ahrens 199369962b56SMatthew Ahrens /* 199469962b56SMatthew Ahrens * It may take a long time to evict all the bufs requested. 199569962b56SMatthew Ahrens * To avoid blocking all arc activity, periodically drop 199669962b56SMatthew Ahrens * the arcs_mtx and give other threads a chance to run 199769962b56SMatthew Ahrens * before reacquiring the lock. 
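         * (This is the same zeroed-header marker technique used in
         * arc_evict(); markers are skipped by the b_spa == 0 check at
         * the top of this loop.)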
199869962b56SMatthew Ahrens */ 199969962b56SMatthew Ahrens if (count++ > arc_evict_iterations) { 2000*7adb730bSGeorge Wilson list_insert_after(list, hdr, &marker); 200169962b56SMatthew Ahrens mutex_exit(&state->arcs_mtx); 200269962b56SMatthew Ahrens kpreempt(KPREEMPT_SYNC); 200369962b56SMatthew Ahrens mutex_enter(&state->arcs_mtx); 2004*7adb730bSGeorge Wilson hdr_prev = list_prev(list, &marker); 200569962b56SMatthew Ahrens list_remove(list, &marker); 200669962b56SMatthew Ahrens count = 0; 200769962b56SMatthew Ahrens continue; 200869962b56SMatthew Ahrens } 20097e453561SWilliam Gorrell if (mutex_tryenter(hash_lock)) { 2010*7adb730bSGeorge Wilson ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2011*7adb730bSGeorge Wilson ASSERT(hdr->b_buf == NULL); 201244cb6abcSbmc ARCSTAT_BUMP(arcstat_deleted); 2013*7adb730bSGeorge Wilson bytes_deleted += hdr->b_size; 2014fa94a07fSbrendan 2015*7adb730bSGeorge Wilson if (hdr->b_l2hdr != NULL) { 2016fa94a07fSbrendan /* 2017fa94a07fSbrendan * This buffer is cached on the 2nd Level ARC; 2018fa94a07fSbrendan * don't destroy the header. 2019fa94a07fSbrendan */ 2020*7adb730bSGeorge Wilson arc_change_state(arc_l2c_only, hdr, hash_lock); 20217e453561SWilliam Gorrell mutex_exit(hash_lock); 2022fa94a07fSbrendan } else { 2023*7adb730bSGeorge Wilson arc_change_state(arc_anon, hdr, hash_lock); 20247e453561SWilliam Gorrell mutex_exit(hash_lock); 2025*7adb730bSGeorge Wilson arc_hdr_destroy(hdr); 2026fa94a07fSbrendan } 2027fa94a07fSbrendan 2028*7adb730bSGeorge Wilson DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr); 2029fa9e4066Sahrens if (bytes >= 0 && bytes_deleted >= bytes) 2030fa9e4066Sahrens break; 2031b802aa8cSSanjeev Bagewadi } else if (bytes < 0) { 2032b802aa8cSSanjeev Bagewadi /* 2033b802aa8cSSanjeev Bagewadi * Insert a list marker and then wait for the 2034b802aa8cSSanjeev Bagewadi * hash lock to become available. Once its 2035b802aa8cSSanjeev Bagewadi * available, restart from where we left off. 
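             * A negative 'bytes' (as passed by arc_flush()) means
             * delete everything, so a busy header cannot simply be
             * skipped: block on its hash lock (enter, then immediately
             * drop it) and resume the walk from the marker.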
2036b802aa8cSSanjeev Bagewadi */ 2037*7adb730bSGeorge Wilson list_insert_after(list, hdr, &marker); 2038b802aa8cSSanjeev Bagewadi mutex_exit(&state->arcs_mtx); 2039b802aa8cSSanjeev Bagewadi mutex_enter(hash_lock); 2040b802aa8cSSanjeev Bagewadi mutex_exit(hash_lock); 2041b802aa8cSSanjeev Bagewadi mutex_enter(&state->arcs_mtx); 2042*7adb730bSGeorge Wilson hdr_prev = list_prev(list, &marker); 2043b802aa8cSSanjeev Bagewadi list_remove(list, &marker); 204469962b56SMatthew Ahrens } else { 2045fa9e4066Sahrens bufs_skipped += 1; 204669962b56SMatthew Ahrens } 204769962b56SMatthew Ahrens 2048fa9e4066Sahrens } 204944cb6abcSbmc mutex_exit(&state->arcs_mtx); 2050fa9e4066Sahrens 20510e8c6158Smaybee if (list == &state->arcs_list[ARC_BUFC_DATA] && 20520e8c6158Smaybee (bytes < 0 || bytes_deleted < bytes)) { 20530e8c6158Smaybee list = &state->arcs_list[ARC_BUFC_METADATA]; 20540e8c6158Smaybee goto top; 20550e8c6158Smaybee } 20560e8c6158Smaybee 2057fa9e4066Sahrens if (bufs_skipped) { 205844cb6abcSbmc ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 2059fa9e4066Sahrens ASSERT(bytes >= 0); 2060fa9e4066Sahrens } 2061fa9e4066Sahrens 2062fa9e4066Sahrens if (bytes_deleted < bytes) 2063fa9e4066Sahrens dprintf("only deleted %lld bytes from %p", 2064fa9e4066Sahrens (longlong_t)bytes_deleted, state); 2065fa9e4066Sahrens } 2066fa9e4066Sahrens 2067fa9e4066Sahrens static void 2068fa9e4066Sahrens arc_adjust(void) 2069fa9e4066Sahrens { 20705a98e54bSBrendan Gregg - Sun Microsystems int64_t adjustment, delta; 2071fa9e4066Sahrens 20725a98e54bSBrendan Gregg - Sun Microsystems /* 20735a98e54bSBrendan Gregg - Sun Microsystems * Adjust MRU size 20745a98e54bSBrendan Gregg - Sun Microsystems */ 20755a98e54bSBrendan Gregg - Sun Microsystems 20763e4e8481STom Erickson adjustment = MIN((int64_t)(arc_size - arc_c), 20773e4e8481STom Erickson (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - 20783e4e8481STom Erickson arc_p)); 2079fa9e4066Sahrens 20805a98e54bSBrendan Gregg - Sun Microsystems if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 20815a98e54bSBrendan Gregg - Sun Microsystems delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 20825a98e54bSBrendan Gregg - Sun Microsystems (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA); 20835a98e54bSBrendan Gregg - Sun Microsystems adjustment -= delta; 20840e8c6158Smaybee } 20850e8c6158Smaybee 20865a98e54bSBrendan Gregg - Sun Microsystems if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 20875a98e54bSBrendan Gregg - Sun Microsystems delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 20885a98e54bSBrendan Gregg - Sun Microsystems (void) arc_evict(arc_mru, NULL, delta, FALSE, 2089874395d5Smaybee ARC_BUFC_METADATA); 2090fa9e4066Sahrens } 2091fa9e4066Sahrens 20925a98e54bSBrendan Gregg - Sun Microsystems /* 20935a98e54bSBrendan Gregg - Sun Microsystems * Adjust MFU size 20945a98e54bSBrendan Gregg - Sun Microsystems */ 2095fa9e4066Sahrens 20965a98e54bSBrendan Gregg - Sun Microsystems adjustment = arc_size - arc_c; 20975a98e54bSBrendan Gregg - Sun Microsystems 20985a98e54bSBrendan Gregg - Sun Microsystems if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 20995a98e54bSBrendan Gregg - Sun Microsystems delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 21005a98e54bSBrendan Gregg - Sun Microsystems (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA); 21015a98e54bSBrendan Gregg - Sun Microsystems adjustment -= delta; 2102fa9e4066Sahrens } 2103fa9e4066Sahrens 21045a98e54bSBrendan Gregg - Sun 
Microsystems if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 21055a98e54bSBrendan Gregg - Sun Microsystems int64_t delta = MIN(adjustment, 21065a98e54bSBrendan Gregg - Sun Microsystems arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 21075a98e54bSBrendan Gregg - Sun Microsystems (void) arc_evict(arc_mfu, NULL, delta, FALSE, 21085a98e54bSBrendan Gregg - Sun Microsystems ARC_BUFC_METADATA); 21095a98e54bSBrendan Gregg - Sun Microsystems } 2110fa9e4066Sahrens 21115a98e54bSBrendan Gregg - Sun Microsystems /* 21125a98e54bSBrendan Gregg - Sun Microsystems * Adjust ghost lists 21135a98e54bSBrendan Gregg - Sun Microsystems */ 2114fa9e4066Sahrens 21155a98e54bSBrendan Gregg - Sun Microsystems adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 2116fa9e4066Sahrens 21175a98e54bSBrendan Gregg - Sun Microsystems if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 21185a98e54bSBrendan Gregg - Sun Microsystems delta = MIN(arc_mru_ghost->arcs_size, adjustment); 21195a98e54bSBrendan Gregg - Sun Microsystems arc_evict_ghost(arc_mru_ghost, NULL, delta); 21205a98e54bSBrendan Gregg - Sun Microsystems } 21210e8c6158Smaybee 21225a98e54bSBrendan Gregg - Sun Microsystems adjustment = 21235a98e54bSBrendan Gregg - Sun Microsystems arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 21245a98e54bSBrendan Gregg - Sun Microsystems 21255a98e54bSBrendan Gregg - Sun Microsystems if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 21265a98e54bSBrendan Gregg - Sun Microsystems delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 21275a98e54bSBrendan Gregg - Sun Microsystems arc_evict_ghost(arc_mfu_ghost, NULL, delta); 2128fa9e4066Sahrens } 2129fa9e4066Sahrens } 2130fa9e4066Sahrens 2131ea8dc4b6Seschrock static void 2132ea8dc4b6Seschrock arc_do_user_evicts(void) 2133ea8dc4b6Seschrock { 2134ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 2135ea8dc4b6Seschrock while (arc_eviction_list != NULL) { 2136ea8dc4b6Seschrock arc_buf_t *buf = arc_eviction_list; 2137ea8dc4b6Seschrock arc_eviction_list = buf->b_next; 21383f9d6ad7SLin Ling mutex_enter(&buf->b_evict_lock); 2139ea8dc4b6Seschrock buf->b_hdr = NULL; 21403f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 2141ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 2142ea8dc4b6Seschrock 2143dd6ef538Smaybee if (buf->b_efunc != NULL) 2144bbfa8ea8SMatthew Ahrens VERIFY0(buf->b_efunc(buf->b_private)); 2145ea8dc4b6Seschrock 2146ea8dc4b6Seschrock buf->b_efunc = NULL; 2147ea8dc4b6Seschrock buf->b_private = NULL; 2148ea8dc4b6Seschrock kmem_cache_free(buf_cache, buf); 2149ea8dc4b6Seschrock mutex_enter(&arc_eviction_mtx); 2150ea8dc4b6Seschrock } 2151ea8dc4b6Seschrock mutex_exit(&arc_eviction_mtx); 2152ea8dc4b6Seschrock } 2153ea8dc4b6Seschrock 2154fa9e4066Sahrens /* 2155874395d5Smaybee * Flush all *evictable* data from the cache for the given spa. 2156fa9e4066Sahrens * NOTE: this will not touch "active" (i.e. referenced) data. 
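 * A NULL spa flushes every pool (guid 0 matches all headers). Data and
 * metadata are evicted from the MRU and MFU lists via arc_evict(), the
 * ghost lists are emptied with arc_evict_ghost(), and any pending user
 * eviction callbacks are run before returning.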
2157fa9e4066Sahrens */ 2158fa9e4066Sahrens void 2159874395d5Smaybee arc_flush(spa_t *spa) 2160fa9e4066Sahrens { 2161ac05c741SMark Maybee uint64_t guid = 0; 2162ac05c741SMark Maybee 2163ac05c741SMark Maybee if (spa) 2164e9103aaeSGarrett D'Amore guid = spa_load_guid(spa); 2165ac05c741SMark Maybee 2166874395d5Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 2167ac05c741SMark Maybee (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 2168874395d5Smaybee if (spa) 2169874395d5Smaybee break; 2170874395d5Smaybee } 2171874395d5Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 2172ac05c741SMark Maybee (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 2173874395d5Smaybee if (spa) 2174874395d5Smaybee break; 2175874395d5Smaybee } 2176874395d5Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 2177ac05c741SMark Maybee (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 2178874395d5Smaybee if (spa) 2179874395d5Smaybee break; 2180874395d5Smaybee } 2181874395d5Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 2182ac05c741SMark Maybee (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 2183874395d5Smaybee if (spa) 2184874395d5Smaybee break; 2185874395d5Smaybee } 2186874395d5Smaybee 2187ac05c741SMark Maybee arc_evict_ghost(arc_mru_ghost, guid, -1); 2188ac05c741SMark Maybee arc_evict_ghost(arc_mfu_ghost, guid, -1); 2189ea8dc4b6Seschrock 2190ea8dc4b6Seschrock mutex_enter(&arc_reclaim_thr_lock); 2191ea8dc4b6Seschrock arc_do_user_evicts(); 2192ea8dc4b6Seschrock mutex_exit(&arc_reclaim_thr_lock); 2193874395d5Smaybee ASSERT(spa || arc_eviction_list == NULL); 2194fa9e4066Sahrens } 2195fa9e4066Sahrens 2196fa9e4066Sahrens void 219749e3519aSmaybee arc_shrink(void) 2198fa9e4066Sahrens { 219944cb6abcSbmc if (arc_c > arc_c_min) { 220049e3519aSmaybee uint64_t to_free; 2201fa9e4066Sahrens 22023cff2f43Sstans #ifdef _KERNEL 220344cb6abcSbmc to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 22043cff2f43Sstans #else 220544cb6abcSbmc to_free = arc_c >> arc_shrink_shift; 22063cff2f43Sstans #endif 220744cb6abcSbmc if (arc_c > arc_c_min + to_free) 220844cb6abcSbmc atomic_add_64(&arc_c, -to_free); 220949e3519aSmaybee else 221044cb6abcSbmc arc_c = arc_c_min; 221144cb6abcSbmc 221244cb6abcSbmc atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 221344cb6abcSbmc if (arc_c > arc_size) 221444cb6abcSbmc arc_c = MAX(arc_size, arc_c_min); 221544cb6abcSbmc if (arc_p > arc_c) 221644cb6abcSbmc arc_p = (arc_c >> 1); 221744cb6abcSbmc ASSERT(arc_c >= arc_c_min); 221844cb6abcSbmc ASSERT((int64_t)arc_p >= 0); 221949e3519aSmaybee } 2220fa9e4066Sahrens 222144cb6abcSbmc if (arc_size > arc_c) 222249e3519aSmaybee arc_adjust(); 2223fa9e4066Sahrens } 2224fa9e4066Sahrens 222594dd93aeSGeorge Wilson /* 222694dd93aeSGeorge Wilson * Determine if the system is under memory pressure and is asking 222794dd93aeSGeorge Wilson * to reclaim memory. A return value of 1 indicates that the system 222894dd93aeSGeorge Wilson * is under memory pressure and that the arc should adjust accordingly. 
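 * The kernel checks below consider, in order: needfree, the pageout
 * scanner thresholds (freemem vs. lotsfree + needfree + desfree), the
 * swapfs reserve, the page-locking limit (pages_pp_maximum) and, where
 * applicable, kernel-heap and zio arena fragmentation.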
222994dd93aeSGeorge Wilson */ 2230fa9e4066Sahrens static int 2231fa9e4066Sahrens arc_reclaim_needed(void) 2232fa9e4066Sahrens { 2233fa9e4066Sahrens uint64_t extra; 2234fa9e4066Sahrens 2235fa9e4066Sahrens #ifdef _KERNEL 22363cff2f43Sstans 22373cff2f43Sstans if (needfree) 22383cff2f43Sstans return (1); 22393cff2f43Sstans 2240fa9e4066Sahrens /* 2241fa9e4066Sahrens * take 'desfree' extra pages, so we reclaim sooner, rather than later 2242fa9e4066Sahrens */ 2243fa9e4066Sahrens extra = desfree; 2244fa9e4066Sahrens 2245fa9e4066Sahrens /* 2246fa9e4066Sahrens * check that we're out of range of the pageout scanner. It starts to 2247fa9e4066Sahrens * schedule paging if freemem is less than lotsfree and needfree. 2248fa9e4066Sahrens * lotsfree is the high-water mark for pageout, and needfree is the 2249fa9e4066Sahrens * number of needed free pages. We add extra pages here to make sure 2250fa9e4066Sahrens * the scanner doesn't start up while we're freeing memory. 2251fa9e4066Sahrens */ 2252fa9e4066Sahrens if (freemem < lotsfree + needfree + extra) 2253fa9e4066Sahrens return (1); 2254fa9e4066Sahrens 2255fa9e4066Sahrens /* 2256fa9e4066Sahrens * check to make sure that swapfs has enough space so that anon 2257fa94a07fSbrendan * reservations can still succeed. anon_resvmem() checks that the 2258fa9e4066Sahrens * availrmem is greater than swapfs_minfree, and the number of reserved 2259fa9e4066Sahrens * swap pages. We also add a bit of extra here just to prevent 2260fa9e4066Sahrens * circumstances from getting really dire. 2261fa9e4066Sahrens */ 2262fa9e4066Sahrens if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2263fa9e4066Sahrens return (1); 2264fa9e4066Sahrens 2265cf746768SBryan Cantrill /* 2266cf746768SBryan Cantrill * Check that we have enough availrmem that memory locking (e.g., via 2267cf746768SBryan Cantrill * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum 2268cf746768SBryan Cantrill * stores the number of pages that cannot be locked; when availrmem 2269cf746768SBryan Cantrill * drops below pages_pp_maximum, page locking mechanisms such as 2270cf746768SBryan Cantrill * page_pp_lock() will fail.) 2271cf746768SBryan Cantrill */ 2272cf746768SBryan Cantrill if (availrmem <= pages_pp_maximum) 2273cf746768SBryan Cantrill return (1); 2274cf746768SBryan Cantrill 22755dc8af33Smaybee #if defined(__i386) 2276fa9e4066Sahrens /* 2277fa9e4066Sahrens * If we're on an i386 platform, it's possible that we'll exhaust the 2278fa9e4066Sahrens * kernel heap space before we ever run out of available physical 2279fa9e4066Sahrens * memory. Most checks of the size of the heap_area compare against 2280fa9e4066Sahrens * tune.t_minarmem, which is the minimum available real memory that we 2281fa9e4066Sahrens * can have in the system. However, this is generally fixed at 25 pages 2282fa9e4066Sahrens * which is so low that it's useless. In this comparison, we seek to 2283fa9e4066Sahrens * calculate the total heap-size, and reclaim if more than 3/4ths of the 2284fa94a07fSbrendan * heap is allocated. 
(Or, in the calculation, if less than 1/4th is 2285fa9e4066Sahrens * free) 2286fa9e4066Sahrens */ 228794dd93aeSGeorge Wilson if (vmem_size(heap_arena, VMEM_FREE) < 228894dd93aeSGeorge Wilson (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2)) 2289fa9e4066Sahrens return (1); 2290fa9e4066Sahrens #endif 2291fa9e4066Sahrens 229294dd93aeSGeorge Wilson /* 229394dd93aeSGeorge Wilson * If zio data pages are being allocated out of a separate heap segment, 229494dd93aeSGeorge Wilson * then enforce that the size of available vmem for this arena remains 229594dd93aeSGeorge Wilson * above about 1/16th free. 229694dd93aeSGeorge Wilson * 229794dd93aeSGeorge Wilson * Note: The 1/16th arena free requirement was put in place 229894dd93aeSGeorge Wilson * to aggressively evict memory from the arc in order to avoid 229994dd93aeSGeorge Wilson * memory fragmentation issues. 230094dd93aeSGeorge Wilson */ 230194dd93aeSGeorge Wilson if (zio_arena != NULL && 230294dd93aeSGeorge Wilson vmem_size(zio_arena, VMEM_FREE) < 230394dd93aeSGeorge Wilson (vmem_size(zio_arena, VMEM_ALLOC) >> 4)) 230494dd93aeSGeorge Wilson return (1); 2305fa9e4066Sahrens #else 2306fa9e4066Sahrens if (spa_get_random(100) == 0) 2307fa9e4066Sahrens return (1); 2308fa9e4066Sahrens #endif 2309fa9e4066Sahrens return (0); 2310fa9e4066Sahrens } 2311fa9e4066Sahrens 2312fa9e4066Sahrens static void 2313fa9e4066Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2314fa9e4066Sahrens { 2315fa9e4066Sahrens size_t i; 2316fa9e4066Sahrens kmem_cache_t *prev_cache = NULL; 2317ad23a2dbSjohansen kmem_cache_t *prev_data_cache = NULL; 2318fa9e4066Sahrens extern kmem_cache_t *zio_buf_cache[]; 2319ad23a2dbSjohansen extern kmem_cache_t *zio_data_buf_cache[]; 232083803b51SGeorge Wilson extern kmem_cache_t *range_seg_cache; 2321fa9e4066Sahrens 2322033f9833Sek #ifdef _KERNEL 23230e8c6158Smaybee if (arc_meta_used >= arc_meta_limit) { 23240e8c6158Smaybee /* 23250e8c6158Smaybee * We are exceeding our meta-data cache limit. 23260e8c6158Smaybee * Purge some DNLC entries to release holds on meta-data. 23270e8c6158Smaybee */ 23280e8c6158Smaybee dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 23290e8c6158Smaybee } 23305dc8af33Smaybee #if defined(__i386) 23315dc8af33Smaybee /* 23325dc8af33Smaybee * Reclaim unused memory from all kmem caches. 23335dc8af33Smaybee */ 23345dc8af33Smaybee kmem_reap(); 23355dc8af33Smaybee #endif 2336033f9833Sek #endif 2337033f9833Sek 2338fa9e4066Sahrens /* 2339fa94a07fSbrendan * An aggressive reclamation will shrink the cache size as well as 2340ea8dc4b6Seschrock * reap free buffers from the arc kmem caches. 
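 * A conservative reclaim (ARC_RECLAIM_CONS) only reaps the kmem
 * caches; an aggressive one (ARC_RECLAIM_AGGR) additionally calls
 * arc_shrink() and drains the zio arena's quantum caches.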
2341fa9e4066Sahrens */ 2342fa9e4066Sahrens if (strat == ARC_RECLAIM_AGGR) 234349e3519aSmaybee arc_shrink(); 2344fa9e4066Sahrens 2345fa9e4066Sahrens for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2346fa9e4066Sahrens if (zio_buf_cache[i] != prev_cache) { 2347fa9e4066Sahrens prev_cache = zio_buf_cache[i]; 2348fa9e4066Sahrens kmem_cache_reap_now(zio_buf_cache[i]); 2349fa9e4066Sahrens } 2350ad23a2dbSjohansen if (zio_data_buf_cache[i] != prev_data_cache) { 2351ad23a2dbSjohansen prev_data_cache = zio_data_buf_cache[i]; 2352ad23a2dbSjohansen kmem_cache_reap_now(zio_data_buf_cache[i]); 2353ad23a2dbSjohansen } 2354fa9e4066Sahrens } 2355ea8dc4b6Seschrock kmem_cache_reap_now(buf_cache); 2356ea8dc4b6Seschrock kmem_cache_reap_now(hdr_cache); 235783803b51SGeorge Wilson kmem_cache_reap_now(range_seg_cache); 235894dd93aeSGeorge Wilson 235994dd93aeSGeorge Wilson /* 236094dd93aeSGeorge Wilson * Ask the vmem arena to reclaim unused memory from its 236194dd93aeSGeorge Wilson * quantum caches. 236294dd93aeSGeorge Wilson */ 236394dd93aeSGeorge Wilson if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR) 236494dd93aeSGeorge Wilson vmem_qcache_reap(zio_arena); 2365fa9e4066Sahrens } 2366fa9e4066Sahrens 2367fa9e4066Sahrens static void 2368fa9e4066Sahrens arc_reclaim_thread(void) 2369fa9e4066Sahrens { 2370fa9e4066Sahrens clock_t growtime = 0; 2371fa9e4066Sahrens arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2372fa9e4066Sahrens callb_cpr_t cpr; 2373fa9e4066Sahrens 2374fa9e4066Sahrens CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2375fa9e4066Sahrens 2376fa9e4066Sahrens mutex_enter(&arc_reclaim_thr_lock); 2377fa9e4066Sahrens while (arc_thread_exit == 0) { 2378fa9e4066Sahrens if (arc_reclaim_needed()) { 2379fa9e4066Sahrens 238044cb6abcSbmc if (arc_no_grow) { 2381fa9e4066Sahrens if (last_reclaim == ARC_RECLAIM_CONS) { 2382fa9e4066Sahrens last_reclaim = ARC_RECLAIM_AGGR; 2383fa9e4066Sahrens } else { 2384fa9e4066Sahrens last_reclaim = ARC_RECLAIM_CONS; 2385fa9e4066Sahrens } 2386fa9e4066Sahrens } else { 238744cb6abcSbmc arc_no_grow = TRUE; 2388fa9e4066Sahrens last_reclaim = ARC_RECLAIM_AGGR; 2389fa9e4066Sahrens membar_producer(); 2390fa9e4066Sahrens } 2391fa9e4066Sahrens 2392fa9e4066Sahrens /* reset the growth delay for every reclaim */ 2393d3d50737SRafael Vanoni growtime = ddi_get_lbolt() + (arc_grow_retry * hz); 2394fa9e4066Sahrens 2395fa9e4066Sahrens arc_kmem_reap_now(last_reclaim); 23963a737e0dSbrendan arc_warm = B_TRUE; 2397fa9e4066Sahrens 2398d3d50737SRafael Vanoni } else if (arc_no_grow && ddi_get_lbolt() >= growtime) { 239944cb6abcSbmc arc_no_grow = FALSE; 2400fa9e4066Sahrens } 2401fa9e4066Sahrens 24023e4e8481STom Erickson arc_adjust(); 2403641fbdaeSmaybee 2404ea8dc4b6Seschrock if (arc_eviction_list != NULL) 2405ea8dc4b6Seschrock arc_do_user_evicts(); 2406ea8dc4b6Seschrock 2407fa9e4066Sahrens /* block until needed, or one second, whichever is shorter */ 2408fa9e4066Sahrens CALLB_CPR_SAFE_BEGIN(&cpr); 2409fa9e4066Sahrens (void) cv_timedwait(&arc_reclaim_thr_cv, 2410d3d50737SRafael Vanoni &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz)); 2411fa9e4066Sahrens CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2412fa9e4066Sahrens } 2413fa9e4066Sahrens 2414fa9e4066Sahrens arc_thread_exit = 0; 2415fa9e4066Sahrens cv_broadcast(&arc_reclaim_thr_cv); 2416fa9e4066Sahrens CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2417fa9e4066Sahrens thread_exit(); 2418fa9e4066Sahrens } 2419fa9e4066Sahrens 2420ea8dc4b6Seschrock /* 2421ea8dc4b6Seschrock * Adapt arc info given the number of
bytes we are trying to add and 2422ea8dc4b6Seschrock * the state that we are coming from. This function is only called 2423ea8dc4b6Seschrock * when we are adding new content to the cache. 2424ea8dc4b6Seschrock */ 2425fa9e4066Sahrens static void 2426ea8dc4b6Seschrock arc_adapt(int bytes, arc_state_t *state) 2427fa9e4066Sahrens { 2428ea8dc4b6Seschrock int mult; 24295a98e54bSBrendan Gregg - Sun Microsystems uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2430ea8dc4b6Seschrock 2431fa94a07fSbrendan if (state == arc_l2c_only) 2432fa94a07fSbrendan return; 2433fa94a07fSbrendan 2434ea8dc4b6Seschrock ASSERT(bytes > 0); 2435fa9e4066Sahrens /* 2436ea8dc4b6Seschrock * Adapt the target size of the MRU list: 2437ea8dc4b6Seschrock * - if we just hit in the MRU ghost list, then increase 2438ea8dc4b6Seschrock * the target size of the MRU list. 2439ea8dc4b6Seschrock * - if we just hit in the MFU ghost list, then increase 2440ea8dc4b6Seschrock * the target size of the MFU list by decreasing the 2441ea8dc4b6Seschrock * target size of the MRU list. 2442fa9e4066Sahrens */ 244344cb6abcSbmc if (state == arc_mru_ghost) { 244444cb6abcSbmc mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 244544cb6abcSbmc 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 24463e4e8481STom Erickson mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 2447ea8dc4b6Seschrock 24485a98e54bSBrendan Gregg - Sun Microsystems arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 244944cb6abcSbmc } else if (state == arc_mfu_ghost) { 24505a98e54bSBrendan Gregg - Sun Microsystems uint64_t delta; 24515a98e54bSBrendan Gregg - Sun Microsystems 245244cb6abcSbmc mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 245344cb6abcSbmc 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 24543e4e8481STom Erickson mult = MIN(mult, 10); 2455ea8dc4b6Seschrock 24565a98e54bSBrendan Gregg - Sun Microsystems delta = MIN(bytes * mult, arc_p); 24575a98e54bSBrendan Gregg - Sun Microsystems arc_p = MAX(arc_p_min, arc_p - delta); 2458ea8dc4b6Seschrock } 245944cb6abcSbmc ASSERT((int64_t)arc_p >= 0); 2460fa9e4066Sahrens 2461fa9e4066Sahrens if (arc_reclaim_needed()) { 2462fa9e4066Sahrens cv_signal(&arc_reclaim_thr_cv); 2463fa9e4066Sahrens return; 2464fa9e4066Sahrens } 2465fa9e4066Sahrens 246644cb6abcSbmc if (arc_no_grow) 2467fa9e4066Sahrens return; 2468fa9e4066Sahrens 246944cb6abcSbmc if (arc_c >= arc_c_max) 2470ea8dc4b6Seschrock return; 2471ea8dc4b6Seschrock 2472fa9e4066Sahrens /* 2473ea8dc4b6Seschrock * If we're within (2 * maxblocksize) bytes of the target 2474ea8dc4b6Seschrock * cache size, increment the target cache size 2475fa9e4066Sahrens */ 247644cb6abcSbmc if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 247744cb6abcSbmc atomic_add_64(&arc_c, (int64_t)bytes); 247844cb6abcSbmc if (arc_c > arc_c_max) 247944cb6abcSbmc arc_c = arc_c_max; 248044cb6abcSbmc else if (state == arc_anon) 248144cb6abcSbmc atomic_add_64(&arc_p, (int64_t)bytes); 248244cb6abcSbmc if (arc_p > arc_c) 248344cb6abcSbmc arc_p = arc_c; 2484fa9e4066Sahrens } 248544cb6abcSbmc ASSERT((int64_t)arc_p >= 0); 2486fa9e4066Sahrens } 2487fa9e4066Sahrens 2488fa9e4066Sahrens /* 2489ea8dc4b6Seschrock * Check if the cache has reached its limits and eviction is required 2490ea8dc4b6Seschrock * prior to insert.
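 *
 * Eviction is needed when a metadata buffer is being inserted and
 * arc_meta_used has reached arc_meta_limit, when arc_reclaim_needed()
 * reports memory pressure, or when arc_size has grown beyond the
 * target size arc_c.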
2491fa9e4066Sahrens */ 2492fa9e4066Sahrens static int 24930e8c6158Smaybee arc_evict_needed(arc_buf_contents_t type) 2494fa9e4066Sahrens { 24950e8c6158Smaybee if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 24960e8c6158Smaybee return (1); 24970e8c6158Smaybee 2498fa9e4066Sahrens if (arc_reclaim_needed()) 2499fa9e4066Sahrens return (1); 2500fa9e4066Sahrens 250144cb6abcSbmc return (arc_size > arc_c); 2502fa9e4066Sahrens } 2503fa9e4066Sahrens 2504fa9e4066Sahrens /* 250544eda4d7Smaybee * The buffer, supplied as the first argument, needs a data block. 250644eda4d7Smaybee * So, if we are at cache max, determine which cache should be victimized. 250744eda4d7Smaybee * We have the following cases: 2508fa9e4066Sahrens * 250944cb6abcSbmc * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2510fa9e4066Sahrens * In this situation if we're out of space, but the resident size of the MFU is 2511fa9e4066Sahrens * under the limit, victimize the MFU cache to satisfy this insertion request. 2512fa9e4066Sahrens * 251344cb6abcSbmc * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2514fa9e4066Sahrens * Here, we've used up all of the available space for the MRU, so we need to 2515fa9e4066Sahrens * evict from our own cache instead. Evict from the set of resident MRU 2516fa9e4066Sahrens * entries. 2517fa9e4066Sahrens * 251844cb6abcSbmc * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2519fa9e4066Sahrens * c minus p represents the MFU space in the cache, since p is the size of the 2520fa9e4066Sahrens * cache that is dedicated to the MRU. In this situation there's still space on 2521fa9e4066Sahrens * the MFU side, so the MRU side needs to be victimized. 2522fa9e4066Sahrens * 252344cb6abcSbmc * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2524fa9e4066Sahrens * MFU's resident set is consuming more space than it has been allotted. In 2525fa9e4066Sahrens * this situation, we must victimize our own cache, the MFU, for this insertion. 2526fa9e4066Sahrens */ 2527fa9e4066Sahrens static void 252844eda4d7Smaybee arc_get_data_buf(arc_buf_t *buf) 2529fa9e4066Sahrens { 2530ad23a2dbSjohansen arc_state_t *state = buf->b_hdr->b_state; 2531ad23a2dbSjohansen uint64_t size = buf->b_hdr->b_size; 2532ad23a2dbSjohansen arc_buf_contents_t type = buf->b_hdr->b_type; 2533fa9e4066Sahrens 253444eda4d7Smaybee arc_adapt(size, state); 2535fa9e4066Sahrens 253644eda4d7Smaybee /* 253744eda4d7Smaybee * We have not yet reached cache maximum size, 253844eda4d7Smaybee * just allocate a new buffer. 253944eda4d7Smaybee */ 25400e8c6158Smaybee if (!arc_evict_needed(type)) { 2541ad23a2dbSjohansen if (type == ARC_BUFC_METADATA) { 2542ad23a2dbSjohansen buf->b_data = zio_buf_alloc(size); 25435a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(size, ARC_SPACE_DATA); 2544ad23a2dbSjohansen } else { 2545ad23a2dbSjohansen ASSERT(type == ARC_BUFC_DATA); 2546ad23a2dbSjohansen buf->b_data = zio_data_buf_alloc(size); 25475a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_data_size, size); 25480e8c6158Smaybee atomic_add_64(&arc_size, size); 2549ad23a2dbSjohansen } 255044eda4d7Smaybee goto out; 255144eda4d7Smaybee } 255244eda4d7Smaybee 255344eda4d7Smaybee /* 255444eda4d7Smaybee * If we are prefetching from the mfu ghost list, this buffer 255544eda4d7Smaybee * will end up on the mru list; so steal space from there. 255644eda4d7Smaybee */ 255744cb6abcSbmc if (state == arc_mfu_ghost) 2558*7adb730bSGeorge Wilson state = buf->b_hdr->b_flags & ARC_FLAG_PREFETCH ? 
2559*7adb730bSGeorge Wilson arc_mru : arc_mfu; 256044cb6abcSbmc else if (state == arc_mru_ghost) 256144cb6abcSbmc state = arc_mru; 256244cb6abcSbmc 256344cb6abcSbmc if (state == arc_mru || state == arc_anon) { 256444cb6abcSbmc uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 25655a98e54bSBrendan Gregg - Sun Microsystems state = (arc_mfu->arcs_lsize[type] >= size && 25660e8c6158Smaybee arc_p > mru_used) ? arc_mfu : arc_mru; 2567fa9e4066Sahrens } else { 256844eda4d7Smaybee /* MFU cases */ 256944cb6abcSbmc uint64_t mfu_space = arc_c - arc_p; 25705a98e54bSBrendan Gregg - Sun Microsystems state = (arc_mru->arcs_lsize[type] >= size && 25710e8c6158Smaybee mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 257244eda4d7Smaybee } 2573874395d5Smaybee if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2574ad23a2dbSjohansen if (type == ARC_BUFC_METADATA) { 2575ad23a2dbSjohansen buf->b_data = zio_buf_alloc(size); 25765a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(size, ARC_SPACE_DATA); 2577ad23a2dbSjohansen } else { 2578ad23a2dbSjohansen ASSERT(type == ARC_BUFC_DATA); 2579ad23a2dbSjohansen buf->b_data = zio_data_buf_alloc(size); 25805a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_INCR(arcstat_data_size, size); 25810e8c6158Smaybee atomic_add_64(&arc_size, size); 2582ad23a2dbSjohansen } 258344cb6abcSbmc ARCSTAT_BUMP(arcstat_recycle_miss); 258444eda4d7Smaybee } 258544eda4d7Smaybee ASSERT(buf->b_data != NULL); 258644eda4d7Smaybee out: 258744eda4d7Smaybee /* 258844eda4d7Smaybee * Update the state size. Note that ghost states have a 258944eda4d7Smaybee * "ghost size" and so don't need to be updated. 259044eda4d7Smaybee */ 259144eda4d7Smaybee if (!GHOST_STATE(buf->b_hdr->b_state)) { 259244eda4d7Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 259344eda4d7Smaybee 259444cb6abcSbmc atomic_add_64(&hdr->b_state->arcs_size, size); 259544eda4d7Smaybee if (list_link_active(&hdr->b_arc_node)) { 259644eda4d7Smaybee ASSERT(refcount_is_zero(&hdr->b_refcnt)); 25970e8c6158Smaybee atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2598fa9e4066Sahrens } 2599641fbdaeSmaybee /* 2600641fbdaeSmaybee * If we are growing the cache, and we are adding anonymous 260144cb6abcSbmc * data, and we have outgrown arc_p, update arc_p 2602641fbdaeSmaybee */ 260344cb6abcSbmc if (arc_size < arc_c && hdr->b_state == arc_anon && 260444cb6abcSbmc arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 260544cb6abcSbmc arc_p = MIN(arc_c, arc_p + size); 2606fa9e4066Sahrens } 2607fa9e4066Sahrens } 2608fa9e4066Sahrens 2609fa9e4066Sahrens /* 2610fa9e4066Sahrens * This routine is called whenever a buffer is accessed. 2611ea8dc4b6Seschrock * NOTE: the hash lock is dropped in this function. 2612fa9e4066Sahrens */ 2613fa9e4066Sahrens static void 2614*7adb730bSGeorge Wilson arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 2615fa9e4066Sahrens { 2616d3d50737SRafael Vanoni clock_t now; 2617d3d50737SRafael Vanoni 2618fa9e4066Sahrens ASSERT(MUTEX_HELD(hash_lock)); 2619fa9e4066Sahrens 2620*7adb730bSGeorge Wilson if (hdr->b_state == arc_anon) { 2621fa9e4066Sahrens /* 2622fa9e4066Sahrens * This buffer is not in the cache, and does not 2623fa9e4066Sahrens * appear in our "ghost" list. Add the new buffer 2624fa9e4066Sahrens * to the MRU state. 
2625fa9e4066Sahrens */ 2626fa9e4066Sahrens 2627*7adb730bSGeorge Wilson ASSERT(hdr->b_arc_access == 0); 2628*7adb730bSGeorge Wilson hdr->b_arc_access = ddi_get_lbolt(); 2629*7adb730bSGeorge Wilson DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 2630*7adb730bSGeorge Wilson arc_change_state(arc_mru, hdr, hash_lock); 2631fa9e4066Sahrens 2632*7adb730bSGeorge Wilson } else if (hdr->b_state == arc_mru) { 2633d3d50737SRafael Vanoni now = ddi_get_lbolt(); 2634d3d50737SRafael Vanoni 2635fa9e4066Sahrens /* 263613506d1eSmaybee * If this buffer is here because of a prefetch, then either: 263713506d1eSmaybee * - clear the flag if this is a "referencing" read 263813506d1eSmaybee * (any subsequent access will bump this into the MFU state). 263913506d1eSmaybee * or 264013506d1eSmaybee * - move the buffer to the head of the list if this is 264113506d1eSmaybee * another prefetch (to make it less likely to be evicted). 2642fa9e4066Sahrens */ 2643*7adb730bSGeorge Wilson if ((hdr->b_flags & ARC_FLAG_PREFETCH) != 0) { 2644*7adb730bSGeorge Wilson if (refcount_count(&hdr->b_refcnt) == 0) { 2645*7adb730bSGeorge Wilson ASSERT(list_link_active(&hdr->b_arc_node)); 264613506d1eSmaybee } else { 2647*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_PREFETCH; 264844cb6abcSbmc ARCSTAT_BUMP(arcstat_mru_hits); 264913506d1eSmaybee } 2650*7adb730bSGeorge Wilson hdr->b_arc_access = now; 2651fa9e4066Sahrens return; 2652fa9e4066Sahrens } 2653fa9e4066Sahrens 2654fa9e4066Sahrens /* 2655fa9e4066Sahrens * This buffer has been "accessed" only once so far, 2656fa9e4066Sahrens * but it is still in the cache. Move it to the MFU 2657fa9e4066Sahrens * state. 2658fa9e4066Sahrens */ 2659*7adb730bSGeorge Wilson if (now > hdr->b_arc_access + ARC_MINTIME) { 2660fa9e4066Sahrens /* 2661fa9e4066Sahrens * More than 125ms have passed since we 2662fa9e4066Sahrens * instantiated this buffer. Move it to the 2663fa9e4066Sahrens * most frequently used state. 2664fa9e4066Sahrens */ 2665*7adb730bSGeorge Wilson hdr->b_arc_access = now; 2666*7adb730bSGeorge Wilson DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 2667*7adb730bSGeorge Wilson arc_change_state(arc_mfu, hdr, hash_lock); 2668fa9e4066Sahrens } 266944cb6abcSbmc ARCSTAT_BUMP(arcstat_mru_hits); 2670*7adb730bSGeorge Wilson } else if (hdr->b_state == arc_mru_ghost) { 2671fa9e4066Sahrens arc_state_t *new_state; 2672fa9e4066Sahrens /* 2673fa9e4066Sahrens * This buffer has been "accessed" recently, but 2674fa9e4066Sahrens * was evicted from the cache. Move it to the 2675fa9e4066Sahrens * MFU state. 2676fa9e4066Sahrens */ 2677fa9e4066Sahrens 2678*7adb730bSGeorge Wilson if (hdr->b_flags & ARC_FLAG_PREFETCH) { 267944cb6abcSbmc new_state = arc_mru; 2680*7adb730bSGeorge Wilson if (refcount_count(&hdr->b_refcnt) > 0) 2681*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_PREFETCH; 2682*7adb730bSGeorge Wilson DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 2683fa9e4066Sahrens } else { 268444cb6abcSbmc new_state = arc_mfu; 2685*7adb730bSGeorge Wilson DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 2686fa9e4066Sahrens } 2687fa9e4066Sahrens 2688*7adb730bSGeorge Wilson hdr->b_arc_access = ddi_get_lbolt(); 2689*7adb730bSGeorge Wilson arc_change_state(new_state, hdr, hash_lock); 2690fa9e4066Sahrens 269144cb6abcSbmc ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2692*7adb730bSGeorge Wilson } else if (hdr->b_state == arc_mfu) { 2693fa9e4066Sahrens /* 2694fa9e4066Sahrens * This buffer has been accessed more than once and is 2695fa9e4066Sahrens * still in the cache. Keep it in the MFU state. 
2696fa9e4066Sahrens * 269713506d1eSmaybee * NOTE: an add_reference() that occurred when we did 269813506d1eSmaybee * the arc_read() will have kicked this off the list. 269913506d1eSmaybee * If it was a prefetch, we will explicitly move it to 270013506d1eSmaybee * the head of the list now. 2701fa9e4066Sahrens */ 2702*7adb730bSGeorge Wilson if ((hdr->b_flags & ARC_FLAG_PREFETCH) != 0) { 2703*7adb730bSGeorge Wilson ASSERT(refcount_count(&hdr->b_refcnt) == 0); 2704*7adb730bSGeorge Wilson ASSERT(list_link_active(&hdr->b_arc_node)); 270513506d1eSmaybee } 270644cb6abcSbmc ARCSTAT_BUMP(arcstat_mfu_hits); 2707*7adb730bSGeorge Wilson hdr->b_arc_access = ddi_get_lbolt(); 2708*7adb730bSGeorge Wilson } else if (hdr->b_state == arc_mfu_ghost) { 270944cb6abcSbmc arc_state_t *new_state = arc_mfu; 2710fa9e4066Sahrens /* 2711fa9e4066Sahrens * This buffer has been accessed more than once but has 2712fa9e4066Sahrens * been evicted from the cache. Move it back to the 2713fa9e4066Sahrens * MFU state. 2714fa9e4066Sahrens */ 2715fa9e4066Sahrens 2716*7adb730bSGeorge Wilson if (hdr->b_flags & ARC_FLAG_PREFETCH) { 271713506d1eSmaybee /* 271813506d1eSmaybee * This is a prefetch access... 271913506d1eSmaybee * move this block back to the MRU state. 272013506d1eSmaybee */ 2721*7adb730bSGeorge Wilson ASSERT0(refcount_count(&hdr->b_refcnt)); 272244cb6abcSbmc new_state = arc_mru; 272313506d1eSmaybee } 272413506d1eSmaybee 2725*7adb730bSGeorge Wilson hdr->b_arc_access = ddi_get_lbolt(); 2726*7adb730bSGeorge Wilson DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 2727*7adb730bSGeorge Wilson arc_change_state(new_state, hdr, hash_lock); 2728fa9e4066Sahrens 272944cb6abcSbmc ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2730*7adb730bSGeorge Wilson } else if (hdr->b_state == arc_l2c_only) { 2731fa94a07fSbrendan /* 2732fa94a07fSbrendan * This buffer is on the 2nd Level ARC. 
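 * Headers in the arc_l2c_only state describe blocks that survive only
 * on the L2ARC device; on access the header is moved to the MFU state
 * below.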
2733fa94a07fSbrendan */ 2734fa94a07fSbrendan 2735*7adb730bSGeorge Wilson hdr->b_arc_access = ddi_get_lbolt(); 2736*7adb730bSGeorge Wilson DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 2737*7adb730bSGeorge Wilson arc_change_state(arc_mfu, hdr, hash_lock); 2738fa9e4066Sahrens } else { 2739fa9e4066Sahrens ASSERT(!"invalid arc state"); 2740fa9e4066Sahrens } 2741fa9e4066Sahrens } 2742fa9e4066Sahrens 2743fa9e4066Sahrens /* a generic arc_done_func_t which you can use */ 2744fa9e4066Sahrens /* ARGSUSED */ 2745fa9e4066Sahrens void 2746fa9e4066Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2747fa9e4066Sahrens { 27483f9d6ad7SLin Ling if (zio == NULL || zio->io_error == 0) 27493f9d6ad7SLin Ling bcopy(buf->b_data, arg, buf->b_hdr->b_size); 27503b2aab18SMatthew Ahrens VERIFY(arc_buf_remove_ref(buf, arg)); 2751fa9e4066Sahrens } 2752fa9e4066Sahrens 27530e8c6158Smaybee /* a generic arc_done_func_t */ 2754fa9e4066Sahrens void 2755fa9e4066Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2756fa9e4066Sahrens { 2757fa9e4066Sahrens arc_buf_t **bufp = arg; 2758fa9e4066Sahrens if (zio && zio->io_error) { 27593b2aab18SMatthew Ahrens VERIFY(arc_buf_remove_ref(buf, arg)); 2760fa9e4066Sahrens *bufp = NULL; 2761fa9e4066Sahrens } else { 2762fa9e4066Sahrens *bufp = buf; 27633f9d6ad7SLin Ling ASSERT(buf->b_data); 2764fa9e4066Sahrens } 2765fa9e4066Sahrens } 2766fa9e4066Sahrens 2767fa9e4066Sahrens static void 2768fa9e4066Sahrens arc_read_done(zio_t *zio) 2769fa9e4066Sahrens { 27705d7b4d43SMatthew Ahrens arc_buf_hdr_t *hdr; 2771fa9e4066Sahrens arc_buf_t *buf; 2772fa9e4066Sahrens arc_buf_t *abuf; /* buffer we're assigning to callback */ 27735d7b4d43SMatthew Ahrens kmutex_t *hash_lock = NULL; 2774fa9e4066Sahrens arc_callback_t *callback_list, *acb; 2775fa9e4066Sahrens int freeable = FALSE; 2776fa9e4066Sahrens 2777fa9e4066Sahrens buf = zio->io_private; 2778fa9e4066Sahrens hdr = buf->b_hdr; 2779fa9e4066Sahrens 2780bbf4a8dfSmaybee /* 2781bbf4a8dfSmaybee * The hdr was inserted into hash-table and removed from lists 2782bbf4a8dfSmaybee * prior to starting I/O. We should find this header, since 2783bbf4a8dfSmaybee * it's in the hash table, and it should be legit since it's 2784bbf4a8dfSmaybee * not possible to evict it during the I/O. The only possible 2785bbf4a8dfSmaybee * reason for it not to be found is if we were freed during the 2786bbf4a8dfSmaybee * read. 
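 * (Reads of embedded block pointers never insert the header into the
 * hash table at all, which is why the lookup below is guarded by
 * HDR_IN_HASH_TABLE().)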
2787bbf4a8dfSmaybee */ 27885d7b4d43SMatthew Ahrens if (HDR_IN_HASH_TABLE(hdr)) { 27895d7b4d43SMatthew Ahrens ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); 27905d7b4d43SMatthew Ahrens ASSERT3U(hdr->b_dva.dva_word[0], ==, 27915d7b4d43SMatthew Ahrens BP_IDENTITY(zio->io_bp)->dva_word[0]); 27925d7b4d43SMatthew Ahrens ASSERT3U(hdr->b_dva.dva_word[1], ==, 27935d7b4d43SMatthew Ahrens BP_IDENTITY(zio->io_bp)->dva_word[1]); 27945d7b4d43SMatthew Ahrens 27955d7b4d43SMatthew Ahrens arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp, 27965d7b4d43SMatthew Ahrens &hash_lock); 27975d7b4d43SMatthew Ahrens 27985d7b4d43SMatthew Ahrens ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && 27995d7b4d43SMatthew Ahrens hash_lock == NULL) || 28005d7b4d43SMatthew Ahrens (found == hdr && 28015d7b4d43SMatthew Ahrens DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 28025d7b4d43SMatthew Ahrens (found == hdr && HDR_L2_READING(hdr))); 28035d7b4d43SMatthew Ahrens } 2804fa94a07fSbrendan 2805*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_L2_EVICTED; 2806*7adb730bSGeorge Wilson if (l2arc_noprefetch && (hdr->b_flags & ARC_FLAG_PREFETCH)) 2807*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_L2CACHE; 2808fa9e4066Sahrens 2809fa9e4066Sahrens /* byteswap if necessary */ 2810fa9e4066Sahrens callback_list = hdr->b_acb; 2811fa9e4066Sahrens ASSERT(callback_list != NULL); 28128e0f0d3dSWilliam Gorrell if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 2813ad135b5dSChristopher Siden dmu_object_byteswap_t bswap = 2814ad135b5dSChristopher Siden DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 2815088f3894Sahrens arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 2816088f3894Sahrens byteswap_uint64_array : 2817ad135b5dSChristopher Siden dmu_ot_byteswap[bswap].ob_func; 2818088f3894Sahrens func(buf->b_data, hdr->b_size); 2819088f3894Sahrens } 2820fa9e4066Sahrens 2821fa94a07fSbrendan arc_cksum_compute(buf, B_FALSE); 2822cd1c8b85SMatthew Ahrens arc_buf_watch(buf); 28236b4acc8bSahrens 2824b24ab676SJeff Bonwick if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) { 2825b24ab676SJeff Bonwick /* 2826b24ab676SJeff Bonwick * Only call arc_access on anonymous buffers. This is because 2827b24ab676SJeff Bonwick * if we've issued an I/O for an evicted buffer, we've already 2828b24ab676SJeff Bonwick * called arc_access (to prevent any simultaneous readers from 2829b24ab676SJeff Bonwick * getting confused). 
2830b24ab676SJeff Bonwick */ 2831b24ab676SJeff Bonwick arc_access(hdr, hash_lock); 2832b24ab676SJeff Bonwick } 2833b24ab676SJeff Bonwick 2834fa9e4066Sahrens /* create copies of the data buffer for the callers */ 2835fa9e4066Sahrens abuf = buf; 2836fa9e4066Sahrens for (acb = callback_list; acb; acb = acb->acb_next) { 2837fa9e4066Sahrens if (acb->acb_done) { 28389253d63dSGeorge Wilson if (abuf == NULL) { 28399253d63dSGeorge Wilson ARCSTAT_BUMP(arcstat_duplicate_reads); 284044eda4d7Smaybee abuf = arc_buf_clone(buf); 28419253d63dSGeorge Wilson } 2842fa9e4066Sahrens acb->acb_buf = abuf; 2843fa9e4066Sahrens abuf = NULL; 2844fa9e4066Sahrens } 2845fa9e4066Sahrens } 2846fa9e4066Sahrens hdr->b_acb = NULL; 2847*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS; 2848ea8dc4b6Seschrock ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2849b24ab676SJeff Bonwick if (abuf == buf) { 2850b24ab676SJeff Bonwick ASSERT(buf->b_efunc == NULL); 2851b24ab676SJeff Bonwick ASSERT(hdr->b_datacnt == 1); 2852*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; 2853b24ab676SJeff Bonwick } 2854fa9e4066Sahrens 2855fa9e4066Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2856fa9e4066Sahrens 2857fa9e4066Sahrens if (zio->io_error != 0) { 2858*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_IO_ERROR; 285944cb6abcSbmc if (hdr->b_state != arc_anon) 286044cb6abcSbmc arc_change_state(arc_anon, hdr, hash_lock); 2861ea8dc4b6Seschrock if (HDR_IN_HASH_TABLE(hdr)) 2862ea8dc4b6Seschrock buf_hash_remove(hdr); 2863fa9e4066Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 2864fa9e4066Sahrens } 2865fa9e4066Sahrens 2866ea8dc4b6Seschrock /* 286713506d1eSmaybee * Broadcast before we drop the hash_lock to avoid the possibility 286813506d1eSmaybee * that the hdr (and hence the cv) might be freed before we get to 286913506d1eSmaybee * the cv_broadcast(). 2870ea8dc4b6Seschrock */ 2871ea8dc4b6Seschrock cv_broadcast(&hdr->b_cv); 2872ea8dc4b6Seschrock 2873bbf4a8dfSmaybee if (hash_lock) { 287444eda4d7Smaybee mutex_exit(hash_lock); 2875fa9e4066Sahrens } else { 2876fa9e4066Sahrens /* 2877fa9e4066Sahrens * This block was freed while we waited for the read to 2878fa9e4066Sahrens * complete. It has been removed from the hash table and 2879fa9e4066Sahrens * moved to the anonymous state (so that it won't show up 2880fa9e4066Sahrens * in the cache). 2881fa9e4066Sahrens */ 288244cb6abcSbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2883fa9e4066Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 2884fa9e4066Sahrens } 2885fa9e4066Sahrens 2886fa9e4066Sahrens /* execute each callback and free its structure */ 2887fa9e4066Sahrens while ((acb = callback_list) != NULL) { 2888fa9e4066Sahrens if (acb->acb_done) 2889fa9e4066Sahrens acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2890fa9e4066Sahrens 2891fa9e4066Sahrens if (acb->acb_zio_dummy != NULL) { 2892fa9e4066Sahrens acb->acb_zio_dummy->io_error = zio->io_error; 2893fa9e4066Sahrens zio_nowait(acb->acb_zio_dummy); 2894fa9e4066Sahrens } 2895fa9e4066Sahrens 2896fa9e4066Sahrens callback_list = acb->acb_next; 2897fa9e4066Sahrens kmem_free(acb, sizeof (arc_callback_t)); 2898fa9e4066Sahrens } 2899fa9e4066Sahrens 2900fa9e4066Sahrens if (freeable) 2901ea8dc4b6Seschrock arc_hdr_destroy(hdr); 2902fa9e4066Sahrens } 2903fa9e4066Sahrens 2904fa9e4066Sahrens /* 2905fc98fea5SBart Coddens * "Read" the block at the specified DVA (in bp) via the 2906fa9e4066Sahrens * cache. If the block is found in the cache, invoke the provided 2907fa9e4066Sahrens * callback immediately and return. 
Note that the `zio' parameter 2908fa9e4066Sahrens * in the callback will be NULL in this case, since no IO was 2909fa9e4066Sahrens * required. If the block is not in the cache pass the read request 2910fa9e4066Sahrens * on to the spa with a substitute callback function, so that the 2911fa9e4066Sahrens * requested block will be added to the cache. 2912fa9e4066Sahrens * 2913fa9e4066Sahrens * If a read request arrives for a block that has a read in-progress, 2914fa9e4066Sahrens * either wait for the in-progress read to complete (and return the 2915fa9e4066Sahrens * results); or, if this is a read with a "done" func, add a record 2916fa9e4066Sahrens * to the read to invoke the "done" func when the read completes, 2917fa9e4066Sahrens * and return; or just return. 2918fa9e4066Sahrens * 2919fa9e4066Sahrens * arc_read_done() will invoke all the requested "done" functions 2920fa9e4066Sahrens * for readers of this block. 2921fa9e4066Sahrens */ 2922fa9e4066Sahrens int 29231b912ec7SGeorge Wilson arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done, 2924*7adb730bSGeorge Wilson void *private, zio_priority_t priority, int zio_flags, 2925*7adb730bSGeorge Wilson arc_flags_t *arc_flags, const zbookmark_phys_t *zb) 2926fa9e4066Sahrens { 29275d7b4d43SMatthew Ahrens arc_buf_hdr_t *hdr = NULL; 2928d5285caeSGeorge Wilson arc_buf_t *buf = NULL; 29295d7b4d43SMatthew Ahrens kmutex_t *hash_lock = NULL; 2930fa94a07fSbrendan zio_t *rzio; 2931e9103aaeSGarrett D'Amore uint64_t guid = spa_load_guid(spa); 2932fa9e4066Sahrens 29335d7b4d43SMatthew Ahrens ASSERT(!BP_IS_EMBEDDED(bp) || 29345d7b4d43SMatthew Ahrens BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); 29355d7b4d43SMatthew Ahrens 2936fa9e4066Sahrens top: 29375d7b4d43SMatthew Ahrens if (!BP_IS_EMBEDDED(bp)) { 29385d7b4d43SMatthew Ahrens /* 29395d7b4d43SMatthew Ahrens * Embedded BP's have no DVA and require no I/O to "read". 29405d7b4d43SMatthew Ahrens * Create an anonymous arc buf to back it. 
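 * For a regular block pointer we look the DVA up in the hash table;
 * for an embedded BP, hdr stays NULL, so it always takes the miss
 * path below and the buffer remains anonymous.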
29415d7b4d43SMatthew Ahrens */ 29425d7b4d43SMatthew Ahrens hdr = buf_hash_find(guid, bp, &hash_lock); 29435d7b4d43SMatthew Ahrens } 29445d7b4d43SMatthew Ahrens 29455d7b4d43SMatthew Ahrens if (hdr != NULL && hdr->b_datacnt > 0) { 2946fa9e4066Sahrens 2947*7adb730bSGeorge Wilson *arc_flags |= ARC_FLAG_CACHED; 294813506d1eSmaybee 2949fa9e4066Sahrens if (HDR_IO_IN_PROGRESS(hdr)) { 295013506d1eSmaybee 2951*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_WAIT) { 295213506d1eSmaybee cv_wait(&hdr->b_cv, hash_lock); 295313506d1eSmaybee mutex_exit(hash_lock); 295413506d1eSmaybee goto top; 295513506d1eSmaybee } 2956*7adb730bSGeorge Wilson ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 295713506d1eSmaybee 295813506d1eSmaybee if (done) { 2959fa9e4066Sahrens arc_callback_t *acb = NULL; 2960fa9e4066Sahrens 2961fa9e4066Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), 2962fa9e4066Sahrens KM_SLEEP); 2963fa9e4066Sahrens acb->acb_done = done; 2964fa9e4066Sahrens acb->acb_private = private; 2965fa9e4066Sahrens if (pio != NULL) 2966fa9e4066Sahrens acb->acb_zio_dummy = zio_null(pio, 2967a3f829aeSBill Moore spa, NULL, NULL, NULL, zio_flags); 2968fa9e4066Sahrens 2969fa9e4066Sahrens ASSERT(acb->acb_done != NULL); 2970fa9e4066Sahrens acb->acb_next = hdr->b_acb; 2971fa9e4066Sahrens hdr->b_acb = acb; 2972fa9e4066Sahrens add_reference(hdr, hash_lock, private); 2973fa9e4066Sahrens mutex_exit(hash_lock); 2974fa9e4066Sahrens return (0); 2975fa9e4066Sahrens } 2976fa9e4066Sahrens mutex_exit(hash_lock); 2977fa9e4066Sahrens return (0); 2978fa9e4066Sahrens } 2979fa9e4066Sahrens 298044cb6abcSbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2981fa9e4066Sahrens 2982ea8dc4b6Seschrock if (done) { 298344eda4d7Smaybee add_reference(hdr, hash_lock, private); 2984ea8dc4b6Seschrock /* 2985ea8dc4b6Seschrock * If this block is already in use, create a new 2986ea8dc4b6Seschrock * copy of the data so that we will be guaranteed 2987ea8dc4b6Seschrock * that arc_release() will always succeed. 
2988ea8dc4b6Seschrock */ 2989fa9e4066Sahrens buf = hdr->b_buf; 2990ea8dc4b6Seschrock ASSERT(buf); 2991ea8dc4b6Seschrock ASSERT(buf->b_data); 299244eda4d7Smaybee if (HDR_BUF_AVAILABLE(hdr)) { 2993ea8dc4b6Seschrock ASSERT(buf->b_efunc == NULL); 2994*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE; 299544eda4d7Smaybee } else { 299644eda4d7Smaybee buf = arc_buf_clone(buf); 2997ea8dc4b6Seschrock } 2998b24ab676SJeff Bonwick 2999*7adb730bSGeorge Wilson } else if (*arc_flags & ARC_FLAG_PREFETCH && 300013506d1eSmaybee refcount_count(&hdr->b_refcnt) == 0) { 3001*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_PREFETCH; 3002fa9e4066Sahrens } 3003fa9e4066Sahrens DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 300444eda4d7Smaybee arc_access(hdr, hash_lock); 3005*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_L2CACHE) 3006*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2CACHE; 3007*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_L2COMPRESS) 3008*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2COMPRESS; 300944eda4d7Smaybee mutex_exit(hash_lock); 301044cb6abcSbmc ARCSTAT_BUMP(arcstat_hits); 3011*7adb730bSGeorge Wilson ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_FLAG_PREFETCH), 301244cb6abcSbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 301344cb6abcSbmc data, metadata, hits); 301444cb6abcSbmc 3015fa9e4066Sahrens if (done) 3016fa9e4066Sahrens done(NULL, buf, private); 3017fa9e4066Sahrens } else { 3018fa9e4066Sahrens uint64_t size = BP_GET_LSIZE(bp); 30195d7b4d43SMatthew Ahrens arc_callback_t *acb; 30203a737e0dSbrendan vdev_t *vd = NULL; 3021d5285caeSGeorge Wilson uint64_t addr = 0; 30225a98e54bSBrendan Gregg - Sun Microsystems boolean_t devw = B_FALSE; 302357815f6bSBoris Protopopov enum zio_compress b_compress = ZIO_COMPRESS_OFF; 302457815f6bSBoris Protopopov uint64_t b_asize = 0; 3025fa9e4066Sahrens 3026fa9e4066Sahrens if (hdr == NULL) { 3027fa9e4066Sahrens /* this block is not in the cache */ 30285d7b4d43SMatthew Ahrens arc_buf_hdr_t *exists = NULL; 3029ad23a2dbSjohansen arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 3030ad23a2dbSjohansen buf = arc_buf_alloc(spa, size, private, type); 3031fa9e4066Sahrens hdr = buf->b_hdr; 30325d7b4d43SMatthew Ahrens if (!BP_IS_EMBEDDED(bp)) { 30335d7b4d43SMatthew Ahrens hdr->b_dva = *BP_IDENTITY(bp); 30345d7b4d43SMatthew Ahrens hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 30355d7b4d43SMatthew Ahrens hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 30365d7b4d43SMatthew Ahrens exists = buf_hash_insert(hdr, &hash_lock); 30375d7b4d43SMatthew Ahrens } 30385d7b4d43SMatthew Ahrens if (exists != NULL) { 3039fa9e4066Sahrens /* somebody beat us to the hash insert */ 3040fa9e4066Sahrens mutex_exit(hash_lock); 30413f9d6ad7SLin Ling buf_discard_identity(hdr); 3042ea8dc4b6Seschrock (void) arc_buf_remove_ref(buf, private); 3043fa9e4066Sahrens goto top; /* restart the IO request */ 3044fa9e4066Sahrens } 3045*7adb730bSGeorge Wilson 304613506d1eSmaybee /* if this is a prefetch, we don't have a reference */ 3047*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_PREFETCH) { 304813506d1eSmaybee (void) remove_reference(hdr, hash_lock, 304913506d1eSmaybee private); 3050*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_PREFETCH; 305113506d1eSmaybee } 3052*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_L2CACHE) 3053*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2CACHE; 3054*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_L2COMPRESS) 3055*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2COMPRESS; 305613506d1eSmaybee if (BP_GET_LEVEL(bp) > 0) 3057*7adb730bSGeorge Wilson hdr->b_flags 
|= ARC_FLAG_INDIRECT; 3058fa9e4066Sahrens } else { 3059fa9e4066Sahrens /* this block is in the ghost cache */ 3060ea8dc4b6Seschrock ASSERT(GHOST_STATE(hdr->b_state)); 3061ea8dc4b6Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3062fb09f5aaSMadhav Suresh ASSERT0(refcount_count(&hdr->b_refcnt)); 3063ea8dc4b6Seschrock ASSERT(hdr->b_buf == NULL); 306413506d1eSmaybee 306513506d1eSmaybee /* if this is a prefetch, we don't have a reference */ 3066*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_PREFETCH) 3067*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_PREFETCH; 306813506d1eSmaybee else 306913506d1eSmaybee add_reference(hdr, hash_lock, private); 3070*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_L2CACHE) 3071*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2CACHE; 3072*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_L2COMPRESS) 3073*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2COMPRESS; 30741ab7f2deSmaybee buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 3075fa9e4066Sahrens buf->b_hdr = hdr; 307644eda4d7Smaybee buf->b_data = NULL; 3077ea8dc4b6Seschrock buf->b_efunc = NULL; 3078ea8dc4b6Seschrock buf->b_private = NULL; 3079fa9e4066Sahrens buf->b_next = NULL; 3080fa9e4066Sahrens hdr->b_buf = buf; 3081ea8dc4b6Seschrock ASSERT(hdr->b_datacnt == 0); 3082ea8dc4b6Seschrock hdr->b_datacnt = 1; 30835614b00aSWilliam Gorrell arc_get_data_buf(buf); 30847e453561SWilliam Gorrell arc_access(hdr, hash_lock); 3085fa9e4066Sahrens } 3086fa9e4066Sahrens 30875614b00aSWilliam Gorrell ASSERT(!GHOST_STATE(hdr->b_state)); 30885614b00aSWilliam Gorrell 3089fa9e4066Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 3090fa9e4066Sahrens acb->acb_done = done; 3091fa9e4066Sahrens acb->acb_private = private; 3092fa9e4066Sahrens 3093fa9e4066Sahrens ASSERT(hdr->b_acb == NULL); 3094fa9e4066Sahrens hdr->b_acb = acb; 3095*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS; 3096fa9e4066Sahrens 309757815f6bSBoris Protopopov if (hdr->b_l2hdr != NULL && 3098e14bb325SJeff Bonwick (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 30995a98e54bSBrendan Gregg - Sun Microsystems devw = hdr->b_l2hdr->b_dev->l2ad_writing; 31003a737e0dSbrendan addr = hdr->b_l2hdr->b_daddr; 310157815f6bSBoris Protopopov b_compress = hdr->b_l2hdr->b_compress; 310257815f6bSBoris Protopopov b_asize = hdr->b_l2hdr->b_asize; 3103e14bb325SJeff Bonwick /* 3104e14bb325SJeff Bonwick * Lock out device removal. 3105e14bb325SJeff Bonwick */ 3106e14bb325SJeff Bonwick if (vdev_is_dead(vd) || 3107e14bb325SJeff Bonwick !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 3108e14bb325SJeff Bonwick vd = NULL; 31093a737e0dSbrendan } 31103a737e0dSbrendan 31115d7b4d43SMatthew Ahrens if (hash_lock != NULL) 31125d7b4d43SMatthew Ahrens mutex_exit(hash_lock); 31133a737e0dSbrendan 31143e30c24aSWill Andrews /* 31153e30c24aSWill Andrews * At this point, we have a level 1 cache miss. Try again in 31163e30c24aSWill Andrews * L2ARC if possible. 
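 * If the L2ARC cannot service the request (see the conditions listed
 * below), we fall through to a normal zio_read() from the pool.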
31173e30c24aSWill Andrews */ 3118fa9e4066Sahrens ASSERT3U(hdr->b_size, ==, size); 31195c28183bSBrendan Gregg - Sun Microsystems DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 31207802d7bfSMatthew Ahrens uint64_t, size, zbookmark_phys_t *, zb); 312144cb6abcSbmc ARCSTAT_BUMP(arcstat_misses); 3122*7adb730bSGeorge Wilson ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_FLAG_PREFETCH), 312344cb6abcSbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 312444cb6abcSbmc data, metadata, misses); 3125ea8dc4b6Seschrock 31265a98e54bSBrendan Gregg - Sun Microsystems if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 3127fa94a07fSbrendan /* 3128fa94a07fSbrendan * Read from the L2ARC if the following are true: 31293a737e0dSbrendan * 1. The L2ARC vdev was previously cached. 31303a737e0dSbrendan * 2. This buffer still has L2ARC metadata. 31313a737e0dSbrendan * 3. This buffer isn't currently writing to the L2ARC. 31323a737e0dSbrendan * 4. The L2ARC entry wasn't evicted, which may 31333a737e0dSbrendan * also have invalidated the vdev. 31345a98e54bSBrendan Gregg - Sun Microsystems * 5. This isn't prefetch and l2arc_noprefetch is set. 3135fa94a07fSbrendan */ 3136e14bb325SJeff Bonwick if (hdr->b_l2hdr != NULL && 31375a98e54bSBrendan Gregg - Sun Microsystems !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 31385a98e54bSBrendan Gregg - Sun Microsystems !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 3139fa94a07fSbrendan l2arc_read_callback_t *cb; 3140fa94a07fSbrendan 3141c5904d13Seschrock DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 3142c5904d13Seschrock ARCSTAT_BUMP(arcstat_l2_hits); 3143c5904d13Seschrock 3144fa94a07fSbrendan cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 3145fa94a07fSbrendan KM_SLEEP); 3146fa94a07fSbrendan cb->l2rcb_buf = buf; 3147fa94a07fSbrendan cb->l2rcb_spa = spa; 3148fa94a07fSbrendan cb->l2rcb_bp = *bp; 3149fa94a07fSbrendan cb->l2rcb_zb = *zb; 31503baa08fcSek cb->l2rcb_flags = zio_flags; 315157815f6bSBoris Protopopov cb->l2rcb_compress = b_compress; 3152fa94a07fSbrendan 3153d5285caeSGeorge Wilson ASSERT(addr >= VDEV_LABEL_START_SIZE && 3154d5285caeSGeorge Wilson addr + size < vd->vdev_psize - 3155d5285caeSGeorge Wilson VDEV_LABEL_END_SIZE); 3156d5285caeSGeorge Wilson 3157fa94a07fSbrendan /* 3158e14bb325SJeff Bonwick * l2arc read. The SCL_L2ARC lock will be 3159e14bb325SJeff Bonwick * released by l2arc_read_done(). 3160aad02571SSaso Kiselkov * Issue a null zio if the underlying buffer 3161aad02571SSaso Kiselkov * was squashed to zero size by compression. 
3162fa94a07fSbrendan */ 316357815f6bSBoris Protopopov if (b_compress == ZIO_COMPRESS_EMPTY) { 3164aad02571SSaso Kiselkov rzio = zio_null(pio, spa, vd, 3165aad02571SSaso Kiselkov l2arc_read_done, cb, 3166aad02571SSaso Kiselkov zio_flags | ZIO_FLAG_DONT_CACHE | 3167aad02571SSaso Kiselkov ZIO_FLAG_CANFAIL | 3168aad02571SSaso Kiselkov ZIO_FLAG_DONT_PROPAGATE | 3169aad02571SSaso Kiselkov ZIO_FLAG_DONT_RETRY); 3170aad02571SSaso Kiselkov } else { 3171aad02571SSaso Kiselkov rzio = zio_read_phys(pio, vd, addr, 317257815f6bSBoris Protopopov b_asize, buf->b_data, 317357815f6bSBoris Protopopov ZIO_CHECKSUM_OFF, 3174aad02571SSaso Kiselkov l2arc_read_done, cb, priority, 3175aad02571SSaso Kiselkov zio_flags | ZIO_FLAG_DONT_CACHE | 3176aad02571SSaso Kiselkov ZIO_FLAG_CANFAIL | 3177aad02571SSaso Kiselkov ZIO_FLAG_DONT_PROPAGATE | 3178aad02571SSaso Kiselkov ZIO_FLAG_DONT_RETRY, B_FALSE); 3179aad02571SSaso Kiselkov } 3180fa94a07fSbrendan DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 3181fa94a07fSbrendan zio_t *, rzio); 318257815f6bSBoris Protopopov ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize); 3183fa94a07fSbrendan 3184*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_NOWAIT) { 31853a737e0dSbrendan zio_nowait(rzio); 31863a737e0dSbrendan return (0); 31873a737e0dSbrendan } 3188fa94a07fSbrendan 3189*7adb730bSGeorge Wilson ASSERT(*arc_flags & ARC_FLAG_WAIT); 31903a737e0dSbrendan if (zio_wait(rzio) == 0) 31913a737e0dSbrendan return (0); 31923a737e0dSbrendan 31933a737e0dSbrendan /* l2arc read error; goto zio_read() */ 3194fa94a07fSbrendan } else { 3195fa94a07fSbrendan DTRACE_PROBE1(l2arc__miss, 3196fa94a07fSbrendan arc_buf_hdr_t *, hdr); 3197fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_misses); 3198fa94a07fSbrendan if (HDR_L2_WRITING(hdr)) 3199fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_rw_clash); 3200e14bb325SJeff Bonwick spa_config_exit(spa, SCL_L2ARC, vd); 3201fa94a07fSbrendan } 32025a98e54bSBrendan Gregg - Sun Microsystems } else { 320376a25fafSBill Moore if (vd != NULL) 320476a25fafSBill Moore spa_config_exit(spa, SCL_L2ARC, vd); 32055a98e54bSBrendan Gregg - Sun Microsystems if (l2arc_ndev != 0) { 32065a98e54bSBrendan Gregg - Sun Microsystems DTRACE_PROBE1(l2arc__miss, 32075a98e54bSBrendan Gregg - Sun Microsystems arc_buf_hdr_t *, hdr); 32085a98e54bSBrendan Gregg - Sun Microsystems ARCSTAT_BUMP(arcstat_l2_misses); 32095a98e54bSBrendan Gregg - Sun Microsystems } 3210fa94a07fSbrendan } 3211c5904d13Seschrock 3212fa9e4066Sahrens rzio = zio_read(pio, spa, bp, buf->b_data, size, 32133baa08fcSek arc_read_done, buf, priority, zio_flags, zb); 3214fa9e4066Sahrens 3215*7adb730bSGeorge Wilson if (*arc_flags & ARC_FLAG_WAIT) 3216fa9e4066Sahrens return (zio_wait(rzio)); 3217fa9e4066Sahrens 3218*7adb730bSGeorge Wilson ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 3219fa9e4066Sahrens zio_nowait(rzio); 3220fa9e4066Sahrens } 3221fa9e4066Sahrens return (0); 3222fa9e4066Sahrens } 3223fa9e4066Sahrens 3224ea8dc4b6Seschrock void 3225ea8dc4b6Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 3226ea8dc4b6Seschrock { 3227ea8dc4b6Seschrock ASSERT(buf->b_hdr != NULL); 322844cb6abcSbmc ASSERT(buf->b_hdr->b_state != arc_anon); 3229ea8dc4b6Seschrock ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 3230b24ab676SJeff Bonwick ASSERT(buf->b_efunc == NULL); 3231b24ab676SJeff Bonwick ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 3232b24ab676SJeff Bonwick 3233ea8dc4b6Seschrock buf->b_efunc = func; 3234ea8dc4b6Seschrock buf->b_private = private; 3235ea8dc4b6Seschrock } 3236ea8dc4b6Seschrock 32376e6d5868SMatthew Ahrens /* 
32386e6d5868SMatthew Ahrens * Notify the arc that a block was freed, and thus will never be used again. 32396e6d5868SMatthew Ahrens */ 32406e6d5868SMatthew Ahrens void 32416e6d5868SMatthew Ahrens arc_freed(spa_t *spa, const blkptr_t *bp) 32426e6d5868SMatthew Ahrens { 32436e6d5868SMatthew Ahrens arc_buf_hdr_t *hdr; 32446e6d5868SMatthew Ahrens kmutex_t *hash_lock; 32456e6d5868SMatthew Ahrens uint64_t guid = spa_load_guid(spa); 32466e6d5868SMatthew Ahrens 32475d7b4d43SMatthew Ahrens ASSERT(!BP_IS_EMBEDDED(bp)); 32485d7b4d43SMatthew Ahrens 32495d7b4d43SMatthew Ahrens hdr = buf_hash_find(guid, bp, &hash_lock); 32506e6d5868SMatthew Ahrens if (hdr == NULL) 32516e6d5868SMatthew Ahrens return; 32526e6d5868SMatthew Ahrens if (HDR_BUF_AVAILABLE(hdr)) { 32536e6d5868SMatthew Ahrens arc_buf_t *buf = hdr->b_buf; 32546e6d5868SMatthew Ahrens add_reference(hdr, hash_lock, FTAG); 3255*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE; 32566e6d5868SMatthew Ahrens mutex_exit(hash_lock); 32576e6d5868SMatthew Ahrens 32586e6d5868SMatthew Ahrens arc_release(buf, FTAG); 32596e6d5868SMatthew Ahrens (void) arc_buf_remove_ref(buf, FTAG); 32606e6d5868SMatthew Ahrens } else { 32616e6d5868SMatthew Ahrens mutex_exit(hash_lock); 32626e6d5868SMatthew Ahrens } 32636e6d5868SMatthew Ahrens 32646e6d5868SMatthew Ahrens } 32656e6d5868SMatthew Ahrens 3266ea8dc4b6Seschrock /* 3267bbfa8ea8SMatthew Ahrens * Clear the user eviction callback set by arc_set_callback(), first calling 3268bbfa8ea8SMatthew Ahrens * it if it exists. Because the presence of a callback keeps an arc_buf cached 3269bbfa8ea8SMatthew Ahrens * clearing the callback may result in the arc_buf being destroyed. However, 3270bbfa8ea8SMatthew Ahrens * it will not result in the *last* arc_buf being destroyed, hence the data 3271bbfa8ea8SMatthew Ahrens * will remain cached in the ARC. We make a copy of the arc buffer here so 3272bbfa8ea8SMatthew Ahrens * that we can process the callback without holding any locks. 3273bbfa8ea8SMatthew Ahrens * 3274bbfa8ea8SMatthew Ahrens * It's possible that the callback is already in the process of being cleared 3275bbfa8ea8SMatthew Ahrens * by another thread. In this case we can not clear the callback. 3276bbfa8ea8SMatthew Ahrens * 3277bbfa8ea8SMatthew Ahrens * Returns B_TRUE if the callback was successfully called and cleared. 3278ea8dc4b6Seschrock */ 3279bbfa8ea8SMatthew Ahrens boolean_t 3280bbfa8ea8SMatthew Ahrens arc_clear_callback(arc_buf_t *buf) 3281ea8dc4b6Seschrock { 328240d7d650Smaybee arc_buf_hdr_t *hdr; 3283ea8dc4b6Seschrock kmutex_t *hash_lock; 3284bbfa8ea8SMatthew Ahrens arc_evict_func_t *efunc = buf->b_efunc; 3285bbfa8ea8SMatthew Ahrens void *private = buf->b_private; 3286ea8dc4b6Seschrock 32873f9d6ad7SLin Ling mutex_enter(&buf->b_evict_lock); 328840d7d650Smaybee hdr = buf->b_hdr; 3289ea8dc4b6Seschrock if (hdr == NULL) { 3290ea8dc4b6Seschrock /* 3291ea8dc4b6Seschrock * We are in arc_do_user_evicts(). 3292ea8dc4b6Seschrock */ 3293ea8dc4b6Seschrock ASSERT(buf->b_data == NULL); 32943f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 3295bbfa8ea8SMatthew Ahrens return (B_FALSE); 32966f83844dSMark Maybee } else if (buf->b_data == NULL) { 32979b23f181Smaybee /* 32986f83844dSMark Maybee * We are on the eviction list; process this buffer now 32996f83844dSMark Maybee * but let arc_do_user_evicts() do the reaping. 
33009b23f181Smaybee */ 33016f83844dSMark Maybee buf->b_efunc = NULL; 33023f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 3303bbfa8ea8SMatthew Ahrens VERIFY0(efunc(private)); 3304bbfa8ea8SMatthew Ahrens return (B_TRUE); 33059b23f181Smaybee } 33066f83844dSMark Maybee hash_lock = HDR_LOCK(hdr); 33076f83844dSMark Maybee mutex_enter(hash_lock); 33083f9d6ad7SLin Ling hdr = buf->b_hdr; 33093f9d6ad7SLin Ling ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 33109b23f181Smaybee 33119b23f181Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 331244cb6abcSbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3313ea8dc4b6Seschrock 3314bbfa8ea8SMatthew Ahrens buf->b_efunc = NULL; 3315bbfa8ea8SMatthew Ahrens buf->b_private = NULL; 3316ea8dc4b6Seschrock 3317bbfa8ea8SMatthew Ahrens if (hdr->b_datacnt > 1) { 3318bbfa8ea8SMatthew Ahrens mutex_exit(&buf->b_evict_lock); 3319bbfa8ea8SMatthew Ahrens arc_buf_destroy(buf, FALSE, TRUE); 3320bbfa8ea8SMatthew Ahrens } else { 3321bbfa8ea8SMatthew Ahrens ASSERT(buf == hdr->b_buf); 3322*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; 3323bbfa8ea8SMatthew Ahrens mutex_exit(&buf->b_evict_lock); 3324ea8dc4b6Seschrock } 3325dd6ef538Smaybee 3326bbfa8ea8SMatthew Ahrens mutex_exit(hash_lock); 3327bbfa8ea8SMatthew Ahrens VERIFY0(efunc(private)); 3328bbfa8ea8SMatthew Ahrens return (B_TRUE); 3329ea8dc4b6Seschrock } 3330ea8dc4b6Seschrock 3331fa9e4066Sahrens /* 33323e30c24aSWill Andrews * Release this buffer from the cache, making it an anonymous buffer. This 33333e30c24aSWill Andrews * must be done after a read and prior to modifying the buffer contents. 3334fa9e4066Sahrens * If the buffer has more than one reference, we must make 3335088f3894Sahrens * a new hdr for the buffer. 3336fa9e4066Sahrens */ 3337fa9e4066Sahrens void 3338fa9e4066Sahrens arc_release(arc_buf_t *buf, void *tag) 3339fa9e4066Sahrens { 33406f83844dSMark Maybee arc_buf_hdr_t *hdr; 33413f9d6ad7SLin Ling kmutex_t *hash_lock = NULL; 33426f83844dSMark Maybee l2arc_buf_hdr_t *l2hdr; 3343fa94a07fSbrendan uint64_t buf_size; 3344fa9e4066Sahrens 33453f9d6ad7SLin Ling /* 33463f9d6ad7SLin Ling * It would be nice to assert that if it's DMU metadata (level > 33473f9d6ad7SLin Ling * 0 || it's the dnode file), then it must be syncing context. 33483f9d6ad7SLin Ling * But we don't know that information at this level. 
33493f9d6ad7SLin Ling */ 33503f9d6ad7SLin Ling 33513f9d6ad7SLin Ling mutex_enter(&buf->b_evict_lock); 33526f83844dSMark Maybee hdr = buf->b_hdr; 33536f83844dSMark Maybee 3354fa9e4066Sahrens /* this buffer is not on any list */ 3355fa9e4066Sahrens ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3356fa9e4066Sahrens 335744cb6abcSbmc if (hdr->b_state == arc_anon) { 3358fa9e4066Sahrens /* this buffer is already released */ 3359ea8dc4b6Seschrock ASSERT(buf->b_efunc == NULL); 33600a95608cSBrendan Gregg - Sun Microsystems } else { 33610a95608cSBrendan Gregg - Sun Microsystems hash_lock = HDR_LOCK(hdr); 33620a95608cSBrendan Gregg - Sun Microsystems mutex_enter(hash_lock); 33633f9d6ad7SLin Ling hdr = buf->b_hdr; 33643f9d6ad7SLin Ling ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3365fa9e4066Sahrens } 3366fa9e4066Sahrens 33676f83844dSMark Maybee l2hdr = hdr->b_l2hdr; 33686f83844dSMark Maybee if (l2hdr) { 33696f83844dSMark Maybee mutex_enter(&l2arc_buflist_mtx); 33706f83844dSMark Maybee hdr->b_l2hdr = NULL; 3371ccc22e13SBoris Protopopov list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 33726f83844dSMark Maybee } 3373d5285caeSGeorge Wilson buf_size = hdr->b_size; 33746f83844dSMark Maybee 3375ea8dc4b6Seschrock /* 3376ea8dc4b6Seschrock * Do we have more than one buf? 3377ea8dc4b6Seschrock */ 33786f83844dSMark Maybee if (hdr->b_datacnt > 1) { 3379fa9e4066Sahrens arc_buf_hdr_t *nhdr; 3380fa9e4066Sahrens arc_buf_t **bufp; 3381fa9e4066Sahrens uint64_t blksz = hdr->b_size; 3382ac05c741SMark Maybee uint64_t spa = hdr->b_spa; 3383ad23a2dbSjohansen arc_buf_contents_t type = hdr->b_type; 3384fa94a07fSbrendan uint32_t flags = hdr->b_flags; 3385fa9e4066Sahrens 33866f83844dSMark Maybee ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3387fa9e4066Sahrens /* 33883f9d6ad7SLin Ling * Pull the data off of this hdr and attach it to 33893f9d6ad7SLin Ling * a new anonymous hdr. 3390fa9e4066Sahrens */ 3391ea8dc4b6Seschrock (void) remove_reference(hdr, hash_lock, tag); 3392fa9e4066Sahrens bufp = &hdr->b_buf; 3393ea8dc4b6Seschrock while (*bufp != buf) 3394fa9e4066Sahrens bufp = &(*bufp)->b_next; 33953f9d6ad7SLin Ling *bufp = buf->b_next; 3396af2c4821Smaybee buf->b_next = NULL; 3397ea8dc4b6Seschrock 339844cb6abcSbmc ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 339944cb6abcSbmc atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3400ea8dc4b6Seschrock if (refcount_is_zero(&hdr->b_refcnt)) { 34010e8c6158Smaybee uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 34020e8c6158Smaybee ASSERT3U(*size, >=, hdr->b_size); 34030e8c6158Smaybee atomic_add_64(size, -hdr->b_size); 3404ea8dc4b6Seschrock } 34059253d63dSGeorge Wilson 34069253d63dSGeorge Wilson /* 34079253d63dSGeorge Wilson * We're releasing a duplicate user data buffer, update 34089253d63dSGeorge Wilson * our statistics accordingly. 
34099253d63dSGeorge Wilson */ 34109253d63dSGeorge Wilson if (hdr->b_type == ARC_BUFC_DATA) { 34119253d63dSGeorge Wilson ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); 34129253d63dSGeorge Wilson ARCSTAT_INCR(arcstat_duplicate_buffers_size, 34139253d63dSGeorge Wilson -hdr->b_size); 34149253d63dSGeorge Wilson } 3415ea8dc4b6Seschrock hdr->b_datacnt -= 1; 3416c717a561Smaybee arc_cksum_verify(buf); 3417cd1c8b85SMatthew Ahrens arc_buf_unwatch(buf); 3418ea8dc4b6Seschrock 3419fa9e4066Sahrens mutex_exit(hash_lock); 3420fa9e4066Sahrens 34211ab7f2deSmaybee nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3422fa9e4066Sahrens nhdr->b_size = blksz; 3423fa9e4066Sahrens nhdr->b_spa = spa; 3424ad23a2dbSjohansen nhdr->b_type = type; 3425fa9e4066Sahrens nhdr->b_buf = buf; 342644cb6abcSbmc nhdr->b_state = arc_anon; 3427fa9e4066Sahrens nhdr->b_arc_access = 0; 3428*7adb730bSGeorge Wilson nhdr->b_flags = flags & ARC_FLAG_L2_WRITING; 3429fa94a07fSbrendan nhdr->b_l2hdr = NULL; 3430ea8dc4b6Seschrock nhdr->b_datacnt = 1; 3431c717a561Smaybee nhdr->b_freeze_cksum = NULL; 3432fa9e4066Sahrens (void) refcount_add(&nhdr->b_refcnt, tag); 3433af2c4821Smaybee buf->b_hdr = nhdr; 34343f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 343544cb6abcSbmc atomic_add_64(&arc_anon->arcs_size, blksz); 3436fa9e4066Sahrens } else { 34373f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 3438ea8dc4b6Seschrock ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3439fa9e4066Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 3440fa9e4066Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 34413f9d6ad7SLin Ling if (hdr->b_state != arc_anon) 34423f9d6ad7SLin Ling arc_change_state(arc_anon, hdr, hash_lock); 3443fa9e4066Sahrens hdr->b_arc_access = 0; 34443f9d6ad7SLin Ling if (hash_lock) 34453f9d6ad7SLin Ling mutex_exit(hash_lock); 3446fa94a07fSbrendan 34473f9d6ad7SLin Ling buf_discard_identity(hdr); 3448c717a561Smaybee arc_buf_thaw(buf); 3449fa9e4066Sahrens } 3450ea8dc4b6Seschrock buf->b_efunc = NULL; 3451ea8dc4b6Seschrock buf->b_private = NULL; 3452fa94a07fSbrendan 3453fa94a07fSbrendan if (l2hdr) { 3454aad02571SSaso Kiselkov ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize); 34553038a2b4SSaso Kiselkov vdev_space_update(l2hdr->b_dev->l2ad_vdev, 34563038a2b4SSaso Kiselkov -l2hdr->b_asize, 0, 0); 3457fa94a07fSbrendan kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3458fa94a07fSbrendan ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3459fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 34606f83844dSMark Maybee } 3461fa9e4066Sahrens } 3462fa9e4066Sahrens 3463fa9e4066Sahrens int 3464fa9e4066Sahrens arc_released(arc_buf_t *buf) 3465fa9e4066Sahrens { 34666f83844dSMark Maybee int released; 34676f83844dSMark Maybee 34683f9d6ad7SLin Ling mutex_enter(&buf->b_evict_lock); 34696f83844dSMark Maybee released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 34703f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 34716f83844dSMark Maybee return (released); 3472ea8dc4b6Seschrock } 3473ea8dc4b6Seschrock 3474ea8dc4b6Seschrock #ifdef ZFS_DEBUG 3475ea8dc4b6Seschrock int 3476ea8dc4b6Seschrock arc_referenced(arc_buf_t *buf) 3477ea8dc4b6Seschrock { 34786f83844dSMark Maybee int referenced; 34796f83844dSMark Maybee 34803f9d6ad7SLin Ling mutex_enter(&buf->b_evict_lock); 34816f83844dSMark Maybee referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 34823f9d6ad7SLin Ling mutex_exit(&buf->b_evict_lock); 34836f83844dSMark Maybee return (referenced); 3484ea8dc4b6Seschrock } 3485ea8dc4b6Seschrock #endif 3486ea8dc4b6Seschrock 3487c717a561Smaybee static void 3488c717a561Smaybee arc_write_ready(zio_t *zio) 
3489c717a561Smaybee { 3490c717a561Smaybee arc_write_callback_t *callback = zio->io_private; 3491c717a561Smaybee arc_buf_t *buf = callback->awcb_buf; 34920a4e9518Sgw arc_buf_hdr_t *hdr = buf->b_hdr; 3493c717a561Smaybee 3494e14bb325SJeff Bonwick ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3495e14bb325SJeff Bonwick callback->awcb_ready(zio, buf, callback->awcb_private); 3496e14bb325SJeff Bonwick 34970a4e9518Sgw /* 34980a4e9518Sgw * If the IO is already in progress, then this is a re-write 3499e14bb325SJeff Bonwick * attempt, so we need to thaw and re-compute the cksum. 3500e14bb325SJeff Bonwick * It is the responsibility of the callback to handle the 3501e14bb325SJeff Bonwick * accounting for any re-write attempt. 35020a4e9518Sgw */ 35030a4e9518Sgw if (HDR_IO_IN_PROGRESS(hdr)) { 35040a4e9518Sgw mutex_enter(&hdr->b_freeze_lock); 35050a4e9518Sgw if (hdr->b_freeze_cksum != NULL) { 35060a4e9518Sgw kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 35070a4e9518Sgw hdr->b_freeze_cksum = NULL; 35080a4e9518Sgw } 35090a4e9518Sgw mutex_exit(&hdr->b_freeze_lock); 35100a4e9518Sgw } 3511fa94a07fSbrendan arc_cksum_compute(buf, B_FALSE); 3512*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS; 3513c717a561Smaybee } 3514c717a561Smaybee 351569962b56SMatthew Ahrens /* 351669962b56SMatthew Ahrens * The SPA calls this callback for each physical write that happens on behalf 351769962b56SMatthew Ahrens * of a logical write. See the comment in dbuf_write_physdone() for details. 351869962b56SMatthew Ahrens */ 351969962b56SMatthew Ahrens static void 352069962b56SMatthew Ahrens arc_write_physdone(zio_t *zio) 352169962b56SMatthew Ahrens { 352269962b56SMatthew Ahrens arc_write_callback_t *cb = zio->io_private; 352369962b56SMatthew Ahrens if (cb->awcb_physdone != NULL) 352469962b56SMatthew Ahrens cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); 352569962b56SMatthew Ahrens } 352669962b56SMatthew Ahrens 3527fa9e4066Sahrens static void 3528fa9e4066Sahrens arc_write_done(zio_t *zio) 3529fa9e4066Sahrens { 3530c717a561Smaybee arc_write_callback_t *callback = zio->io_private; 3531c717a561Smaybee arc_buf_t *buf = callback->awcb_buf; 3532c717a561Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 3533fa9e4066Sahrens 3534b24ab676SJeff Bonwick ASSERT(hdr->b_acb == NULL); 3535b24ab676SJeff Bonwick 3536b24ab676SJeff Bonwick if (zio->io_error == 0) { 35375d7b4d43SMatthew Ahrens if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 353843466aaeSMax Grossman buf_discard_identity(hdr); 353943466aaeSMax Grossman } else { 354043466aaeSMax Grossman hdr->b_dva = *BP_IDENTITY(zio->io_bp); 354143466aaeSMax Grossman hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 354243466aaeSMax Grossman hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 354343466aaeSMax Grossman } 3544b24ab676SJeff Bonwick } else { 3545b24ab676SJeff Bonwick ASSERT(BUF_EMPTY(hdr)); 3546b24ab676SJeff Bonwick } 3547fa9e4066Sahrens 3548ea8dc4b6Seschrock /* 35495d7b4d43SMatthew Ahrens * If the block to be written was all-zero or compressed enough to be 35505d7b4d43SMatthew Ahrens * embedded in the BP, no write was performed so there will be no 35515d7b4d43SMatthew Ahrens * dva/birth/checksum. The buffer must therefore remain anonymous 35525d7b4d43SMatthew Ahrens * (and uncached). 
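 * (In the earlier check, BP_IS_HOLE() corresponds to the all-zero case
 * and BP_IS_EMBEDDED() to blocks small enough to be stored entirely
 * inside the block pointer.)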
3553ea8dc4b6Seschrock */ 3554fa9e4066Sahrens if (!BUF_EMPTY(hdr)) { 3555fa9e4066Sahrens arc_buf_hdr_t *exists; 3556fa9e4066Sahrens kmutex_t *hash_lock; 3557fa9e4066Sahrens 3558b24ab676SJeff Bonwick ASSERT(zio->io_error == 0); 3559b24ab676SJeff Bonwick 35606b4acc8bSahrens arc_cksum_verify(buf); 35616b4acc8bSahrens 3562fa9e4066Sahrens exists = buf_hash_insert(hdr, &hash_lock); 3563fa9e4066Sahrens if (exists) { 3564fa9e4066Sahrens /* 3565fa9e4066Sahrens * This can only happen if we overwrite for 3566fa9e4066Sahrens * sync-to-convergence, because we remove 3567fa9e4066Sahrens * buffers from the hash table when we arc_free(). 3568fa9e4066Sahrens */ 3569b24ab676SJeff Bonwick if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 3570b24ab676SJeff Bonwick if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3571b24ab676SJeff Bonwick panic("bad overwrite, hdr=%p exists=%p", 3572b24ab676SJeff Bonwick (void *)hdr, (void *)exists); 3573b24ab676SJeff Bonwick ASSERT(refcount_is_zero(&exists->b_refcnt)); 3574b24ab676SJeff Bonwick arc_change_state(arc_anon, exists, hash_lock); 3575b24ab676SJeff Bonwick mutex_exit(hash_lock); 3576b24ab676SJeff Bonwick arc_hdr_destroy(exists); 3577b24ab676SJeff Bonwick exists = buf_hash_insert(hdr, &hash_lock); 3578b24ab676SJeff Bonwick ASSERT3P(exists, ==, NULL); 357980901aeaSGeorge Wilson } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { 358080901aeaSGeorge Wilson /* nopwrite */ 358180901aeaSGeorge Wilson ASSERT(zio->io_prop.zp_nopwrite); 358280901aeaSGeorge Wilson if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 358380901aeaSGeorge Wilson panic("bad nopwrite, hdr=%p exists=%p", 358480901aeaSGeorge Wilson (void *)hdr, (void *)exists); 3585b24ab676SJeff Bonwick } else { 3586b24ab676SJeff Bonwick /* Dedup */ 3587b24ab676SJeff Bonwick ASSERT(hdr->b_datacnt == 1); 3588b24ab676SJeff Bonwick ASSERT(hdr->b_state == arc_anon); 3589b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(zio->io_bp)); 3590b24ab676SJeff Bonwick ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 3591ae46e4c7SMatthew Ahrens } 3592fa9e4066Sahrens } 3593*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS; 3594088f3894Sahrens /* if it's not anon, we are doing a scrub */ 3595b24ab676SJeff Bonwick if (!exists && hdr->b_state == arc_anon) 3596088f3894Sahrens arc_access(hdr, hash_lock); 359744eda4d7Smaybee mutex_exit(hash_lock); 3598ea8dc4b6Seschrock } else { 3599*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS; 3600fa9e4066Sahrens } 3601ea8dc4b6Seschrock 3602b24ab676SJeff Bonwick ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3603b24ab676SJeff Bonwick callback->awcb_done(zio, buf, callback->awcb_private); 3604fa9e4066Sahrens 3605c717a561Smaybee kmem_free(callback, sizeof (arc_write_callback_t)); 3606fa9e4066Sahrens } 3607fa9e4066Sahrens 3608c717a561Smaybee zio_t * 3609b24ab676SJeff Bonwick arc_write(zio_t *pio, spa_t *spa, uint64_t txg, 3610aad02571SSaso Kiselkov blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress, 361169962b56SMatthew Ahrens const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone, 361269962b56SMatthew Ahrens arc_done_func_t *done, void *private, zio_priority_t priority, 36137802d7bfSMatthew Ahrens int zio_flags, const zbookmark_phys_t *zb) 3614fa9e4066Sahrens { 3615fa9e4066Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 3616c717a561Smaybee arc_write_callback_t *callback; 3617e14bb325SJeff Bonwick zio_t *zio; 3618fa9e4066Sahrens 3619e14bb325SJeff Bonwick ASSERT(ready != NULL); 3620b24ab676SJeff Bonwick ASSERT(done != NULL); 3621fa9e4066Sahrens ASSERT(!HDR_IO_ERROR(hdr)); 
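	/*
	 * Sanity checks: the header must not already have an I/O in
	 * progress or a pending callback before we build the write zio
	 * for it.
	 */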
3622*7adb730bSGeorge Wilson ASSERT((hdr->b_flags & ARC_FLAG_IO_IN_PROGRESS) == 0); 3623b24ab676SJeff Bonwick ASSERT(hdr->b_acb == NULL); 36243baa08fcSek if (l2arc) 3625*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2CACHE; 3626aad02571SSaso Kiselkov if (l2arc_compress) 3627*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2COMPRESS; 3628c717a561Smaybee callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3629c717a561Smaybee callback->awcb_ready = ready; 363069962b56SMatthew Ahrens callback->awcb_physdone = physdone; 3631c717a561Smaybee callback->awcb_done = done; 3632c717a561Smaybee callback->awcb_private = private; 3633c717a561Smaybee callback->awcb_buf = buf; 3634088f3894Sahrens 3635b24ab676SJeff Bonwick zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, 363669962b56SMatthew Ahrens arc_write_ready, arc_write_physdone, arc_write_done, callback, 363769962b56SMatthew Ahrens priority, zio_flags, zb); 3638fa9e4066Sahrens 3639c717a561Smaybee return (zio); 3640fa9e4066Sahrens } 3641fa9e4066Sahrens 36421ab7f2deSmaybee static int 364369962b56SMatthew Ahrens arc_memory_throttle(uint64_t reserve, uint64_t txg) 36441ab7f2deSmaybee { 36451ab7f2deSmaybee #ifdef _KERNEL 36461ab7f2deSmaybee uint64_t available_memory = ptob(freemem); 36471ab7f2deSmaybee static uint64_t page_load = 0; 36481ab7f2deSmaybee static uint64_t last_txg = 0; 36491ab7f2deSmaybee 36501ab7f2deSmaybee #if defined(__i386) 36511ab7f2deSmaybee available_memory = 36521ab7f2deSmaybee MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 36531ab7f2deSmaybee #endif 365469962b56SMatthew Ahrens 365569962b56SMatthew Ahrens if (freemem > physmem * arc_lotsfree_percent / 100) 36561ab7f2deSmaybee return (0); 36571ab7f2deSmaybee 36581ab7f2deSmaybee if (txg > last_txg) { 36591ab7f2deSmaybee last_txg = txg; 36601ab7f2deSmaybee page_load = 0; 36611ab7f2deSmaybee } 36621ab7f2deSmaybee /* 36631ab7f2deSmaybee * If we are in pageout, we know that memory is already tight, 36641ab7f2deSmaybee * the arc is already going to be evicting, so we just want to 36651ab7f2deSmaybee * continue to let page writes occur as quickly as possible. 
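 * Concretely, each such reservation adds reserve/8 to a per-txg
 * page_load counter; once page_load exceeds a quarter of
 * MAX(ptob(minfree), available_memory), further reservations are
 * turned away with ERESTART until a new txg resets the counter.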
36661ab7f2deSmaybee */ 36671ab7f2deSmaybee if (curproc == proc_pageout) { 36681ab7f2deSmaybee if (page_load > MAX(ptob(minfree), available_memory) / 4) 3669be6fd75aSMatthew Ahrens return (SET_ERROR(ERESTART)); 36701ab7f2deSmaybee /* Note: reserve is inflated, so we deflate */ 36711ab7f2deSmaybee page_load += reserve / 8; 36721ab7f2deSmaybee return (0); 36731ab7f2deSmaybee } else if (page_load > 0 && arc_reclaim_needed()) { 36741ab7f2deSmaybee /* memory is low, delay before restarting */ 36751ab7f2deSmaybee ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3676be6fd75aSMatthew Ahrens return (SET_ERROR(EAGAIN)); 36771ab7f2deSmaybee } 36781ab7f2deSmaybee page_load = 0; 36791ab7f2deSmaybee #endif 36801ab7f2deSmaybee return (0); 36811ab7f2deSmaybee } 36821ab7f2deSmaybee 3683fa9e4066Sahrens void 36841ab7f2deSmaybee arc_tempreserve_clear(uint64_t reserve) 3685fa9e4066Sahrens { 36861ab7f2deSmaybee atomic_add_64(&arc_tempreserve, -reserve); 3687fa9e4066Sahrens ASSERT((int64_t)arc_tempreserve >= 0); 3688fa9e4066Sahrens } 3689fa9e4066Sahrens 3690fa9e4066Sahrens int 36911ab7f2deSmaybee arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3692fa9e4066Sahrens { 36931ab7f2deSmaybee int error; 36942fdbea25SAleksandr Guzovskiy uint64_t anon_size; 36951ab7f2deSmaybee 36961ab7f2deSmaybee if (reserve > arc_c/4 && !arc_no_grow) 36971ab7f2deSmaybee arc_c = MIN(arc_c_max, reserve * 4); 36981ab7f2deSmaybee if (reserve > arc_c) 3699be6fd75aSMatthew Ahrens return (SET_ERROR(ENOMEM)); 3700112fe045Smaybee 37012fdbea25SAleksandr Guzovskiy /* 37022fdbea25SAleksandr Guzovskiy * Don't count loaned bufs as in flight dirty data to prevent long 37032fdbea25SAleksandr Guzovskiy * network delays from blocking transactions that are ready to be 37042fdbea25SAleksandr Guzovskiy * assigned to a txg. 37052fdbea25SAleksandr Guzovskiy */ 37062fdbea25SAleksandr Guzovskiy anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0); 37072fdbea25SAleksandr Guzovskiy 37081ab7f2deSmaybee /* 37091ab7f2deSmaybee * Writes will, almost always, require additional memory allocations 3710f7170741SWill Andrews * in order to compress/encrypt/etc the data. We therefore need to 37111ab7f2deSmaybee * make sure that there is sufficient available memory for this. 37121ab7f2deSmaybee */ 371369962b56SMatthew Ahrens error = arc_memory_throttle(reserve, txg); 371469962b56SMatthew Ahrens if (error != 0) 37151ab7f2deSmaybee return (error); 37161ab7f2deSmaybee 3717fa9e4066Sahrens /* 3718112fe045Smaybee * Throttle writes when the amount of dirty data in the cache 3719112fe045Smaybee * gets too large. We try to keep the cache less than half full 3720112fe045Smaybee * of dirty blocks so that our sync times don't grow too large. 3721112fe045Smaybee * Note: if two requests come in concurrently, we might let them 3722112fe045Smaybee * both succeed, when one of them should fail. Not a huge deal. 
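 * As a purely illustrative example: with arc_c at 1 GB, a reservation
 * is rejected with ERESTART only when anonymous buffers already exceed
 * 256 MB (arc_c/4) and reserve + arc_tempreserve + anon_size would
 * exceed 512 MB (arc_c/2).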
3723fa9e4066Sahrens */ 37242fdbea25SAleksandr Guzovskiy 37252fdbea25SAleksandr Guzovskiy if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 37262fdbea25SAleksandr Guzovskiy anon_size > arc_c / 4) { 37270e8c6158Smaybee dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 37280e8c6158Smaybee "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 37290e8c6158Smaybee arc_tempreserve>>10, 37300e8c6158Smaybee arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 37310e8c6158Smaybee arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 37321ab7f2deSmaybee reserve>>10, arc_c>>10); 3733be6fd75aSMatthew Ahrens return (SET_ERROR(ERESTART)); 3734fa9e4066Sahrens } 37351ab7f2deSmaybee atomic_add_64(&arc_tempreserve, reserve); 3736fa9e4066Sahrens return (0); 3737fa9e4066Sahrens } 3738fa9e4066Sahrens 3739fa9e4066Sahrens void 3740fa9e4066Sahrens arc_init(void) 3741fa9e4066Sahrens { 3742fa9e4066Sahrens mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3743fa9e4066Sahrens cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3744fa9e4066Sahrens 374513506d1eSmaybee /* Convert seconds to clock ticks */ 3746b19a79ecSperrin arc_min_prefetch_lifespan = 1 * hz; 374713506d1eSmaybee 3748fa9e4066Sahrens /* Start out with 1/8 of all memory */ 374944cb6abcSbmc arc_c = physmem * PAGESIZE / 8; 3750fa9e4066Sahrens 3751fa9e4066Sahrens #ifdef _KERNEL 3752fa9e4066Sahrens /* 3753fa9e4066Sahrens * On architectures where the physical memory can be larger 3754fa9e4066Sahrens * than the addressable space (intel in 32-bit mode), we may 3755fa9e4066Sahrens * need to limit the cache to 1/8 of VM size. 3756fa9e4066Sahrens */ 375744cb6abcSbmc arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3758fa9e4066Sahrens #endif 3759fa9e4066Sahrens 3760112fe045Smaybee /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 376144cb6abcSbmc arc_c_min = MAX(arc_c / 4, 64<<20); 3762112fe045Smaybee /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 376344cb6abcSbmc if (arc_c * 8 >= 1<<30) 376444cb6abcSbmc arc_c_max = (arc_c * 8) - (1<<30); 3765fa9e4066Sahrens else 376644cb6abcSbmc arc_c_max = arc_c_min; 376744cb6abcSbmc arc_c_max = MAX(arc_c * 6, arc_c_max); 3768a2eea2e1Sahrens 3769a2eea2e1Sahrens /* 3770a2eea2e1Sahrens * Allow the tunables to override our calculations if they are 3771a2eea2e1Sahrens * reasonable (ie. 
over 64MB) 3772a2eea2e1Sahrens */ 3773a2eea2e1Sahrens if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 377444cb6abcSbmc arc_c_max = zfs_arc_max; 377544cb6abcSbmc if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 377644cb6abcSbmc arc_c_min = zfs_arc_min; 3777a2eea2e1Sahrens 377844cb6abcSbmc arc_c = arc_c_max; 377944cb6abcSbmc arc_p = (arc_c >> 1); 3780fa9e4066Sahrens 37810e8c6158Smaybee /* limit meta-data to 1/4 of the arc capacity */ 37820e8c6158Smaybee arc_meta_limit = arc_c_max / 4; 37831116048bSek 37841116048bSek /* Allow the tunable to override if it is reasonable */ 37851116048bSek if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 37861116048bSek arc_meta_limit = zfs_arc_meta_limit; 37871116048bSek 37880e8c6158Smaybee if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 37890e8c6158Smaybee arc_c_min = arc_meta_limit / 2; 37900e8c6158Smaybee 37913a5286a1SMatthew Ahrens if (zfs_arc_meta_min > 0) { 37923a5286a1SMatthew Ahrens arc_meta_min = zfs_arc_meta_min; 37933a5286a1SMatthew Ahrens } else { 37943a5286a1SMatthew Ahrens arc_meta_min = arc_c_min / 2; 37953a5286a1SMatthew Ahrens } 37963a5286a1SMatthew Ahrens 37975a98e54bSBrendan Gregg - Sun Microsystems if (zfs_arc_grow_retry > 0) 37985a98e54bSBrendan Gregg - Sun Microsystems arc_grow_retry = zfs_arc_grow_retry; 37995a98e54bSBrendan Gregg - Sun Microsystems 38005a98e54bSBrendan Gregg - Sun Microsystems if (zfs_arc_shrink_shift > 0) 38015a98e54bSBrendan Gregg - Sun Microsystems arc_shrink_shift = zfs_arc_shrink_shift; 38025a98e54bSBrendan Gregg - Sun Microsystems 38035a98e54bSBrendan Gregg - Sun Microsystems if (zfs_arc_p_min_shift > 0) 38045a98e54bSBrendan Gregg - Sun Microsystems arc_p_min_shift = zfs_arc_p_min_shift; 38055a98e54bSBrendan Gregg - Sun Microsystems 3806fa9e4066Sahrens /* if kmem_flags are set, lets try to use less memory */ 3807fa9e4066Sahrens if (kmem_debugging()) 380844cb6abcSbmc arc_c = arc_c / 2; 380944cb6abcSbmc if (arc_c < arc_c_min) 381044cb6abcSbmc arc_c = arc_c_min; 381144cb6abcSbmc 381244cb6abcSbmc arc_anon = &ARC_anon; 381344cb6abcSbmc arc_mru = &ARC_mru; 381444cb6abcSbmc arc_mru_ghost = &ARC_mru_ghost; 381544cb6abcSbmc arc_mfu = &ARC_mfu; 381644cb6abcSbmc arc_mfu_ghost = &ARC_mfu_ghost; 3817fa94a07fSbrendan arc_l2c_only = &ARC_l2c_only; 381844cb6abcSbmc arc_size = 0; 381944cb6abcSbmc 382044cb6abcSbmc mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 382144cb6abcSbmc mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 382244cb6abcSbmc mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 382344cb6abcSbmc mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 382444cb6abcSbmc mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3825fa94a07fSbrendan mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 382644cb6abcSbmc 38270e8c6158Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 38280e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 38290e8c6158Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 38300e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 38310e8c6158Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 38320e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 38330e8c6158Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 38340e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 38350e8c6158Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 38360e8c6158Smaybee sizeof 
(arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 38370e8c6158Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 38380e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 38390e8c6158Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 38400e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 38410e8c6158Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 38420e8c6158Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3843fa94a07fSbrendan list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3844fa94a07fSbrendan sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3845fa94a07fSbrendan list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3846fa94a07fSbrendan sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3847fa9e4066Sahrens 3848fa9e4066Sahrens buf_init(); 3849fa9e4066Sahrens 3850fa9e4066Sahrens arc_thread_exit = 0; 3851ea8dc4b6Seschrock arc_eviction_list = NULL; 3852ea8dc4b6Seschrock mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 385340d7d650Smaybee bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3854fa9e4066Sahrens 385544cb6abcSbmc arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 385644cb6abcSbmc sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 385744cb6abcSbmc 385844cb6abcSbmc if (arc_ksp != NULL) { 385944cb6abcSbmc arc_ksp->ks_data = &arc_stats; 386044cb6abcSbmc kstat_install(arc_ksp); 386144cb6abcSbmc } 386244cb6abcSbmc 3863fa9e4066Sahrens (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3864fa9e4066Sahrens TS_RUN, minclsyspri); 386549e3519aSmaybee 386649e3519aSmaybee arc_dead = FALSE; 38673a737e0dSbrendan arc_warm = B_FALSE; 38681ab7f2deSmaybee 386969962b56SMatthew Ahrens /* 387069962b56SMatthew Ahrens * Calculate maximum amount of dirty data per pool. 387169962b56SMatthew Ahrens * 387269962b56SMatthew Ahrens * If it has been set by /etc/system, take that. 387369962b56SMatthew Ahrens * Otherwise, use a percentage of physical memory defined by 387469962b56SMatthew Ahrens * zfs_dirty_data_max_percent (default 10%) with a cap at 387569962b56SMatthew Ahrens * zfs_dirty_data_max_max (default 4GB). 
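 * For example (hypothetical sizes): a 16 GB system would default to
 * roughly 1.6 GB of dirty data per pool, while systems with more than
 * 40 GB of physical memory are clamped at the 4 GB cap.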
387669962b56SMatthew Ahrens */ 387769962b56SMatthew Ahrens if (zfs_dirty_data_max == 0) { 387869962b56SMatthew Ahrens zfs_dirty_data_max = physmem * PAGESIZE * 387969962b56SMatthew Ahrens zfs_dirty_data_max_percent / 100; 388069962b56SMatthew Ahrens zfs_dirty_data_max = MIN(zfs_dirty_data_max, 388169962b56SMatthew Ahrens zfs_dirty_data_max_max); 388269962b56SMatthew Ahrens } 3883fa9e4066Sahrens } 3884fa9e4066Sahrens 3885fa9e4066Sahrens void 3886fa9e4066Sahrens arc_fini(void) 3887fa9e4066Sahrens { 3888fa9e4066Sahrens mutex_enter(&arc_reclaim_thr_lock); 3889fa9e4066Sahrens arc_thread_exit = 1; 3890fa9e4066Sahrens while (arc_thread_exit != 0) 3891fa9e4066Sahrens cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3892fa9e4066Sahrens mutex_exit(&arc_reclaim_thr_lock); 3893fa9e4066Sahrens 3894874395d5Smaybee arc_flush(NULL); 3895fa9e4066Sahrens 3896fa9e4066Sahrens arc_dead = TRUE; 3897fa9e4066Sahrens 389844cb6abcSbmc if (arc_ksp != NULL) { 389944cb6abcSbmc kstat_delete(arc_ksp); 390044cb6abcSbmc arc_ksp = NULL; 390144cb6abcSbmc } 390244cb6abcSbmc 3903ea8dc4b6Seschrock mutex_destroy(&arc_eviction_mtx); 3904fa9e4066Sahrens mutex_destroy(&arc_reclaim_thr_lock); 3905fa9e4066Sahrens cv_destroy(&arc_reclaim_thr_cv); 3906fa9e4066Sahrens 39070e8c6158Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 39080e8c6158Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 39090e8c6158Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 39100e8c6158Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 39110e8c6158Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 39120e8c6158Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 39130e8c6158Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 39140e8c6158Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3915fa9e4066Sahrens 391644cb6abcSbmc mutex_destroy(&arc_anon->arcs_mtx); 391744cb6abcSbmc mutex_destroy(&arc_mru->arcs_mtx); 391844cb6abcSbmc mutex_destroy(&arc_mru_ghost->arcs_mtx); 391944cb6abcSbmc mutex_destroy(&arc_mfu->arcs_mtx); 392044cb6abcSbmc mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3921b5e70f97SRicardo M. Correia mutex_destroy(&arc_l2c_only->arcs_mtx); 39225ad82045Snd 3923fa9e4066Sahrens buf_fini(); 39242fdbea25SAleksandr Guzovskiy 39252fdbea25SAleksandr Guzovskiy ASSERT(arc_loaned_bytes == 0); 3926fa9e4066Sahrens } 3927fa94a07fSbrendan 3928fa94a07fSbrendan /* 3929fa94a07fSbrendan * Level 2 ARC 3930fa94a07fSbrendan * 3931fa94a07fSbrendan * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3932fa94a07fSbrendan * It uses dedicated storage devices to hold cached data, which are populated 3933fa94a07fSbrendan * using large infrequent writes. The main role of this cache is to boost 3934fa94a07fSbrendan * the performance of random read workloads. The intended L2ARC devices 3935fa94a07fSbrendan * include short-stroked disks, solid state disks, and other media with 3936fa94a07fSbrendan * substantially faster read latency than disk. 
3937fa94a07fSbrendan * 3938fa94a07fSbrendan * +-----------------------+ 3939fa94a07fSbrendan * | ARC | 3940fa94a07fSbrendan * +-----------------------+ 3941fa94a07fSbrendan * | ^ ^ 3942fa94a07fSbrendan * | | | 3943fa94a07fSbrendan * l2arc_feed_thread() arc_read() 3944fa94a07fSbrendan * | | | 3945fa94a07fSbrendan * | l2arc read | 3946fa94a07fSbrendan * V | | 3947fa94a07fSbrendan * +---------------+ | 3948fa94a07fSbrendan * | L2ARC | | 3949fa94a07fSbrendan * +---------------+ | 3950fa94a07fSbrendan * | ^ | 3951fa94a07fSbrendan * l2arc_write() | | 3952fa94a07fSbrendan * | | | 3953fa94a07fSbrendan * V | | 3954fa94a07fSbrendan * +-------+ +-------+ 3955fa94a07fSbrendan * | vdev | | vdev | 3956fa94a07fSbrendan * | cache | | cache | 3957fa94a07fSbrendan * +-------+ +-------+ 3958fa94a07fSbrendan * +=========+ .-----. 3959fa94a07fSbrendan * : L2ARC : |-_____-| 3960fa94a07fSbrendan * : devices : | Disks | 3961fa94a07fSbrendan * +=========+ `-_____-' 3962fa94a07fSbrendan * 3963fa94a07fSbrendan * Read requests are satisfied from the following sources, in order: 3964fa94a07fSbrendan * 3965fa94a07fSbrendan * 1) ARC 3966fa94a07fSbrendan * 2) vdev cache of L2ARC devices 3967fa94a07fSbrendan * 3) L2ARC devices 3968fa94a07fSbrendan * 4) vdev cache of disks 3969fa94a07fSbrendan * 5) disks 3970fa94a07fSbrendan * 3971fa94a07fSbrendan * Some L2ARC device types exhibit extremely slow write performance. 3972fa94a07fSbrendan * To accommodate for this there are some significant differences between 3973fa94a07fSbrendan * the L2ARC and traditional cache design: 3974fa94a07fSbrendan * 3975fa94a07fSbrendan * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 3976fa94a07fSbrendan * the ARC behave as usual, freeing buffers and placing headers on ghost 3977fa94a07fSbrendan * lists. The ARC does not send buffers to the L2ARC during eviction as 3978fa94a07fSbrendan * this would add inflated write latencies for all ARC memory pressure. 3979fa94a07fSbrendan * 3980fa94a07fSbrendan * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 3981fa94a07fSbrendan * It does this by periodically scanning buffers from the eviction-end of 3982fa94a07fSbrendan * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 3983aad02571SSaso Kiselkov * not already there. It scans until a headroom of buffers is satisfied, 3984aad02571SSaso Kiselkov * which itself is a buffer for ARC eviction. If a compressible buffer is 3985aad02571SSaso Kiselkov * found during scanning and selected for writing to an L2ARC device, we 3986aad02571SSaso Kiselkov * temporarily boost scanning headroom during the next scan cycle to make 3987aad02571SSaso Kiselkov * sure we adapt to compression effects (which might significantly reduce 3988aad02571SSaso Kiselkov * the data volume we write to L2ARC). The thread that does this is 3989fa94a07fSbrendan * l2arc_feed_thread(), illustrated below; example sizes are included to 3990fa94a07fSbrendan * provide a better sense of ratio than this diagram: 3991fa94a07fSbrendan * 3992fa94a07fSbrendan * head --> tail 3993fa94a07fSbrendan * +---------------------+----------+ 3994fa94a07fSbrendan * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. 
# already on L2ARC 3995fa94a07fSbrendan * +---------------------+----------+ | o L2ARC eligible 3996fa94a07fSbrendan * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 3997fa94a07fSbrendan * +---------------------+----------+ | 3998fa94a07fSbrendan * 15.9 Gbytes ^ 32 Mbytes | 3999fa94a07fSbrendan * headroom | 4000fa94a07fSbrendan * l2arc_feed_thread() 4001fa94a07fSbrendan * | 4002fa94a07fSbrendan * l2arc write hand <--[oooo]--' 4003fa94a07fSbrendan * | 8 Mbyte 4004fa94a07fSbrendan * | write max 4005fa94a07fSbrendan * V 4006fa94a07fSbrendan * +==============================+ 4007fa94a07fSbrendan * L2ARC dev |####|#|###|###| |####| ... | 4008fa94a07fSbrendan * +==============================+ 4009fa94a07fSbrendan * 32 Gbytes 4010fa94a07fSbrendan * 4011fa94a07fSbrendan * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 4012fa94a07fSbrendan * evicted, then the L2ARC has cached a buffer much sooner than it probably 4013fa94a07fSbrendan * needed to, potentially wasting L2ARC device bandwidth and storage. It is 4014fa94a07fSbrendan * safe to say that this is an uncommon case, since buffers at the end of 4015fa94a07fSbrendan * the ARC lists have moved there due to inactivity. 4016fa94a07fSbrendan * 4017fa94a07fSbrendan * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 4018fa94a07fSbrendan * then the L2ARC simply misses copying some buffers. This serves as a 4019fa94a07fSbrendan * pressure valve to prevent heavy read workloads from both stalling the ARC 4020fa94a07fSbrendan * with waits and clogging the L2ARC with writes. This also helps prevent 4021fa94a07fSbrendan * the potential for the L2ARC to churn if it attempts to cache content too 4022fa94a07fSbrendan * quickly, such as during backups of the entire pool. 4023fa94a07fSbrendan * 40243a737e0dSbrendan * 5. After system boot and before the ARC has filled main memory, there are 40253a737e0dSbrendan * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 40263a737e0dSbrendan * lists can remain mostly static. Instead of searching from tail of these 40273a737e0dSbrendan * lists as pictured, the l2arc_feed_thread() will search from the list heads 40283a737e0dSbrendan * for eligible buffers, greatly increasing its chance of finding them. 40293a737e0dSbrendan * 40303a737e0dSbrendan * The L2ARC device write speed is also boosted during this time so that 40313a737e0dSbrendan * the L2ARC warms up faster. Since there have been no ARC evictions yet, 40323a737e0dSbrendan * there are no L2ARC reads, and no fear of degrading read performance 40333a737e0dSbrendan * through increased writes. 40343a737e0dSbrendan * 40353a737e0dSbrendan * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 4036fa94a07fSbrendan * the vdev queue can aggregate them into larger and fewer writes. Each 4037fa94a07fSbrendan * device is written to in a rotor fashion, sweeping writes through 4038fa94a07fSbrendan * available space then repeating. 4039fa94a07fSbrendan * 40403a737e0dSbrendan * 7. The L2ARC does not store dirty content. It never needs to flush 4041fa94a07fSbrendan * write buffers back to disk based storage. 4042fa94a07fSbrendan * 40433a737e0dSbrendan * 8. If an ARC buffer is written (and dirtied) which also exists in the 4044fa94a07fSbrendan * L2ARC, the now stale L2ARC buffer is immediately dropped. 
4045fa94a07fSbrendan * 4046fa94a07fSbrendan * The performance of the L2ARC can be tweaked by a number of tunables, which 4047fa94a07fSbrendan * may be necessary for different workloads: 4048fa94a07fSbrendan * 4049fa94a07fSbrendan * l2arc_write_max max write bytes per interval 40503a737e0dSbrendan * l2arc_write_boost extra write bytes during device warmup 4051fa94a07fSbrendan * l2arc_noprefetch skip caching prefetched buffers 4052fa94a07fSbrendan * l2arc_headroom number of max device writes to precache 4053aad02571SSaso Kiselkov * l2arc_headroom_boost when we find compressed buffers during ARC 4054aad02571SSaso Kiselkov * scanning, we multiply headroom by this 4055aad02571SSaso Kiselkov * percentage factor for the next scan cycle, 4056aad02571SSaso Kiselkov * since more compressed buffers are likely to 4057aad02571SSaso Kiselkov * be present 4058fa94a07fSbrendan * l2arc_feed_secs seconds between L2ARC writing 4059fa94a07fSbrendan * 4060fa94a07fSbrendan * Tunables may be removed or added as future performance improvements are 4061fa94a07fSbrendan * integrated, and also may become zpool properties. 40625a98e54bSBrendan Gregg - Sun Microsystems * 40635a98e54bSBrendan Gregg - Sun Microsystems * There are three key functions that control how the L2ARC warms up: 40645a98e54bSBrendan Gregg - Sun Microsystems * 40655a98e54bSBrendan Gregg - Sun Microsystems * l2arc_write_eligible() check if a buffer is eligible to cache 40665a98e54bSBrendan Gregg - Sun Microsystems * l2arc_write_size() calculate how much to write 40675a98e54bSBrendan Gregg - Sun Microsystems * l2arc_write_interval() calculate sleep delay between writes 40685a98e54bSBrendan Gregg - Sun Microsystems * 40695a98e54bSBrendan Gregg - Sun Microsystems * These three functions determine what to write, how much, and how quickly 40705a98e54bSBrendan Gregg - Sun Microsystems * to send writes. 4071fa94a07fSbrendan */ 4072fa94a07fSbrendan 40735a98e54bSBrendan Gregg - Sun Microsystems static boolean_t 4074*7adb730bSGeorge Wilson l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr) 40755a98e54bSBrendan Gregg - Sun Microsystems { 40765a98e54bSBrendan Gregg - Sun Microsystems /* 40775a98e54bSBrendan Gregg - Sun Microsystems * A buffer is *not* eligible for the L2ARC if it: 40785a98e54bSBrendan Gregg - Sun Microsystems * 1. belongs to a different spa. 40795ea40c06SBrendan Gregg - Sun Microsystems * 2. is already cached on the L2ARC. 40805ea40c06SBrendan Gregg - Sun Microsystems * 3. has an I/O in progress (it may be an incomplete read). 40815ea40c06SBrendan Gregg - Sun Microsystems * 4. is flagged not eligible (zfs property). 40825a98e54bSBrendan Gregg - Sun Microsystems */ 4083*7adb730bSGeorge Wilson if (hdr->b_spa != spa_guid || hdr->b_l2hdr != NULL || 4084*7adb730bSGeorge Wilson HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr)) 40855a98e54bSBrendan Gregg - Sun Microsystems return (B_FALSE); 40865a98e54bSBrendan Gregg - Sun Microsystems 40875a98e54bSBrendan Gregg - Sun Microsystems return (B_TRUE); 40885a98e54bSBrendan Gregg - Sun Microsystems } 40895a98e54bSBrendan Gregg - Sun Microsystems 40905a98e54bSBrendan Gregg - Sun Microsystems static uint64_t 4091aad02571SSaso Kiselkov l2arc_write_size(void) 40925a98e54bSBrendan Gregg - Sun Microsystems { 40935a98e54bSBrendan Gregg - Sun Microsystems uint64_t size; 40945a98e54bSBrendan Gregg - Sun Microsystems 4095aad02571SSaso Kiselkov /* 4096aad02571SSaso Kiselkov * Make sure our globals have meaningful values in case the user 4097aad02571SSaso Kiselkov * altered them. 
4098aad02571SSaso Kiselkov */ 4099aad02571SSaso Kiselkov size = l2arc_write_max; 4100aad02571SSaso Kiselkov if (size == 0) { 4101aad02571SSaso Kiselkov cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must " 4102aad02571SSaso Kiselkov "be greater than zero, resetting it to the default (%d)", 4103aad02571SSaso Kiselkov L2ARC_WRITE_SIZE); 4104aad02571SSaso Kiselkov size = l2arc_write_max = L2ARC_WRITE_SIZE; 4105aad02571SSaso Kiselkov } 41065a98e54bSBrendan Gregg - Sun Microsystems 41075a98e54bSBrendan Gregg - Sun Microsystems if (arc_warm == B_FALSE) 4108aad02571SSaso Kiselkov size += l2arc_write_boost; 41095a98e54bSBrendan Gregg - Sun Microsystems 41105a98e54bSBrendan Gregg - Sun Microsystems return (size); 41115a98e54bSBrendan Gregg - Sun Microsystems 41125a98e54bSBrendan Gregg - Sun Microsystems } 41135a98e54bSBrendan Gregg - Sun Microsystems 41145a98e54bSBrendan Gregg - Sun Microsystems static clock_t 41155a98e54bSBrendan Gregg - Sun Microsystems l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 41165a98e54bSBrendan Gregg - Sun Microsystems { 4117d3d50737SRafael Vanoni clock_t interval, next, now; 41185a98e54bSBrendan Gregg - Sun Microsystems 41195a98e54bSBrendan Gregg - Sun Microsystems /* 41205a98e54bSBrendan Gregg - Sun Microsystems * If the ARC lists are busy, increase our write rate; if the 41215a98e54bSBrendan Gregg - Sun Microsystems * lists are stale, idle back. This is achieved by checking 41225a98e54bSBrendan Gregg - Sun Microsystems * how much we previously wrote - if it was more than half of 41235a98e54bSBrendan Gregg - Sun Microsystems * what we wanted, schedule the next write much sooner. 41245a98e54bSBrendan Gregg - Sun Microsystems */ 41255a98e54bSBrendan Gregg - Sun Microsystems if (l2arc_feed_again && wrote > (wanted / 2)) 41265a98e54bSBrendan Gregg - Sun Microsystems interval = (hz * l2arc_feed_min_ms) / 1000; 41275a98e54bSBrendan Gregg - Sun Microsystems else 41285a98e54bSBrendan Gregg - Sun Microsystems interval = hz * l2arc_feed_secs; 41295a98e54bSBrendan Gregg - Sun Microsystems 4130d3d50737SRafael Vanoni now = ddi_get_lbolt(); 4131d3d50737SRafael Vanoni next = MAX(now, MIN(now + interval, began + interval)); 41325a98e54bSBrendan Gregg - Sun Microsystems 41335a98e54bSBrendan Gregg - Sun Microsystems return (next); 41345a98e54bSBrendan Gregg - Sun Microsystems } 41355a98e54bSBrendan Gregg - Sun Microsystems 4136fa94a07fSbrendan static void 4137fa94a07fSbrendan l2arc_hdr_stat_add(void) 4138fa94a07fSbrendan { 4139e6c728e1Sbrendan ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 4140e6c728e1Sbrendan ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 4141fa94a07fSbrendan } 4142fa94a07fSbrendan 4143fa94a07fSbrendan static void 4144fa94a07fSbrendan l2arc_hdr_stat_remove(void) 4145fa94a07fSbrendan { 4146e6c728e1Sbrendan ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 4147e6c728e1Sbrendan ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 4148fa94a07fSbrendan } 4149fa94a07fSbrendan 4150fa94a07fSbrendan /* 4151fa94a07fSbrendan * Cycle through L2ARC devices. This is how L2ARC load balances. 41523a737e0dSbrendan * If a device is returned, this also returns holding the spa config lock. 
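 * Devices are walked round-robin starting after the one used last,
 * skipping any cache vdev that vdev_is_dead() reports as unusable;
 * NULL is returned if no usable device remains.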
4153fa94a07fSbrendan */ 4154fa94a07fSbrendan static l2arc_dev_t * 4155fa94a07fSbrendan l2arc_dev_get_next(void) 4156fa94a07fSbrendan { 41573a737e0dSbrendan l2arc_dev_t *first, *next = NULL; 41583a737e0dSbrendan 41593a737e0dSbrendan /* 41603a737e0dSbrendan * Lock out the removal of spas (spa_namespace_lock), then removal 41613a737e0dSbrendan * of cache devices (l2arc_dev_mtx). Once a device has been selected, 41623a737e0dSbrendan * both locks will be dropped and a spa config lock held instead. 41633a737e0dSbrendan */ 41643a737e0dSbrendan mutex_enter(&spa_namespace_lock); 41653a737e0dSbrendan mutex_enter(&l2arc_dev_mtx); 4166fa94a07fSbrendan 4167c5904d13Seschrock /* if there are no vdevs, there is nothing to do */ 4168c5904d13Seschrock if (l2arc_ndev == 0) 41693a737e0dSbrendan goto out; 4170c5904d13Seschrock 4171c5904d13Seschrock first = NULL; 4172c5904d13Seschrock next = l2arc_dev_last; 4173c5904d13Seschrock do { 4174c5904d13Seschrock /* loop around the list looking for a non-faulted vdev */ 4175c5904d13Seschrock if (next == NULL) { 4176fa94a07fSbrendan next = list_head(l2arc_dev_list); 4177c5904d13Seschrock } else { 4178c5904d13Seschrock next = list_next(l2arc_dev_list, next); 4179c5904d13Seschrock if (next == NULL) 4180c5904d13Seschrock next = list_head(l2arc_dev_list); 4181c5904d13Seschrock } 4182c5904d13Seschrock 4183c5904d13Seschrock /* if we have come back to the start, bail out */ 4184c5904d13Seschrock if (first == NULL) 4185c5904d13Seschrock first = next; 4186c5904d13Seschrock else if (next == first) 4187c5904d13Seschrock break; 4188c5904d13Seschrock 4189c5904d13Seschrock } while (vdev_is_dead(next->l2ad_vdev)); 4190c5904d13Seschrock 4191c5904d13Seschrock /* if we were unable to find any usable vdevs, return NULL */ 4192c5904d13Seschrock if (vdev_is_dead(next->l2ad_vdev)) 41933a737e0dSbrendan next = NULL; 4194fa94a07fSbrendan 4195fa94a07fSbrendan l2arc_dev_last = next; 4196fa94a07fSbrendan 41973a737e0dSbrendan out: 41983a737e0dSbrendan mutex_exit(&l2arc_dev_mtx); 41993a737e0dSbrendan 42003a737e0dSbrendan /* 42013a737e0dSbrendan * Grab the config lock to prevent the 'next' device from being 42023a737e0dSbrendan * removed while we are writing to it. 42033a737e0dSbrendan */ 42043a737e0dSbrendan if (next != NULL) 4205e14bb325SJeff Bonwick spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 42063a737e0dSbrendan mutex_exit(&spa_namespace_lock); 42073a737e0dSbrendan 4208fa94a07fSbrendan return (next); 4209fa94a07fSbrendan } 4210fa94a07fSbrendan 42113a737e0dSbrendan /* 42123a737e0dSbrendan * Free buffers that were tagged for destruction. 
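 * (Roughly: these are data buffers that the ARC released while an
 * in-flight L2ARC write still referenced them; their freeing is
 * deferred until the write completes and this function runs.)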
42133a737e0dSbrendan */ 42143a737e0dSbrendan static void 42153a737e0dSbrendan l2arc_do_free_on_write() 42163a737e0dSbrendan { 42173a737e0dSbrendan list_t *buflist; 42183a737e0dSbrendan l2arc_data_free_t *df, *df_prev; 42193a737e0dSbrendan 42203a737e0dSbrendan mutex_enter(&l2arc_free_on_write_mtx); 42213a737e0dSbrendan buflist = l2arc_free_on_write; 42223a737e0dSbrendan 42233a737e0dSbrendan for (df = list_tail(buflist); df; df = df_prev) { 42243a737e0dSbrendan df_prev = list_prev(buflist, df); 42253a737e0dSbrendan ASSERT(df->l2df_data != NULL); 42263a737e0dSbrendan ASSERT(df->l2df_func != NULL); 42273a737e0dSbrendan df->l2df_func(df->l2df_data, df->l2df_size); 42283a737e0dSbrendan list_remove(buflist, df); 42293a737e0dSbrendan kmem_free(df, sizeof (l2arc_data_free_t)); 42303a737e0dSbrendan } 42313a737e0dSbrendan 42323a737e0dSbrendan mutex_exit(&l2arc_free_on_write_mtx); 42333a737e0dSbrendan } 42343a737e0dSbrendan 4235fa94a07fSbrendan /* 4236fa94a07fSbrendan * A write to a cache device has completed. Update all headers to allow 4237fa94a07fSbrendan * reads from these buffers to begin. 4238fa94a07fSbrendan */ 4239fa94a07fSbrendan static void 4240fa94a07fSbrendan l2arc_write_done(zio_t *zio) 4241fa94a07fSbrendan { 4242fa94a07fSbrendan l2arc_write_callback_t *cb; 4243fa94a07fSbrendan l2arc_dev_t *dev; 4244fa94a07fSbrendan list_t *buflist; 4245*7adb730bSGeorge Wilson arc_buf_hdr_t *head, *hdr, *hdr_prev; 42463a737e0dSbrendan l2arc_buf_hdr_t *abl2; 4247fa94a07fSbrendan kmutex_t *hash_lock; 42483038a2b4SSaso Kiselkov int64_t bytes_dropped = 0; 4249fa94a07fSbrendan 4250fa94a07fSbrendan cb = zio->io_private; 4251fa94a07fSbrendan ASSERT(cb != NULL); 4252fa94a07fSbrendan dev = cb->l2wcb_dev; 4253fa94a07fSbrendan ASSERT(dev != NULL); 4254fa94a07fSbrendan head = cb->l2wcb_head; 4255fa94a07fSbrendan ASSERT(head != NULL); 4256fa94a07fSbrendan buflist = dev->l2ad_buflist; 4257fa94a07fSbrendan ASSERT(buflist != NULL); 4258fa94a07fSbrendan DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 4259fa94a07fSbrendan l2arc_write_callback_t *, cb); 4260fa94a07fSbrendan 4261fa94a07fSbrendan if (zio->io_error != 0) 4262fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_writes_error); 4263fa94a07fSbrendan 4264fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 4265fa94a07fSbrendan 4266fa94a07fSbrendan /* 4267fa94a07fSbrendan * All writes completed, or an error was hit. 4268fa94a07fSbrendan */ 4269*7adb730bSGeorge Wilson for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) { 4270*7adb730bSGeorge Wilson hdr_prev = list_prev(buflist, hdr); 4271*7adb730bSGeorge Wilson abl2 = hdr->b_l2hdr; 427271cb1b74SSaso Kiselkov 427371cb1b74SSaso Kiselkov /* 427471cb1b74SSaso Kiselkov * Release the temporary compressed buffer as soon as possible. 427571cb1b74SSaso Kiselkov */ 427671cb1b74SSaso Kiselkov if (abl2->b_compress != ZIO_COMPRESS_OFF) 4277*7adb730bSGeorge Wilson l2arc_release_cdata_buf(hdr); 4278fa94a07fSbrendan 4279*7adb730bSGeorge Wilson hash_lock = HDR_LOCK(hdr); 4280fa94a07fSbrendan if (!mutex_tryenter(hash_lock)) { 4281fa94a07fSbrendan /* 4282fa94a07fSbrendan * This buffer misses out. It may be in a stage 4283fa94a07fSbrendan * of eviction. Its ARC_L2_WRITING flag will be 4284fa94a07fSbrendan * left set, denying reads to this buffer. 4285fa94a07fSbrendan */ 4286fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 4287fa94a07fSbrendan continue; 4288fa94a07fSbrendan } 4289fa94a07fSbrendan 4290fa94a07fSbrendan if (zio->io_error != 0) { 4291fa94a07fSbrendan /* 42923a737e0dSbrendan * Error - drop L2ARC entry. 
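 * The header itself stays in the ARC; only its L2ARC state and the
 * space accounting for the failed device write are rolled back.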
4293fa94a07fSbrendan */ 4294*7adb730bSGeorge Wilson list_remove(buflist, hdr); 4295aad02571SSaso Kiselkov ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize); 42963038a2b4SSaso Kiselkov bytes_dropped += abl2->b_asize; 4297*7adb730bSGeorge Wilson hdr->b_l2hdr = NULL; 42983a737e0dSbrendan kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4299*7adb730bSGeorge Wilson ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 4300fa94a07fSbrendan } 4301fa94a07fSbrendan 4302fa94a07fSbrendan /* 4303fa94a07fSbrendan * Allow ARC to begin reads to this L2ARC entry. 4304fa94a07fSbrendan */ 4305*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_L2_WRITING; 4306fa94a07fSbrendan 4307fa94a07fSbrendan mutex_exit(hash_lock); 4308fa94a07fSbrendan } 4309fa94a07fSbrendan 4310fa94a07fSbrendan atomic_inc_64(&l2arc_writes_done); 4311fa94a07fSbrendan list_remove(buflist, head); 4312fa94a07fSbrendan kmem_cache_free(hdr_cache, head); 4313fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 4314fa94a07fSbrendan 43153038a2b4SSaso Kiselkov vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0); 43163038a2b4SSaso Kiselkov 43173a737e0dSbrendan l2arc_do_free_on_write(); 4318fa94a07fSbrendan 4319fa94a07fSbrendan kmem_free(cb, sizeof (l2arc_write_callback_t)); 4320fa94a07fSbrendan } 4321fa94a07fSbrendan 4322fa94a07fSbrendan /* 4323fa94a07fSbrendan * A read to a cache device completed. Validate buffer contents before 4324fa94a07fSbrendan * handing over to the regular ARC routines. 4325fa94a07fSbrendan */ 4326fa94a07fSbrendan static void 4327fa94a07fSbrendan l2arc_read_done(zio_t *zio) 4328fa94a07fSbrendan { 4329fa94a07fSbrendan l2arc_read_callback_t *cb; 4330fa94a07fSbrendan arc_buf_hdr_t *hdr; 4331fa94a07fSbrendan arc_buf_t *buf; 4332fa94a07fSbrendan kmutex_t *hash_lock; 43333a737e0dSbrendan int equal; 4334fa94a07fSbrendan 4335e14bb325SJeff Bonwick ASSERT(zio->io_vd != NULL); 4336e14bb325SJeff Bonwick ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 4337e14bb325SJeff Bonwick 4338e14bb325SJeff Bonwick spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 4339e14bb325SJeff Bonwick 4340fa94a07fSbrendan cb = zio->io_private; 4341fa94a07fSbrendan ASSERT(cb != NULL); 4342fa94a07fSbrendan buf = cb->l2rcb_buf; 4343fa94a07fSbrendan ASSERT(buf != NULL); 4344fa94a07fSbrendan 43453f9d6ad7SLin Ling hash_lock = HDR_LOCK(buf->b_hdr); 4346fa94a07fSbrendan mutex_enter(hash_lock); 43473f9d6ad7SLin Ling hdr = buf->b_hdr; 43483f9d6ad7SLin Ling ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4349fa94a07fSbrendan 4350aad02571SSaso Kiselkov /* 4351aad02571SSaso Kiselkov * If the buffer was compressed, decompress it first. 4352aad02571SSaso Kiselkov */ 4353aad02571SSaso Kiselkov if (cb->l2rcb_compress != ZIO_COMPRESS_OFF) 4354aad02571SSaso Kiselkov l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress); 4355aad02571SSaso Kiselkov ASSERT(zio->io_data != NULL); 4356aad02571SSaso Kiselkov 4357fa94a07fSbrendan /* 4358fa94a07fSbrendan * Check this survived the L2ARC journey. 4359fa94a07fSbrendan */ 4360fa94a07fSbrendan equal = arc_cksum_equal(buf); 4361fa94a07fSbrendan if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4362fa94a07fSbrendan mutex_exit(hash_lock); 4363fa94a07fSbrendan zio->io_private = buf; 4364e14bb325SJeff Bonwick zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4365e14bb325SJeff Bonwick zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4366fa94a07fSbrendan arc_read_done(zio); 4367fa94a07fSbrendan } else { 4368fa94a07fSbrendan mutex_exit(hash_lock); 4369fa94a07fSbrendan /* 4370fa94a07fSbrendan * Buffer didn't survive caching. 
Increment stats and 4371fa94a07fSbrendan * reissue to the original storage device. 4372fa94a07fSbrendan */ 43733a737e0dSbrendan if (zio->io_error != 0) { 4374fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_io_error); 43753a737e0dSbrendan } else { 4376be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(EIO); 43773a737e0dSbrendan } 4378fa94a07fSbrendan if (!equal) 4379fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4380fa94a07fSbrendan 4381e14bb325SJeff Bonwick /* 4382e14bb325SJeff Bonwick * If there's no waiter, issue an async i/o to the primary 4383e14bb325SJeff Bonwick * storage now. If there *is* a waiter, the caller must 4384e14bb325SJeff Bonwick * issue the i/o in a context where it's OK to block. 4385e14bb325SJeff Bonwick */ 4386a3f829aeSBill Moore if (zio->io_waiter == NULL) { 4387a3f829aeSBill Moore zio_t *pio = zio_unique_parent(zio); 4388a3f829aeSBill Moore 4389a3f829aeSBill Moore ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4390a3f829aeSBill Moore 4391a3f829aeSBill Moore zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4392e14bb325SJeff Bonwick buf->b_data, zio->io_size, arc_read_done, buf, 4393e14bb325SJeff Bonwick zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4394a3f829aeSBill Moore } 4395fa94a07fSbrendan } 4396fa94a07fSbrendan 4397fa94a07fSbrendan kmem_free(cb, sizeof (l2arc_read_callback_t)); 4398fa94a07fSbrendan } 4399fa94a07fSbrendan 4400fa94a07fSbrendan /* 4401fa94a07fSbrendan * This is the list priority from which the L2ARC will search for pages to 4402fa94a07fSbrendan * cache. This is used within loops (0..3) to cycle through lists in the 4403fa94a07fSbrendan * desired order. This order can have a significant effect on cache 4404fa94a07fSbrendan * performance. 4405fa94a07fSbrendan * 4406fa94a07fSbrendan * Currently the metadata lists are hit first, MFU then MRU, followed by 4407fa94a07fSbrendan * the data lists. This function returns a locked list, and also returns 4408fa94a07fSbrendan * the lock pointer. 4409fa94a07fSbrendan */ 4410fa94a07fSbrendan static list_t * 4411fa94a07fSbrendan l2arc_list_locked(int list_num, kmutex_t **lock) 4412fa94a07fSbrendan { 4413d5285caeSGeorge Wilson list_t *list = NULL; 4414fa94a07fSbrendan 4415fa94a07fSbrendan ASSERT(list_num >= 0 && list_num <= 3); 4416fa94a07fSbrendan 4417fa94a07fSbrendan switch (list_num) { 4418fa94a07fSbrendan case 0: 4419fa94a07fSbrendan list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 4420fa94a07fSbrendan *lock = &arc_mfu->arcs_mtx; 4421fa94a07fSbrendan break; 4422fa94a07fSbrendan case 1: 4423fa94a07fSbrendan list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 4424fa94a07fSbrendan *lock = &arc_mru->arcs_mtx; 4425fa94a07fSbrendan break; 4426fa94a07fSbrendan case 2: 4427fa94a07fSbrendan list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 4428fa94a07fSbrendan *lock = &arc_mfu->arcs_mtx; 4429fa94a07fSbrendan break; 4430fa94a07fSbrendan case 3: 4431fa94a07fSbrendan list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 4432fa94a07fSbrendan *lock = &arc_mru->arcs_mtx; 4433fa94a07fSbrendan break; 4434fa94a07fSbrendan } 4435fa94a07fSbrendan 4436fa94a07fSbrendan ASSERT(!(MUTEX_HELD(*lock))); 4437fa94a07fSbrendan mutex_enter(*lock); 4438fa94a07fSbrendan return (list); 4439fa94a07fSbrendan } 4440fa94a07fSbrendan 4441fa94a07fSbrendan /* 4442fa94a07fSbrendan * Evict buffers from the device write hand to the distance specified in 4443fa94a07fSbrendan * bytes. This distance may span populated buffers, it may span nothing. 4444fa94a07fSbrendan * This is clearing a region on the L2ARC device ready for writing. 
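 * The device is used as a circular log: the write hand sweeps forward
 * and wraps back to the start, so the region ahead of it has to be
 * invalidated in the ARC before it can be overwritten.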
4445fa94a07fSbrendan * If the 'all' boolean is set, every buffer is evicted. 4446fa94a07fSbrendan */ 4447fa94a07fSbrendan static void 4448fa94a07fSbrendan l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4449fa94a07fSbrendan { 4450fa94a07fSbrendan list_t *buflist; 4451fa94a07fSbrendan l2arc_buf_hdr_t *abl2; 4452*7adb730bSGeorge Wilson arc_buf_hdr_t *hdr, *hdr_prev; 4453fa94a07fSbrendan kmutex_t *hash_lock; 4454fa94a07fSbrendan uint64_t taddr; 44553038a2b4SSaso Kiselkov int64_t bytes_evicted = 0; 4456fa94a07fSbrendan 4457fa94a07fSbrendan buflist = dev->l2ad_buflist; 4458fa94a07fSbrendan 4459fa94a07fSbrendan if (buflist == NULL) 4460fa94a07fSbrendan return; 4461fa94a07fSbrendan 4462fa94a07fSbrendan if (!all && dev->l2ad_first) { 4463fa94a07fSbrendan /* 4464fa94a07fSbrendan * This is the first sweep through the device. There is 4465fa94a07fSbrendan * nothing to evict. 4466fa94a07fSbrendan */ 4467fa94a07fSbrendan return; 4468fa94a07fSbrendan } 4469fa94a07fSbrendan 44703a737e0dSbrendan if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4471fa94a07fSbrendan /* 4472fa94a07fSbrendan * When nearing the end of the device, evict to the end 4473fa94a07fSbrendan * before the device write hand jumps to the start. 4474fa94a07fSbrendan */ 4475fa94a07fSbrendan taddr = dev->l2ad_end; 4476fa94a07fSbrendan } else { 4477fa94a07fSbrendan taddr = dev->l2ad_hand + distance; 4478fa94a07fSbrendan } 4479fa94a07fSbrendan DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4480fa94a07fSbrendan uint64_t, taddr, boolean_t, all); 4481fa94a07fSbrendan 4482fa94a07fSbrendan top: 4483fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 4484*7adb730bSGeorge Wilson for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) { 4485*7adb730bSGeorge Wilson hdr_prev = list_prev(buflist, hdr); 4486fa94a07fSbrendan 4487*7adb730bSGeorge Wilson hash_lock = HDR_LOCK(hdr); 4488fa94a07fSbrendan if (!mutex_tryenter(hash_lock)) { 4489fa94a07fSbrendan /* 4490fa94a07fSbrendan * Missed the hash lock. Retry. 4491fa94a07fSbrendan */ 4492fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4493fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 4494fa94a07fSbrendan mutex_enter(hash_lock); 4495fa94a07fSbrendan mutex_exit(hash_lock); 4496fa94a07fSbrendan goto top; 4497fa94a07fSbrendan } 4498fa94a07fSbrendan 4499*7adb730bSGeorge Wilson if (HDR_L2_WRITE_HEAD(hdr)) { 4500fa94a07fSbrendan /* 4501fa94a07fSbrendan * We hit a write head node. Leave it for 4502fa94a07fSbrendan * l2arc_write_done(). 4503fa94a07fSbrendan */ 4504*7adb730bSGeorge Wilson list_remove(buflist, hdr); 4505fa94a07fSbrendan mutex_exit(hash_lock); 4506fa94a07fSbrendan continue; 4507fa94a07fSbrendan } 4508fa94a07fSbrendan 4509*7adb730bSGeorge Wilson if (!all && hdr->b_l2hdr != NULL && 4510*7adb730bSGeorge Wilson (hdr->b_l2hdr->b_daddr > taddr || 4511*7adb730bSGeorge Wilson hdr->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4512fa94a07fSbrendan /* 4513fa94a07fSbrendan * We've evicted to the target address, 4514fa94a07fSbrendan * or the end of the device. 4515fa94a07fSbrendan */ 4516fa94a07fSbrendan mutex_exit(hash_lock); 4517fa94a07fSbrendan break; 4518fa94a07fSbrendan } 4519fa94a07fSbrendan 4520*7adb730bSGeorge Wilson if (HDR_FREE_IN_PROGRESS(hdr)) { 4521fa94a07fSbrendan /* 4522fa94a07fSbrendan * Already on the path to destruction. 
4523fa94a07fSbrendan */ 4524fa94a07fSbrendan mutex_exit(hash_lock); 4525fa94a07fSbrendan continue; 4526fa94a07fSbrendan } 4527fa94a07fSbrendan 4528*7adb730bSGeorge Wilson if (hdr->b_state == arc_l2c_only) { 4529*7adb730bSGeorge Wilson ASSERT(!HDR_L2_READING(hdr)); 4530fa94a07fSbrendan /* 4531fa94a07fSbrendan * This doesn't exist in the ARC. Destroy. 4532fa94a07fSbrendan * arc_hdr_destroy() will call list_remove() 4533fa94a07fSbrendan * and decrement arcstat_l2_size. 4534fa94a07fSbrendan */ 4535*7adb730bSGeorge Wilson arc_change_state(arc_anon, hdr, hash_lock); 4536*7adb730bSGeorge Wilson arc_hdr_destroy(hdr); 4537fa94a07fSbrendan } else { 45383a737e0dSbrendan /* 45393a737e0dSbrendan * Invalidate issued or about to be issued 45403a737e0dSbrendan * reads, since we may be about to write 45413a737e0dSbrendan * over this location. 45423a737e0dSbrendan */ 4543*7adb730bSGeorge Wilson if (HDR_L2_READING(hdr)) { 45443a737e0dSbrendan ARCSTAT_BUMP(arcstat_l2_evict_reading); 4545*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2_EVICTED; 45463a737e0dSbrendan } 45473a737e0dSbrendan 4548fa94a07fSbrendan /* 4549fa94a07fSbrendan * Tell ARC this no longer exists in L2ARC. 4550fa94a07fSbrendan */ 4551*7adb730bSGeorge Wilson if (hdr->b_l2hdr != NULL) { 4552*7adb730bSGeorge Wilson abl2 = hdr->b_l2hdr; 4553aad02571SSaso Kiselkov ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize); 45543038a2b4SSaso Kiselkov bytes_evicted += abl2->b_asize; 4555*7adb730bSGeorge Wilson hdr->b_l2hdr = NULL; 4556fa94a07fSbrendan kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4557*7adb730bSGeorge Wilson ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 4558fa94a07fSbrendan } 4559*7adb730bSGeorge Wilson list_remove(buflist, hdr); 4560fa94a07fSbrendan 4561fa94a07fSbrendan /* 4562fa94a07fSbrendan * This may have been leftover after a 4563fa94a07fSbrendan * failed write. 4564fa94a07fSbrendan */ 4565*7adb730bSGeorge Wilson hdr->b_flags &= ~ARC_FLAG_L2_WRITING; 4566fa94a07fSbrendan } 4567fa94a07fSbrendan mutex_exit(hash_lock); 4568fa94a07fSbrendan } 4569fa94a07fSbrendan mutex_exit(&l2arc_buflist_mtx); 4570fa94a07fSbrendan 45713038a2b4SSaso Kiselkov vdev_space_update(dev->l2ad_vdev, -bytes_evicted, 0, 0); 4572fa94a07fSbrendan dev->l2ad_evict = taddr; 4573fa94a07fSbrendan } 4574fa94a07fSbrendan 4575fa94a07fSbrendan /* 4576fa94a07fSbrendan * Find and write ARC buffers to the L2ARC device. 4577fa94a07fSbrendan * 4578*7adb730bSGeorge Wilson * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid 4579fa94a07fSbrendan * for reading until they have completed writing. 4580aad02571SSaso Kiselkov * The headroom_boost is an in-out parameter used to maintain headroom boost 4581aad02571SSaso Kiselkov * state between calls to this function. 4582aad02571SSaso Kiselkov * 4583aad02571SSaso Kiselkov * Returns the number of bytes actually written (which may be smaller than 4584aad02571SSaso Kiselkov * the delta by which the device hand has changed due to alignment). 
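 * The work proceeds in two passes: buffers are first selected and
 * linked onto the device buflist while the relevant ARC list lock is
 * held, and only afterwards, outside those locks, are they optionally
 * compressed and issued as children of a single parent zio created
 * with zio_root().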
4585fa94a07fSbrendan */ 45865a98e54bSBrendan Gregg - Sun Microsystems static uint64_t 4587aad02571SSaso Kiselkov l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz, 4588aad02571SSaso Kiselkov boolean_t *headroom_boost) 4589fa94a07fSbrendan { 4590*7adb730bSGeorge Wilson arc_buf_hdr_t *hdr, *hdr_prev, *head; 4591fa94a07fSbrendan list_t *list; 4592aad02571SSaso Kiselkov uint64_t write_asize, write_psize, write_sz, headroom, 4593aad02571SSaso Kiselkov buf_compress_minsz; 4594fa94a07fSbrendan void *buf_data; 4595aad02571SSaso Kiselkov kmutex_t *list_lock; 4596aad02571SSaso Kiselkov boolean_t full; 4597fa94a07fSbrendan l2arc_write_callback_t *cb; 4598fa94a07fSbrendan zio_t *pio, *wzio; 4599e9103aaeSGarrett D'Amore uint64_t guid = spa_load_guid(spa); 4600aad02571SSaso Kiselkov const boolean_t do_headroom_boost = *headroom_boost; 4601fa94a07fSbrendan 4602fa94a07fSbrendan ASSERT(dev->l2ad_vdev != NULL); 4603fa94a07fSbrendan 4604aad02571SSaso Kiselkov /* Lower the flag now, we might want to raise it again later. */ 4605aad02571SSaso Kiselkov *headroom_boost = B_FALSE; 4606aad02571SSaso Kiselkov 4607fa94a07fSbrendan pio = NULL; 4608aad02571SSaso Kiselkov write_sz = write_asize = write_psize = 0; 4609fa94a07fSbrendan full = B_FALSE; 46101ab7f2deSmaybee head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4611*7adb730bSGeorge Wilson head->b_flags |= ARC_FLAG_L2_WRITE_HEAD; 4612fa94a07fSbrendan 4613aad02571SSaso Kiselkov /* 4614aad02571SSaso Kiselkov * We will want to try to compress buffers that are at least 2x the 4615aad02571SSaso Kiselkov * device sector size. 4616aad02571SSaso Kiselkov */ 4617aad02571SSaso Kiselkov buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift; 4618aad02571SSaso Kiselkov 4619fa94a07fSbrendan /* 4620fa94a07fSbrendan * Copy buffers for L2ARC writing. 4621fa94a07fSbrendan */ 4622fa94a07fSbrendan mutex_enter(&l2arc_buflist_mtx); 4623fa94a07fSbrendan for (int try = 0; try <= 3; try++) { 4624aad02571SSaso Kiselkov uint64_t passed_sz = 0; 4625aad02571SSaso Kiselkov 4626fa94a07fSbrendan list = l2arc_list_locked(try, &list_lock); 4627fa94a07fSbrendan 46283a737e0dSbrendan /* 46293a737e0dSbrendan * L2ARC fast warmup. 46303a737e0dSbrendan * 46313a737e0dSbrendan * Until the ARC is warm and starts to evict, read from the 46323a737e0dSbrendan * head of the ARC lists rather than the tail. 46333a737e0dSbrendan */ 46343a737e0dSbrendan if (arc_warm == B_FALSE) 4635*7adb730bSGeorge Wilson hdr = list_head(list); 46363a737e0dSbrendan else 4637*7adb730bSGeorge Wilson hdr = list_tail(list); 46383a737e0dSbrendan 4639aad02571SSaso Kiselkov headroom = target_sz * l2arc_headroom; 4640aad02571SSaso Kiselkov if (do_headroom_boost) 4641aad02571SSaso Kiselkov headroom = (headroom * l2arc_headroom_boost) / 100; 4642aad02571SSaso Kiselkov 4643*7adb730bSGeorge Wilson for (; hdr; hdr = hdr_prev) { 4644aad02571SSaso Kiselkov l2arc_buf_hdr_t *l2hdr; 4645aad02571SSaso Kiselkov kmutex_t *hash_lock; 4646aad02571SSaso Kiselkov uint64_t buf_sz; 4647aad02571SSaso Kiselkov 46483a737e0dSbrendan if (arc_warm == B_FALSE) 4649*7adb730bSGeorge Wilson hdr_prev = list_next(list, hdr); 46503a737e0dSbrendan else 4651*7adb730bSGeorge Wilson hdr_prev = list_prev(list, hdr); 4652fa94a07fSbrendan 4653*7adb730bSGeorge Wilson hash_lock = HDR_LOCK(hdr); 4654aad02571SSaso Kiselkov if (!mutex_tryenter(hash_lock)) { 4655fa94a07fSbrendan /* 4656fa94a07fSbrendan * Skip this buffer rather than waiting. 
4657fa94a07fSbrendan */
4658fa94a07fSbrendan continue;
4659fa94a07fSbrendan }
4660fa94a07fSbrendan 
4661*7adb730bSGeorge Wilson passed_sz += hdr->b_size;
4662fa94a07fSbrendan if (passed_sz > headroom) {
4663fa94a07fSbrendan /*
4664fa94a07fSbrendan * Searched too far.
4665fa94a07fSbrendan */
4666fa94a07fSbrendan mutex_exit(hash_lock);
4667fa94a07fSbrendan break;
4668fa94a07fSbrendan }
4669fa94a07fSbrendan 
4670*7adb730bSGeorge Wilson if (!l2arc_write_eligible(guid, hdr)) {
4671fa94a07fSbrendan mutex_exit(hash_lock);
4672fa94a07fSbrendan continue;
4673fa94a07fSbrendan }
4674fa94a07fSbrendan 
4675*7adb730bSGeorge Wilson if ((write_sz + hdr->b_size) > target_sz) {
4676fa94a07fSbrendan full = B_TRUE;
4677fa94a07fSbrendan mutex_exit(hash_lock);
4678fa94a07fSbrendan break;
4679fa94a07fSbrendan }
4680fa94a07fSbrendan 
4681fa94a07fSbrendan if (pio == NULL) {
4682fa94a07fSbrendan /*
4683fa94a07fSbrendan * Insert a dummy header on the buflist so
4684fa94a07fSbrendan * l2arc_write_done() can find where the
4685fa94a07fSbrendan * write buffers begin without searching.
4686fa94a07fSbrendan */
4687fa94a07fSbrendan list_insert_head(dev->l2ad_buflist, head);
4688fa94a07fSbrendan 
4689fa94a07fSbrendan cb = kmem_alloc(
4690fa94a07fSbrendan sizeof (l2arc_write_callback_t), KM_SLEEP);
4691fa94a07fSbrendan cb->l2wcb_dev = dev;
4692fa94a07fSbrendan cb->l2wcb_head = head;
4693fa94a07fSbrendan pio = zio_root(spa, l2arc_write_done, cb,
4694fa94a07fSbrendan ZIO_FLAG_CANFAIL);
4695fa94a07fSbrendan }
4696fa94a07fSbrendan 
4697fa94a07fSbrendan /*
4698fa94a07fSbrendan * Create and add a new L2ARC header.
4699fa94a07fSbrendan */
4700aad02571SSaso Kiselkov l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4701aad02571SSaso Kiselkov l2hdr->b_dev = dev;
4702*7adb730bSGeorge Wilson hdr->b_flags |= ARC_FLAG_L2_WRITING;
4703aad02571SSaso Kiselkov 
4704aad02571SSaso Kiselkov /*
4705aad02571SSaso Kiselkov * Temporarily stash the data buffer in b_tmp_cdata.
4706aad02571SSaso Kiselkov * The subsequent write step will pick it up from
4707*7adb730bSGeorge Wilson * there. This is because we can't access hdr->b_buf
4708aad02571SSaso Kiselkov * without holding the hash_lock, which we in turn
4709aad02571SSaso Kiselkov * can't access without holding the ARC list locks
4710aad02571SSaso Kiselkov * (which we want to avoid during compression/writing).
4711aad02571SSaso Kiselkov */
4712aad02571SSaso Kiselkov l2hdr->b_compress = ZIO_COMPRESS_OFF;
4713*7adb730bSGeorge Wilson l2hdr->b_asize = hdr->b_size;
4714*7adb730bSGeorge Wilson l2hdr->b_tmp_cdata = hdr->b_buf->b_data;
4715aad02571SSaso Kiselkov 
4716*7adb730bSGeorge Wilson buf_sz = hdr->b_size;
4717*7adb730bSGeorge Wilson hdr->b_l2hdr = l2hdr;
4718aad02571SSaso Kiselkov 
4719*7adb730bSGeorge Wilson list_insert_head(dev->l2ad_buflist, hdr);
4720fa94a07fSbrendan 
4721fa94a07fSbrendan /*
4722fa94a07fSbrendan * Compute and store the buffer cksum before
4723fa94a07fSbrendan * writing. In debug builds the cksum is verified first.
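 * Note that this checksum covers the uncompressed ARC data; the
 * compressed stream written to the cache device carries no checksum
 * of its own (see l2arc_decompress_zio()), so corruption of it is
 * only detected on the read path after decompression.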
4724fa94a07fSbrendan */ 4725*7adb730bSGeorge Wilson arc_cksum_verify(hdr->b_buf); 4726*7adb730bSGeorge Wilson arc_cksum_compute(hdr->b_buf, B_TRUE); 4727fa94a07fSbrendan 4728fa94a07fSbrendan mutex_exit(hash_lock); 4729fa94a07fSbrendan 4730aad02571SSaso Kiselkov write_sz += buf_sz; 4731aad02571SSaso Kiselkov } 4732aad02571SSaso Kiselkov 4733aad02571SSaso Kiselkov mutex_exit(list_lock); 4734aad02571SSaso Kiselkov 4735aad02571SSaso Kiselkov if (full == B_TRUE) 4736aad02571SSaso Kiselkov break; 4737aad02571SSaso Kiselkov } 4738aad02571SSaso Kiselkov 4739aad02571SSaso Kiselkov /* No buffers selected for writing? */ 4740aad02571SSaso Kiselkov if (pio == NULL) { 4741aad02571SSaso Kiselkov ASSERT0(write_sz); 4742aad02571SSaso Kiselkov mutex_exit(&l2arc_buflist_mtx); 4743aad02571SSaso Kiselkov kmem_cache_free(hdr_cache, head); 4744aad02571SSaso Kiselkov return (0); 4745aad02571SSaso Kiselkov } 4746aad02571SSaso Kiselkov 4747aad02571SSaso Kiselkov /* 4748aad02571SSaso Kiselkov * Now start writing the buffers. We're starting at the write head 4749aad02571SSaso Kiselkov * and work backwards, retracing the course of the buffer selector 4750aad02571SSaso Kiselkov * loop above. 4751aad02571SSaso Kiselkov */ 4752*7adb730bSGeorge Wilson for (hdr = list_prev(dev->l2ad_buflist, head); hdr; 4753*7adb730bSGeorge Wilson hdr = list_prev(dev->l2ad_buflist, hdr)) { 4754aad02571SSaso Kiselkov l2arc_buf_hdr_t *l2hdr; 4755aad02571SSaso Kiselkov uint64_t buf_sz; 4756aad02571SSaso Kiselkov 4757aad02571SSaso Kiselkov /* 4758aad02571SSaso Kiselkov * We shouldn't need to lock the buffer here, since we flagged 4759*7adb730bSGeorge Wilson * it as ARC_FLAG_L2_WRITING in the previous step, but we must 4760*7adb730bSGeorge Wilson * take care to only access its L2 cache parameters. In 4761*7adb730bSGeorge Wilson * particular, hdr->b_buf may be invalid by now due to 4762*7adb730bSGeorge Wilson * ARC eviction. 4763aad02571SSaso Kiselkov */ 4764*7adb730bSGeorge Wilson l2hdr = hdr->b_l2hdr; 4765aad02571SSaso Kiselkov l2hdr->b_daddr = dev->l2ad_hand; 4766aad02571SSaso Kiselkov 4767*7adb730bSGeorge Wilson if ((hdr->b_flags & ARC_FLAG_L2COMPRESS) && 4768aad02571SSaso Kiselkov l2hdr->b_asize >= buf_compress_minsz) { 4769aad02571SSaso Kiselkov if (l2arc_compress_buf(l2hdr)) { 4770aad02571SSaso Kiselkov /* 4771aad02571SSaso Kiselkov * If compression succeeded, enable headroom 4772aad02571SSaso Kiselkov * boost on the next scan cycle. 4773aad02571SSaso Kiselkov */ 4774aad02571SSaso Kiselkov *headroom_boost = B_TRUE; 4775aad02571SSaso Kiselkov } 4776aad02571SSaso Kiselkov } 4777aad02571SSaso Kiselkov 4778aad02571SSaso Kiselkov /* 4779aad02571SSaso Kiselkov * Pick up the buffer data we had previously stashed away 4780aad02571SSaso Kiselkov * (and now potentially also compressed). 4781aad02571SSaso Kiselkov */ 4782aad02571SSaso Kiselkov buf_data = l2hdr->b_tmp_cdata; 4783aad02571SSaso Kiselkov buf_sz = l2hdr->b_asize; 4784aad02571SSaso Kiselkov 4785aad02571SSaso Kiselkov /* Compression may have squashed the buffer to zero length. 
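 * Such all-zero buffers were tagged ZIO_COMPRESS_EMPTY by
 * l2arc_compress_buf() above and consume no space on the device;
 * on a read hit they are reconstructed by zero-filling the ARC
 * buffer in l2arc_decompress_zio().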
*/ 4786aad02571SSaso Kiselkov if (buf_sz != 0) { 4787aad02571SSaso Kiselkov uint64_t buf_p_sz; 4788aad02571SSaso Kiselkov 4789fa94a07fSbrendan wzio = zio_write_phys(pio, dev->l2ad_vdev, 4790fa94a07fSbrendan dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4791fa94a07fSbrendan NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4792fa94a07fSbrendan ZIO_FLAG_CANFAIL, B_FALSE); 4793fa94a07fSbrendan 4794fa94a07fSbrendan DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4795fa94a07fSbrendan zio_t *, wzio); 4796fa94a07fSbrendan (void) zio_nowait(wzio); 4797fa94a07fSbrendan 4798aad02571SSaso Kiselkov write_asize += buf_sz; 4799e14bb325SJeff Bonwick /* 4800e14bb325SJeff Bonwick * Keep the clock hand suitably device-aligned. 4801e14bb325SJeff Bonwick */ 4802aad02571SSaso Kiselkov buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4803aad02571SSaso Kiselkov write_psize += buf_p_sz; 4804aad02571SSaso Kiselkov dev->l2ad_hand += buf_p_sz; 4805fa94a07fSbrendan } 4806fa94a07fSbrendan } 4807fa94a07fSbrendan 4808aad02571SSaso Kiselkov mutex_exit(&l2arc_buflist_mtx); 4809fa94a07fSbrendan 4810aad02571SSaso Kiselkov ASSERT3U(write_asize, <=, target_sz); 4811fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_writes_sent); 4812aad02571SSaso Kiselkov ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize); 4813fa94a07fSbrendan ARCSTAT_INCR(arcstat_l2_size, write_sz); 4814aad02571SSaso Kiselkov ARCSTAT_INCR(arcstat_l2_asize, write_asize); 48153038a2b4SSaso Kiselkov vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0); 4816fa94a07fSbrendan 4817fa94a07fSbrendan /* 4818fa94a07fSbrendan * Bump device hand to the device start if it is approaching the end. 4819fa94a07fSbrendan * l2arc_evict() will already have evicted ahead for this case. 4820fa94a07fSbrendan */ 48213a737e0dSbrendan if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4822fa94a07fSbrendan dev->l2ad_hand = dev->l2ad_start; 4823fa94a07fSbrendan dev->l2ad_evict = dev->l2ad_start; 4824fa94a07fSbrendan dev->l2ad_first = B_FALSE; 4825fa94a07fSbrendan } 4826fa94a07fSbrendan 48275a98e54bSBrendan Gregg - Sun Microsystems dev->l2ad_writing = B_TRUE; 4828fa94a07fSbrendan (void) zio_wait(pio); 48295a98e54bSBrendan Gregg - Sun Microsystems dev->l2ad_writing = B_FALSE; 48305a98e54bSBrendan Gregg - Sun Microsystems 4831aad02571SSaso Kiselkov return (write_asize); 4832aad02571SSaso Kiselkov } 4833aad02571SSaso Kiselkov 4834aad02571SSaso Kiselkov /* 4835aad02571SSaso Kiselkov * Compresses an L2ARC buffer. 4836aad02571SSaso Kiselkov * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its 4837aad02571SSaso Kiselkov * size in l2hdr->b_asize. This routine tries to compress the data and 4838aad02571SSaso Kiselkov * depending on the compression result there are three possible outcomes: 4839aad02571SSaso Kiselkov * *) The buffer was incompressible. The original l2hdr contents were left 4840aad02571SSaso Kiselkov * untouched and are ready for writing to an L2 device. 4841aad02571SSaso Kiselkov * *) The buffer was all-zeros, so there is no need to write it to an L2 4842aad02571SSaso Kiselkov * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is 4843aad02571SSaso Kiselkov * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY. 4844aad02571SSaso Kiselkov * *) Compression succeeded and b_tmp_cdata was replaced with a temporary 4845aad02571SSaso Kiselkov * data buffer which holds the compressed data to be written, and b_asize 4846aad02571SSaso Kiselkov * tells us how much data there is. 
b_compress is set to the appropriate 4847aad02571SSaso Kiselkov * compression algorithm. Once writing is done, invoke 4848aad02571SSaso Kiselkov * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer. 4849aad02571SSaso Kiselkov * 4850aad02571SSaso Kiselkov * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the 4851aad02571SSaso Kiselkov * buffer was incompressible). 4852aad02571SSaso Kiselkov */ 4853aad02571SSaso Kiselkov static boolean_t 4854aad02571SSaso Kiselkov l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr) 4855aad02571SSaso Kiselkov { 4856aad02571SSaso Kiselkov void *cdata; 48575d7b4d43SMatthew Ahrens size_t csize, len, rounded; 4858aad02571SSaso Kiselkov 4859aad02571SSaso Kiselkov ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF); 4860aad02571SSaso Kiselkov ASSERT(l2hdr->b_tmp_cdata != NULL); 4861aad02571SSaso Kiselkov 4862aad02571SSaso Kiselkov len = l2hdr->b_asize; 4863aad02571SSaso Kiselkov cdata = zio_data_buf_alloc(len); 4864aad02571SSaso Kiselkov csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata, 4865aad02571SSaso Kiselkov cdata, l2hdr->b_asize); 4866aad02571SSaso Kiselkov 48675d7b4d43SMatthew Ahrens rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE); 48685d7b4d43SMatthew Ahrens if (rounded > csize) { 48695d7b4d43SMatthew Ahrens bzero((char *)cdata + csize, rounded - csize); 48705d7b4d43SMatthew Ahrens csize = rounded; 48715d7b4d43SMatthew Ahrens } 48725d7b4d43SMatthew Ahrens 4873aad02571SSaso Kiselkov if (csize == 0) { 4874aad02571SSaso Kiselkov /* zero block, indicate that there's nothing to write */ 4875aad02571SSaso Kiselkov zio_data_buf_free(cdata, len); 4876aad02571SSaso Kiselkov l2hdr->b_compress = ZIO_COMPRESS_EMPTY; 4877aad02571SSaso Kiselkov l2hdr->b_asize = 0; 4878aad02571SSaso Kiselkov l2hdr->b_tmp_cdata = NULL; 4879aad02571SSaso Kiselkov ARCSTAT_BUMP(arcstat_l2_compress_zeros); 4880aad02571SSaso Kiselkov return (B_TRUE); 4881aad02571SSaso Kiselkov } else if (csize > 0 && csize < len) { 4882aad02571SSaso Kiselkov /* 4883aad02571SSaso Kiselkov * Compression succeeded, we'll keep the cdata around for 4884aad02571SSaso Kiselkov * writing and release it afterwards. 4885aad02571SSaso Kiselkov */ 4886aad02571SSaso Kiselkov l2hdr->b_compress = ZIO_COMPRESS_LZ4; 4887aad02571SSaso Kiselkov l2hdr->b_asize = csize; 4888aad02571SSaso Kiselkov l2hdr->b_tmp_cdata = cdata; 4889aad02571SSaso Kiselkov ARCSTAT_BUMP(arcstat_l2_compress_successes); 4890aad02571SSaso Kiselkov return (B_TRUE); 4891aad02571SSaso Kiselkov } else { 4892aad02571SSaso Kiselkov /* 4893aad02571SSaso Kiselkov * Compression failed, release the compressed buffer. 4894aad02571SSaso Kiselkov * l2hdr will be left unmodified. 4895aad02571SSaso Kiselkov */ 4896aad02571SSaso Kiselkov zio_data_buf_free(cdata, len); 4897aad02571SSaso Kiselkov ARCSTAT_BUMP(arcstat_l2_compress_failures); 4898aad02571SSaso Kiselkov return (B_FALSE); 4899aad02571SSaso Kiselkov } 4900aad02571SSaso Kiselkov } 4901aad02571SSaso Kiselkov 4902aad02571SSaso Kiselkov /* 4903aad02571SSaso Kiselkov * Decompresses a zio read back from an l2arc device. On success, the 4904aad02571SSaso Kiselkov * underlying zio's io_data buffer is overwritten by the uncompressed 4905aad02571SSaso Kiselkov * version. On decompression error (corrupt compressed stream), the 4906aad02571SSaso Kiselkov * zio->io_error value is set to signal an I/O error. 
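 *
 * As an illustrative example: a 128 KB ARC buffer stored on the cache
 * device as a (hypothetical) 40 KB LZ4 stream is read back as 40 KB at
 * the front of that same, larger ARC buffer; after decompression the
 * buffer again holds the full 128 KB and io_size is restored to match.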
4907aad02571SSaso Kiselkov *
4908aad02571SSaso Kiselkov * Please note that the compressed data stream is not checksummed, so
4909aad02571SSaso Kiselkov * if the underlying device is experiencing data corruption, we may feed
4910aad02571SSaso Kiselkov * corrupt data to the decompressor, which therefore needs to be
4911aad02571SSaso Kiselkov * able to handle this situation (LZ4 does).
4912aad02571SSaso Kiselkov */
4913aad02571SSaso Kiselkov static void
4914aad02571SSaso Kiselkov l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
4915aad02571SSaso Kiselkov {
4916aad02571SSaso Kiselkov ASSERT(L2ARC_IS_VALID_COMPRESS(c));
4917aad02571SSaso Kiselkov 
4918aad02571SSaso Kiselkov if (zio->io_error != 0) {
4919aad02571SSaso Kiselkov /*
4920aad02571SSaso Kiselkov * An I/O error has occurred, just restore the original io
4921aad02571SSaso Kiselkov * size in preparation for a main pool read.
4922aad02571SSaso Kiselkov */
4923aad02571SSaso Kiselkov zio->io_orig_size = zio->io_size = hdr->b_size;
4924aad02571SSaso Kiselkov return;
4925aad02571SSaso Kiselkov }
4926aad02571SSaso Kiselkov 
4927aad02571SSaso Kiselkov if (c == ZIO_COMPRESS_EMPTY) {
4928aad02571SSaso Kiselkov /*
4929aad02571SSaso Kiselkov * An empty buffer results in a null zio, which means we
4930aad02571SSaso Kiselkov * need to fill its io_data after we're done restoring the
4931aad02571SSaso Kiselkov * buffer's contents.
4932aad02571SSaso Kiselkov */
4933aad02571SSaso Kiselkov ASSERT(hdr->b_buf != NULL);
4934aad02571SSaso Kiselkov bzero(hdr->b_buf->b_data, hdr->b_size);
4935aad02571SSaso Kiselkov zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
4936aad02571SSaso Kiselkov } else {
4937aad02571SSaso Kiselkov ASSERT(zio->io_data != NULL);
4938aad02571SSaso Kiselkov /*
4939aad02571SSaso Kiselkov * We copy the compressed data from the start of the arc buffer
4940aad02571SSaso Kiselkov * (the zio_read will have pulled in only what we need, the
4941aad02571SSaso Kiselkov * rest is garbage which we will overwrite at decompression)
4942aad02571SSaso Kiselkov * and then decompress back to the ARC data buffer. This way we
4943aad02571SSaso Kiselkov * can minimize copying by simply decompressing back over the
4944aad02571SSaso Kiselkov * original compressed data (rather than decompressing to an
4945aad02571SSaso Kiselkov * aux buffer and then copying back the uncompressed buffer,
4946aad02571SSaso Kiselkov * which is likely to be much larger).
4947aad02571SSaso Kiselkov */
4948aad02571SSaso Kiselkov uint64_t csize;
4949aad02571SSaso Kiselkov void *cdata;
4950aad02571SSaso Kiselkov 
4951aad02571SSaso Kiselkov csize = zio->io_size;
4952aad02571SSaso Kiselkov cdata = zio_data_buf_alloc(csize);
4953aad02571SSaso Kiselkov bcopy(zio->io_data, cdata, csize);
4954aad02571SSaso Kiselkov if (zio_decompress_data(c, cdata, zio->io_data, csize,
4955aad02571SSaso Kiselkov hdr->b_size) != 0)
4956aad02571SSaso Kiselkov zio->io_error = EIO;
4957aad02571SSaso Kiselkov zio_data_buf_free(cdata, csize);
4958aad02571SSaso Kiselkov }
4959aad02571SSaso Kiselkov 
4960aad02571SSaso Kiselkov /* Restore the expected uncompressed IO size. */
4961aad02571SSaso Kiselkov zio->io_orig_size = zio->io_size = hdr->b_size;
4962aad02571SSaso Kiselkov }
4963aad02571SSaso Kiselkov 
4964aad02571SSaso Kiselkov /*
4965aad02571SSaso Kiselkov * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
4966aad02571SSaso Kiselkov * This buffer serves as a temporary holder of compressed data while 4967aad02571SSaso Kiselkov * the buffer entry is being written to an l2arc device. Once that is 4968aad02571SSaso Kiselkov * done, we can dispose of it. 4969aad02571SSaso Kiselkov */ 4970aad02571SSaso Kiselkov static void 4971*7adb730bSGeorge Wilson l2arc_release_cdata_buf(arc_buf_hdr_t *hdr) 4972aad02571SSaso Kiselkov { 4973*7adb730bSGeorge Wilson l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr; 4974aad02571SSaso Kiselkov 4975aad02571SSaso Kiselkov if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) { 4976aad02571SSaso Kiselkov /* 4977aad02571SSaso Kiselkov * If the data was compressed, then we've allocated a 4978aad02571SSaso Kiselkov * temporary buffer for it, so now we need to release it. 4979aad02571SSaso Kiselkov */ 4980aad02571SSaso Kiselkov ASSERT(l2hdr->b_tmp_cdata != NULL); 4981*7adb730bSGeorge Wilson zio_data_buf_free(l2hdr->b_tmp_cdata, hdr->b_size); 4982aad02571SSaso Kiselkov } 4983aad02571SSaso Kiselkov l2hdr->b_tmp_cdata = NULL; 4984fa94a07fSbrendan } 4985fa94a07fSbrendan 4986fa94a07fSbrendan /* 4987fa94a07fSbrendan * This thread feeds the L2ARC at regular intervals. This is the beating 4988fa94a07fSbrendan * heart of the L2ARC. 4989fa94a07fSbrendan */ 4990fa94a07fSbrendan static void 4991fa94a07fSbrendan l2arc_feed_thread(void) 4992fa94a07fSbrendan { 4993fa94a07fSbrendan callb_cpr_t cpr; 4994fa94a07fSbrendan l2arc_dev_t *dev; 4995fa94a07fSbrendan spa_t *spa; 49965a98e54bSBrendan Gregg - Sun Microsystems uint64_t size, wrote; 4997d3d50737SRafael Vanoni clock_t begin, next = ddi_get_lbolt(); 4998aad02571SSaso Kiselkov boolean_t headroom_boost = B_FALSE; 4999fa94a07fSbrendan 5000fa94a07fSbrendan CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 5001fa94a07fSbrendan 5002fa94a07fSbrendan mutex_enter(&l2arc_feed_thr_lock); 5003fa94a07fSbrendan 5004fa94a07fSbrendan while (l2arc_thread_exit == 0) { 5005fa94a07fSbrendan CALLB_CPR_SAFE_BEGIN(&cpr); 5006fa94a07fSbrendan (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 50075a98e54bSBrendan Gregg - Sun Microsystems next); 5008fa94a07fSbrendan CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 5009d3d50737SRafael Vanoni next = ddi_get_lbolt() + hz; 5010fa94a07fSbrendan 50113a737e0dSbrendan /* 50123a737e0dSbrendan * Quick check for L2ARC devices. 50133a737e0dSbrendan */ 5014c5904d13Seschrock mutex_enter(&l2arc_dev_mtx); 50153a737e0dSbrendan if (l2arc_ndev == 0) { 50163a737e0dSbrendan mutex_exit(&l2arc_dev_mtx); 50173a737e0dSbrendan continue; 50183a737e0dSbrendan } 50193a737e0dSbrendan mutex_exit(&l2arc_dev_mtx); 5020d3d50737SRafael Vanoni begin = ddi_get_lbolt(); 5021c5904d13Seschrock 5022fa94a07fSbrendan /* 5023c5904d13Seschrock * This selects the next l2arc device to write to, and in 5024c5904d13Seschrock * doing so the next spa to feed from: dev->l2ad_spa. This 50253a737e0dSbrendan * will return NULL if there are now no l2arc devices or if 50263a737e0dSbrendan * they are all faulted. 50273a737e0dSbrendan * 50283a737e0dSbrendan * If a device is returned, its spa's config lock is also 50293a737e0dSbrendan * held to prevent device removal. l2arc_dev_get_next() 50303a737e0dSbrendan * will grab and release l2arc_dev_mtx. 
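 * Devices are walked in round-robin fashion (l2arc_dev_last acts as
 * the rotor), so when several cache devices exist, possibly across
 * different pools, each is fed in turn.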
5031fa94a07fSbrendan */ 50323a737e0dSbrendan if ((dev = l2arc_dev_get_next()) == NULL) 5033fa94a07fSbrendan continue; 50343a737e0dSbrendan 50353a737e0dSbrendan spa = dev->l2ad_spa; 50363a737e0dSbrendan ASSERT(spa != NULL); 5037fa94a07fSbrendan 5038f9af39baSGeorge Wilson /* 5039f9af39baSGeorge Wilson * If the pool is read-only then force the feed thread to 5040f9af39baSGeorge Wilson * sleep a little longer. 5041f9af39baSGeorge Wilson */ 5042f9af39baSGeorge Wilson if (!spa_writeable(spa)) { 5043f9af39baSGeorge Wilson next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; 5044f9af39baSGeorge Wilson spa_config_exit(spa, SCL_L2ARC, dev); 5045f9af39baSGeorge Wilson continue; 5046f9af39baSGeorge Wilson } 5047f9af39baSGeorge Wilson 5048fa94a07fSbrendan /* 5049fa94a07fSbrendan * Avoid contributing to memory pressure. 5050fa94a07fSbrendan */ 5051fa94a07fSbrendan if (arc_reclaim_needed()) { 5052fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 5053e14bb325SJeff Bonwick spa_config_exit(spa, SCL_L2ARC, dev); 5054fa94a07fSbrendan continue; 5055fa94a07fSbrendan } 5056fa94a07fSbrendan 5057fa94a07fSbrendan ARCSTAT_BUMP(arcstat_l2_feeds); 5058fa94a07fSbrendan 5059aad02571SSaso Kiselkov size = l2arc_write_size(); 50603a737e0dSbrendan 5061fa94a07fSbrendan /* 5062fa94a07fSbrendan * Evict L2ARC buffers that will be overwritten. 5063fa94a07fSbrendan */ 50643a737e0dSbrendan l2arc_evict(dev, size, B_FALSE); 5065fa94a07fSbrendan 5066fa94a07fSbrendan /* 5067fa94a07fSbrendan * Write ARC buffers. 5068fa94a07fSbrendan */ 5069aad02571SSaso Kiselkov wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost); 50705a98e54bSBrendan Gregg - Sun Microsystems 50715a98e54bSBrendan Gregg - Sun Microsystems /* 50725a98e54bSBrendan Gregg - Sun Microsystems * Calculate interval between writes. 50735a98e54bSBrendan Gregg - Sun Microsystems */ 50745a98e54bSBrendan Gregg - Sun Microsystems next = l2arc_write_interval(begin, size, wrote); 5075e14bb325SJeff Bonwick spa_config_exit(spa, SCL_L2ARC, dev); 5076fa94a07fSbrendan } 5077fa94a07fSbrendan 5078fa94a07fSbrendan l2arc_thread_exit = 0; 5079fa94a07fSbrendan cv_broadcast(&l2arc_feed_thr_cv); 5080fa94a07fSbrendan CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 5081fa94a07fSbrendan thread_exit(); 5082fa94a07fSbrendan } 5083fa94a07fSbrendan 5084c5904d13Seschrock boolean_t 5085c5904d13Seschrock l2arc_vdev_present(vdev_t *vd) 5086c5904d13Seschrock { 5087c5904d13Seschrock l2arc_dev_t *dev; 5088c5904d13Seschrock 5089c5904d13Seschrock mutex_enter(&l2arc_dev_mtx); 5090c5904d13Seschrock for (dev = list_head(l2arc_dev_list); dev != NULL; 5091c5904d13Seschrock dev = list_next(l2arc_dev_list, dev)) { 5092c5904d13Seschrock if (dev->l2ad_vdev == vd) 5093c5904d13Seschrock break; 5094c5904d13Seschrock } 5095c5904d13Seschrock mutex_exit(&l2arc_dev_mtx); 5096c5904d13Seschrock 5097c5904d13Seschrock return (dev != NULL); 5098c5904d13Seschrock } 5099c5904d13Seschrock 5100fa94a07fSbrendan /* 5101fa94a07fSbrendan * Add a vdev for use by the L2ARC. By this point the spa has already 5102fa94a07fSbrendan * validated the vdev and opened it. 5103fa94a07fSbrendan */ 5104fa94a07fSbrendan void 5105573ca77eSGeorge Wilson l2arc_add_vdev(spa_t *spa, vdev_t *vd) 5106fa94a07fSbrendan { 5107fa94a07fSbrendan l2arc_dev_t *adddev; 5108fa94a07fSbrendan 5109c5904d13Seschrock ASSERT(!l2arc_vdev_present(vd)); 5110c5904d13Seschrock 5111fa94a07fSbrendan /* 5112fa94a07fSbrendan * Create a new l2arc device entry. 
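 * The usable range starts past the front vdev labels
 * (VDEV_LABEL_START_SIZE), so the write hand can wrap back to
 * l2ad_start without ever overwriting label data.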
5113fa94a07fSbrendan */ 5114fa94a07fSbrendan adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 5115fa94a07fSbrendan adddev->l2ad_spa = spa; 5116fa94a07fSbrendan adddev->l2ad_vdev = vd; 5117573ca77eSGeorge Wilson adddev->l2ad_start = VDEV_LABEL_START_SIZE; 5118573ca77eSGeorge Wilson adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 5119fa94a07fSbrendan adddev->l2ad_hand = adddev->l2ad_start; 5120fa94a07fSbrendan adddev->l2ad_evict = adddev->l2ad_start; 5121fa94a07fSbrendan adddev->l2ad_first = B_TRUE; 51225a98e54bSBrendan Gregg - Sun Microsystems adddev->l2ad_writing = B_FALSE; 5123fa94a07fSbrendan 5124fa94a07fSbrendan /* 5125fa94a07fSbrendan * This is a list of all ARC buffers that are still valid on the 5126fa94a07fSbrendan * device. 5127fa94a07fSbrendan */ 5128fa94a07fSbrendan adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 5129fa94a07fSbrendan list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 5130fa94a07fSbrendan offsetof(arc_buf_hdr_t, b_l2node)); 5131fa94a07fSbrendan 5132b24ab676SJeff Bonwick vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 5133fa94a07fSbrendan 5134fa94a07fSbrendan /* 5135fa94a07fSbrendan * Add device to global list 5136fa94a07fSbrendan */ 5137fa94a07fSbrendan mutex_enter(&l2arc_dev_mtx); 5138fa94a07fSbrendan list_insert_head(l2arc_dev_list, adddev); 5139fa94a07fSbrendan atomic_inc_64(&l2arc_ndev); 5140fa94a07fSbrendan mutex_exit(&l2arc_dev_mtx); 5141fa94a07fSbrendan } 5142fa94a07fSbrendan 5143fa94a07fSbrendan /* 5144fa94a07fSbrendan * Remove a vdev from the L2ARC. 5145fa94a07fSbrendan */ 5146fa94a07fSbrendan void 5147fa94a07fSbrendan l2arc_remove_vdev(vdev_t *vd) 5148fa94a07fSbrendan { 5149fa94a07fSbrendan l2arc_dev_t *dev, *nextdev, *remdev = NULL; 5150fa94a07fSbrendan 5151fa94a07fSbrendan /* 5152fa94a07fSbrendan * Find the device by vdev 5153fa94a07fSbrendan */ 5154fa94a07fSbrendan mutex_enter(&l2arc_dev_mtx); 5155fa94a07fSbrendan for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 5156fa94a07fSbrendan nextdev = list_next(l2arc_dev_list, dev); 5157fa94a07fSbrendan if (vd == dev->l2ad_vdev) { 5158fa94a07fSbrendan remdev = dev; 5159fa94a07fSbrendan break; 5160fa94a07fSbrendan } 5161fa94a07fSbrendan } 5162fa94a07fSbrendan ASSERT(remdev != NULL); 5163fa94a07fSbrendan 5164fa94a07fSbrendan /* 5165fa94a07fSbrendan * Remove device from global list 5166fa94a07fSbrendan */ 5167fa94a07fSbrendan list_remove(l2arc_dev_list, remdev); 5168fa94a07fSbrendan l2arc_dev_last = NULL; /* may have been invalidated */ 51693a737e0dSbrendan atomic_dec_64(&l2arc_ndev); 51703a737e0dSbrendan mutex_exit(&l2arc_dev_mtx); 5171fa94a07fSbrendan 5172fa94a07fSbrendan /* 5173fa94a07fSbrendan * Clear all buflists and ARC references. L2ARC device flush. 
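 * Passing the 'all' flag makes l2arc_evict() detach every header still
 * on the buflist regardless of its device address, leaving the list
 * empty for list_destroy() below.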
5174fa94a07fSbrendan */ 5175fa94a07fSbrendan l2arc_evict(remdev, 0, B_TRUE); 5176fa94a07fSbrendan list_destroy(remdev->l2ad_buflist); 5177fa94a07fSbrendan kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 5178fa94a07fSbrendan kmem_free(remdev, sizeof (l2arc_dev_t)); 5179fa94a07fSbrendan } 5180fa94a07fSbrendan 5181fa94a07fSbrendan void 5182e14bb325SJeff Bonwick l2arc_init(void) 5183fa94a07fSbrendan { 5184fa94a07fSbrendan l2arc_thread_exit = 0; 5185fa94a07fSbrendan l2arc_ndev = 0; 5186fa94a07fSbrendan l2arc_writes_sent = 0; 5187fa94a07fSbrendan l2arc_writes_done = 0; 5188fa94a07fSbrendan 5189fa94a07fSbrendan mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 5190fa94a07fSbrendan cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 5191fa94a07fSbrendan mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 5192fa94a07fSbrendan mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 5193fa94a07fSbrendan mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 5194fa94a07fSbrendan 5195fa94a07fSbrendan l2arc_dev_list = &L2ARC_dev_list; 5196fa94a07fSbrendan l2arc_free_on_write = &L2ARC_free_on_write; 5197fa94a07fSbrendan list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 5198fa94a07fSbrendan offsetof(l2arc_dev_t, l2ad_node)); 5199fa94a07fSbrendan list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 5200fa94a07fSbrendan offsetof(l2arc_data_free_t, l2df_list_node)); 5201fa94a07fSbrendan } 5202fa94a07fSbrendan 5203fa94a07fSbrendan void 5204e14bb325SJeff Bonwick l2arc_fini(void) 5205fa94a07fSbrendan { 52063a737e0dSbrendan /* 52073a737e0dSbrendan * This is called from dmu_fini(), which is called from spa_fini(); 52083a737e0dSbrendan * Because of this, we can assume that all l2arc devices have 52093a737e0dSbrendan * already been removed when the pools themselves were removed. 52103a737e0dSbrendan */ 52113a737e0dSbrendan 52123a737e0dSbrendan l2arc_do_free_on_write(); 52133a737e0dSbrendan 5214fa94a07fSbrendan mutex_destroy(&l2arc_feed_thr_lock); 5215fa94a07fSbrendan cv_destroy(&l2arc_feed_thr_cv); 5216fa94a07fSbrendan mutex_destroy(&l2arc_dev_mtx); 5217fa94a07fSbrendan mutex_destroy(&l2arc_buflist_mtx); 5218fa94a07fSbrendan mutex_destroy(&l2arc_free_on_write_mtx); 5219fa94a07fSbrendan 5220fa94a07fSbrendan list_destroy(l2arc_dev_list); 5221fa94a07fSbrendan list_destroy(l2arc_free_on_write); 5222fa94a07fSbrendan } 5223e14bb325SJeff Bonwick 5224e14bb325SJeff Bonwick void 5225e14bb325SJeff Bonwick l2arc_start(void) 5226e14bb325SJeff Bonwick { 52278ad4d6ddSJeff Bonwick if (!(spa_mode_global & FWRITE)) 5228e14bb325SJeff Bonwick return; 5229e14bb325SJeff Bonwick 5230e14bb325SJeff Bonwick (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 5231e14bb325SJeff Bonwick TS_RUN, minclsyspri); 5232e14bb325SJeff Bonwick } 5233e14bb325SJeff Bonwick 5234e14bb325SJeff Bonwick void 5235e14bb325SJeff Bonwick l2arc_stop(void) 5236e14bb325SJeff Bonwick { 52378ad4d6ddSJeff Bonwick if (!(spa_mode_global & FWRITE)) 5238e14bb325SJeff Bonwick return; 5239e14bb325SJeff Bonwick 5240e14bb325SJeff Bonwick mutex_enter(&l2arc_feed_thr_lock); 5241e14bb325SJeff Bonwick cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 5242e14bb325SJeff Bonwick l2arc_thread_exit = 1; 5243e14bb325SJeff Bonwick while (l2arc_thread_exit != 0) 5244e14bb325SJeff Bonwick cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 5245e14bb325SJeff Bonwick mutex_exit(&l2arc_feed_thr_lock); 5246e14bb325SJeff Bonwick } 5247