/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()). Note however that the data associated
 * with the buffer may be evicted prior to the callback. The callback
 * must be made with *no locks held* (to prevent deadlock). Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
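/*
 * A minimal sketch of the ordering rule above (this is the shape of
 * arc_evict() later in this file, not an additional API): the list
 * lock is taken first, and a hash lock is only ever tried, never
 * blocked on, while the list lock is held:
 *
 *	mutex_enter(&state->arcs_mtx);
 *	for (ab = list_tail(list); ab != NULL; ab = ab_prev) {
 *		ab_prev = list_prev(list, ab);
 *		hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock)) {
 *			missed++;	// skip it; never block here
 *			continue;
 *		}
 *		// ... buffer may now be examined or evicted ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->arcs_mtx);
 */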
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <zfs_fletcher.h>

#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
int arc_procfd;
#endif

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;
static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are
 * the only buffers that can be evicted or deleted. Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
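/*
 * A typical lifecycle, in sketch form (the transitions are driven by
 * arc_access() and arc_change_state(), the latter defined below):
 *
 *	write:	ARC_anon --(acquires DVA, written out)--> ARC_mru
 *	reuse:	ARC_mru --(accessed again)--> ARC_mfu
 *	evict:	ARC_mru --> ARC_mru_ghost	(header only, data freed)
 *		ARC_mfu --> ARC_mfu_ghost	(header only, data freed)
 *	l2arc:	a buffer in any of the above may also be cached on the
 *		second level ARC; once only the L2 copy remains, the
 *		header moves to ARC_l2c_only.
 */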
typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_duplicate_buffers;
	kstat_named_t arcstat_duplicate_buffers_size;
	kstat_named_t arcstat_duplicate_reads;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "duplicate_buffers",		KSTAT_DATA_UINT64 },
	{ "duplicate_buffers_size",	KSTAT_DATA_UINT64 },
	{ "duplicate_reads",		KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
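/*
 * For example, a hit on a demand (non-prefetch) metadata buffer is
 * counted like this (this exact call appears in arc_buf_add_ref()
 * below):
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */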
kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_loaned_bytes;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;
	void			*b_thawed;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
static void arc_buf_watch(arc_buf_t *buf);

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags. These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read. However, these flags
 * should never be passed and should only be set by ARC code. When adding new
 * public flags, make sure not to smash the private ones.
 */
#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */
/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_boost;	/* warmup write boost, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	boolean_t		l2ad_writing;	/* currently writing */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	uint64_t	b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}
#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
	hdr->b_cksum0 = 0;
}

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}
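/*
 * A sketch of the locking convention these routines establish (the
 * caller and variable names here are hypothetical; only the protocol
 * matters): buf_hash_find() returns with the bucket's hash lock held
 * on success, so the caller inspects the header and then drops the
 * lock itself.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(guid, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		// hdr fields are protected here; see "The locking model"
 *		mutex_exit(hash_lock);
 *	}
 *
 * buf_hash_insert() follows the same convention: *lockp is set and
 * held on return, whether or not an equal element already existed.
 */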
/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	ASSERT(BUF_EMPTY(buf));
	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	mutex_destroy(&buf->b_evict_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size. The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}
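/*
 * Worked example of the sizing loop in buf_init(), assuming 4K pages
 * and 64GB of physical memory: physmem * PAGESIZE = 2^36 bytes, so
 * hsize doubles from 2^12 until hsize * 65536 >= 2^36, i.e.
 * hsize = 2^20 buckets. At 8 bytes per bucket pointer the table is
 * 8MB, matching the 128KB-per-GB estimate in the comment above.
 */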
#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
	arc_buf_watch(buf);
}

#ifndef _KERNEL
typedef struct procctl {
	long cmd;
	prwatch_t prwatch;
} procctl_t;
#endif

/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = 0;
		ctl.prwatch.pr_wflags = 0;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}

/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = buf->b_hdr->b_size;
		ctl.prwatch.pr_wflags = WA_WRITE;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}
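/*
 * The checksum/watch machinery above implements a simple debugging
 * lifecycle, active only with ZFS_DEBUG_MODIFY set in zfs_flags (plus
 * ZFS_DEBUG=watch for userland watchpoints), in sketch form:
 *
 *	arc_buf_freeze(buf);	// buffer now read-only: a fletcher-2
 *				// checksum is recorded (and, in
 *				// userland, a watchpoint is armed)
 *	arc_cksum_verify(buf);	// panics if the frozen data changed
 *	arc_buf_thaw(buf);	// discards checksum and watchpoint,
 *				// allowing modification again
 *
 * arc_buf_freeze() and arc_buf_thaw() are defined just below.
 */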
panic("modifying buffer while i/o in progress!"); 1018 arc_cksum_verify(buf); 1019 } 1020 1021 mutex_enter(&buf->b_hdr->b_freeze_lock); 1022 if (buf->b_hdr->b_freeze_cksum != NULL) { 1023 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1024 buf->b_hdr->b_freeze_cksum = NULL; 1025 } 1026 1027 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1028 if (buf->b_hdr->b_thawed) 1029 kmem_free(buf->b_hdr->b_thawed, 1); 1030 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP); 1031 } 1032 1033 mutex_exit(&buf->b_hdr->b_freeze_lock); 1034 1035 arc_buf_unwatch(buf); 1036 } 1037 1038 void 1039 arc_buf_freeze(arc_buf_t *buf) 1040 { 1041 kmutex_t *hash_lock; 1042 1043 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1044 return; 1045 1046 hash_lock = HDR_LOCK(buf->b_hdr); 1047 mutex_enter(hash_lock); 1048 1049 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 1050 buf->b_hdr->b_state == arc_anon); 1051 arc_cksum_compute(buf, B_FALSE); 1052 mutex_exit(hash_lock); 1053 1054 } 1055 1056 static void 1057 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 1058 { 1059 ASSERT(MUTEX_HELD(hash_lock)); 1060 1061 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 1062 (ab->b_state != arc_anon)) { 1063 uint64_t delta = ab->b_size * ab->b_datacnt; 1064 list_t *list = &ab->b_state->arcs_list[ab->b_type]; 1065 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 1066 1067 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 1068 mutex_enter(&ab->b_state->arcs_mtx); 1069 ASSERT(list_link_active(&ab->b_arc_node)); 1070 list_remove(list, ab); 1071 if (GHOST_STATE(ab->b_state)) { 1072 ASSERT0(ab->b_datacnt); 1073 ASSERT3P(ab->b_buf, ==, NULL); 1074 delta = ab->b_size; 1075 } 1076 ASSERT(delta > 0); 1077 ASSERT3U(*size, >=, delta); 1078 atomic_add_64(size, -delta); 1079 mutex_exit(&ab->b_state->arcs_mtx); 1080 /* remove the prefetch flag if we get a reference */ 1081 if (ab->b_flags & ARC_PREFETCH) 1082 ab->b_flags &= ~ARC_PREFETCH; 1083 } 1084 } 1085 1086 static int 1087 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 1088 { 1089 int cnt; 1090 arc_state_t *state = ab->b_state; 1091 1092 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 1093 ASSERT(!GHOST_STATE(state)); 1094 1095 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 1096 (state != arc_anon)) { 1097 uint64_t *size = &state->arcs_lsize[ab->b_type]; 1098 1099 ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 1100 mutex_enter(&state->arcs_mtx); 1101 ASSERT(!list_link_active(&ab->b_arc_node)); 1102 list_insert_head(&state->arcs_list[ab->b_type], ab); 1103 ASSERT(ab->b_datacnt > 0); 1104 atomic_add_64(size, ab->b_size * ab->b_datacnt); 1105 mutex_exit(&state->arcs_mtx); 1106 } 1107 return (cnt); 1108 } 1109 1110 /* 1111 * Move the supplied buffer to the indicated state. The mutex 1112 * for the buffer must be held by the caller. 1113 */ 1114 static void 1115 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 1116 { 1117 arc_state_t *old_state = ab->b_state; 1118 int64_t refcnt = refcount_count(&ab->b_refcnt); 1119 uint64_t from_delta, to_delta; 1120 1121 ASSERT(MUTEX_HELD(hash_lock)); 1122 ASSERT(new_state != old_state); 1123 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 1124 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 1125 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon); 1126 1127 from_delta = to_delta = ab->b_datacnt * ab->b_size; 1128 1129 /* 1130 * If this buffer is evictable, transfer it from the 1131 * old state list to the new state list. 
/*
 * Move the supplied buffer to the indicated state. The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
		buf_hash_remove(ab);

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}
void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa_load_guid(spa);
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static char *arc_onloan_tag = "onloan";

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, int size)
{
	arc_buf_t *buf;

	buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);

	atomic_add_64(&arc_loaned_bytes, size);
	return (buf);
}

/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(buf->b_data != NULL);
	(void) refcount_add(&hdr->b_refcnt, tag);
	(void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);

	atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;

	ASSERT(buf->b_data != NULL);
	hdr = buf->b_hdr;
	(void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_refcnt, tag);
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	atomic_add_64(&arc_loaned_bytes, hdr->b_size);
}
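/*
 * A minimal sketch of the loan protocol above, assuming a hypothetical
 * caller that fills a buffer before handing it over:
 *
 *	abuf = arc_loan_buf(spa, size);	// not charged as in-flight data
 *	// ... copy data into abuf->b_data ...
 *	arc_return_buf(abuf, tag);	// hold moves from "onloan" to tag
 *
 * arc_loan_inuse_buf() is the inverse hand-off: it detaches an in-use
 * buffer from its tag and marks it loaned again.
 */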
static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	ASSERT(hdr->b_state != arc_anon);

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);

	/*
	 * This buffer already exists in the arc so create a duplicate
	 * copy for the caller. If the buffer is associated with user data
	 * then track the size and number of duplicates. These stats will be
	 * updated as duplicate buffers are created and destroyed.
	 */
	if (hdr->b_type == ARC_BUFC_DATA) {
		ARCSTAT_BUMP(arcstat_duplicate_buffers);
		ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
	}
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted. Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	mutex_enter(&buf->b_evict_lock);
	if (buf->b_data == NULL) {
		mutex_exit(&buf->b_evict_lock);
		return;
	}
	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	mutex_exit(&buf->b_evict_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer. If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = buf->b_data;
		df->l2df_size = hdr->b_size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(buf->b_data, hdr->b_size);
	}
}
static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		arc_buf_unwatch(buf);

		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf, zio_buf_free);
				arc_space_return(size, ARC_SPACE_DATA);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf, zio_data_buf_free);
				ARCSTAT_INCR(arcstat_data_size, -size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;

		/*
		 * If we're destroying a duplicate buffer make sure
		 * that the appropriate statistics are updated.
		 */
		if (buf->b_hdr->b_datacnt > 1 &&
		    buf->b_hdr->b_type == ARC_BUFC_DATA) {
			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
			ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
		}
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;
	buf->b_next = NULL;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;

	if (l2hdr != NULL) {
		boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
		/*
		 * To prevent arc_free() and l2arc_evict() from
		 * attempting to free the same buffer at the same time,
		 * a FREE_IN_PROGRESS flag is given to arc_free() to
		 * give it priority. l2arc_evict() can't destroy this
		 * header while we are waiting on l2arc_buflist_mtx.
		 *
		 * The hdr may be removed from l2ad_buflist before we
		 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
		 */
		if (!buflist_held) {
			mutex_enter(&l2arc_buflist_mtx);
			l2hdr = hdr->b_l2hdr;
		}

		if (l2hdr != NULL) {
			list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
			kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
			if (hdr->b_state == arc_l2c_only)
				l2arc_hdr_stat_remove();
			hdr->b_l2hdr = NULL;
		}

		if (!buflist_held)
			mutex_exit(&l2arc_buflist_mtx);
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		buf_discard_identity(hdr);
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			mutex_enter(&buf->b_evict_lock);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&buf->b_evict_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}
	if (hdr->b_thawed) {
		kmem_free(hdr->b_thawed, 1);
		hdr->b_thawed = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}
void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		hdr = buf->b_hdr;
		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1) {
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			ASSERT(buf == hdr->b_buf);
			ASSERT(buf->b_efunc == NULL);
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		}
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write. Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			arc_hdr_destroy(hdr);
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		ASSERT(hdr->b_datacnt == 1);
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		ASSERT(buf->b_efunc == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Called from the DMU to determine if the current buffer should be
 * evicted. In order to ensure proper locking, the eviction must be initiated
 * from the DMU. Return true if the buffer is associated with user data and
 * duplicate buffers still exist.
 */
boolean_t
arc_buf_eviction_needed(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	boolean_t evict_needed = B_FALSE;

	if (zfs_disable_dup_eviction)
		return (B_FALSE);

	mutex_enter(&buf->b_evict_lock);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts(); let that function
		 * perform the eviction.
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&buf->b_evict_lock);
		return (B_FALSE);
	} else if (buf->b_data == NULL) {
		/*
		 * We have already been added to the arc eviction list;
		 * recommend eviction.
		 */
		ASSERT3P(hdr, ==, &arc_eviction_hdr);
		mutex_exit(&buf->b_evict_lock);
		return (B_TRUE);
	}

	if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
		evict_needed = B_TRUE;

	mutex_exit(&buf->b_evict_lock);
	return (evict_needed);
}
/*
 * Evict buffers from list until we've removed the specified number of
 * bytes. Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort". It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
static void *
arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    ddi_get_lbolt() - ab->b_arc_access <
		    arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT0(refcount_count(&ab->b_refcnt));
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (!mutex_tryenter(&buf->b_evict_lock)) {
					missed += 1;
					break;
				}
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					mutex_exit(&buf->b_evict_lock);
				} else {
					mutex_exit(&buf->b_evict_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}

			if (ab->b_l2hdr) {
				ARCSTAT_INCR(arcstat_evict_l2_cached,
				    ab->b_size);
			} else {
				if (l2arc_write_eligible(ab->b_spa, ab)) {
					ARCSTAT_INCR(arcstat_evict_l2_eligible,
					    ab->b_size);
				} else {
					ARCSTAT_INCR(
					    arcstat_evict_l2_ineligible,
					    ab->b_size);
				}
			}

			if (ab->b_datacnt == 0) {
				arc_change_state(evicted_state, ab, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(ab));
				ab->b_flags |= ARC_IN_HASH_TABLE;
				ab->b_flags &= ~ARC_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * We have just evicted some data into the ghost state, make
	 * sure we also adjust the ghost state size if necessary.
	 */
	if (arc_no_grow &&
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
		    arc_mru_ghost->arcs_size - arc_c;

		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
			int64_t todelete =
			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
			arc_evict_ghost(arc_mru_ghost, NULL, todelete);
		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
			    arc_mru_ghost->arcs_size +
			    arc_mfu_ghost->arcs_size - arc_c);
			arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
		}
	}

	return (stolen);
}
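/*
 * An example of the recycle path above, in rough sketch form: a caller
 * such as arc_get_data_buf() that needs a 16K data block while the
 * cache is full can ask for an eviction and the evicted block's memory
 * in one step,
 *
 *	stolen = arc_evict(arc_mru, NULL, 16384, TRUE, ARC_BUFC_DATA);
 *
 * and, if a same-sized evictable buffer was found, reuse its b_data
 * allocation directly with no free/alloc round trip. If no suitable
 * candidate was found, stolen is NULL and the caller falls back to a
 * normal allocation.
 */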
necessary.
1842 */
1843 if (arc_no_grow &&
1844 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
1845 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
1846 arc_mru_ghost->arcs_size - arc_c;
1847
1848 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
1849 int64_t todelete =
1850 MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
1851 arc_evict_ghost(arc_mru_ghost, NULL, todelete);
1852 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
1853 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
1854 arc_mru_ghost->arcs_size +
1855 arc_mfu_ghost->arcs_size - arc_c);
1856 arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
1857 }
1858 }
1859
1860 return (stolen);
1861 }
1862
1863 /*
1864 * Remove buffers from list until we've removed the specified number of
1865 * bytes. Destroy the buffers that are removed.
1866 */
1867 static void
1868 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1869 {
1870 arc_buf_hdr_t *ab, *ab_prev;
1871 arc_buf_hdr_t marker = { 0 };
1872 list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1873 kmutex_t *hash_lock;
1874 uint64_t bytes_deleted = 0;
1875 uint64_t bufs_skipped = 0;
1876
1877 ASSERT(GHOST_STATE(state));
1878 top:
1879 mutex_enter(&state->arcs_mtx);
1880 for (ab = list_tail(list); ab; ab = ab_prev) {
1881 ab_prev = list_prev(list, ab);
1882 if (spa && ab->b_spa != spa)
1883 continue;
1884
1885 /* ignore markers */
1886 if (ab->b_spa == 0)
1887 continue;
1888
1889 hash_lock = HDR_LOCK(ab);
1890 /* caller may be trying to modify this buffer, skip it */
1891 if (MUTEX_HELD(hash_lock))
1892 continue;
1893 if (mutex_tryenter(hash_lock)) {
1894 ASSERT(!HDR_IO_IN_PROGRESS(ab));
1895 ASSERT(ab->b_buf == NULL);
1896 ARCSTAT_BUMP(arcstat_deleted);
1897 bytes_deleted += ab->b_size;
1898
1899 if (ab->b_l2hdr != NULL) {
1900 /*
1901 * This buffer is cached on the 2nd Level ARC;
1902 * don't destroy the header.
1903 */
1904 arc_change_state(arc_l2c_only, ab, hash_lock);
1905 mutex_exit(hash_lock);
1906 } else {
1907 arc_change_state(arc_anon, ab, hash_lock);
1908 mutex_exit(hash_lock);
1909 arc_hdr_destroy(ab);
1910 }
1911
1912 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1913 if (bytes >= 0 && bytes_deleted >= bytes)
1914 break;
1915 } else if (bytes < 0) {
1916 /*
1917 * Insert a list marker and then wait for the
1918 * hash lock to become available. Once it's
1919 * available, restart from where we left off.
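 *
 * Note that the mutex_enter()/mutex_exit() pair on the hash
 * lock below does no work under the lock; it exists purely to
 * block until the current holder drops it, while the marker
 * preserves our position across the window where arcs_mtx is
 * released.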
1920 */ 1921 list_insert_after(list, ab, &marker); 1922 mutex_exit(&state->arcs_mtx); 1923 mutex_enter(hash_lock); 1924 mutex_exit(hash_lock); 1925 mutex_enter(&state->arcs_mtx); 1926 ab_prev = list_prev(list, &marker); 1927 list_remove(list, &marker); 1928 } else 1929 bufs_skipped += 1; 1930 } 1931 mutex_exit(&state->arcs_mtx); 1932 1933 if (list == &state->arcs_list[ARC_BUFC_DATA] && 1934 (bytes < 0 || bytes_deleted < bytes)) { 1935 list = &state->arcs_list[ARC_BUFC_METADATA]; 1936 goto top; 1937 } 1938 1939 if (bufs_skipped) { 1940 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1941 ASSERT(bytes >= 0); 1942 } 1943 1944 if (bytes_deleted < bytes) 1945 dprintf("only deleted %lld bytes from %p", 1946 (longlong_t)bytes_deleted, state); 1947 } 1948 1949 static void 1950 arc_adjust(void) 1951 { 1952 int64_t adjustment, delta; 1953 1954 /* 1955 * Adjust MRU size 1956 */ 1957 1958 adjustment = MIN((int64_t)(arc_size - arc_c), 1959 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - 1960 arc_p)); 1961 1962 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 1963 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 1964 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA); 1965 adjustment -= delta; 1966 } 1967 1968 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1969 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 1970 (void) arc_evict(arc_mru, NULL, delta, FALSE, 1971 ARC_BUFC_METADATA); 1972 } 1973 1974 /* 1975 * Adjust MFU size 1976 */ 1977 1978 adjustment = arc_size - arc_c; 1979 1980 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 1981 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 1982 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA); 1983 adjustment -= delta; 1984 } 1985 1986 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1987 int64_t delta = MIN(adjustment, 1988 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 1989 (void) arc_evict(arc_mfu, NULL, delta, FALSE, 1990 ARC_BUFC_METADATA); 1991 } 1992 1993 /* 1994 * Adjust ghost lists 1995 */ 1996 1997 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 1998 1999 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 2000 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 2001 arc_evict_ghost(arc_mru_ghost, NULL, delta); 2002 } 2003 2004 adjustment = 2005 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 2006 2007 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 2008 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 2009 arc_evict_ghost(arc_mfu_ghost, NULL, delta); 2010 } 2011 } 2012 2013 static void 2014 arc_do_user_evicts(void) 2015 { 2016 mutex_enter(&arc_eviction_mtx); 2017 while (arc_eviction_list != NULL) { 2018 arc_buf_t *buf = arc_eviction_list; 2019 arc_eviction_list = buf->b_next; 2020 mutex_enter(&buf->b_evict_lock); 2021 buf->b_hdr = NULL; 2022 mutex_exit(&buf->b_evict_lock); 2023 mutex_exit(&arc_eviction_mtx); 2024 2025 if (buf->b_efunc != NULL) 2026 VERIFY(buf->b_efunc(buf) == 0); 2027 2028 buf->b_efunc = NULL; 2029 buf->b_private = NULL; 2030 kmem_cache_free(buf_cache, buf); 2031 mutex_enter(&arc_eviction_mtx); 2032 } 2033 mutex_exit(&arc_eviction_mtx); 2034 } 2035 2036 /* 2037 * Flush all *evictable* data from the cache for the given spa. 2038 * NOTE: this will not touch "active" (i.e. referenced) data. 
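 *
 * A minimal usage sketch (the spa argument is a hypothetical
 * held pool; the NULL form is the one arc_fini() below uses):
 *
 *	arc_flush(spa);		evict one pool's unreferenced buffers
 *	arc_flush(NULL);	evict everything evictable, for all pools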
2039 */ 2040 void 2041 arc_flush(spa_t *spa) 2042 { 2043 uint64_t guid = 0; 2044 2045 if (spa) 2046 guid = spa_load_guid(spa); 2047 2048 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 2049 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 2050 if (spa) 2051 break; 2052 } 2053 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 2054 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 2055 if (spa) 2056 break; 2057 } 2058 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 2059 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 2060 if (spa) 2061 break; 2062 } 2063 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 2064 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 2065 if (spa) 2066 break; 2067 } 2068 2069 arc_evict_ghost(arc_mru_ghost, guid, -1); 2070 arc_evict_ghost(arc_mfu_ghost, guid, -1); 2071 2072 mutex_enter(&arc_reclaim_thr_lock); 2073 arc_do_user_evicts(); 2074 mutex_exit(&arc_reclaim_thr_lock); 2075 ASSERT(spa || arc_eviction_list == NULL); 2076 } 2077 2078 void 2079 arc_shrink(void) 2080 { 2081 if (arc_c > arc_c_min) { 2082 uint64_t to_free; 2083 2084 #ifdef _KERNEL 2085 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 2086 #else 2087 to_free = arc_c >> arc_shrink_shift; 2088 #endif 2089 if (arc_c > arc_c_min + to_free) 2090 atomic_add_64(&arc_c, -to_free); 2091 else 2092 arc_c = arc_c_min; 2093 2094 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 2095 if (arc_c > arc_size) 2096 arc_c = MAX(arc_size, arc_c_min); 2097 if (arc_p > arc_c) 2098 arc_p = (arc_c >> 1); 2099 ASSERT(arc_c >= arc_c_min); 2100 ASSERT((int64_t)arc_p >= 0); 2101 } 2102 2103 if (arc_size > arc_c) 2104 arc_adjust(); 2105 } 2106 2107 /* 2108 * Determine if the system is under memory pressure and is asking 2109 * to reclaim memory. A return value of 1 indicates that the system 2110 * is under memory pressure and that the arc should adjust accordingly. 2111 */ 2112 static int 2113 arc_reclaim_needed(void) 2114 { 2115 uint64_t extra; 2116 2117 #ifdef _KERNEL 2118 2119 if (needfree) 2120 return (1); 2121 2122 /* 2123 * take 'desfree' extra pages, so we reclaim sooner, rather than later 2124 */ 2125 extra = desfree; 2126 2127 /* 2128 * check that we're out of range of the pageout scanner. It starts to 2129 * schedule paging if freemem is less than lotsfree and needfree. 2130 * lotsfree is the high-water mark for pageout, and needfree is the 2131 * number of needed free pages. We add extra pages here to make sure 2132 * the scanner doesn't start up while we're freeing memory. 2133 */ 2134 if (freemem < lotsfree + needfree + extra) 2135 return (1); 2136 2137 /* 2138 * check to make sure that swapfs has enough space so that anon 2139 * reservations can still succeed. anon_resvmem() checks that the 2140 * availrmem is greater than swapfs_minfree, and the number of reserved 2141 * swap pages. We also add a bit of extra here just to prevent 2142 * circumstances from getting really dire. 2143 */ 2144 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2145 return (1); 2146 2147 #if defined(__i386) 2148 /* 2149 * If we're on an i386 platform, it's possible that we'll exhaust the 2150 * kernel heap space before we ever run out of available physical 2151 * memory. Most checks of the size of the heap_area compare against 2152 * tune.t_minarmem, which is the minimum available real memory that we 2153 * can have in the system. However, this is generally fixed at 25 pages 2154 * which is so low that it's useless. 
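 * (At the usual 4K page size, 25 pages is only about 100KB.)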
In this comparison, we seek to
2155 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2156 * heap is allocated. (Or, in the calculation, if less than 1/4th is
2157 * free)
2158 */
2159 if (vmem_size(heap_arena, VMEM_FREE) <
2160 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
2161 return (1);
2162 #endif
2163
2164 /*
2165 * If zio data pages are being allocated out of a separate heap segment,
2166 * then enforce that the size of available vmem for this arena remains
2167 * above about 1/16th free.
2168 *
2169 * Note: The 1/16th arena free requirement was put in place
2170 * to aggressively evict memory from the arc in order to avoid
2171 * memory fragmentation issues.
2172 */
2173 if (zio_arena != NULL &&
2174 vmem_size(zio_arena, VMEM_FREE) <
2175 (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2176 return (1);
2177 #else
2178 if (spa_get_random(100) == 0)
2179 return (1);
2180 #endif
2181 return (0);
2182 }
2183
2184 static void
2185 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2186 {
2187 size_t i;
2188 kmem_cache_t *prev_cache = NULL;
2189 kmem_cache_t *prev_data_cache = NULL;
2190 extern kmem_cache_t *zio_buf_cache[];
2191 extern kmem_cache_t *zio_data_buf_cache[];
2192
2193 #ifdef _KERNEL
2194 if (arc_meta_used >= arc_meta_limit) {
2195 /*
2196 * We are exceeding our meta-data cache limit.
2197 * Purge some DNLC entries to release holds on meta-data.
2198 */
2199 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2200 }
2201 #if defined(__i386)
2202 /*
2203 * Reclaim unused memory from all kmem caches.
2204 */
2205 kmem_reap();
2206 #endif
2207 #endif
2208
2209 /*
2210 * An aggressive reclamation will shrink the cache size as well as
2211 * reap free buffers from the arc kmem caches.
2212 */
2213 if (strat == ARC_RECLAIM_AGGR)
2214 arc_shrink();
2215
2216 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2217 if (zio_buf_cache[i] != prev_cache) {
2218 prev_cache = zio_buf_cache[i];
2219 kmem_cache_reap_now(zio_buf_cache[i]);
2220 }
2221 if (zio_data_buf_cache[i] != prev_data_cache) {
2222 prev_data_cache = zio_data_buf_cache[i];
2223 kmem_cache_reap_now(zio_data_buf_cache[i]);
2224 }
2225 }
2226 kmem_cache_reap_now(buf_cache);
2227 kmem_cache_reap_now(hdr_cache);
2228
2229 /*
2230 * Ask the vmem arena to reclaim unused memory from its
2231 * quantum caches.
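 * This reap is restricted to aggressive passes, and only applies
 * when zio data buffers have their own zio_arena segment (the
 * same arena arc_reclaim_needed() watches above).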
2232 */
2233 if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2234 vmem_qcache_reap(zio_arena);
2235 }
2236
2237 static void
2238 arc_reclaim_thread(void)
2239 {
2240 clock_t growtime = 0;
2241 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2242 callb_cpr_t cpr;
2243
2244 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2245
2246 mutex_enter(&arc_reclaim_thr_lock);
2247 while (arc_thread_exit == 0) {
2248 if (arc_reclaim_needed()) {
2249
2250 if (arc_no_grow) {
2251 if (last_reclaim == ARC_RECLAIM_CONS) {
2252 last_reclaim = ARC_RECLAIM_AGGR;
2253 } else {
2254 last_reclaim = ARC_RECLAIM_CONS;
2255 }
2256 } else {
2257 arc_no_grow = TRUE;
2258 last_reclaim = ARC_RECLAIM_AGGR;
2259 membar_producer();
2260 }
2261
2262 /* reset the growth delay for every reclaim */
2263 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2264
2265 arc_kmem_reap_now(last_reclaim);
2266 arc_warm = B_TRUE;
2267
2268 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2269 arc_no_grow = FALSE;
2270 }
2271
2272 arc_adjust();
2273
2274 if (arc_eviction_list != NULL)
2275 arc_do_user_evicts();
2276
2277 /* block until needed, or one second, whichever is shorter */
2278 CALLB_CPR_SAFE_BEGIN(&cpr);
2279 (void) cv_timedwait(&arc_reclaim_thr_cv,
2280 &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2281 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2282 }
2283
2284 arc_thread_exit = 0;
2285 cv_broadcast(&arc_reclaim_thr_cv);
2286 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2287 thread_exit();
2288 }
2289
2290 /*
2291 * Adapt arc info given the number of bytes we are trying to add and
2292 * the state that we are coming from. This function is only called
2293 * when we are adding new content to the cache.
2294 */
2295 static void
2296 arc_adapt(int bytes, arc_state_t *state)
2297 {
2298 int mult;
2299 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2300
2301 if (state == arc_l2c_only)
2302 return;
2303
2304 ASSERT(bytes > 0);
2305 /*
2306 * Adapt the target size of the MRU list:
2307 * - if we just hit in the MRU ghost list, then increase
2308 * the target size of the MRU list.
2309 * - if we just hit in the MFU ghost list, then increase
2310 * the target size of the MFU list by decreasing the
2311 * target size of the MRU list.
2312 */
2313 if (state == arc_mru_ghost) {
2314 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2315 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2316 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2317
2318 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2319 } else if (state == arc_mfu_ghost) {
2320 uint64_t delta;
2321
2322 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2323 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2324 mult = MIN(mult, 10); 2325 2326 delta = MIN(bytes * mult, arc_p); 2327 arc_p = MAX(arc_p_min, arc_p - delta); 2328 } 2329 ASSERT((int64_t)arc_p >= 0); 2330 2331 if (arc_reclaim_needed()) { 2332 cv_signal(&arc_reclaim_thr_cv); 2333 return; 2334 } 2335 2336 if (arc_no_grow) 2337 return; 2338 2339 if (arc_c >= arc_c_max) 2340 return; 2341 2342 /* 2343 * If we're within (2 * maxblocksize) bytes of the target 2344 * cache size, increment the target cache size 2345 */ 2346 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2347 atomic_add_64(&arc_c, (int64_t)bytes); 2348 if (arc_c > arc_c_max) 2349 arc_c = arc_c_max; 2350 else if (state == arc_anon) 2351 atomic_add_64(&arc_p, (int64_t)bytes); 2352 if (arc_p > arc_c) 2353 arc_p = arc_c; 2354 } 2355 ASSERT((int64_t)arc_p >= 0); 2356 } 2357 2358 /* 2359 * Check if the cache has reached its limits and eviction is required 2360 * prior to insert. 2361 */ 2362 static int 2363 arc_evict_needed(arc_buf_contents_t type) 2364 { 2365 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2366 return (1); 2367 2368 if (arc_reclaim_needed()) 2369 return (1); 2370 2371 return (arc_size > arc_c); 2372 } 2373 2374 /* 2375 * The buffer, supplied as the first argument, needs a data block. 2376 * So, if we are at cache max, determine which cache should be victimized. 2377 * We have the following cases: 2378 * 2379 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2380 * In this situation if we're out of space, but the resident size of the MFU is 2381 * under the limit, victimize the MFU cache to satisfy this insertion request. 2382 * 2383 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2384 * Here, we've used up all of the available space for the MRU, so we need to 2385 * evict from our own cache instead. Evict from the set of resident MRU 2386 * entries. 2387 * 2388 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2389 * c minus p represents the MFU space in the cache, since p is the size of the 2390 * cache that is dedicated to the MRU. In this situation there's still space on 2391 * the MFU side, so the MRU side needs to be victimized. 2392 * 2393 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2394 * MFU's resident set is consuming more space than it has been allotted. In 2395 * this situation, we must victimize our own cache, the MFU, for this insertion. 2396 */ 2397 static void 2398 arc_get_data_buf(arc_buf_t *buf) 2399 { 2400 arc_state_t *state = buf->b_hdr->b_state; 2401 uint64_t size = buf->b_hdr->b_size; 2402 arc_buf_contents_t type = buf->b_hdr->b_type; 2403 2404 arc_adapt(size, state); 2405 2406 /* 2407 * We have not yet reached cache maximum size, 2408 * just allocate a new buffer. 2409 */ 2410 if (!arc_evict_needed(type)) { 2411 if (type == ARC_BUFC_METADATA) { 2412 buf->b_data = zio_buf_alloc(size); 2413 arc_space_consume(size, ARC_SPACE_DATA); 2414 } else { 2415 ASSERT(type == ARC_BUFC_DATA); 2416 buf->b_data = zio_data_buf_alloc(size); 2417 ARCSTAT_INCR(arcstat_data_size, size); 2418 atomic_add_64(&arc_size, size); 2419 } 2420 goto out; 2421 } 2422 2423 /* 2424 * If we are prefetching from the mfu ghost list, this buffer 2425 * will end up on the mru list; so steal space from there. 2426 */ 2427 if (state == arc_mfu_ghost) 2428 state = buf->b_hdr->b_flags & ARC_PREFETCH ? 
arc_mru : arc_mfu; 2429 else if (state == arc_mru_ghost) 2430 state = arc_mru; 2431 2432 if (state == arc_mru || state == arc_anon) { 2433 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2434 state = (arc_mfu->arcs_lsize[type] >= size && 2435 arc_p > mru_used) ? arc_mfu : arc_mru; 2436 } else { 2437 /* MFU cases */ 2438 uint64_t mfu_space = arc_c - arc_p; 2439 state = (arc_mru->arcs_lsize[type] >= size && 2440 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2441 } 2442 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2443 if (type == ARC_BUFC_METADATA) { 2444 buf->b_data = zio_buf_alloc(size); 2445 arc_space_consume(size, ARC_SPACE_DATA); 2446 } else { 2447 ASSERT(type == ARC_BUFC_DATA); 2448 buf->b_data = zio_data_buf_alloc(size); 2449 ARCSTAT_INCR(arcstat_data_size, size); 2450 atomic_add_64(&arc_size, size); 2451 } 2452 ARCSTAT_BUMP(arcstat_recycle_miss); 2453 } 2454 ASSERT(buf->b_data != NULL); 2455 out: 2456 /* 2457 * Update the state size. Note that ghost states have a 2458 * "ghost size" and so don't need to be updated. 2459 */ 2460 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2461 arc_buf_hdr_t *hdr = buf->b_hdr; 2462 2463 atomic_add_64(&hdr->b_state->arcs_size, size); 2464 if (list_link_active(&hdr->b_arc_node)) { 2465 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2466 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2467 } 2468 /* 2469 * If we are growing the cache, and we are adding anonymous 2470 * data, and we have outgrown arc_p, update arc_p 2471 */ 2472 if (arc_size < arc_c && hdr->b_state == arc_anon && 2473 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2474 arc_p = MIN(arc_c, arc_p + size); 2475 } 2476 } 2477 2478 /* 2479 * This routine is called whenever a buffer is accessed. 2480 * NOTE: the hash lock is dropped in this function. 2481 */ 2482 static void 2483 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2484 { 2485 clock_t now; 2486 2487 ASSERT(MUTEX_HELD(hash_lock)); 2488 2489 if (buf->b_state == arc_anon) { 2490 /* 2491 * This buffer is not in the cache, and does not 2492 * appear in our "ghost" list. Add the new buffer 2493 * to the MRU state. 2494 */ 2495 2496 ASSERT(buf->b_arc_access == 0); 2497 buf->b_arc_access = ddi_get_lbolt(); 2498 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2499 arc_change_state(arc_mru, buf, hash_lock); 2500 2501 } else if (buf->b_state == arc_mru) { 2502 now = ddi_get_lbolt(); 2503 2504 /* 2505 * If this buffer is here because of a prefetch, then either: 2506 * - clear the flag if this is a "referencing" read 2507 * (any subsequent access will bump this into the MFU state). 2508 * or 2509 * - move the buffer to the head of the list if this is 2510 * another prefetch (to make it less likely to be evicted). 2511 */ 2512 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2513 if (refcount_count(&buf->b_refcnt) == 0) { 2514 ASSERT(list_link_active(&buf->b_arc_node)); 2515 } else { 2516 buf->b_flags &= ~ARC_PREFETCH; 2517 ARCSTAT_BUMP(arcstat_mru_hits); 2518 } 2519 buf->b_arc_access = now; 2520 return; 2521 } 2522 2523 /* 2524 * This buffer has been "accessed" only once so far, 2525 * but it is still in the cache. Move it to the MFU 2526 * state. 2527 */ 2528 if (now > buf->b_arc_access + ARC_MINTIME) { 2529 /* 2530 * More than 125ms have passed since we 2531 * instantiated this buffer. Move it to the 2532 * most frequently used state. 
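 * (ARC_MINTIME is the promotion threshold tested above; an
 * access landing inside that window leaves the buffer in the
 * MRU state.)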
2533 */ 2534 buf->b_arc_access = now; 2535 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2536 arc_change_state(arc_mfu, buf, hash_lock); 2537 } 2538 ARCSTAT_BUMP(arcstat_mru_hits); 2539 } else if (buf->b_state == arc_mru_ghost) { 2540 arc_state_t *new_state; 2541 /* 2542 * This buffer has been "accessed" recently, but 2543 * was evicted from the cache. Move it to the 2544 * MFU state. 2545 */ 2546 2547 if (buf->b_flags & ARC_PREFETCH) { 2548 new_state = arc_mru; 2549 if (refcount_count(&buf->b_refcnt) > 0) 2550 buf->b_flags &= ~ARC_PREFETCH; 2551 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2552 } else { 2553 new_state = arc_mfu; 2554 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2555 } 2556 2557 buf->b_arc_access = ddi_get_lbolt(); 2558 arc_change_state(new_state, buf, hash_lock); 2559 2560 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2561 } else if (buf->b_state == arc_mfu) { 2562 /* 2563 * This buffer has been accessed more than once and is 2564 * still in the cache. Keep it in the MFU state. 2565 * 2566 * NOTE: an add_reference() that occurred when we did 2567 * the arc_read() will have kicked this off the list. 2568 * If it was a prefetch, we will explicitly move it to 2569 * the head of the list now. 2570 */ 2571 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2572 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2573 ASSERT(list_link_active(&buf->b_arc_node)); 2574 } 2575 ARCSTAT_BUMP(arcstat_mfu_hits); 2576 buf->b_arc_access = ddi_get_lbolt(); 2577 } else if (buf->b_state == arc_mfu_ghost) { 2578 arc_state_t *new_state = arc_mfu; 2579 /* 2580 * This buffer has been accessed more than once but has 2581 * been evicted from the cache. Move it back to the 2582 * MFU state. 2583 */ 2584 2585 if (buf->b_flags & ARC_PREFETCH) { 2586 /* 2587 * This is a prefetch access... 2588 * move this block back to the MRU state. 2589 */ 2590 ASSERT0(refcount_count(&buf->b_refcnt)); 2591 new_state = arc_mru; 2592 } 2593 2594 buf->b_arc_access = ddi_get_lbolt(); 2595 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2596 arc_change_state(new_state, buf, hash_lock); 2597 2598 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2599 } else if (buf->b_state == arc_l2c_only) { 2600 /* 2601 * This buffer is on the 2nd Level ARC. 2602 */ 2603 2604 buf->b_arc_access = ddi_get_lbolt(); 2605 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2606 arc_change_state(arc_mfu, buf, hash_lock); 2607 } else { 2608 ASSERT(!"invalid arc state"); 2609 } 2610 } 2611 2612 /* a generic arc_done_func_t which you can use */ 2613 /* ARGSUSED */ 2614 void 2615 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2616 { 2617 if (zio == NULL || zio->io_error == 0) 2618 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2619 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2620 } 2621 2622 /* a generic arc_done_func_t */ 2623 void 2624 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2625 { 2626 arc_buf_t **bufp = arg; 2627 if (zio && zio->io_error) { 2628 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2629 *bufp = NULL; 2630 } else { 2631 *bufp = buf; 2632 ASSERT(buf->b_data); 2633 } 2634 } 2635 2636 static void 2637 arc_read_done(zio_t *zio) 2638 { 2639 arc_buf_hdr_t *hdr, *found; 2640 arc_buf_t *buf; 2641 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2642 kmutex_t *hash_lock; 2643 arc_callback_t *callback_list, *acb; 2644 int freeable = FALSE; 2645 2646 buf = zio->io_private; 2647 hdr = buf->b_hdr; 2648 2649 /* 2650 * The hdr was inserted into hash-table and removed from lists 2651 * prior to starting I/O. 
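 * (That insertion is performed by arc_read(), later in this
 * file, before it issues the zio.)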
We should find this header, since 2652 * it's in the hash table, and it should be legit since it's 2653 * not possible to evict it during the I/O. The only possible 2654 * reason for it not to be found is if we were freed during the 2655 * read. 2656 */ 2657 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth, 2658 &hash_lock); 2659 2660 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2661 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2662 (found == hdr && HDR_L2_READING(hdr))); 2663 2664 hdr->b_flags &= ~ARC_L2_EVICTED; 2665 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2666 hdr->b_flags &= ~ARC_L2CACHE; 2667 2668 /* byteswap if necessary */ 2669 callback_list = hdr->b_acb; 2670 ASSERT(callback_list != NULL); 2671 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 2672 dmu_object_byteswap_t bswap = 2673 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 2674 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 2675 byteswap_uint64_array : 2676 dmu_ot_byteswap[bswap].ob_func; 2677 func(buf->b_data, hdr->b_size); 2678 } 2679 2680 arc_cksum_compute(buf, B_FALSE); 2681 arc_buf_watch(buf); 2682 2683 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) { 2684 /* 2685 * Only call arc_access on anonymous buffers. This is because 2686 * if we've issued an I/O for an evicted buffer, we've already 2687 * called arc_access (to prevent any simultaneous readers from 2688 * getting confused). 2689 */ 2690 arc_access(hdr, hash_lock); 2691 } 2692 2693 /* create copies of the data buffer for the callers */ 2694 abuf = buf; 2695 for (acb = callback_list; acb; acb = acb->acb_next) { 2696 if (acb->acb_done) { 2697 if (abuf == NULL) { 2698 ARCSTAT_BUMP(arcstat_duplicate_reads); 2699 abuf = arc_buf_clone(buf); 2700 } 2701 acb->acb_buf = abuf; 2702 abuf = NULL; 2703 } 2704 } 2705 hdr->b_acb = NULL; 2706 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2707 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2708 if (abuf == buf) { 2709 ASSERT(buf->b_efunc == NULL); 2710 ASSERT(hdr->b_datacnt == 1); 2711 hdr->b_flags |= ARC_BUF_AVAILABLE; 2712 } 2713 2714 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2715 2716 if (zio->io_error != 0) { 2717 hdr->b_flags |= ARC_IO_ERROR; 2718 if (hdr->b_state != arc_anon) 2719 arc_change_state(arc_anon, hdr, hash_lock); 2720 if (HDR_IN_HASH_TABLE(hdr)) 2721 buf_hash_remove(hdr); 2722 freeable = refcount_is_zero(&hdr->b_refcnt); 2723 } 2724 2725 /* 2726 * Broadcast before we drop the hash_lock to avoid the possibility 2727 * that the hdr (and hence the cv) might be freed before we get to 2728 * the cv_broadcast(). 2729 */ 2730 cv_broadcast(&hdr->b_cv); 2731 2732 if (hash_lock) { 2733 mutex_exit(hash_lock); 2734 } else { 2735 /* 2736 * This block was freed while we waited for the read to 2737 * complete. It has been removed from the hash table and 2738 * moved to the anonymous state (so that it won't show up 2739 * in the cache). 
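 * The refcount check below then decides whether this thread is
 * also responsible for destroying the header once the callbacks
 * have run.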
2740 */ 2741 ASSERT3P(hdr->b_state, ==, arc_anon); 2742 freeable = refcount_is_zero(&hdr->b_refcnt); 2743 } 2744 2745 /* execute each callback and free its structure */ 2746 while ((acb = callback_list) != NULL) { 2747 if (acb->acb_done) 2748 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2749 2750 if (acb->acb_zio_dummy != NULL) { 2751 acb->acb_zio_dummy->io_error = zio->io_error; 2752 zio_nowait(acb->acb_zio_dummy); 2753 } 2754 2755 callback_list = acb->acb_next; 2756 kmem_free(acb, sizeof (arc_callback_t)); 2757 } 2758 2759 if (freeable) 2760 arc_hdr_destroy(hdr); 2761 } 2762 2763 /* 2764 * "Read" the block at the specified DVA (in bp) via the 2765 * cache. If the block is found in the cache, invoke the provided 2766 * callback immediately and return. Note that the `zio' parameter 2767 * in the callback will be NULL in this case, since no IO was 2768 * required. If the block is not in the cache pass the read request 2769 * on to the spa with a substitute callback function, so that the 2770 * requested block will be added to the cache. 2771 * 2772 * If a read request arrives for a block that has a read in-progress, 2773 * either wait for the in-progress read to complete (and return the 2774 * results); or, if this is a read with a "done" func, add a record 2775 * to the read to invoke the "done" func when the read completes, 2776 * and return; or just return. 2777 * 2778 * arc_read_done() will invoke all the requested "done" functions 2779 * for readers of this block. 2780 */ 2781 int 2782 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done, 2783 void *private, int priority, int zio_flags, uint32_t *arc_flags, 2784 const zbookmark_t *zb) 2785 { 2786 arc_buf_hdr_t *hdr; 2787 arc_buf_t *buf = NULL; 2788 kmutex_t *hash_lock; 2789 zio_t *rzio; 2790 uint64_t guid = spa_load_guid(spa); 2791 2792 top: 2793 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp), 2794 &hash_lock); 2795 if (hdr && hdr->b_datacnt > 0) { 2796 2797 *arc_flags |= ARC_CACHED; 2798 2799 if (HDR_IO_IN_PROGRESS(hdr)) { 2800 2801 if (*arc_flags & ARC_WAIT) { 2802 cv_wait(&hdr->b_cv, hash_lock); 2803 mutex_exit(hash_lock); 2804 goto top; 2805 } 2806 ASSERT(*arc_flags & ARC_NOWAIT); 2807 2808 if (done) { 2809 arc_callback_t *acb = NULL; 2810 2811 acb = kmem_zalloc(sizeof (arc_callback_t), 2812 KM_SLEEP); 2813 acb->acb_done = done; 2814 acb->acb_private = private; 2815 if (pio != NULL) 2816 acb->acb_zio_dummy = zio_null(pio, 2817 spa, NULL, NULL, NULL, zio_flags); 2818 2819 ASSERT(acb->acb_done != NULL); 2820 acb->acb_next = hdr->b_acb; 2821 hdr->b_acb = acb; 2822 add_reference(hdr, hash_lock, private); 2823 mutex_exit(hash_lock); 2824 return (0); 2825 } 2826 mutex_exit(hash_lock); 2827 return (0); 2828 } 2829 2830 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2831 2832 if (done) { 2833 add_reference(hdr, hash_lock, private); 2834 /* 2835 * If this block is already in use, create a new 2836 * copy of the data so that we will be guaranteed 2837 * that arc_release() will always succeed. 
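 * (arc_buf_clone() below is what provides that guarantee: the
 * second and subsequent users each get a private copy of the
 * data, so a writer never pulls a shared buffer out from under
 * another reader.)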
2838 */ 2839 buf = hdr->b_buf; 2840 ASSERT(buf); 2841 ASSERT(buf->b_data); 2842 if (HDR_BUF_AVAILABLE(hdr)) { 2843 ASSERT(buf->b_efunc == NULL); 2844 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2845 } else { 2846 buf = arc_buf_clone(buf); 2847 } 2848 2849 } else if (*arc_flags & ARC_PREFETCH && 2850 refcount_count(&hdr->b_refcnt) == 0) { 2851 hdr->b_flags |= ARC_PREFETCH; 2852 } 2853 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2854 arc_access(hdr, hash_lock); 2855 if (*arc_flags & ARC_L2CACHE) 2856 hdr->b_flags |= ARC_L2CACHE; 2857 mutex_exit(hash_lock); 2858 ARCSTAT_BUMP(arcstat_hits); 2859 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2860 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2861 data, metadata, hits); 2862 2863 if (done) 2864 done(NULL, buf, private); 2865 } else { 2866 uint64_t size = BP_GET_LSIZE(bp); 2867 arc_callback_t *acb; 2868 vdev_t *vd = NULL; 2869 uint64_t addr = 0; 2870 boolean_t devw = B_FALSE; 2871 2872 if (hdr == NULL) { 2873 /* this block is not in the cache */ 2874 arc_buf_hdr_t *exists; 2875 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2876 buf = arc_buf_alloc(spa, size, private, type); 2877 hdr = buf->b_hdr; 2878 hdr->b_dva = *BP_IDENTITY(bp); 2879 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 2880 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2881 exists = buf_hash_insert(hdr, &hash_lock); 2882 if (exists) { 2883 /* somebody beat us to the hash insert */ 2884 mutex_exit(hash_lock); 2885 buf_discard_identity(hdr); 2886 (void) arc_buf_remove_ref(buf, private); 2887 goto top; /* restart the IO request */ 2888 } 2889 /* if this is a prefetch, we don't have a reference */ 2890 if (*arc_flags & ARC_PREFETCH) { 2891 (void) remove_reference(hdr, hash_lock, 2892 private); 2893 hdr->b_flags |= ARC_PREFETCH; 2894 } 2895 if (*arc_flags & ARC_L2CACHE) 2896 hdr->b_flags |= ARC_L2CACHE; 2897 if (BP_GET_LEVEL(bp) > 0) 2898 hdr->b_flags |= ARC_INDIRECT; 2899 } else { 2900 /* this block is in the ghost cache */ 2901 ASSERT(GHOST_STATE(hdr->b_state)); 2902 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2903 ASSERT0(refcount_count(&hdr->b_refcnt)); 2904 ASSERT(hdr->b_buf == NULL); 2905 2906 /* if this is a prefetch, we don't have a reference */ 2907 if (*arc_flags & ARC_PREFETCH) 2908 hdr->b_flags |= ARC_PREFETCH; 2909 else 2910 add_reference(hdr, hash_lock, private); 2911 if (*arc_flags & ARC_L2CACHE) 2912 hdr->b_flags |= ARC_L2CACHE; 2913 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2914 buf->b_hdr = hdr; 2915 buf->b_data = NULL; 2916 buf->b_efunc = NULL; 2917 buf->b_private = NULL; 2918 buf->b_next = NULL; 2919 hdr->b_buf = buf; 2920 ASSERT(hdr->b_datacnt == 0); 2921 hdr->b_datacnt = 1; 2922 arc_get_data_buf(buf); 2923 arc_access(hdr, hash_lock); 2924 } 2925 2926 ASSERT(!GHOST_STATE(hdr->b_state)); 2927 2928 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2929 acb->acb_done = done; 2930 acb->acb_private = private; 2931 2932 ASSERT(hdr->b_acb == NULL); 2933 hdr->b_acb = acb; 2934 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2935 2936 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 2937 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 2938 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 2939 addr = hdr->b_l2hdr->b_daddr; 2940 /* 2941 * Lock out device removal. 
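 * If the vdev is dead, or the SCL_L2ARC config lock cannot be
 * taken without blocking, vd is simply cleared and the read
 * falls back to the primary pool storage below.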
2942 */ 2943 if (vdev_is_dead(vd) || 2944 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 2945 vd = NULL; 2946 } 2947 2948 mutex_exit(hash_lock); 2949 2950 ASSERT3U(hdr->b_size, ==, size); 2951 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 2952 uint64_t, size, zbookmark_t *, zb); 2953 ARCSTAT_BUMP(arcstat_misses); 2954 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2955 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2956 data, metadata, misses); 2957 2958 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 2959 /* 2960 * Read from the L2ARC if the following are true: 2961 * 1. The L2ARC vdev was previously cached. 2962 * 2. This buffer still has L2ARC metadata. 2963 * 3. This buffer isn't currently writing to the L2ARC. 2964 * 4. The L2ARC entry wasn't evicted, which may 2965 * also have invalidated the vdev. 2966 * 5. This isn't prefetch and l2arc_noprefetch is set. 2967 */ 2968 if (hdr->b_l2hdr != NULL && 2969 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 2970 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 2971 l2arc_read_callback_t *cb; 2972 2973 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2974 ARCSTAT_BUMP(arcstat_l2_hits); 2975 2976 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2977 KM_SLEEP); 2978 cb->l2rcb_buf = buf; 2979 cb->l2rcb_spa = spa; 2980 cb->l2rcb_bp = *bp; 2981 cb->l2rcb_zb = *zb; 2982 cb->l2rcb_flags = zio_flags; 2983 2984 ASSERT(addr >= VDEV_LABEL_START_SIZE && 2985 addr + size < vd->vdev_psize - 2986 VDEV_LABEL_END_SIZE); 2987 2988 /* 2989 * l2arc read. The SCL_L2ARC lock will be 2990 * released by l2arc_read_done(). 2991 */ 2992 rzio = zio_read_phys(pio, vd, addr, size, 2993 buf->b_data, ZIO_CHECKSUM_OFF, 2994 l2arc_read_done, cb, priority, zio_flags | 2995 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 2996 ZIO_FLAG_DONT_PROPAGATE | 2997 ZIO_FLAG_DONT_RETRY, B_FALSE); 2998 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2999 zio_t *, rzio); 3000 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 3001 3002 if (*arc_flags & ARC_NOWAIT) { 3003 zio_nowait(rzio); 3004 return (0); 3005 } 3006 3007 ASSERT(*arc_flags & ARC_WAIT); 3008 if (zio_wait(rzio) == 0) 3009 return (0); 3010 3011 /* l2arc read error; goto zio_read() */ 3012 } else { 3013 DTRACE_PROBE1(l2arc__miss, 3014 arc_buf_hdr_t *, hdr); 3015 ARCSTAT_BUMP(arcstat_l2_misses); 3016 if (HDR_L2_WRITING(hdr)) 3017 ARCSTAT_BUMP(arcstat_l2_rw_clash); 3018 spa_config_exit(spa, SCL_L2ARC, vd); 3019 } 3020 } else { 3021 if (vd != NULL) 3022 spa_config_exit(spa, SCL_L2ARC, vd); 3023 if (l2arc_ndev != 0) { 3024 DTRACE_PROBE1(l2arc__miss, 3025 arc_buf_hdr_t *, hdr); 3026 ARCSTAT_BUMP(arcstat_l2_misses); 3027 } 3028 } 3029 3030 rzio = zio_read(pio, spa, bp, buf->b_data, size, 3031 arc_read_done, buf, priority, zio_flags, zb); 3032 3033 if (*arc_flags & ARC_WAIT) 3034 return (zio_wait(rzio)); 3035 3036 ASSERT(*arc_flags & ARC_NOWAIT); 3037 zio_nowait(rzio); 3038 } 3039 return (0); 3040 } 3041 3042 void 3043 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 3044 { 3045 ASSERT(buf->b_hdr != NULL); 3046 ASSERT(buf->b_hdr->b_state != arc_anon); 3047 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 3048 ASSERT(buf->b_efunc == NULL); 3049 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 3050 3051 buf->b_efunc = func; 3052 buf->b_private = private; 3053 } 3054 3055 /* 3056 * This is used by the DMU to let the ARC know that a buffer is 3057 * being evicted, so the ARC should clean up. If this arc buf 3058 * is not yet in the evicted state, it will be put there. 
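 * Returns nonzero when the eviction callback was invoked here,
 * and zero when arc_do_user_evicts() owns the cleanup instead.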
3059 */
3060 int
3061 arc_buf_evict(arc_buf_t *buf)
3062 {
3063 arc_buf_hdr_t *hdr;
3064 kmutex_t *hash_lock;
3065 arc_buf_t **bufp;
3066
3067 mutex_enter(&buf->b_evict_lock);
3068 hdr = buf->b_hdr;
3069 if (hdr == NULL) {
3070 /*
3071 * We are in arc_do_user_evicts().
3072 */
3073 ASSERT(buf->b_data == NULL);
3074 mutex_exit(&buf->b_evict_lock);
3075 return (0);
3076 } else if (buf->b_data == NULL) {
3077 arc_buf_t copy = *buf; /* structure assignment */
3078 /*
3079 * We are on the eviction list; process this buffer now
3080 * but let arc_do_user_evicts() do the reaping.
3081 */
3082 buf->b_efunc = NULL;
3083 mutex_exit(&buf->b_evict_lock);
3084 VERIFY(copy.b_efunc(&copy) == 0);
3085 return (1);
3086 }
3087 hash_lock = HDR_LOCK(hdr);
3088 mutex_enter(hash_lock);
3089 hdr = buf->b_hdr;
3090 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3091
3092 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3093 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3094
3095 /*
3096 * Pull this buffer off of the hdr
3097 */
3098 bufp = &hdr->b_buf;
3099 while (*bufp != buf)
3100 bufp = &(*bufp)->b_next;
3101 *bufp = buf->b_next;
3102
3103 ASSERT(buf->b_data != NULL);
3104 arc_buf_destroy(buf, FALSE, FALSE);
3105
3106 if (hdr->b_datacnt == 0) {
3107 arc_state_t *old_state = hdr->b_state;
3108 arc_state_t *evicted_state;
3109
3110 ASSERT(hdr->b_buf == NULL);
3111 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3112
3113 evicted_state =
3114 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3115
3116 mutex_enter(&old_state->arcs_mtx);
3117 mutex_enter(&evicted_state->arcs_mtx);
3118
3119 arc_change_state(evicted_state, hdr, hash_lock);
3120 ASSERT(HDR_IN_HASH_TABLE(hdr));
3121 hdr->b_flags |= ARC_IN_HASH_TABLE;
3122 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3123
3124 mutex_exit(&evicted_state->arcs_mtx);
3125 mutex_exit(&old_state->arcs_mtx);
3126 }
3127 mutex_exit(hash_lock);
3128 mutex_exit(&buf->b_evict_lock);
3129
3130 VERIFY(buf->b_efunc(buf) == 0);
3131 buf->b_efunc = NULL;
3132 buf->b_private = NULL;
3133 buf->b_hdr = NULL;
3134 buf->b_next = NULL;
3135 kmem_cache_free(buf_cache, buf);
3136 return (1);
3137 }
3138
3139 /*
3140 * Release this buffer from the cache. This must be done
3141 * after a read and prior to modifying the buffer contents.
3142 * If the buffer has more than one reference, we must make
3143 * a new hdr for the buffer.
3144 */
3145 void
3146 arc_release(arc_buf_t *buf, void *tag)
3147 {
3148 arc_buf_hdr_t *hdr;
3149 kmutex_t *hash_lock = NULL;
3150 l2arc_buf_hdr_t *l2hdr;
3151 uint64_t buf_size;
3152
3153 /*
3154 * It would be nice to assert that if it's DMU metadata (level >
3155 * 0 || it's the dnode file), then it must be syncing context.
3156 * But we don't know that information at this level.
3157 */
3158
3159 mutex_enter(&buf->b_evict_lock);
3160 hdr = buf->b_hdr;
3161
3162 /* this buffer is not on any list */
3163 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3164
3165 if (hdr->b_state == arc_anon) {
3166 /* this buffer is already released */
3167 ASSERT(buf->b_efunc == NULL);
3168 } else {
3169 hash_lock = HDR_LOCK(hdr);
3170 mutex_enter(hash_lock);
3171 hdr = buf->b_hdr;
3172 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3173 }
3174
3175 l2hdr = hdr->b_l2hdr;
3176 if (l2hdr) {
3177 mutex_enter(&l2arc_buflist_mtx);
3178 hdr->b_l2hdr = NULL;
3179 }
3180 buf_size = hdr->b_size;
3181
3182 /*
3183 * Do we have more than one buf?
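 * If so, carve this buf off onto a fresh anonymous hdr, so the
 * caller can modify its copy without disturbing readers still
 * attached to the old header.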
3184 */ 3185 if (hdr->b_datacnt > 1) { 3186 arc_buf_hdr_t *nhdr; 3187 arc_buf_t **bufp; 3188 uint64_t blksz = hdr->b_size; 3189 uint64_t spa = hdr->b_spa; 3190 arc_buf_contents_t type = hdr->b_type; 3191 uint32_t flags = hdr->b_flags; 3192 3193 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3194 /* 3195 * Pull the data off of this hdr and attach it to 3196 * a new anonymous hdr. 3197 */ 3198 (void) remove_reference(hdr, hash_lock, tag); 3199 bufp = &hdr->b_buf; 3200 while (*bufp != buf) 3201 bufp = &(*bufp)->b_next; 3202 *bufp = buf->b_next; 3203 buf->b_next = NULL; 3204 3205 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3206 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3207 if (refcount_is_zero(&hdr->b_refcnt)) { 3208 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3209 ASSERT3U(*size, >=, hdr->b_size); 3210 atomic_add_64(size, -hdr->b_size); 3211 } 3212 3213 /* 3214 * We're releasing a duplicate user data buffer, update 3215 * our statistics accordingly. 3216 */ 3217 if (hdr->b_type == ARC_BUFC_DATA) { 3218 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); 3219 ARCSTAT_INCR(arcstat_duplicate_buffers_size, 3220 -hdr->b_size); 3221 } 3222 hdr->b_datacnt -= 1; 3223 arc_cksum_verify(buf); 3224 arc_buf_unwatch(buf); 3225 3226 mutex_exit(hash_lock); 3227 3228 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3229 nhdr->b_size = blksz; 3230 nhdr->b_spa = spa; 3231 nhdr->b_type = type; 3232 nhdr->b_buf = buf; 3233 nhdr->b_state = arc_anon; 3234 nhdr->b_arc_access = 0; 3235 nhdr->b_flags = flags & ARC_L2_WRITING; 3236 nhdr->b_l2hdr = NULL; 3237 nhdr->b_datacnt = 1; 3238 nhdr->b_freeze_cksum = NULL; 3239 (void) refcount_add(&nhdr->b_refcnt, tag); 3240 buf->b_hdr = nhdr; 3241 mutex_exit(&buf->b_evict_lock); 3242 atomic_add_64(&arc_anon->arcs_size, blksz); 3243 } else { 3244 mutex_exit(&buf->b_evict_lock); 3245 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3246 ASSERT(!list_link_active(&hdr->b_arc_node)); 3247 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3248 if (hdr->b_state != arc_anon) 3249 arc_change_state(arc_anon, hdr, hash_lock); 3250 hdr->b_arc_access = 0; 3251 if (hash_lock) 3252 mutex_exit(hash_lock); 3253 3254 buf_discard_identity(hdr); 3255 arc_buf_thaw(buf); 3256 } 3257 buf->b_efunc = NULL; 3258 buf->b_private = NULL; 3259 3260 if (l2hdr) { 3261 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3262 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3263 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3264 mutex_exit(&l2arc_buflist_mtx); 3265 } 3266 } 3267 3268 int 3269 arc_released(arc_buf_t *buf) 3270 { 3271 int released; 3272 3273 mutex_enter(&buf->b_evict_lock); 3274 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3275 mutex_exit(&buf->b_evict_lock); 3276 return (released); 3277 } 3278 3279 int 3280 arc_has_callback(arc_buf_t *buf) 3281 { 3282 int callback; 3283 3284 mutex_enter(&buf->b_evict_lock); 3285 callback = (buf->b_efunc != NULL); 3286 mutex_exit(&buf->b_evict_lock); 3287 return (callback); 3288 } 3289 3290 #ifdef ZFS_DEBUG 3291 int 3292 arc_referenced(arc_buf_t *buf) 3293 { 3294 int referenced; 3295 3296 mutex_enter(&buf->b_evict_lock); 3297 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3298 mutex_exit(&buf->b_evict_lock); 3299 return (referenced); 3300 } 3301 #endif 3302 3303 static void 3304 arc_write_ready(zio_t *zio) 3305 { 3306 arc_write_callback_t *callback = zio->io_private; 3307 arc_buf_t *buf = callback->awcb_buf; 3308 arc_buf_hdr_t *hdr = buf->b_hdr; 3309 3310 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3311 callback->awcb_ready(zio, 
buf, callback->awcb_private); 3312 3313 /* 3314 * If the IO is already in progress, then this is a re-write 3315 * attempt, so we need to thaw and re-compute the cksum. 3316 * It is the responsibility of the callback to handle the 3317 * accounting for any re-write attempt. 3318 */ 3319 if (HDR_IO_IN_PROGRESS(hdr)) { 3320 mutex_enter(&hdr->b_freeze_lock); 3321 if (hdr->b_freeze_cksum != NULL) { 3322 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3323 hdr->b_freeze_cksum = NULL; 3324 } 3325 mutex_exit(&hdr->b_freeze_lock); 3326 } 3327 arc_cksum_compute(buf, B_FALSE); 3328 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3329 } 3330 3331 static void 3332 arc_write_done(zio_t *zio) 3333 { 3334 arc_write_callback_t *callback = zio->io_private; 3335 arc_buf_t *buf = callback->awcb_buf; 3336 arc_buf_hdr_t *hdr = buf->b_hdr; 3337 3338 ASSERT(hdr->b_acb == NULL); 3339 3340 if (zio->io_error == 0) { 3341 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3342 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 3343 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3344 } else { 3345 ASSERT(BUF_EMPTY(hdr)); 3346 } 3347 3348 /* 3349 * If the block to be written was all-zero, we may have 3350 * compressed it away. In this case no write was performed 3351 * so there will be no dva/birth/checksum. The buffer must 3352 * therefore remain anonymous (and uncached). 3353 */ 3354 if (!BUF_EMPTY(hdr)) { 3355 arc_buf_hdr_t *exists; 3356 kmutex_t *hash_lock; 3357 3358 ASSERT(zio->io_error == 0); 3359 3360 arc_cksum_verify(buf); 3361 3362 exists = buf_hash_insert(hdr, &hash_lock); 3363 if (exists) { 3364 /* 3365 * This can only happen if we overwrite for 3366 * sync-to-convergence, because we remove 3367 * buffers from the hash table when we arc_free(). 3368 */ 3369 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 3370 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3371 panic("bad overwrite, hdr=%p exists=%p", 3372 (void *)hdr, (void *)exists); 3373 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3374 arc_change_state(arc_anon, exists, hash_lock); 3375 mutex_exit(hash_lock); 3376 arc_hdr_destroy(exists); 3377 exists = buf_hash_insert(hdr, &hash_lock); 3378 ASSERT3P(exists, ==, NULL); 3379 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { 3380 /* nopwrite */ 3381 ASSERT(zio->io_prop.zp_nopwrite); 3382 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3383 panic("bad nopwrite, hdr=%p exists=%p", 3384 (void *)hdr, (void *)exists); 3385 } else { 3386 /* Dedup */ 3387 ASSERT(hdr->b_datacnt == 1); 3388 ASSERT(hdr->b_state == arc_anon); 3389 ASSERT(BP_GET_DEDUP(zio->io_bp)); 3390 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 3391 } 3392 } 3393 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3394 /* if it's not anon, we are doing a scrub */ 3395 if (!exists && hdr->b_state == arc_anon) 3396 arc_access(hdr, hash_lock); 3397 mutex_exit(hash_lock); 3398 } else { 3399 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3400 } 3401 3402 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3403 callback->awcb_done(zio, buf, callback->awcb_private); 3404 3405 kmem_free(callback, sizeof (arc_write_callback_t)); 3406 } 3407 3408 zio_t * 3409 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, 3410 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp, 3411 arc_done_func_t *ready, arc_done_func_t *done, void *private, 3412 int priority, int zio_flags, const zbookmark_t *zb) 3413 { 3414 arc_buf_hdr_t *hdr = buf->b_hdr; 3415 arc_write_callback_t *callback; 3416 zio_t *zio; 3417 3418 ASSERT(ready != NULL); 3419 ASSERT(done != NULL); 3420 ASSERT(!HDR_IO_ERROR(hdr)); 3421 ASSERT((hdr->b_flags & 
ARC_IO_IN_PROGRESS) == 0); 3422 ASSERT(hdr->b_acb == NULL); 3423 if (l2arc) 3424 hdr->b_flags |= ARC_L2CACHE; 3425 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3426 callback->awcb_ready = ready; 3427 callback->awcb_done = done; 3428 callback->awcb_private = private; 3429 callback->awcb_buf = buf; 3430 3431 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, 3432 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3433 3434 return (zio); 3435 } 3436 3437 static int 3438 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg) 3439 { 3440 #ifdef _KERNEL 3441 uint64_t available_memory = ptob(freemem); 3442 static uint64_t page_load = 0; 3443 static uint64_t last_txg = 0; 3444 3445 #if defined(__i386) 3446 available_memory = 3447 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3448 #endif 3449 if (available_memory >= zfs_write_limit_max) 3450 return (0); 3451 3452 if (txg > last_txg) { 3453 last_txg = txg; 3454 page_load = 0; 3455 } 3456 /* 3457 * If we are in pageout, we know that memory is already tight, 3458 * the arc is already going to be evicting, so we just want to 3459 * continue to let page writes occur as quickly as possible. 3460 */ 3461 if (curproc == proc_pageout) { 3462 if (page_load > MAX(ptob(minfree), available_memory) / 4) 3463 return (ERESTART); 3464 /* Note: reserve is inflated, so we deflate */ 3465 page_load += reserve / 8; 3466 return (0); 3467 } else if (page_load > 0 && arc_reclaim_needed()) { 3468 /* memory is low, delay before restarting */ 3469 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3470 return (EAGAIN); 3471 } 3472 page_load = 0; 3473 3474 if (arc_size > arc_c_min) { 3475 uint64_t evictable_memory = 3476 arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3477 arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3478 arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3479 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3480 available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3481 } 3482 3483 if (inflight_data > available_memory / 4) { 3484 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3485 return (ERESTART); 3486 } 3487 #endif 3488 return (0); 3489 } 3490 3491 void 3492 arc_tempreserve_clear(uint64_t reserve) 3493 { 3494 atomic_add_64(&arc_tempreserve, -reserve); 3495 ASSERT((int64_t)arc_tempreserve >= 0); 3496 } 3497 3498 int 3499 arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3500 { 3501 int error; 3502 uint64_t anon_size; 3503 3504 #ifdef ZFS_DEBUG 3505 /* 3506 * Once in a while, fail for no reason. Everything should cope. 3507 */ 3508 if (spa_get_random(10000) == 0) { 3509 dprintf("forcing random failure\n"); 3510 return (ERESTART); 3511 } 3512 #endif 3513 if (reserve > arc_c/4 && !arc_no_grow) 3514 arc_c = MIN(arc_c_max, reserve * 4); 3515 if (reserve > arc_c) 3516 return (ENOMEM); 3517 3518 /* 3519 * Don't count loaned bufs as in flight dirty data to prevent long 3520 * network delays from blocking transactions that are ready to be 3521 * assigned to a txg. 3522 */ 3523 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0); 3524 3525 /* 3526 * Writes will, almost always, require additional memory allocations 3527 * in order to compress/encrypt/etc the data. We therefor need to 3528 * make sure that there is sufficient available memory for this. 3529 */ 3530 if (error = arc_memory_throttle(reserve, anon_size, txg)) 3531 return (error); 3532 3533 /* 3534 * Throttle writes when the amount of dirty data in the cache 3535 * gets too large. 
We try to keep the cache less than half full 3536 * of dirty blocks so that our sync times don't grow too large. 3537 * Note: if two requests come in concurrently, we might let them 3538 * both succeed, when one of them should fail. Not a huge deal. 3539 */ 3540 3541 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 3542 anon_size > arc_c / 4) { 3543 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3544 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3545 arc_tempreserve>>10, 3546 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3547 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3548 reserve>>10, arc_c>>10); 3549 return (ERESTART); 3550 } 3551 atomic_add_64(&arc_tempreserve, reserve); 3552 return (0); 3553 } 3554 3555 void 3556 arc_init(void) 3557 { 3558 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3559 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3560 3561 /* Convert seconds to clock ticks */ 3562 arc_min_prefetch_lifespan = 1 * hz; 3563 3564 /* Start out with 1/8 of all memory */ 3565 arc_c = physmem * PAGESIZE / 8; 3566 3567 #ifdef _KERNEL 3568 /* 3569 * On architectures where the physical memory can be larger 3570 * than the addressable space (intel in 32-bit mode), we may 3571 * need to limit the cache to 1/8 of VM size. 3572 */ 3573 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3574 #endif 3575 3576 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 3577 arc_c_min = MAX(arc_c / 4, 64<<20); 3578 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 3579 if (arc_c * 8 >= 1<<30) 3580 arc_c_max = (arc_c * 8) - (1<<30); 3581 else 3582 arc_c_max = arc_c_min; 3583 arc_c_max = MAX(arc_c * 6, arc_c_max); 3584 3585 /* 3586 * Allow the tunables to override our calculations if they are 3587 * reasonable (ie. 
over 64MB) 3588 */ 3589 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 3590 arc_c_max = zfs_arc_max; 3591 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 3592 arc_c_min = zfs_arc_min; 3593 3594 arc_c = arc_c_max; 3595 arc_p = (arc_c >> 1); 3596 3597 /* limit meta-data to 1/4 of the arc capacity */ 3598 arc_meta_limit = arc_c_max / 4; 3599 3600 /* Allow the tunable to override if it is reasonable */ 3601 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3602 arc_meta_limit = zfs_arc_meta_limit; 3603 3604 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3605 arc_c_min = arc_meta_limit / 2; 3606 3607 if (zfs_arc_grow_retry > 0) 3608 arc_grow_retry = zfs_arc_grow_retry; 3609 3610 if (zfs_arc_shrink_shift > 0) 3611 arc_shrink_shift = zfs_arc_shrink_shift; 3612 3613 if (zfs_arc_p_min_shift > 0) 3614 arc_p_min_shift = zfs_arc_p_min_shift; 3615 3616 /* if kmem_flags are set, lets try to use less memory */ 3617 if (kmem_debugging()) 3618 arc_c = arc_c / 2; 3619 if (arc_c < arc_c_min) 3620 arc_c = arc_c_min; 3621 3622 arc_anon = &ARC_anon; 3623 arc_mru = &ARC_mru; 3624 arc_mru_ghost = &ARC_mru_ghost; 3625 arc_mfu = &ARC_mfu; 3626 arc_mfu_ghost = &ARC_mfu_ghost; 3627 arc_l2c_only = &ARC_l2c_only; 3628 arc_size = 0; 3629 3630 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3631 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3632 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3633 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3634 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3635 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3636 3637 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3638 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3639 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3640 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3641 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3642 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3643 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3644 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3645 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3646 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3647 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3648 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3649 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3650 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3651 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3652 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3653 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3654 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3655 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3656 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3657 3658 buf_init(); 3659 3660 arc_thread_exit = 0; 3661 arc_eviction_list = NULL; 3662 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3663 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3664 3665 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3666 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3667 3668 if (arc_ksp != NULL) { 3669 arc_ksp->ks_data = &arc_stats; 3670 kstat_install(arc_ksp); 3671 } 3672 3673 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3674 TS_RUN, minclsyspri); 3675 3676 arc_dead = FALSE; 3677 arc_warm = B_FALSE; 3678 3679 
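	/*
	 * If the write-limit tunable was left at zero, seed it from
	 * physical memory using the configured shift; an explicit
	 * setting disables the shift-based scaling instead.
	 */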
if (zfs_write_limit_max == 0) 3680 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3681 else 3682 zfs_write_limit_shift = 0; 3683 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); 3684 } 3685 3686 void 3687 arc_fini(void) 3688 { 3689 mutex_enter(&arc_reclaim_thr_lock); 3690 arc_thread_exit = 1; 3691 while (arc_thread_exit != 0) 3692 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3693 mutex_exit(&arc_reclaim_thr_lock); 3694 3695 arc_flush(NULL); 3696 3697 arc_dead = TRUE; 3698 3699 if (arc_ksp != NULL) { 3700 kstat_delete(arc_ksp); 3701 arc_ksp = NULL; 3702 } 3703 3704 mutex_destroy(&arc_eviction_mtx); 3705 mutex_destroy(&arc_reclaim_thr_lock); 3706 cv_destroy(&arc_reclaim_thr_cv); 3707 3708 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3709 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3710 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3711 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3712 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3713 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3714 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3715 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3716 3717 mutex_destroy(&arc_anon->arcs_mtx); 3718 mutex_destroy(&arc_mru->arcs_mtx); 3719 mutex_destroy(&arc_mru_ghost->arcs_mtx); 3720 mutex_destroy(&arc_mfu->arcs_mtx); 3721 mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3722 mutex_destroy(&arc_l2c_only->arcs_mtx); 3723 3724 mutex_destroy(&zfs_write_limit_lock); 3725 3726 buf_fini(); 3727 3728 ASSERT(arc_loaned_bytes == 0); 3729 } 3730 3731 /* 3732 * Level 2 ARC 3733 * 3734 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3735 * It uses dedicated storage devices to hold cached data, which are populated 3736 * using large infrequent writes. The main role of this cache is to boost 3737 * the performance of random read workloads. The intended L2ARC devices 3738 * include short-stroked disks, solid state disks, and other media with 3739 * substantially faster read latency than disk. 3740 * 3741 * +-----------------------+ 3742 * | ARC | 3743 * +-----------------------+ 3744 * | ^ ^ 3745 * | | | 3746 * l2arc_feed_thread() arc_read() 3747 * | | | 3748 * | l2arc read | 3749 * V | | 3750 * +---------------+ | 3751 * | L2ARC | | 3752 * +---------------+ | 3753 * | ^ | 3754 * l2arc_write() | | 3755 * | | | 3756 * V | | 3757 * +-------+ +-------+ 3758 * | vdev | | vdev | 3759 * | cache | | cache | 3760 * +-------+ +-------+ 3761 * +=========+ .-----. 3762 * : L2ARC : |-_____-| 3763 * : devices : | Disks | 3764 * +=========+ `-_____-' 3765 * 3766 * Read requests are satisfied from the following sources, in order: 3767 * 3768 * 1) ARC 3769 * 2) vdev cache of L2ARC devices 3770 * 3) L2ARC devices 3771 * 4) vdev cache of disks 3772 * 5) disks 3773 * 3774 * Some L2ARC device types exhibit extremely slow write performance. 3775 * To accommodate for this there are some significant differences between 3776 * the L2ARC and traditional cache design: 3777 * 3778 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 3779 * the ARC behave as usual, freeing buffers and placing headers on ghost 3780 * lists. The ARC does not send buffers to the L2ARC during eviction as 3781 * this would add inflated write latencies for all ARC memory pressure. 3782 * 3783 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there.  It scans until a headroom of buffers is satisfied,
 * which itself acts as a cushion against ARC eviction.  The thread that
 * does this is l2arc_feed_thread(), illustrated below; example sizes are
 * included to provide a better sense of ratio than the diagram alone:
 *
 *                 head -->                        tail
 *                  +---------------------+----------+
 *          ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *                  +---------------------+----------+   |   o L2ARC eligible
 *          ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
 *                  +---------------------+----------+   |
 *                       15.9 Gbytes      ^ 32 Mbytes    |
 *                                     headroom          |
 *                                               l2arc_feed_thread()
 *                                                       |
 *                      l2arc write hand <--[oooo]--'
 *                              |           8 Mbyte
 *                              |          write max
 *                              V
 *                +==============================+
 *      L2ARC dev |####|#|###|###|    |####| ... |
 *                +==============================+
 *                                32 Gbytes
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from the tail of
 * these lists as pictured, the l2arc_feed_thread() will search from the
 * list heads for eligible buffers, greatly increasing its chance of finding
 * them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk-based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now-stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to send writes.  A minimal sketch of how they fit together follows this
 * comment.
 */
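
/*
 * Illustrative sketch (not in the original source): the core of one feed
 * pass, showing how the three functions above cooperate.  Locking, device
 * selection, and error handling are omitted; "dev" and "spa" are assumed
 * to have been obtained as in l2arc_feed_thread() below.
 *
 *	clock_t begin = ddi_get_lbolt();
 *	uint64_t size = l2arc_write_size(dev);
 *
 *	l2arc_evict(dev, size, B_FALSE);
 *	uint64_t wrote = l2arc_write_buffers(spa, dev, size);
 *	clock_t next = l2arc_write_interval(begin, size, wrote);
 */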

static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
	    HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
		return (B_FALSE);

	return (B_TRUE);
}

static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
	uint64_t size;

	size = dev->l2ad_write;

	if (arc_warm == B_FALSE)
		size += dev->l2ad_boost;

	return (size);
}

static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back.  This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}

static void
l2arc_hdr_stat_add(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

static void
l2arc_hdr_stat_remove(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
}
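
/*
 * Worked example (illustrative, assuming hz = 1000 and defaults of
 * l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200) of the
 * l2arc_write_interval() arithmetic above:
 *
 *	busy lists (wrote > wanted / 2):
 *		interval = (1000 * 200) / 1000 = 200 ticks	(0.2 s)
 *	stale lists:
 *		interval = 1000 * 1 = 1000 ticks		(1 s)
 *
 * The MAX(now, MIN(now + interval, began + interval)) expression measures
 * the delay from when the feed pass *began*, so a long write pass does not
 * stretch the cycle, while the outer MAX() guarantees the computed wakeup
 * time is never in the past.
 */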
3947 */ 3948 mutex_enter(&spa_namespace_lock); 3949 mutex_enter(&l2arc_dev_mtx); 3950 3951 /* if there are no vdevs, there is nothing to do */ 3952 if (l2arc_ndev == 0) 3953 goto out; 3954 3955 first = NULL; 3956 next = l2arc_dev_last; 3957 do { 3958 /* loop around the list looking for a non-faulted vdev */ 3959 if (next == NULL) { 3960 next = list_head(l2arc_dev_list); 3961 } else { 3962 next = list_next(l2arc_dev_list, next); 3963 if (next == NULL) 3964 next = list_head(l2arc_dev_list); 3965 } 3966 3967 /* if we have come back to the start, bail out */ 3968 if (first == NULL) 3969 first = next; 3970 else if (next == first) 3971 break; 3972 3973 } while (vdev_is_dead(next->l2ad_vdev)); 3974 3975 /* if we were unable to find any usable vdevs, return NULL */ 3976 if (vdev_is_dead(next->l2ad_vdev)) 3977 next = NULL; 3978 3979 l2arc_dev_last = next; 3980 3981 out: 3982 mutex_exit(&l2arc_dev_mtx); 3983 3984 /* 3985 * Grab the config lock to prevent the 'next' device from being 3986 * removed while we are writing to it. 3987 */ 3988 if (next != NULL) 3989 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 3990 mutex_exit(&spa_namespace_lock); 3991 3992 return (next); 3993 } 3994 3995 /* 3996 * Free buffers that were tagged for destruction. 3997 */ 3998 static void 3999 l2arc_do_free_on_write() 4000 { 4001 list_t *buflist; 4002 l2arc_data_free_t *df, *df_prev; 4003 4004 mutex_enter(&l2arc_free_on_write_mtx); 4005 buflist = l2arc_free_on_write; 4006 4007 for (df = list_tail(buflist); df; df = df_prev) { 4008 df_prev = list_prev(buflist, df); 4009 ASSERT(df->l2df_data != NULL); 4010 ASSERT(df->l2df_func != NULL); 4011 df->l2df_func(df->l2df_data, df->l2df_size); 4012 list_remove(buflist, df); 4013 kmem_free(df, sizeof (l2arc_data_free_t)); 4014 } 4015 4016 mutex_exit(&l2arc_free_on_write_mtx); 4017 } 4018 4019 /* 4020 * A write to a cache device has completed. Update all headers to allow 4021 * reads from these buffers to begin. 4022 */ 4023 static void 4024 l2arc_write_done(zio_t *zio) 4025 { 4026 l2arc_write_callback_t *cb; 4027 l2arc_dev_t *dev; 4028 list_t *buflist; 4029 arc_buf_hdr_t *head, *ab, *ab_prev; 4030 l2arc_buf_hdr_t *abl2; 4031 kmutex_t *hash_lock; 4032 4033 cb = zio->io_private; 4034 ASSERT(cb != NULL); 4035 dev = cb->l2wcb_dev; 4036 ASSERT(dev != NULL); 4037 head = cb->l2wcb_head; 4038 ASSERT(head != NULL); 4039 buflist = dev->l2ad_buflist; 4040 ASSERT(buflist != NULL); 4041 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 4042 l2arc_write_callback_t *, cb); 4043 4044 if (zio->io_error != 0) 4045 ARCSTAT_BUMP(arcstat_l2_writes_error); 4046 4047 mutex_enter(&l2arc_buflist_mtx); 4048 4049 /* 4050 * All writes completed, or an error was hit. 4051 */ 4052 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 4053 ab_prev = list_prev(buflist, ab); 4054 4055 hash_lock = HDR_LOCK(ab); 4056 if (!mutex_tryenter(hash_lock)) { 4057 /* 4058 * This buffer misses out. It may be in a stage 4059 * of eviction. Its ARC_L2_WRITING flag will be 4060 * left set, denying reads to this buffer. 4061 */ 4062 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 4063 continue; 4064 } 4065 4066 if (zio->io_error != 0) { 4067 /* 4068 * Error - drop L2ARC entry. 4069 */ 4070 list_remove(buflist, ab); 4071 abl2 = ab->b_l2hdr; 4072 ab->b_l2hdr = NULL; 4073 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4074 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4075 } 4076 4077 /* 4078 * Allow ARC to begin reads to this L2ARC entry. 
4079 */ 4080 ab->b_flags &= ~ARC_L2_WRITING; 4081 4082 mutex_exit(hash_lock); 4083 } 4084 4085 atomic_inc_64(&l2arc_writes_done); 4086 list_remove(buflist, head); 4087 kmem_cache_free(hdr_cache, head); 4088 mutex_exit(&l2arc_buflist_mtx); 4089 4090 l2arc_do_free_on_write(); 4091 4092 kmem_free(cb, sizeof (l2arc_write_callback_t)); 4093 } 4094 4095 /* 4096 * A read to a cache device completed. Validate buffer contents before 4097 * handing over to the regular ARC routines. 4098 */ 4099 static void 4100 l2arc_read_done(zio_t *zio) 4101 { 4102 l2arc_read_callback_t *cb; 4103 arc_buf_hdr_t *hdr; 4104 arc_buf_t *buf; 4105 kmutex_t *hash_lock; 4106 int equal; 4107 4108 ASSERT(zio->io_vd != NULL); 4109 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 4110 4111 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 4112 4113 cb = zio->io_private; 4114 ASSERT(cb != NULL); 4115 buf = cb->l2rcb_buf; 4116 ASSERT(buf != NULL); 4117 4118 hash_lock = HDR_LOCK(buf->b_hdr); 4119 mutex_enter(hash_lock); 4120 hdr = buf->b_hdr; 4121 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4122 4123 /* 4124 * Check this survived the L2ARC journey. 4125 */ 4126 equal = arc_cksum_equal(buf); 4127 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4128 mutex_exit(hash_lock); 4129 zio->io_private = buf; 4130 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4131 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4132 arc_read_done(zio); 4133 } else { 4134 mutex_exit(hash_lock); 4135 /* 4136 * Buffer didn't survive caching. Increment stats and 4137 * reissue to the original storage device. 4138 */ 4139 if (zio->io_error != 0) { 4140 ARCSTAT_BUMP(arcstat_l2_io_error); 4141 } else { 4142 zio->io_error = EIO; 4143 } 4144 if (!equal) 4145 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4146 4147 /* 4148 * If there's no waiter, issue an async i/o to the primary 4149 * storage now. If there *is* a waiter, the caller must 4150 * issue the i/o in a context where it's OK to block. 4151 */ 4152 if (zio->io_waiter == NULL) { 4153 zio_t *pio = zio_unique_parent(zio); 4154 4155 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4156 4157 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4158 buf->b_data, zio->io_size, arc_read_done, buf, 4159 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4160 } 4161 } 4162 4163 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4164 } 4165 4166 /* 4167 * This is the list priority from which the L2ARC will search for pages to 4168 * cache. This is used within loops (0..3) to cycle through lists in the 4169 * desired order. This order can have a significant effect on cache 4170 * performance. 4171 * 4172 * Currently the metadata lists are hit first, MFU then MRU, followed by 4173 * the data lists. This function returns a locked list, and also returns 4174 * the lock pointer. 
4175 */ 4176 static list_t * 4177 l2arc_list_locked(int list_num, kmutex_t **lock) 4178 { 4179 list_t *list = NULL; 4180 4181 ASSERT(list_num >= 0 && list_num <= 3); 4182 4183 switch (list_num) { 4184 case 0: 4185 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 4186 *lock = &arc_mfu->arcs_mtx; 4187 break; 4188 case 1: 4189 list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 4190 *lock = &arc_mru->arcs_mtx; 4191 break; 4192 case 2: 4193 list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 4194 *lock = &arc_mfu->arcs_mtx; 4195 break; 4196 case 3: 4197 list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 4198 *lock = &arc_mru->arcs_mtx; 4199 break; 4200 } 4201 4202 ASSERT(!(MUTEX_HELD(*lock))); 4203 mutex_enter(*lock); 4204 return (list); 4205 } 4206 4207 /* 4208 * Evict buffers from the device write hand to the distance specified in 4209 * bytes. This distance may span populated buffers, it may span nothing. 4210 * This is clearing a region on the L2ARC device ready for writing. 4211 * If the 'all' boolean is set, every buffer is evicted. 4212 */ 4213 static void 4214 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4215 { 4216 list_t *buflist; 4217 l2arc_buf_hdr_t *abl2; 4218 arc_buf_hdr_t *ab, *ab_prev; 4219 kmutex_t *hash_lock; 4220 uint64_t taddr; 4221 4222 buflist = dev->l2ad_buflist; 4223 4224 if (buflist == NULL) 4225 return; 4226 4227 if (!all && dev->l2ad_first) { 4228 /* 4229 * This is the first sweep through the device. There is 4230 * nothing to evict. 4231 */ 4232 return; 4233 } 4234 4235 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4236 /* 4237 * When nearing the end of the device, evict to the end 4238 * before the device write hand jumps to the start. 4239 */ 4240 taddr = dev->l2ad_end; 4241 } else { 4242 taddr = dev->l2ad_hand + distance; 4243 } 4244 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4245 uint64_t, taddr, boolean_t, all); 4246 4247 top: 4248 mutex_enter(&l2arc_buflist_mtx); 4249 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4250 ab_prev = list_prev(buflist, ab); 4251 4252 hash_lock = HDR_LOCK(ab); 4253 if (!mutex_tryenter(hash_lock)) { 4254 /* 4255 * Missed the hash lock. Retry. 4256 */ 4257 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4258 mutex_exit(&l2arc_buflist_mtx); 4259 mutex_enter(hash_lock); 4260 mutex_exit(hash_lock); 4261 goto top; 4262 } 4263 4264 if (HDR_L2_WRITE_HEAD(ab)) { 4265 /* 4266 * We hit a write head node. Leave it for 4267 * l2arc_write_done(). 4268 */ 4269 list_remove(buflist, ab); 4270 mutex_exit(hash_lock); 4271 continue; 4272 } 4273 4274 if (!all && ab->b_l2hdr != NULL && 4275 (ab->b_l2hdr->b_daddr > taddr || 4276 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4277 /* 4278 * We've evicted to the target address, 4279 * or the end of the device. 4280 */ 4281 mutex_exit(hash_lock); 4282 break; 4283 } 4284 4285 if (HDR_FREE_IN_PROGRESS(ab)) { 4286 /* 4287 * Already on the path to destruction. 4288 */ 4289 mutex_exit(hash_lock); 4290 continue; 4291 } 4292 4293 if (ab->b_state == arc_l2c_only) { 4294 ASSERT(!HDR_L2_READING(ab)); 4295 /* 4296 * This doesn't exist in the ARC. Destroy. 4297 * arc_hdr_destroy() will call list_remove() 4298 * and decrement arcstat_l2_size. 4299 */ 4300 arc_change_state(arc_anon, ab, hash_lock); 4301 arc_hdr_destroy(ab); 4302 } else { 4303 /* 4304 * Invalidate issued or about to be issued 4305 * reads, since we may be about to write 4306 * over this location. 
4307 */ 4308 if (HDR_L2_READING(ab)) { 4309 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4310 ab->b_flags |= ARC_L2_EVICTED; 4311 } 4312 4313 /* 4314 * Tell ARC this no longer exists in L2ARC. 4315 */ 4316 if (ab->b_l2hdr != NULL) { 4317 abl2 = ab->b_l2hdr; 4318 ab->b_l2hdr = NULL; 4319 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4320 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4321 } 4322 list_remove(buflist, ab); 4323 4324 /* 4325 * This may have been leftover after a 4326 * failed write. 4327 */ 4328 ab->b_flags &= ~ARC_L2_WRITING; 4329 } 4330 mutex_exit(hash_lock); 4331 } 4332 mutex_exit(&l2arc_buflist_mtx); 4333 4334 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0); 4335 dev->l2ad_evict = taddr; 4336 } 4337 4338 /* 4339 * Find and write ARC buffers to the L2ARC device. 4340 * 4341 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4342 * for reading until they have completed writing. 4343 */ 4344 static uint64_t 4345 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4346 { 4347 arc_buf_hdr_t *ab, *ab_prev, *head; 4348 l2arc_buf_hdr_t *hdrl2; 4349 list_t *list; 4350 uint64_t passed_sz, write_sz, buf_sz, headroom; 4351 void *buf_data; 4352 kmutex_t *hash_lock, *list_lock; 4353 boolean_t have_lock, full; 4354 l2arc_write_callback_t *cb; 4355 zio_t *pio, *wzio; 4356 uint64_t guid = spa_load_guid(spa); 4357 4358 ASSERT(dev->l2ad_vdev != NULL); 4359 4360 pio = NULL; 4361 write_sz = 0; 4362 full = B_FALSE; 4363 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4364 head->b_flags |= ARC_L2_WRITE_HEAD; 4365 4366 /* 4367 * Copy buffers for L2ARC writing. 4368 */ 4369 mutex_enter(&l2arc_buflist_mtx); 4370 for (int try = 0; try <= 3; try++) { 4371 list = l2arc_list_locked(try, &list_lock); 4372 passed_sz = 0; 4373 4374 /* 4375 * L2ARC fast warmup. 4376 * 4377 * Until the ARC is warm and starts to evict, read from the 4378 * head of the ARC lists rather than the tail. 4379 */ 4380 headroom = target_sz * l2arc_headroom; 4381 if (arc_warm == B_FALSE) 4382 ab = list_head(list); 4383 else 4384 ab = list_tail(list); 4385 4386 for (; ab; ab = ab_prev) { 4387 if (arc_warm == B_FALSE) 4388 ab_prev = list_next(list, ab); 4389 else 4390 ab_prev = list_prev(list, ab); 4391 4392 hash_lock = HDR_LOCK(ab); 4393 have_lock = MUTEX_HELD(hash_lock); 4394 if (!have_lock && !mutex_tryenter(hash_lock)) { 4395 /* 4396 * Skip this buffer rather than waiting. 4397 */ 4398 continue; 4399 } 4400 4401 passed_sz += ab->b_size; 4402 if (passed_sz > headroom) { 4403 /* 4404 * Searched too far. 4405 */ 4406 mutex_exit(hash_lock); 4407 break; 4408 } 4409 4410 if (!l2arc_write_eligible(guid, ab)) { 4411 mutex_exit(hash_lock); 4412 continue; 4413 } 4414 4415 if ((write_sz + ab->b_size) > target_sz) { 4416 full = B_TRUE; 4417 mutex_exit(hash_lock); 4418 break; 4419 } 4420 4421 if (pio == NULL) { 4422 /* 4423 * Insert a dummy header on the buflist so 4424 * l2arc_write_done() can find where the 4425 * write buffers begin without searching. 4426 */ 4427 list_insert_head(dev->l2ad_buflist, head); 4428 4429 cb = kmem_alloc( 4430 sizeof (l2arc_write_callback_t), KM_SLEEP); 4431 cb->l2wcb_dev = dev; 4432 cb->l2wcb_head = head; 4433 pio = zio_root(spa, l2arc_write_done, cb, 4434 ZIO_FLAG_CANFAIL); 4435 } 4436 4437 /* 4438 * Create and add a new L2ARC header. 
4439 */ 4440 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4441 hdrl2->b_dev = dev; 4442 hdrl2->b_daddr = dev->l2ad_hand; 4443 4444 ab->b_flags |= ARC_L2_WRITING; 4445 ab->b_l2hdr = hdrl2; 4446 list_insert_head(dev->l2ad_buflist, ab); 4447 buf_data = ab->b_buf->b_data; 4448 buf_sz = ab->b_size; 4449 4450 /* 4451 * Compute and store the buffer cksum before 4452 * writing. On debug the cksum is verified first. 4453 */ 4454 arc_cksum_verify(ab->b_buf); 4455 arc_cksum_compute(ab->b_buf, B_TRUE); 4456 4457 mutex_exit(hash_lock); 4458 4459 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4460 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4461 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4462 ZIO_FLAG_CANFAIL, B_FALSE); 4463 4464 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4465 zio_t *, wzio); 4466 (void) zio_nowait(wzio); 4467 4468 /* 4469 * Keep the clock hand suitably device-aligned. 4470 */ 4471 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4472 4473 write_sz += buf_sz; 4474 dev->l2ad_hand += buf_sz; 4475 } 4476 4477 mutex_exit(list_lock); 4478 4479 if (full == B_TRUE) 4480 break; 4481 } 4482 mutex_exit(&l2arc_buflist_mtx); 4483 4484 if (pio == NULL) { 4485 ASSERT0(write_sz); 4486 kmem_cache_free(hdr_cache, head); 4487 return (0); 4488 } 4489 4490 ASSERT3U(write_sz, <=, target_sz); 4491 ARCSTAT_BUMP(arcstat_l2_writes_sent); 4492 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz); 4493 ARCSTAT_INCR(arcstat_l2_size, write_sz); 4494 vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0); 4495 4496 /* 4497 * Bump device hand to the device start if it is approaching the end. 4498 * l2arc_evict() will already have evicted ahead for this case. 4499 */ 4500 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4501 vdev_space_update(dev->l2ad_vdev, 4502 dev->l2ad_end - dev->l2ad_hand, 0, 0); 4503 dev->l2ad_hand = dev->l2ad_start; 4504 dev->l2ad_evict = dev->l2ad_start; 4505 dev->l2ad_first = B_FALSE; 4506 } 4507 4508 dev->l2ad_writing = B_TRUE; 4509 (void) zio_wait(pio); 4510 dev->l2ad_writing = B_FALSE; 4511 4512 return (write_sz); 4513 } 4514 4515 /* 4516 * This thread feeds the L2ARC at regular intervals. This is the beating 4517 * heart of the L2ARC. 4518 */ 4519 static void 4520 l2arc_feed_thread(void) 4521 { 4522 callb_cpr_t cpr; 4523 l2arc_dev_t *dev; 4524 spa_t *spa; 4525 uint64_t size, wrote; 4526 clock_t begin, next = ddi_get_lbolt(); 4527 4528 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4529 4530 mutex_enter(&l2arc_feed_thr_lock); 4531 4532 while (l2arc_thread_exit == 0) { 4533 CALLB_CPR_SAFE_BEGIN(&cpr); 4534 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4535 next); 4536 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4537 next = ddi_get_lbolt() + hz; 4538 4539 /* 4540 * Quick check for L2ARC devices. 4541 */ 4542 mutex_enter(&l2arc_dev_mtx); 4543 if (l2arc_ndev == 0) { 4544 mutex_exit(&l2arc_dev_mtx); 4545 continue; 4546 } 4547 mutex_exit(&l2arc_dev_mtx); 4548 begin = ddi_get_lbolt(); 4549 4550 /* 4551 * This selects the next l2arc device to write to, and in 4552 * doing so the next spa to feed from: dev->l2ad_spa. This 4553 * will return NULL if there are now no l2arc devices or if 4554 * they are all faulted. 4555 * 4556 * If a device is returned, its spa's config lock is also 4557 * held to prevent device removal. l2arc_dev_get_next() 4558 * will grab and release l2arc_dev_mtx. 
4559 */ 4560 if ((dev = l2arc_dev_get_next()) == NULL) 4561 continue; 4562 4563 spa = dev->l2ad_spa; 4564 ASSERT(spa != NULL); 4565 4566 /* 4567 * If the pool is read-only then force the feed thread to 4568 * sleep a little longer. 4569 */ 4570 if (!spa_writeable(spa)) { 4571 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; 4572 spa_config_exit(spa, SCL_L2ARC, dev); 4573 continue; 4574 } 4575 4576 /* 4577 * Avoid contributing to memory pressure. 4578 */ 4579 if (arc_reclaim_needed()) { 4580 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4581 spa_config_exit(spa, SCL_L2ARC, dev); 4582 continue; 4583 } 4584 4585 ARCSTAT_BUMP(arcstat_l2_feeds); 4586 4587 size = l2arc_write_size(dev); 4588 4589 /* 4590 * Evict L2ARC buffers that will be overwritten. 4591 */ 4592 l2arc_evict(dev, size, B_FALSE); 4593 4594 /* 4595 * Write ARC buffers. 4596 */ 4597 wrote = l2arc_write_buffers(spa, dev, size); 4598 4599 /* 4600 * Calculate interval between writes. 4601 */ 4602 next = l2arc_write_interval(begin, size, wrote); 4603 spa_config_exit(spa, SCL_L2ARC, dev); 4604 } 4605 4606 l2arc_thread_exit = 0; 4607 cv_broadcast(&l2arc_feed_thr_cv); 4608 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4609 thread_exit(); 4610 } 4611 4612 boolean_t 4613 l2arc_vdev_present(vdev_t *vd) 4614 { 4615 l2arc_dev_t *dev; 4616 4617 mutex_enter(&l2arc_dev_mtx); 4618 for (dev = list_head(l2arc_dev_list); dev != NULL; 4619 dev = list_next(l2arc_dev_list, dev)) { 4620 if (dev->l2ad_vdev == vd) 4621 break; 4622 } 4623 mutex_exit(&l2arc_dev_mtx); 4624 4625 return (dev != NULL); 4626 } 4627 4628 /* 4629 * Add a vdev for use by the L2ARC. By this point the spa has already 4630 * validated the vdev and opened it. 4631 */ 4632 void 4633 l2arc_add_vdev(spa_t *spa, vdev_t *vd) 4634 { 4635 l2arc_dev_t *adddev; 4636 4637 ASSERT(!l2arc_vdev_present(vd)); 4638 4639 /* 4640 * Create a new l2arc device entry. 4641 */ 4642 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4643 adddev->l2ad_spa = spa; 4644 adddev->l2ad_vdev = vd; 4645 adddev->l2ad_write = l2arc_write_max; 4646 adddev->l2ad_boost = l2arc_write_boost; 4647 adddev->l2ad_start = VDEV_LABEL_START_SIZE; 4648 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 4649 adddev->l2ad_hand = adddev->l2ad_start; 4650 adddev->l2ad_evict = adddev->l2ad_start; 4651 adddev->l2ad_first = B_TRUE; 4652 adddev->l2ad_writing = B_FALSE; 4653 ASSERT3U(adddev->l2ad_write, >, 0); 4654 4655 /* 4656 * This is a list of all ARC buffers that are still valid on the 4657 * device. 4658 */ 4659 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4660 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4661 offsetof(arc_buf_hdr_t, b_l2node)); 4662 4663 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 4664 4665 /* 4666 * Add device to global list 4667 */ 4668 mutex_enter(&l2arc_dev_mtx); 4669 list_insert_head(l2arc_dev_list, adddev); 4670 atomic_inc_64(&l2arc_ndev); 4671 mutex_exit(&l2arc_dev_mtx); 4672 } 4673 4674 /* 4675 * Remove a vdev from the L2ARC. 
4676 */ 4677 void 4678 l2arc_remove_vdev(vdev_t *vd) 4679 { 4680 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4681 4682 /* 4683 * Find the device by vdev 4684 */ 4685 mutex_enter(&l2arc_dev_mtx); 4686 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4687 nextdev = list_next(l2arc_dev_list, dev); 4688 if (vd == dev->l2ad_vdev) { 4689 remdev = dev; 4690 break; 4691 } 4692 } 4693 ASSERT(remdev != NULL); 4694 4695 /* 4696 * Remove device from global list 4697 */ 4698 list_remove(l2arc_dev_list, remdev); 4699 l2arc_dev_last = NULL; /* may have been invalidated */ 4700 atomic_dec_64(&l2arc_ndev); 4701 mutex_exit(&l2arc_dev_mtx); 4702 4703 /* 4704 * Clear all buflists and ARC references. L2ARC device flush. 4705 */ 4706 l2arc_evict(remdev, 0, B_TRUE); 4707 list_destroy(remdev->l2ad_buflist); 4708 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 4709 kmem_free(remdev, sizeof (l2arc_dev_t)); 4710 } 4711 4712 void 4713 l2arc_init(void) 4714 { 4715 l2arc_thread_exit = 0; 4716 l2arc_ndev = 0; 4717 l2arc_writes_sent = 0; 4718 l2arc_writes_done = 0; 4719 4720 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4721 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 4722 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 4723 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 4724 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 4725 4726 l2arc_dev_list = &L2ARC_dev_list; 4727 l2arc_free_on_write = &L2ARC_free_on_write; 4728 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 4729 offsetof(l2arc_dev_t, l2ad_node)); 4730 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 4731 offsetof(l2arc_data_free_t, l2df_list_node)); 4732 } 4733 4734 void 4735 l2arc_fini(void) 4736 { 4737 /* 4738 * This is called from dmu_fini(), which is called from spa_fini(); 4739 * Because of this, we can assume that all l2arc devices have 4740 * already been removed when the pools themselves were removed. 4741 */ 4742 4743 l2arc_do_free_on_write(); 4744 4745 mutex_destroy(&l2arc_feed_thr_lock); 4746 cv_destroy(&l2arc_feed_thr_cv); 4747 mutex_destroy(&l2arc_dev_mtx); 4748 mutex_destroy(&l2arc_buflist_mtx); 4749 mutex_destroy(&l2arc_free_on_write_mtx); 4750 4751 list_destroy(l2arc_dev_list); 4752 list_destroy(l2arc_free_on_write); 4753 } 4754 4755 void 4756 l2arc_start(void) 4757 { 4758 if (!(spa_mode_global & FWRITE)) 4759 return; 4760 4761 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 4762 TS_RUN, minclsyspri); 4763 } 4764 4765 void 4766 l2arc_stop(void) 4767 { 4768 if (!(spa_mode_global & FWRITE)) 4769 return; 4770 4771 mutex_enter(&l2arc_feed_thr_lock); 4772 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 4773 l2arc_thread_exit = 1; 4774 while (l2arc_thread_exit != 0) 4775 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 4776 mutex_exit(&l2arc_feed_thr_lock); 4777 } 4778