/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory. This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about. Our cache is not so simple. At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them. Blocks are only evictable
 *    when there are no external references active. This makes
 *    eviction far more problematic: we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space. In these circumstances we are unable to adjust the cache
 *    size. To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss. Our model has a variable sized cache. It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 *    elements of the cache are therefore exactly the same size. So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict. In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes). We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D.
Modha, FAST 2003 72 */ 73 74 /* 75 * The locking model: 76 * 77 * A new reference to a cache buffer can be obtained in two 78 * ways: 1) via a hash table lookup using the DVA as a key, 79 * or 2) via one of the ARC lists. The arc_read() interface 80 * uses method 1, while the internal ARC algorithms for 81 * adjusting the cache use method 2. We therefore provide two 82 * types of locks: 1) the hash table lock array, and 2) the 83 * ARC list locks. 84 * 85 * Buffers do not have their own mutexes, rather they rely on the 86 * hash table mutexes for the bulk of their protection (i.e. most 87 * fields in the arc_buf_hdr_t are protected by these mutexes). 88 * 89 * buf_hash_find() returns the appropriate mutex (held) when it 90 * locates the requested buffer in the hash table. It returns 91 * NULL for the mutex if the buffer was not in the table. 92 * 93 * buf_hash_remove() expects the appropriate hash mutex to be 94 * already held before it is invoked. 95 * 96 * Each ARC state also has a mutex which is used to protect the 97 * buffer list associated with the state. When attempting to 98 * obtain a hash table lock while holding an ARC list lock you 99 * must use: mutex_tryenter() to avoid deadlock. Also note that 100 * the active state mutex must be held before the ghost state mutex. 101 * 102 * Note that the majority of the performance stats are manipulated 103 * with atomic operations. 104 * 105 * The L2ARC uses the l2ad_mtx on each vdev for the following: 106 * 107 * - L2ARC buflist creation 108 * - L2ARC buflist eviction 109 * - L2ARC write completion, which walks L2ARC buflists 110 * - ARC header destruction, as it removes from L2ARC buflists 111 * - ARC header release, as it removes from L2ARC buflists 112 */ 113 114 /* 115 * ARC operation: 116 * 117 * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure. 118 * This structure can point either to a block that is still in the cache or to 119 * one that is only accessible in an L2 ARC device, or it can provide 120 * information about a block that was recently evicted. If a block is 121 * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough 122 * information to retrieve it from the L2ARC device. This information is 123 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block 124 * that is in this state cannot access the data directly. 125 * 126 * Blocks that are actively being referenced or have not been evicted 127 * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within 128 * the arc_buf_hdr_t that will point to the data block in memory. A block can 129 * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC 130 * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and 131 * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd). 132 * 133 * The L1ARC's data pointer may or may not be uncompressed. The ARC has the 134 * ability to store the physical data (b_pabd) associated with the DVA of the 135 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block, 136 * it will match its on-disk compression characteristics. This behavior can be 137 * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the 138 * compressed ARC functionality is disabled, the b_pabd will point to an 139 * uncompressed version of the on-disk data. 140 * 141 * Data in the L1ARC is not accessed by consumers of the ARC directly. Each 142 * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it. 
143 * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC 144 * consumer. The ARC will provide references to this data and will keep it 145 * cached until it is no longer in use. The ARC caches only the L1ARC's physical 146 * data block and will evict any arc_buf_t that is no longer referenced. The 147 * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the 148 * "overhead_size" kstat. 149 * 150 * Depending on the consumer, an arc_buf_t can be requested in uncompressed or 151 * compressed form. The typical case is that consumers will want uncompressed 152 * data, and when that happens a new data buffer is allocated where the data is 153 * decompressed for them to use. Currently the only consumer who wants 154 * compressed arc_buf_t's is "zfs send", when it streams data exactly as it 155 * exists on disk. When this happens, the arc_buf_t's data buffer is shared 156 * with the arc_buf_hdr_t. 157 * 158 * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The 159 * first one is owned by a compressed send consumer (and therefore references 160 * the same compressed data buffer as the arc_buf_hdr_t) and the second could be 161 * used by any other consumer (and has its own uncompressed copy of the data 162 * buffer). 163 * 164 * arc_buf_hdr_t 165 * +-----------+ 166 * | fields | 167 * | common to | 168 * | L1- and | 169 * | L2ARC | 170 * +-----------+ 171 * | l2arc_buf_hdr_t 172 * | | 173 * +-----------+ 174 * | l1arc_buf_hdr_t 175 * | | arc_buf_t 176 * | b_buf +------------>+-----------+ arc_buf_t 177 * | b_pabd +-+ |b_next +---->+-----------+ 178 * +-----------+ | |-----------| |b_next +-->NULL 179 * | |b_comp = T | +-----------+ 180 * | |b_data +-+ |b_comp = F | 181 * | +-----------+ | |b_data +-+ 182 * +->+------+ | +-----------+ | 183 * compressed | | | | 184 * data | |<--------------+ | uncompressed 185 * +------+ compressed, | data 186 * shared +-->+------+ 187 * data | | 188 * | | 189 * +------+ 190 * 191 * When a consumer reads a block, the ARC must first look to see if the 192 * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new 193 * arc_buf_t and either copies uncompressed data into a new data buffer from an 194 * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a 195 * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the 196 * hdr is compressed and the desired compression characteristics of the 197 * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the 198 * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be 199 * the last buffer in the hdr's b_buf list, however a shared compressed buf can 200 * be anywhere in the hdr's list. 
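 *
 * As a rough sketch of that read-hit decision (illustration only, not code
 * quoted from this file; the wording of the branches is informal and the
 * real logic lives where the ARC allocates the arc_buf_t for the read):
 *
 *	if (consumer wants compressed data && hdr's b_pabd is compressed)
 *		share hdr->b_pabd with the new arc_buf_t;
 *	else if (an uncompressed arc_buf_t already exists on b_buf)
 *		copy its b_data into a newly allocated buffer;
 *	else
 *		decompress hdr->b_pabd into a newly allocated buffer;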
201 * 202 * The diagram below shows an example of an uncompressed ARC hdr that is 203 * sharing its data with an arc_buf_t (note that the shared uncompressed buf is 204 * the last element in the buf list): 205 * 206 * arc_buf_hdr_t 207 * +-----------+ 208 * | | 209 * | | 210 * | | 211 * +-----------+ 212 * l2arc_buf_hdr_t| | 213 * | | 214 * +-----------+ 215 * l1arc_buf_hdr_t| | 216 * | | arc_buf_t (shared) 217 * | b_buf +------------>+---------+ arc_buf_t 218 * | | |b_next +---->+---------+ 219 * | b_pabd +-+ |---------| |b_next +-->NULL 220 * +-----------+ | | | +---------+ 221 * | |b_data +-+ | | 222 * | +---------+ | |b_data +-+ 223 * +->+------+ | +---------+ | 224 * | | | | 225 * uncompressed | | | | 226 * data +------+ | | 227 * ^ +->+------+ | 228 * | uncompressed | | | 229 * | data | | | 230 * | +------+ | 231 * +---------------------------------+ 232 * 233 * Writing to the ARC requires that the ARC first discard the hdr's b_pabd 234 * since the physical block is about to be rewritten. The new data contents 235 * will be contained in the arc_buf_t. As the I/O pipeline performs the write, 236 * it may compress the data before writing it to disk. The ARC will be called 237 * with the transformed data and will bcopy the transformed on-disk block into 238 * a newly allocated b_pabd. Writes are always done into buffers which have 239 * either been loaned (and hence are new and don't have other readers) or 240 * buffers which have been released (and hence have their own hdr, if there 241 * were originally other readers of the buf's original hdr). This ensures that 242 * the ARC only needs to update a single buf and its hdr after a write occurs. 243 * 244 * When the L2ARC is in use, it will also take advantage of the b_pabd. The 245 * L2ARC will always write the contents of b_pabd to the L2ARC. This means 246 * that when compressed ARC is enabled that the L2ARC blocks are identical 247 * to the on-disk block in the main data pool. This provides a significant 248 * advantage since the ARC can leverage the bp's checksum when reading from the 249 * L2ARC to determine if the contents are valid. However, if the compressed 250 * ARC is disabled, then the L2ARC's block must be transformed to look 251 * like the physical block in the main data pool before comparing the 252 * checksum and determining its validity. 253 */ 254 255 #include <sys/spa.h> 256 #include <sys/zio.h> 257 #include <sys/spa_impl.h> 258 #include <sys/zio_compress.h> 259 #include <sys/zio_checksum.h> 260 #include <sys/zfs_context.h> 261 #include <sys/arc.h> 262 #include <sys/refcount.h> 263 #include <sys/vdev.h> 264 #include <sys/vdev_impl.h> 265 #include <sys/dsl_pool.h> 266 #include <sys/zio_checksum.h> 267 #include <sys/multilist.h> 268 #include <sys/abd.h> 269 #ifdef _KERNEL 270 #include <sys/vmsystm.h> 271 #include <vm/anon.h> 272 #include <sys/fs/swapnode.h> 273 #include <sys/dnlc.h> 274 #endif 275 #include <sys/callb.h> 276 #include <sys/kstat.h> 277 #include <zfs_fletcher.h> 278 279 #ifndef _KERNEL 280 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */ 281 boolean_t arc_watch = B_FALSE; 282 int arc_procfd; 283 #endif 284 285 static kmutex_t arc_reclaim_lock; 286 static kcondvar_t arc_reclaim_thread_cv; 287 static boolean_t arc_reclaim_thread_exit; 288 static kcondvar_t arc_reclaim_waiters_cv; 289 290 uint_t arc_reduce_dnlc_percent = 3; 291 292 /* 293 * The number of headers to evict in arc_evict_state_impl() before 294 * dropping the sublist lock and evicting from another sublist. 
 * A lower value means we're more likely to evict the "correct" header
 * (i.e. the oldest header in the arc state), but comes with higher
 * overhead (i.e. more invocations of arc_evict_state_impl()).
 */
int zfs_arc_evict_batch_limit = 10;

/* number of seconds before growing cache again */
static int arc_grow_retry = 60;

/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
int zfs_arc_overflow_shift = 8;

/* shift of arc_c for calculating both min and max arc_p */
static int arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int arc_shrink_shift = 7;

/*
 * log2(fraction of ARC which must be free to allow growing).
 * I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
 * when reading a new block into the ARC, we will evict an equal-sized block
 * from the ARC.
 *
 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
 * we will still not allow it to grow.
 */
int arc_no_grow_shift = 5;


/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int arc_min_prefetch_lifespan;

/*
 * If this percent of memory is free, don't throttle.
 */
int arc_lotsfree_percent = 10;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * log2 fraction of the zio arena to keep free.
 */
int arc_zio_arena_free_shift = 2;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
uint64_t zfs_arc_meta_min = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */

boolean_t zfs_compressed_arc_enabled = B_TRUE;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are
 * the only buffers that can be evicted or deleted. Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
	/*
	 * list of evictable buffers
	 */
	multilist_t *arcs_list[ARC_BUFC_NUMTYPES];
	/*
	 * total amount of evictable data in this state
	 */
	refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
	/*
	 * total amount of data in this state; this includes: evictable,
	 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
	 */
	refcount_t arcs_size;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	/*
	 * Number of buffers that could not be evicted because the hash lock
	 * was held by another thread. The lock may not necessarily be held
	 * by something using the same buffer, since hash locks are shared
	 * by multiple buffers.
	 */
	kstat_named_t arcstat_mutex_miss;
	/*
	 * Number of buffers skipped because they have I/O in progress, are
	 * indirect prefetch buffers that have not lived long enough, or are
	 * not from the spa we're trying to evict from.
	 */
	kstat_named_t arcstat_evict_skip;
	/*
	 * Number of times arc_evict_state() was unable to evict enough
	 * buffers to reach its target amount.
	 */
	kstat_named_t arcstat_evict_not_enough;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_evict_l2_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	/*
	 * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
	 * Note that the compressed bytes may match the uncompressed bytes
	 * if the block is either not compressed or compressed arc is disabled.
	 */
	kstat_named_t arcstat_compressed_size;
	/*
	 * Uncompressed size of the data stored in b_pabd. If compressed
	 * arc is disabled then this value will be identical to the stat
	 * above.
	 */
	kstat_named_t arcstat_uncompressed_size;
	/*
	 * Number of bytes stored in all the arc_buf_t's.
This is classified 480 * as "overhead" since this data is typically short-lived and will 481 * be evicted from the arc when it becomes unreferenced unless the 482 * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level 483 * values have been set (see comment in dbuf.c for more information). 484 */ 485 kstat_named_t arcstat_overhead_size; 486 /* 487 * Number of bytes consumed by internal ARC structures necessary 488 * for tracking purposes; these structures are not actually 489 * backed by ARC buffers. This includes arc_buf_hdr_t structures 490 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only 491 * caches), and arc_buf_t structures (allocated via arc_buf_t 492 * cache). 493 */ 494 kstat_named_t arcstat_hdr_size; 495 /* 496 * Number of bytes consumed by ARC buffers of type equal to 497 * ARC_BUFC_DATA. This is generally consumed by buffers backing 498 * on disk user data (e.g. plain file contents). 499 */ 500 kstat_named_t arcstat_data_size; 501 /* 502 * Number of bytes consumed by ARC buffers of type equal to 503 * ARC_BUFC_METADATA. This is generally consumed by buffers 504 * backing on disk data that is used for internal ZFS 505 * structures (e.g. ZAP, dnode, indirect blocks, etc). 506 */ 507 kstat_named_t arcstat_metadata_size; 508 /* 509 * Number of bytes consumed by various buffers and structures 510 * not actually backed with ARC buffers. This includes bonus 511 * buffers (allocated directly via zio_buf_* functions), 512 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t 513 * cache), and dnode_t structures (allocated via dnode_t cache). 514 */ 515 kstat_named_t arcstat_other_size; 516 /* 517 * Total number of bytes consumed by ARC buffers residing in the 518 * arc_anon state. This includes *all* buffers in the arc_anon 519 * state; e.g. data, metadata, evictable, and unevictable buffers 520 * are all included in this value. 521 */ 522 kstat_named_t arcstat_anon_size; 523 /* 524 * Number of bytes consumed by ARC buffers that meet the 525 * following criteria: backing buffers of type ARC_BUFC_DATA, 526 * residing in the arc_anon state, and are eligible for eviction 527 * (e.g. have no outstanding holds on the buffer). 528 */ 529 kstat_named_t arcstat_anon_evictable_data; 530 /* 531 * Number of bytes consumed by ARC buffers that meet the 532 * following criteria: backing buffers of type ARC_BUFC_METADATA, 533 * residing in the arc_anon state, and are eligible for eviction 534 * (e.g. have no outstanding holds on the buffer). 535 */ 536 kstat_named_t arcstat_anon_evictable_metadata; 537 /* 538 * Total number of bytes consumed by ARC buffers residing in the 539 * arc_mru state. This includes *all* buffers in the arc_mru 540 * state; e.g. data, metadata, evictable, and unevictable buffers 541 * are all included in this value. 542 */ 543 kstat_named_t arcstat_mru_size; 544 /* 545 * Number of bytes consumed by ARC buffers that meet the 546 * following criteria: backing buffers of type ARC_BUFC_DATA, 547 * residing in the arc_mru state, and are eligible for eviction 548 * (e.g. have no outstanding holds on the buffer). 549 */ 550 kstat_named_t arcstat_mru_evictable_data; 551 /* 552 * Number of bytes consumed by ARC buffers that meet the 553 * following criteria: backing buffers of type ARC_BUFC_METADATA, 554 * residing in the arc_mru state, and are eligible for eviction 555 * (e.g. have no outstanding holds on the buffer). 
	 */
	kstat_named_t arcstat_mru_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mru_ghost state. The key thing to note here
	 * is that this size doesn't actually indicate RAM consumption.
	 * The ghost lists only consist of headers and don't actually have
	 * ARC buffers linked off of these headers. Thus, *if* the headers
	 * had associated ARC buffers, these buffers *would have* consumed
	 * this number of bytes.
	 */
	kstat_named_t arcstat_mru_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_metadata;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_mfu state. This includes *all* buffers in the arc_mfu
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_mfu_size;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
	 * state.
	 */
	kstat_named_t arcstat_mfu_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_METADATA, and reside in the
	 * arc_mfu state.
	 */
	kstat_named_t arcstat_mfu_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mfu_ghost state. See the comment above
	 * arcstat_mru_ghost_size for more details.
	 */
	kstat_named_t arcstat_mfu_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
	 */
	kstat_named_t arcstat_mfu_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
615 */ 616 kstat_named_t arcstat_mfu_ghost_evictable_metadata; 617 kstat_named_t arcstat_l2_hits; 618 kstat_named_t arcstat_l2_misses; 619 kstat_named_t arcstat_l2_feeds; 620 kstat_named_t arcstat_l2_rw_clash; 621 kstat_named_t arcstat_l2_read_bytes; 622 kstat_named_t arcstat_l2_write_bytes; 623 kstat_named_t arcstat_l2_writes_sent; 624 kstat_named_t arcstat_l2_writes_done; 625 kstat_named_t arcstat_l2_writes_error; 626 kstat_named_t arcstat_l2_writes_lock_retry; 627 kstat_named_t arcstat_l2_evict_lock_retry; 628 kstat_named_t arcstat_l2_evict_reading; 629 kstat_named_t arcstat_l2_evict_l1cached; 630 kstat_named_t arcstat_l2_free_on_write; 631 kstat_named_t arcstat_l2_abort_lowmem; 632 kstat_named_t arcstat_l2_cksum_bad; 633 kstat_named_t arcstat_l2_io_error; 634 kstat_named_t arcstat_l2_lsize; 635 kstat_named_t arcstat_l2_psize; 636 kstat_named_t arcstat_l2_hdr_size; 637 kstat_named_t arcstat_memory_throttle_count; 638 kstat_named_t arcstat_meta_used; 639 kstat_named_t arcstat_meta_limit; 640 kstat_named_t arcstat_meta_max; 641 kstat_named_t arcstat_meta_min; 642 kstat_named_t arcstat_sync_wait_for_async; 643 kstat_named_t arcstat_demand_hit_predictive_prefetch; 644 } arc_stats_t; 645 646 static arc_stats_t arc_stats = { 647 { "hits", KSTAT_DATA_UINT64 }, 648 { "misses", KSTAT_DATA_UINT64 }, 649 { "demand_data_hits", KSTAT_DATA_UINT64 }, 650 { "demand_data_misses", KSTAT_DATA_UINT64 }, 651 { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 652 { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 653 { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 654 { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 655 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 656 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 657 { "mru_hits", KSTAT_DATA_UINT64 }, 658 { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 659 { "mfu_hits", KSTAT_DATA_UINT64 }, 660 { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 661 { "deleted", KSTAT_DATA_UINT64 }, 662 { "mutex_miss", KSTAT_DATA_UINT64 }, 663 { "evict_skip", KSTAT_DATA_UINT64 }, 664 { "evict_not_enough", KSTAT_DATA_UINT64 }, 665 { "evict_l2_cached", KSTAT_DATA_UINT64 }, 666 { "evict_l2_eligible", KSTAT_DATA_UINT64 }, 667 { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, 668 { "evict_l2_skip", KSTAT_DATA_UINT64 }, 669 { "hash_elements", KSTAT_DATA_UINT64 }, 670 { "hash_elements_max", KSTAT_DATA_UINT64 }, 671 { "hash_collisions", KSTAT_DATA_UINT64 }, 672 { "hash_chains", KSTAT_DATA_UINT64 }, 673 { "hash_chain_max", KSTAT_DATA_UINT64 }, 674 { "p", KSTAT_DATA_UINT64 }, 675 { "c", KSTAT_DATA_UINT64 }, 676 { "c_min", KSTAT_DATA_UINT64 }, 677 { "c_max", KSTAT_DATA_UINT64 }, 678 { "size", KSTAT_DATA_UINT64 }, 679 { "compressed_size", KSTAT_DATA_UINT64 }, 680 { "uncompressed_size", KSTAT_DATA_UINT64 }, 681 { "overhead_size", KSTAT_DATA_UINT64 }, 682 { "hdr_size", KSTAT_DATA_UINT64 }, 683 { "data_size", KSTAT_DATA_UINT64 }, 684 { "metadata_size", KSTAT_DATA_UINT64 }, 685 { "other_size", KSTAT_DATA_UINT64 }, 686 { "anon_size", KSTAT_DATA_UINT64 }, 687 { "anon_evictable_data", KSTAT_DATA_UINT64 }, 688 { "anon_evictable_metadata", KSTAT_DATA_UINT64 }, 689 { "mru_size", KSTAT_DATA_UINT64 }, 690 { "mru_evictable_data", KSTAT_DATA_UINT64 }, 691 { "mru_evictable_metadata", KSTAT_DATA_UINT64 }, 692 { "mru_ghost_size", KSTAT_DATA_UINT64 }, 693 { "mru_ghost_evictable_data", KSTAT_DATA_UINT64 }, 694 { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, 695 { "mfu_size", KSTAT_DATA_UINT64 }, 696 { "mfu_evictable_data", KSTAT_DATA_UINT64 }, 697 { "mfu_evictable_metadata", KSTAT_DATA_UINT64 }, 698 { "mfu_ghost_size", 
KSTAT_DATA_UINT64 }, 699 { "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 }, 700 { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, 701 { "l2_hits", KSTAT_DATA_UINT64 }, 702 { "l2_misses", KSTAT_DATA_UINT64 }, 703 { "l2_feeds", KSTAT_DATA_UINT64 }, 704 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 705 { "l2_read_bytes", KSTAT_DATA_UINT64 }, 706 { "l2_write_bytes", KSTAT_DATA_UINT64 }, 707 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 708 { "l2_writes_done", KSTAT_DATA_UINT64 }, 709 { "l2_writes_error", KSTAT_DATA_UINT64 }, 710 { "l2_writes_lock_retry", KSTAT_DATA_UINT64 }, 711 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 712 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 713 { "l2_evict_l1cached", KSTAT_DATA_UINT64 }, 714 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 715 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 716 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 717 { "l2_io_error", KSTAT_DATA_UINT64 }, 718 { "l2_size", KSTAT_DATA_UINT64 }, 719 { "l2_asize", KSTAT_DATA_UINT64 }, 720 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 721 { "memory_throttle_count", KSTAT_DATA_UINT64 }, 722 { "arc_meta_used", KSTAT_DATA_UINT64 }, 723 { "arc_meta_limit", KSTAT_DATA_UINT64 }, 724 { "arc_meta_max", KSTAT_DATA_UINT64 }, 725 { "arc_meta_min", KSTAT_DATA_UINT64 }, 726 { "sync_wait_for_async", KSTAT_DATA_UINT64 }, 727 { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 }, 728 }; 729 730 #define ARCSTAT(stat) (arc_stats.stat.value.ui64) 731 732 #define ARCSTAT_INCR(stat, val) \ 733 atomic_add_64(&arc_stats.stat.value.ui64, (val)) 734 735 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 736 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 737 738 #define ARCSTAT_MAX(stat, val) { \ 739 uint64_t m; \ 740 while ((val) > (m = arc_stats.stat.value.ui64) && \ 741 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 742 continue; \ 743 } 744 745 #define ARCSTAT_MAXSTAT(stat) \ 746 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 747 748 /* 749 * We define a macro to allow ARC hits/misses to be easily broken down by 750 * two separate conditions, giving a total of four different subtypes for 751 * each of hits and misses (so eight statistics total). 752 */ 753 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 754 if (cond1) { \ 755 if (cond2) { \ 756 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 757 } else { \ 758 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 759 } \ 760 } else { \ 761 if (cond2) { \ 762 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 763 } else { \ 764 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 765 } \ 766 } 767 768 kstat_t *arc_ksp; 769 static arc_state_t *arc_anon; 770 static arc_state_t *arc_mru; 771 static arc_state_t *arc_mru_ghost; 772 static arc_state_t *arc_mfu; 773 static arc_state_t *arc_mfu_ghost; 774 static arc_state_t *arc_l2c_only; 775 776 /* 777 * There are several ARC variables that are critical to export as kstats -- 778 * but we don't want to have to grovel around in the kstat whenever we wish to 779 * manipulate them. For these variables, we therefore define them to be in 780 * terms of the statistic variable. This assures that we are not introducing 781 * the possibility of inconsistency by having shadow copies of the variables, 782 * while still allowing the code to be readable. 
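 *
 * For example (an illustrative fragment, not a quote from this file), an
 * ordinary-looking assignment through one of the macros below, such as
 *
 *	arc_c = MIN(arc_c + bytes, arc_c_max);
 *
 * expands to an update of arc_stats.arcstat_c.value.ui64, so kstat
 * consumers always see the current value with no extra bookkeeping.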
783 */ 784 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 785 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 786 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 787 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 788 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 789 #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */ 790 #define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */ 791 #define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */ 792 #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */ 793 794 /* compressed size of entire arc */ 795 #define arc_compressed_size ARCSTAT(arcstat_compressed_size) 796 /* uncompressed size of entire arc */ 797 #define arc_uncompressed_size ARCSTAT(arcstat_uncompressed_size) 798 /* number of bytes in the arc from arc_buf_t's */ 799 #define arc_overhead_size ARCSTAT(arcstat_overhead_size) 800 801 static int arc_no_grow; /* Don't try to grow cache size */ 802 static uint64_t arc_tempreserve; 803 static uint64_t arc_loaned_bytes; 804 805 typedef struct arc_callback arc_callback_t; 806 807 struct arc_callback { 808 void *acb_private; 809 arc_done_func_t *acb_done; 810 arc_buf_t *acb_buf; 811 boolean_t acb_compressed; 812 zio_t *acb_zio_dummy; 813 arc_callback_t *acb_next; 814 }; 815 816 typedef struct arc_write_callback arc_write_callback_t; 817 818 struct arc_write_callback { 819 void *awcb_private; 820 arc_done_func_t *awcb_ready; 821 arc_done_func_t *awcb_children_ready; 822 arc_done_func_t *awcb_physdone; 823 arc_done_func_t *awcb_done; 824 arc_buf_t *awcb_buf; 825 }; 826 827 /* 828 * ARC buffers are separated into multiple structs as a memory saving measure: 829 * - Common fields struct, always defined, and embedded within it: 830 * - L2-only fields, always allocated but undefined when not in L2ARC 831 * - L1-only fields, only allocated when in L1ARC 832 * 833 * Buffer in L1 Buffer only in L2 834 * +------------------------+ +------------------------+ 835 * | arc_buf_hdr_t | | arc_buf_hdr_t | 836 * | | | | 837 * | | | | 838 * | | | | 839 * +------------------------+ +------------------------+ 840 * | l2arc_buf_hdr_t | | l2arc_buf_hdr_t | 841 * | (undefined if L1-only) | | | 842 * +------------------------+ +------------------------+ 843 * | l1arc_buf_hdr_t | 844 * | | 845 * | | 846 * | | 847 * | | 848 * +------------------------+ 849 * 850 * Because it's possible for the L2ARC to become extremely large, we can wind 851 * up eating a lot of memory in L2ARC buffer headers, so the size of a header 852 * is minimized by only allocating the fields necessary for an L1-cached buffer 853 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and 854 * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple 855 * words in pointers. arc_hdr_realloc() is used to switch a header between 856 * these two allocation states. 857 */ 858 typedef struct l1arc_buf_hdr { 859 kmutex_t b_freeze_lock; 860 zio_cksum_t *b_freeze_cksum; 861 #ifdef ZFS_DEBUG 862 /* 863 * Used for debugging with kmem_flags - by allocating and freeing 864 * b_thawed when the buffer is thawed, we get a record of the stack 865 * trace that thawed it. 
866 */ 867 void *b_thawed; 868 #endif 869 870 arc_buf_t *b_buf; 871 uint32_t b_bufcnt; 872 /* for waiting on writes to complete */ 873 kcondvar_t b_cv; 874 uint8_t b_byteswap; 875 876 /* protected by arc state mutex */ 877 arc_state_t *b_state; 878 multilist_node_t b_arc_node; 879 880 /* updated atomically */ 881 clock_t b_arc_access; 882 883 /* self protecting */ 884 refcount_t b_refcnt; 885 886 arc_callback_t *b_acb; 887 abd_t *b_pabd; 888 } l1arc_buf_hdr_t; 889 890 typedef struct l2arc_dev l2arc_dev_t; 891 892 typedef struct l2arc_buf_hdr { 893 /* protected by arc_buf_hdr mutex */ 894 l2arc_dev_t *b_dev; /* L2ARC device */ 895 uint64_t b_daddr; /* disk address, offset byte */ 896 897 list_node_t b_l2node; 898 } l2arc_buf_hdr_t; 899 900 struct arc_buf_hdr { 901 /* protected by hash lock */ 902 dva_t b_dva; 903 uint64_t b_birth; 904 905 arc_buf_contents_t b_type; 906 arc_buf_hdr_t *b_hash_next; 907 arc_flags_t b_flags; 908 909 /* 910 * This field stores the size of the data buffer after 911 * compression, and is set in the arc's zio completion handlers. 912 * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes). 913 * 914 * While the block pointers can store up to 32MB in their psize 915 * field, we can only store up to 32MB minus 512B. This is due 916 * to the bp using a bias of 1, whereas we use a bias of 0 (i.e. 917 * a field of zeros represents 512B in the bp). We can't use a 918 * bias of 1 since we need to reserve a psize of zero, here, to 919 * represent holes and embedded blocks. 920 * 921 * This isn't a problem in practice, since the maximum size of a 922 * buffer is limited to 16MB, so we never need to store 32MB in 923 * this field. Even in the upstream illumos code base, the 924 * maximum size of a buffer is limited to 16MB. 925 */ 926 uint16_t b_psize; 927 928 /* 929 * This field stores the size of the data buffer before 930 * compression, and cannot change once set. It is in units 931 * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes) 932 */ 933 uint16_t b_lsize; /* immutable */ 934 uint64_t b_spa; /* immutable */ 935 936 /* L2ARC fields. Undefined when not in L2ARC. */ 937 l2arc_buf_hdr_t b_l2hdr; 938 /* L1ARC fields. 
Undefined when in l2arc_only state */ 939 l1arc_buf_hdr_t b_l1hdr; 940 }; 941 942 #define GHOST_STATE(state) \ 943 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 944 (state) == arc_l2c_only) 945 946 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE) 947 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) 948 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR) 949 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH) 950 #define HDR_COMPRESSION_ENABLED(hdr) \ 951 ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC) 952 953 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE) 954 #define HDR_L2_READING(hdr) \ 955 (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \ 956 ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)) 957 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING) 958 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED) 959 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD) 960 #define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA) 961 962 #define HDR_ISTYPE_METADATA(hdr) \ 963 ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA) 964 #define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr)) 965 966 #define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR) 967 #define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR) 968 969 /* For storing compression mode in b_flags */ 970 #define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1) 971 972 #define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \ 973 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS)) 974 #define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \ 975 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp)); 976 977 #define ARC_BUF_LAST(buf) ((buf)->b_next == NULL) 978 #define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED) 979 #define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED) 980 981 /* 982 * Other sizes 983 */ 984 985 #define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 986 #define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr)) 987 988 /* 989 * Hash table routines 990 */ 991 992 #define HT_LOCK_PAD 64 993 994 struct ht_lock { 995 kmutex_t ht_lock; 996 #ifdef _KERNEL 997 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 998 #endif 999 }; 1000 1001 #define BUF_LOCKS 256 1002 typedef struct buf_hash_table { 1003 uint64_t ht_mask; 1004 arc_buf_hdr_t **ht_table; 1005 struct ht_lock ht_locks[BUF_LOCKS]; 1006 } buf_hash_table_t; 1007 1008 static buf_hash_table_t buf_hash_table; 1009 1010 #define BUF_HASH_INDEX(spa, dva, birth) \ 1011 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 1012 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 1013 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 1014 #define HDR_LOCK(hdr) \ 1015 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth))) 1016 1017 uint64_t zfs_crc64_table[256]; 1018 1019 /* 1020 * Level 2 ARC 1021 */ 1022 1023 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 1024 #define L2ARC_HEADROOM 2 /* num of writes */ 1025 /* 1026 * If we discover during ARC scan any buffers to be compressed, we boost 1027 * our headroom for the next scanning cycle by this percentage multiple. 
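 *
 * Illustrative arithmetic only (defaults assumed): with l2arc_write_max at
 * its 8MB default and L2ARC_HEADROOM of 2, a feed cycle scans roughly
 * 2 * 8MB = 16MB ahead of the write hand; boosting by this 200 percent
 * multiple stretches that to (16MB * 200) / 100 = 32MB once compressed
 * buffers are discovered.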
1028 */ 1029 #define L2ARC_HEADROOM_BOOST 200 1030 #define L2ARC_FEED_SECS 1 /* caching interval secs */ 1031 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ 1032 1033 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 1034 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 1035 1036 /* L2ARC Performance Tunables */ 1037 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 1038 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 1039 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 1040 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST; 1041 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 1042 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ 1043 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 1044 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ 1045 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ 1046 1047 /* 1048 * L2ARC Internals 1049 */ 1050 struct l2arc_dev { 1051 vdev_t *l2ad_vdev; /* vdev */ 1052 spa_t *l2ad_spa; /* spa */ 1053 uint64_t l2ad_hand; /* next write location */ 1054 uint64_t l2ad_start; /* first addr on device */ 1055 uint64_t l2ad_end; /* last addr on device */ 1056 boolean_t l2ad_first; /* first sweep through */ 1057 boolean_t l2ad_writing; /* currently writing */ 1058 kmutex_t l2ad_mtx; /* lock for buffer list */ 1059 list_t l2ad_buflist; /* buffer list */ 1060 list_node_t l2ad_node; /* device list node */ 1061 refcount_t l2ad_alloc; /* allocated bytes */ 1062 }; 1063 1064 static list_t L2ARC_dev_list; /* device list */ 1065 static list_t *l2arc_dev_list; /* device list pointer */ 1066 static kmutex_t l2arc_dev_mtx; /* device list mutex */ 1067 static l2arc_dev_t *l2arc_dev_last; /* last device used */ 1068 static list_t L2ARC_free_on_write; /* free after write buf list */ 1069 static list_t *l2arc_free_on_write; /* free after write list ptr */ 1070 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 1071 static uint64_t l2arc_ndev; /* number of devices */ 1072 1073 typedef struct l2arc_read_callback { 1074 arc_buf_hdr_t *l2rcb_hdr; /* read header */ 1075 blkptr_t l2rcb_bp; /* original blkptr */ 1076 zbookmark_phys_t l2rcb_zb; /* original bookmark */ 1077 int l2rcb_flags; /* original flags */ 1078 abd_t *l2rcb_abd; /* temporary buffer */ 1079 } l2arc_read_callback_t; 1080 1081 typedef struct l2arc_write_callback { 1082 l2arc_dev_t *l2wcb_dev; /* device info */ 1083 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 1084 } l2arc_write_callback_t; 1085 1086 typedef struct l2arc_data_free { 1087 /* protected by l2arc_free_on_write_mtx */ 1088 abd_t *l2df_abd; 1089 size_t l2df_size; 1090 arc_buf_contents_t l2df_type; 1091 list_node_t l2df_list_node; 1092 } l2arc_data_free_t; 1093 1094 static kmutex_t l2arc_feed_thr_lock; 1095 static kcondvar_t l2arc_feed_thr_cv; 1096 static uint8_t l2arc_thread_exit; 1097 1098 static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *); 1099 static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *); 1100 static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *); 1101 static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *); 1102 static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *); 1103 static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag); 1104 static void arc_hdr_free_pabd(arc_buf_hdr_t *); 1105 static void arc_hdr_alloc_pabd(arc_buf_hdr_t *); 1106 static void 
arc_access(arc_buf_hdr_t *, kmutex_t *); 1107 static boolean_t arc_is_overflowing(); 1108 static void arc_buf_watch(arc_buf_t *); 1109 1110 static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *); 1111 static uint32_t arc_bufc_to_flags(arc_buf_contents_t); 1112 static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags); 1113 static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags); 1114 1115 static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *); 1116 static void l2arc_read_done(zio_t *); 1117 1118 static uint64_t 1119 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) 1120 { 1121 uint8_t *vdva = (uint8_t *)dva; 1122 uint64_t crc = -1ULL; 1123 int i; 1124 1125 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 1126 1127 for (i = 0; i < sizeof (dva_t); i++) 1128 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 1129 1130 crc ^= (spa>>8) ^ birth; 1131 1132 return (crc); 1133 } 1134 1135 #define HDR_EMPTY(hdr) \ 1136 ((hdr)->b_dva.dva_word[0] == 0 && \ 1137 (hdr)->b_dva.dva_word[1] == 0) 1138 1139 #define HDR_EQUAL(spa, dva, birth, hdr) \ 1140 ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 1141 ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 1142 ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa) 1143 1144 static void 1145 buf_discard_identity(arc_buf_hdr_t *hdr) 1146 { 1147 hdr->b_dva.dva_word[0] = 0; 1148 hdr->b_dva.dva_word[1] = 0; 1149 hdr->b_birth = 0; 1150 } 1151 1152 static arc_buf_hdr_t * 1153 buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp) 1154 { 1155 const dva_t *dva = BP_IDENTITY(bp); 1156 uint64_t birth = BP_PHYSICAL_BIRTH(bp); 1157 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 1158 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 1159 arc_buf_hdr_t *hdr; 1160 1161 mutex_enter(hash_lock); 1162 for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL; 1163 hdr = hdr->b_hash_next) { 1164 if (HDR_EQUAL(spa, dva, birth, hdr)) { 1165 *lockp = hash_lock; 1166 return (hdr); 1167 } 1168 } 1169 mutex_exit(hash_lock); 1170 *lockp = NULL; 1171 return (NULL); 1172 } 1173 1174 /* 1175 * Insert an entry into the hash table. If there is already an element 1176 * equal to elem in the hash table, then the already existing element 1177 * will be returned and the new element will not be inserted. 1178 * Otherwise returns NULL. 1179 * If lockp == NULL, the caller is assumed to already hold the hash lock. 
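 *
 * A typical caller (sketched here for illustration, not quoted from this
 * file) checks the return value to detect a race with another inserter:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... somebody beat us to it; use exists, discard hdr ...
 *	}
 *	... do work under hash_lock ...
 *	mutex_exit(hash_lock);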
1180 */ 1181 static arc_buf_hdr_t * 1182 buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp) 1183 { 1184 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 1185 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 1186 arc_buf_hdr_t *fhdr; 1187 uint32_t i; 1188 1189 ASSERT(!DVA_IS_EMPTY(&hdr->b_dva)); 1190 ASSERT(hdr->b_birth != 0); 1191 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1192 1193 if (lockp != NULL) { 1194 *lockp = hash_lock; 1195 mutex_enter(hash_lock); 1196 } else { 1197 ASSERT(MUTEX_HELD(hash_lock)); 1198 } 1199 1200 for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL; 1201 fhdr = fhdr->b_hash_next, i++) { 1202 if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr)) 1203 return (fhdr); 1204 } 1205 1206 hdr->b_hash_next = buf_hash_table.ht_table[idx]; 1207 buf_hash_table.ht_table[idx] = hdr; 1208 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 1209 1210 /* collect some hash table performance data */ 1211 if (i > 0) { 1212 ARCSTAT_BUMP(arcstat_hash_collisions); 1213 if (i == 1) 1214 ARCSTAT_BUMP(arcstat_hash_chains); 1215 1216 ARCSTAT_MAX(arcstat_hash_chain_max, i); 1217 } 1218 1219 ARCSTAT_BUMP(arcstat_hash_elements); 1220 ARCSTAT_MAXSTAT(arcstat_hash_elements); 1221 1222 return (NULL); 1223 } 1224 1225 static void 1226 buf_hash_remove(arc_buf_hdr_t *hdr) 1227 { 1228 arc_buf_hdr_t *fhdr, **hdrp; 1229 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 1230 1231 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 1232 ASSERT(HDR_IN_HASH_TABLE(hdr)); 1233 1234 hdrp = &buf_hash_table.ht_table[idx]; 1235 while ((fhdr = *hdrp) != hdr) { 1236 ASSERT3P(fhdr, !=, NULL); 1237 hdrp = &fhdr->b_hash_next; 1238 } 1239 *hdrp = hdr->b_hash_next; 1240 hdr->b_hash_next = NULL; 1241 arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 1242 1243 /* collect some hash table performance data */ 1244 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 1245 1246 if (buf_hash_table.ht_table[idx] && 1247 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 1248 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 1249 } 1250 1251 /* 1252 * Global data structures and functions for the buf kmem cache. 1253 */ 1254 static kmem_cache_t *hdr_full_cache; 1255 static kmem_cache_t *hdr_l2only_cache; 1256 static kmem_cache_t *buf_cache; 1257 1258 static void 1259 buf_fini(void) 1260 { 1261 int i; 1262 1263 kmem_free(buf_hash_table.ht_table, 1264 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 1265 for (i = 0; i < BUF_LOCKS; i++) 1266 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 1267 kmem_cache_destroy(hdr_full_cache); 1268 kmem_cache_destroy(hdr_l2only_cache); 1269 kmem_cache_destroy(buf_cache); 1270 } 1271 1272 /* 1273 * Constructor callback - called when the cache is empty 1274 * and a new buf is requested. 
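 *
 * These constructors, and the matching destructors further below, are
 * handed to kmem_cache_create() in buf_init(); e.g. hdr_full_cons and
 * hdr_full_dest back the "arc_buf_hdr_t_full" cache.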
1275 */ 1276 /* ARGSUSED */ 1277 static int 1278 hdr_full_cons(void *vbuf, void *unused, int kmflag) 1279 { 1280 arc_buf_hdr_t *hdr = vbuf; 1281 1282 bzero(hdr, HDR_FULL_SIZE); 1283 cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL); 1284 refcount_create(&hdr->b_l1hdr.b_refcnt); 1285 mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 1286 multilist_link_init(&hdr->b_l1hdr.b_arc_node); 1287 arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS); 1288 1289 return (0); 1290 } 1291 1292 /* ARGSUSED */ 1293 static int 1294 hdr_l2only_cons(void *vbuf, void *unused, int kmflag) 1295 { 1296 arc_buf_hdr_t *hdr = vbuf; 1297 1298 bzero(hdr, HDR_L2ONLY_SIZE); 1299 arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); 1300 1301 return (0); 1302 } 1303 1304 /* ARGSUSED */ 1305 static int 1306 buf_cons(void *vbuf, void *unused, int kmflag) 1307 { 1308 arc_buf_t *buf = vbuf; 1309 1310 bzero(buf, sizeof (arc_buf_t)); 1311 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); 1312 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); 1313 1314 return (0); 1315 } 1316 1317 /* 1318 * Destructor callback - called when a cached buf is 1319 * no longer required. 1320 */ 1321 /* ARGSUSED */ 1322 static void 1323 hdr_full_dest(void *vbuf, void *unused) 1324 { 1325 arc_buf_hdr_t *hdr = vbuf; 1326 1327 ASSERT(HDR_EMPTY(hdr)); 1328 cv_destroy(&hdr->b_l1hdr.b_cv); 1329 refcount_destroy(&hdr->b_l1hdr.b_refcnt); 1330 mutex_destroy(&hdr->b_l1hdr.b_freeze_lock); 1331 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 1332 arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS); 1333 } 1334 1335 /* ARGSUSED */ 1336 static void 1337 hdr_l2only_dest(void *vbuf, void *unused) 1338 { 1339 arc_buf_hdr_t *hdr = vbuf; 1340 1341 ASSERT(HDR_EMPTY(hdr)); 1342 arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); 1343 } 1344 1345 /* ARGSUSED */ 1346 static void 1347 buf_dest(void *vbuf, void *unused) 1348 { 1349 arc_buf_t *buf = vbuf; 1350 1351 mutex_destroy(&buf->b_evict_lock); 1352 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 1353 } 1354 1355 /* 1356 * Reclaim callback -- invoked when memory is low. 1357 */ 1358 /* ARGSUSED */ 1359 static void 1360 hdr_recl(void *unused) 1361 { 1362 dprintf("hdr_recl called\n"); 1363 /* 1364 * umem calls the reclaim func when we destroy the buf cache, 1365 * which is after we do arc_fini(). 1366 */ 1367 if (!arc_dead) 1368 cv_signal(&arc_reclaim_thread_cv); 1369 } 1370 1371 static void 1372 buf_init(void) 1373 { 1374 uint64_t *ct; 1375 uint64_t hsize = 1ULL << 12; 1376 int i, j; 1377 1378 /* 1379 * The hash table is big enough to fill all of physical memory 1380 * with an average block size of zfs_arc_average_blocksize (default 8K). 1381 * By default, the table will take up 1382 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers). 
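	 *
	 * Worked example (numbers for illustration only): with 16GB of
	 * physical memory and the default 8K average block size, the loop
	 * below stops at hsize = 2^21 (2,097,152 slots), so the table uses
	 * 2^21 * 8 bytes = 16MB of pointers, matching the 1MB-per-GB rule
	 * of thumb above.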
1383 */ 1384 while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE) 1385 hsize <<= 1; 1386 retry: 1387 buf_hash_table.ht_mask = hsize - 1; 1388 buf_hash_table.ht_table = 1389 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 1390 if (buf_hash_table.ht_table == NULL) { 1391 ASSERT(hsize > (1ULL << 8)); 1392 hsize >>= 1; 1393 goto retry; 1394 } 1395 1396 hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE, 1397 0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0); 1398 hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only", 1399 HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl, 1400 NULL, NULL, 0); 1401 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 1402 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 1403 1404 for (i = 0; i < 256; i++) 1405 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 1406 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 1407 1408 for (i = 0; i < BUF_LOCKS; i++) { 1409 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 1410 NULL, MUTEX_DEFAULT, NULL); 1411 } 1412 } 1413 1414 /* 1415 * This is the size that the buf occupies in memory. If the buf is compressed, 1416 * it will correspond to the compressed size. You should use this method of 1417 * getting the buf size unless you explicitly need the logical size. 1418 */ 1419 int32_t 1420 arc_buf_size(arc_buf_t *buf) 1421 { 1422 return (ARC_BUF_COMPRESSED(buf) ? 1423 HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr)); 1424 } 1425 1426 int32_t 1427 arc_buf_lsize(arc_buf_t *buf) 1428 { 1429 return (HDR_GET_LSIZE(buf->b_hdr)); 1430 } 1431 1432 enum zio_compress 1433 arc_get_compression(arc_buf_t *buf) 1434 { 1435 return (ARC_BUF_COMPRESSED(buf) ? 1436 HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF); 1437 } 1438 1439 #define ARC_MINTIME (hz>>4) /* 62 ms */ 1440 1441 static inline boolean_t 1442 arc_buf_is_shared(arc_buf_t *buf) 1443 { 1444 boolean_t shared = (buf->b_data != NULL && 1445 buf->b_hdr->b_l1hdr.b_pabd != NULL && 1446 abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) && 1447 buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd)); 1448 IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr)); 1449 IMPLY(shared, ARC_BUF_SHARED(buf)); 1450 IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf)); 1451 1452 /* 1453 * It would be nice to assert arc_can_share() too, but the "hdr isn't 1454 * already being shared" requirement prevents us from doing that. 1455 */ 1456 1457 return (shared); 1458 } 1459 1460 /* 1461 * Free the checksum associated with this header. If there is no checksum, this 1462 * is a no-op. 1463 */ 1464 static inline void 1465 arc_cksum_free(arc_buf_hdr_t *hdr) 1466 { 1467 ASSERT(HDR_HAS_L1HDR(hdr)); 1468 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1469 if (hdr->b_l1hdr.b_freeze_cksum != NULL) { 1470 kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t)); 1471 hdr->b_l1hdr.b_freeze_cksum = NULL; 1472 } 1473 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1474 } 1475 1476 /* 1477 * Return true iff at least one of the bufs on hdr is not compressed. 1478 */ 1479 static boolean_t 1480 arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr) 1481 { 1482 for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) { 1483 if (!ARC_BUF_COMPRESSED(b)) { 1484 return (B_TRUE); 1485 } 1486 } 1487 return (B_FALSE); 1488 } 1489 1490 /* 1491 * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data 1492 * matches the checksum that is stored in the hdr. If there is no checksum, 1493 * or if the buf is compressed, this is a no-op. 
1494 */ 1495 static void 1496 arc_cksum_verify(arc_buf_t *buf) 1497 { 1498 arc_buf_hdr_t *hdr = buf->b_hdr; 1499 zio_cksum_t zc; 1500 1501 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1502 return; 1503 1504 if (ARC_BUF_COMPRESSED(buf)) { 1505 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || 1506 arc_hdr_has_uncompressed_buf(hdr)); 1507 return; 1508 } 1509 1510 ASSERT(HDR_HAS_L1HDR(hdr)); 1511 1512 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1513 if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) { 1514 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1515 return; 1516 } 1517 1518 fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc); 1519 if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc)) 1520 panic("buffer modified while frozen!"); 1521 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1522 } 1523 1524 static boolean_t 1525 arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio) 1526 { 1527 enum zio_compress compress = BP_GET_COMPRESS(zio->io_bp); 1528 boolean_t valid_cksum; 1529 1530 ASSERT(!BP_IS_EMBEDDED(zio->io_bp)); 1531 VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr)); 1532 1533 /* 1534 * We rely on the blkptr's checksum to determine if the block 1535 * is valid or not. When compressed arc is enabled, the l2arc 1536 * writes the block to the l2arc just as it appears in the pool. 1537 * This allows us to use the blkptr's checksum to validate the 1538 * data that we just read off of the l2arc without having to store 1539 * a separate checksum in the arc_buf_hdr_t. However, if compressed 1540 * arc is disabled, then the data written to the l2arc is always 1541 * uncompressed and won't match the block as it exists in the main 1542 * pool. When this is the case, we must first compress it if it is 1543 * compressed on the main pool before we can validate the checksum. 1544 */ 1545 if (!HDR_COMPRESSION_ENABLED(hdr) && compress != ZIO_COMPRESS_OFF) { 1546 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 1547 uint64_t lsize = HDR_GET_LSIZE(hdr); 1548 uint64_t csize; 1549 1550 abd_t *cdata = abd_alloc_linear(HDR_GET_PSIZE(hdr), B_TRUE); 1551 csize = zio_compress_data(compress, zio->io_abd, 1552 abd_to_buf(cdata), lsize); 1553 1554 ASSERT3U(csize, <=, HDR_GET_PSIZE(hdr)); 1555 if (csize < HDR_GET_PSIZE(hdr)) { 1556 /* 1557 * Compressed blocks are always a multiple of the 1558 * smallest ashift in the pool. Ideally, we would 1559 * like to round up the csize to the next 1560 * spa_min_ashift but that value may have changed 1561 * since the block was last written. Instead, 1562 * we rely on the fact that the hdr's psize 1563 * was set to the psize of the block when it was 1564 * last written. We set the csize to that value 1565 * and zero out any part that should not contain 1566 * data. 1567 */ 1568 abd_zero_off(cdata, csize, HDR_GET_PSIZE(hdr) - csize); 1569 csize = HDR_GET_PSIZE(hdr); 1570 } 1571 zio_push_transform(zio, cdata, csize, HDR_GET_PSIZE(hdr), NULL); 1572 } 1573 1574 /* 1575 * Block pointers always store the checksum for the logical data. 1576 * If the block pointer has the gang bit set, then the checksum 1577 * it represents is for the reconstituted data and not for an 1578 * individual gang member. The zio pipeline, however, must be able to 1579 * determine the checksum of each of the gang constituents so it 1580 * treats the checksum comparison differently than what we need 1581 * for l2arc blocks. This prevents us from using the 1582 * zio_checksum_error() interface directly. 
Instead we must call the 1583 * zio_checksum_error_impl() so that we can ensure the checksum is 1584 * generated using the correct checksum algorithm and accounts for the 1585 * logical I/O size and not just a gang fragment. 1586 */ 1587 valid_cksum = (zio_checksum_error_impl(zio->io_spa, zio->io_bp, 1588 BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size, 1589 zio->io_offset, NULL) == 0); 1590 zio_pop_transforms(zio); 1591 return (valid_cksum); 1592 } 1593 1594 /* 1595 * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a 1596 * checksum and attaches it to the buf's hdr so that we can ensure that the buf 1597 * isn't modified later on. If buf is compressed or there is already a checksum 1598 * on the hdr, this is a no-op (we only checksum uncompressed bufs). 1599 */ 1600 static void 1601 arc_cksum_compute(arc_buf_t *buf) 1602 { 1603 arc_buf_hdr_t *hdr = buf->b_hdr; 1604 1605 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1606 return; 1607 1608 ASSERT(HDR_HAS_L1HDR(hdr)); 1609 1610 mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1611 if (hdr->b_l1hdr.b_freeze_cksum != NULL) { 1612 ASSERT(arc_hdr_has_uncompressed_buf(hdr)); 1613 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1614 return; 1615 } else if (ARC_BUF_COMPRESSED(buf)) { 1616 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1617 return; 1618 } 1619 1620 ASSERT(!ARC_BUF_COMPRESSED(buf)); 1621 hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), 1622 KM_SLEEP); 1623 fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, 1624 hdr->b_l1hdr.b_freeze_cksum); 1625 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1626 arc_buf_watch(buf); 1627 } 1628 1629 #ifndef _KERNEL 1630 typedef struct procctl { 1631 long cmd; 1632 prwatch_t prwatch; 1633 } procctl_t; 1634 #endif 1635 1636 /* ARGSUSED */ 1637 static void 1638 arc_buf_unwatch(arc_buf_t *buf) 1639 { 1640 #ifndef _KERNEL 1641 if (arc_watch) { 1642 int result; 1643 procctl_t ctl; 1644 ctl.cmd = PCWATCH; 1645 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1646 ctl.prwatch.pr_size = 0; 1647 ctl.prwatch.pr_wflags = 0; 1648 result = write(arc_procfd, &ctl, sizeof (ctl)); 1649 ASSERT3U(result, ==, sizeof (ctl)); 1650 } 1651 #endif 1652 } 1653 1654 /* ARGSUSED */ 1655 static void 1656 arc_buf_watch(arc_buf_t *buf) 1657 { 1658 #ifndef _KERNEL 1659 if (arc_watch) { 1660 int result; 1661 procctl_t ctl; 1662 ctl.cmd = PCWATCH; 1663 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1664 ctl.prwatch.pr_size = arc_buf_size(buf); 1665 ctl.prwatch.pr_wflags = WA_WRITE; 1666 result = write(arc_procfd, &ctl, sizeof (ctl)); 1667 ASSERT3U(result, ==, sizeof (ctl)); 1668 } 1669 #endif 1670 } 1671 1672 static arc_buf_contents_t 1673 arc_buf_type(arc_buf_hdr_t *hdr) 1674 { 1675 arc_buf_contents_t type; 1676 if (HDR_ISTYPE_METADATA(hdr)) { 1677 type = ARC_BUFC_METADATA; 1678 } else { 1679 type = ARC_BUFC_DATA; 1680 } 1681 VERIFY3U(hdr->b_type, ==, type); 1682 return (type); 1683 } 1684 1685 boolean_t 1686 arc_is_metadata(arc_buf_t *buf) 1687 { 1688 return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0); 1689 } 1690 1691 static uint32_t 1692 arc_bufc_to_flags(arc_buf_contents_t type) 1693 { 1694 switch (type) { 1695 case ARC_BUFC_DATA: 1696 /* metadata field is 0 if buffer contains normal data */ 1697 return (0); 1698 case ARC_BUFC_METADATA: 1699 return (ARC_FLAG_BUFC_METADATA); 1700 default: 1701 break; 1702 } 1703 panic("undefined ARC buffer type!"); 1704 return ((uint32_t)-1); 1705 } 1706 1707 void 1708 arc_buf_thaw(arc_buf_t *buf) 1709 { 1710 arc_buf_hdr_t *hdr = buf->b_hdr; 1711 1712 ASSERT3P(hdr->b_l1hdr.b_state, 
==, arc_anon); 1713 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1714 1715 arc_cksum_verify(buf); 1716 1717 /* 1718 * Compressed buffers do not manipulate the b_freeze_cksum or 1719 * allocate b_thawed. 1720 */ 1721 if (ARC_BUF_COMPRESSED(buf)) { 1722 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || 1723 arc_hdr_has_uncompressed_buf(hdr)); 1724 return; 1725 } 1726 1727 ASSERT(HDR_HAS_L1HDR(hdr)); 1728 arc_cksum_free(hdr); 1729 1730 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1731 #ifdef ZFS_DEBUG 1732 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1733 if (hdr->b_l1hdr.b_thawed != NULL) 1734 kmem_free(hdr->b_l1hdr.b_thawed, 1); 1735 hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP); 1736 } 1737 #endif 1738 1739 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1740 1741 arc_buf_unwatch(buf); 1742 } 1743 1744 void 1745 arc_buf_freeze(arc_buf_t *buf) 1746 { 1747 arc_buf_hdr_t *hdr = buf->b_hdr; 1748 kmutex_t *hash_lock; 1749 1750 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1751 return; 1752 1753 if (ARC_BUF_COMPRESSED(buf)) { 1754 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || 1755 arc_hdr_has_uncompressed_buf(hdr)); 1756 return; 1757 } 1758 1759 hash_lock = HDR_LOCK(hdr); 1760 mutex_enter(hash_lock); 1761 1762 ASSERT(HDR_HAS_L1HDR(hdr)); 1763 ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL || 1764 hdr->b_l1hdr.b_state == arc_anon); 1765 arc_cksum_compute(buf); 1766 mutex_exit(hash_lock); 1767 } 1768 1769 /* 1770 * The arc_buf_hdr_t's b_flags should never be modified directly. Instead, 1771 * the following functions should be used to ensure that the flags are 1772 * updated in a thread-safe way. When manipulating the flags either 1773 * the hash_lock must be held or the hdr must be undiscoverable. This 1774 * ensures that we're not racing with any other threads when updating 1775 * the flags. 1776 */ 1777 static inline void 1778 arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) 1779 { 1780 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 1781 hdr->b_flags |= flags; 1782 } 1783 1784 static inline void 1785 arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) 1786 { 1787 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 1788 hdr->b_flags &= ~flags; 1789 } 1790 1791 /* 1792 * Setting the compression bits in the arc_buf_hdr_t's b_flags is 1793 * done in a special way since we have to clear and set bits 1794 * at the same time. Consumers that wish to set the compression bits 1795 * must use this function to ensure that the flags are updated in 1796 * thread-safe manner. 1797 */ 1798 static void 1799 arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp) 1800 { 1801 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 1802 1803 /* 1804 * Holes and embedded blocks will always have a psize = 0 so 1805 * we ignore the compression of the blkptr and set the 1806 * arc_buf_hdr_t's compression to ZIO_COMPRESS_OFF. 1807 * Holes and embedded blocks remain anonymous so we don't 1808 * want to uncompress them. Mark them as uncompressed. 
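 *
 * For example, a hole or embedded block pointer always has a psize of
 * zero, so even with compressed ARC enabled the code below leaves the
 * header with HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF and the
 * ARC_FLAG_COMPRESSED_ARC flag clear, exactly as if compressed ARC had
 * been disabled for that header.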
1809 */ 1810 if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) { 1811 arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC); 1812 HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF); 1813 ASSERT(!HDR_COMPRESSION_ENABLED(hdr)); 1814 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 1815 } else { 1816 arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC); 1817 HDR_SET_COMPRESS(hdr, cmp); 1818 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp); 1819 ASSERT(HDR_COMPRESSION_ENABLED(hdr)); 1820 } 1821 } 1822 1823 /* 1824 * Looks for another buf on the same hdr which has the data decompressed, copies 1825 * from it, and returns true. If no such buf exists, returns false. 1826 */ 1827 static boolean_t 1828 arc_buf_try_copy_decompressed_data(arc_buf_t *buf) 1829 { 1830 arc_buf_hdr_t *hdr = buf->b_hdr; 1831 boolean_t copied = B_FALSE; 1832 1833 ASSERT(HDR_HAS_L1HDR(hdr)); 1834 ASSERT3P(buf->b_data, !=, NULL); 1835 ASSERT(!ARC_BUF_COMPRESSED(buf)); 1836 1837 for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL; 1838 from = from->b_next) { 1839 /* can't use our own data buffer */ 1840 if (from == buf) { 1841 continue; 1842 } 1843 1844 if (!ARC_BUF_COMPRESSED(from)) { 1845 bcopy(from->b_data, buf->b_data, arc_buf_size(buf)); 1846 copied = B_TRUE; 1847 break; 1848 } 1849 } 1850 1851 /* 1852 * There were no decompressed bufs, so there should not be a 1853 * checksum on the hdr either. 1854 */ 1855 EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL); 1856 1857 return (copied); 1858 } 1859 1860 /* 1861 * Given a buf that has a data buffer attached to it, this function will 1862 * efficiently fill the buf with data of the specified compression setting from 1863 * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr 1864 * are already sharing a data buf, no copy is performed. 1865 * 1866 * If the buf is marked as compressed but uncompressed data was requested, this 1867 * will allocate a new data buffer for the buf, remove that flag, and fill the 1868 * buf with uncompressed data. You can't request a compressed buf on a hdr with 1869 * uncompressed data, and (since we haven't added support for it yet) if you 1870 * want compressed data your buf must already be marked as compressed and have 1871 * the correct-sized data buffer. 1872 */ 1873 static int 1874 arc_buf_fill(arc_buf_t *buf, boolean_t compressed) 1875 { 1876 arc_buf_hdr_t *hdr = buf->b_hdr; 1877 boolean_t hdr_compressed = (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF); 1878 dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap; 1879 1880 ASSERT3P(buf->b_data, !=, NULL); 1881 IMPLY(compressed, hdr_compressed); 1882 IMPLY(compressed, ARC_BUF_COMPRESSED(buf)); 1883 1884 if (hdr_compressed == compressed) { 1885 if (!arc_buf_is_shared(buf)) { 1886 abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd, 1887 arc_buf_size(buf)); 1888 } 1889 } else { 1890 ASSERT(hdr_compressed); 1891 ASSERT(!compressed); 1892 ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr)); 1893 1894 /* 1895 * If the buf is sharing its data with the hdr, unlink it and 1896 * allocate a new data buffer for the buf. 
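 *
 * For example, a compressed buf that was sharing b_pabd directly is
 * unlinked below and handed a fresh HDR_GET_LSIZE(hdr)-sized buffer,
 * while a non-shared compressed buf merely has its psize-sized b_data
 * reallocated to lsize before the uncompressed data is produced.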
1897 */ 1898 if (arc_buf_is_shared(buf)) { 1899 ASSERT(ARC_BUF_COMPRESSED(buf)); 1900 1901 /* We need to give the buf it's own b_data */ 1902 buf->b_flags &= ~ARC_BUF_FLAG_SHARED; 1903 buf->b_data = 1904 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); 1905 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 1906 1907 /* Previously overhead was 0; just add new overhead */ 1908 ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr)); 1909 } else if (ARC_BUF_COMPRESSED(buf)) { 1910 /* We need to reallocate the buf's b_data */ 1911 arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr), 1912 buf); 1913 buf->b_data = 1914 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); 1915 1916 /* We increased the size of b_data; update overhead */ 1917 ARCSTAT_INCR(arcstat_overhead_size, 1918 HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr)); 1919 } 1920 1921 /* 1922 * Regardless of the buf's previous compression settings, it 1923 * should not be compressed at the end of this function. 1924 */ 1925 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; 1926 1927 /* 1928 * Try copying the data from another buf which already has a 1929 * decompressed version. If that's not possible, it's time to 1930 * bite the bullet and decompress the data from the hdr. 1931 */ 1932 if (arc_buf_try_copy_decompressed_data(buf)) { 1933 /* Skip byteswapping and checksumming (already done) */ 1934 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, !=, NULL); 1935 return (0); 1936 } else { 1937 int error = zio_decompress_data(HDR_GET_COMPRESS(hdr), 1938 hdr->b_l1hdr.b_pabd, buf->b_data, 1939 HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr)); 1940 1941 /* 1942 * Absent hardware errors or software bugs, this should 1943 * be impossible, but log it anyway so we can debug it. 1944 */ 1945 if (error != 0) { 1946 zfs_dbgmsg( 1947 "hdr %p, compress %d, psize %d, lsize %d", 1948 hdr, HDR_GET_COMPRESS(hdr), 1949 HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr)); 1950 return (SET_ERROR(EIO)); 1951 } 1952 } 1953 } 1954 1955 /* Byteswap the buf's data if necessary */ 1956 if (bswap != DMU_BSWAP_NUMFUNCS) { 1957 ASSERT(!HDR_SHARED_DATA(hdr)); 1958 ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS); 1959 dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr)); 1960 } 1961 1962 /* Compute the hdr's checksum if necessary */ 1963 arc_cksum_compute(buf); 1964 1965 return (0); 1966 } 1967 1968 int 1969 arc_decompress(arc_buf_t *buf) 1970 { 1971 return (arc_buf_fill(buf, B_FALSE)); 1972 } 1973 1974 /* 1975 * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t. 1976 */ 1977 static uint64_t 1978 arc_hdr_size(arc_buf_hdr_t *hdr) 1979 { 1980 uint64_t size; 1981 1982 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && 1983 HDR_GET_PSIZE(hdr) > 0) { 1984 size = HDR_GET_PSIZE(hdr); 1985 } else { 1986 ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0); 1987 size = HDR_GET_LSIZE(hdr); 1988 } 1989 return (size); 1990 } 1991 1992 /* 1993 * Increment the amount of evictable space in the arc_state_t's refcount. 1994 * We account for the space used by the hdr and the arc buf individually 1995 * so that we can add and remove them from the refcount individually. 
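 *
 * As a worked example (hypothetical sizes): a non-ghost header with a
 * b_pabd and two bufs, one of which shares its b_data with the hdr,
 * contributes arc_hdr_size(hdr) for the pabd plus arc_buf_size() of the
 * one unshared buf; the shared buf is skipped because its memory is the
 * pabd itself. A ghost-state header owns no data at all, so only
 * HDR_GET_LSIZE(hdr) is charged.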
1996 */ 1997 static void 1998 arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state) 1999 { 2000 arc_buf_contents_t type = arc_buf_type(hdr); 2001 2002 ASSERT(HDR_HAS_L1HDR(hdr)); 2003 2004 if (GHOST_STATE(state)) { 2005 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2006 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2007 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2008 (void) refcount_add_many(&state->arcs_esize[type], 2009 HDR_GET_LSIZE(hdr), hdr); 2010 return; 2011 } 2012 2013 ASSERT(!GHOST_STATE(state)); 2014 if (hdr->b_l1hdr.b_pabd != NULL) { 2015 (void) refcount_add_many(&state->arcs_esize[type], 2016 arc_hdr_size(hdr), hdr); 2017 } 2018 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2019 buf = buf->b_next) { 2020 if (arc_buf_is_shared(buf)) 2021 continue; 2022 (void) refcount_add_many(&state->arcs_esize[type], 2023 arc_buf_size(buf), buf); 2024 } 2025 } 2026 2027 /* 2028 * Decrement the amount of evictable space in the arc_state_t's refcount. 2029 * We account for the space used by the hdr and the arc buf individually 2030 * so that we can add and remove them from the refcount individually. 2031 */ 2032 static void 2033 arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state) 2034 { 2035 arc_buf_contents_t type = arc_buf_type(hdr); 2036 2037 ASSERT(HDR_HAS_L1HDR(hdr)); 2038 2039 if (GHOST_STATE(state)) { 2040 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2041 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2042 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2043 (void) refcount_remove_many(&state->arcs_esize[type], 2044 HDR_GET_LSIZE(hdr), hdr); 2045 return; 2046 } 2047 2048 ASSERT(!GHOST_STATE(state)); 2049 if (hdr->b_l1hdr.b_pabd != NULL) { 2050 (void) refcount_remove_many(&state->arcs_esize[type], 2051 arc_hdr_size(hdr), hdr); 2052 } 2053 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2054 buf = buf->b_next) { 2055 if (arc_buf_is_shared(buf)) 2056 continue; 2057 (void) refcount_remove_many(&state->arcs_esize[type], 2058 arc_buf_size(buf), buf); 2059 } 2060 } 2061 2062 /* 2063 * Add a reference to this hdr indicating that someone is actively 2064 * referencing that memory. When the refcount transitions from 0 to 1, 2065 * we remove it from the respective arc_state_t list to indicate that 2066 * it is not evictable. 2067 */ 2068 static void 2069 add_reference(arc_buf_hdr_t *hdr, void *tag) 2070 { 2071 ASSERT(HDR_HAS_L1HDR(hdr)); 2072 if (!MUTEX_HELD(HDR_LOCK(hdr))) { 2073 ASSERT(hdr->b_l1hdr.b_state == arc_anon); 2074 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2075 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2076 } 2077 2078 arc_state_t *state = hdr->b_l1hdr.b_state; 2079 2080 if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) && 2081 (state != arc_anon)) { 2082 /* We don't use the L2-only state list. */ 2083 if (state != arc_l2c_only) { 2084 multilist_remove(state->arcs_list[arc_buf_type(hdr)], 2085 hdr); 2086 arc_evictable_space_decrement(hdr, state); 2087 } 2088 /* remove the prefetch flag if we get a reference */ 2089 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); 2090 } 2091 } 2092 2093 /* 2094 * Remove a reference from this hdr. When the reference transitions from 2095 * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's 2096 * list making it eligible for eviction. 
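 *
 * Together with add_reference() above, this maintains the rule that a
 * header in a non-anonymous (and non-l2c_only) state sits on that
 * state's multilist exactly while its refcount is zero. A hypothetical
 * caller pattern, for illustration only:
 *
 *   add_reference(hdr, tag);                       pinned, off the list
 *   ... use the buffer ...
 *   (void) remove_reference(hdr, hash_lock, tag);  back on the list if
 *                                                  this was the last ref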
2097 */ 2098 static int 2099 remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) 2100 { 2101 int cnt; 2102 arc_state_t *state = hdr->b_l1hdr.b_state; 2103 2104 ASSERT(HDR_HAS_L1HDR(hdr)); 2105 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 2106 ASSERT(!GHOST_STATE(state)); 2107 2108 /* 2109 * arc_l2c_only counts as a ghost state so we don't need to explicitly 2110 * check to prevent usage of the arc_l2c_only list. 2111 */ 2112 if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) && 2113 (state != arc_anon)) { 2114 multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr); 2115 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); 2116 arc_evictable_space_increment(hdr, state); 2117 } 2118 return (cnt); 2119 } 2120 2121 /* 2122 * Move the supplied buffer to the indicated state. The hash lock 2123 * for the buffer must be held by the caller. 2124 */ 2125 static void 2126 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr, 2127 kmutex_t *hash_lock) 2128 { 2129 arc_state_t *old_state; 2130 int64_t refcnt; 2131 uint32_t bufcnt; 2132 boolean_t update_old, update_new; 2133 arc_buf_contents_t buftype = arc_buf_type(hdr); 2134 2135 /* 2136 * We almost always have an L1 hdr here, since we call arc_hdr_realloc() 2137 * in arc_read() when bringing a buffer out of the L2ARC. However, the 2138 * L1 hdr doesn't always exist when we change state to arc_anon before 2139 * destroying a header, in which case reallocating to add the L1 hdr is 2140 * pointless. 2141 */ 2142 if (HDR_HAS_L1HDR(hdr)) { 2143 old_state = hdr->b_l1hdr.b_state; 2144 refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt); 2145 bufcnt = hdr->b_l1hdr.b_bufcnt; 2146 update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL); 2147 } else { 2148 old_state = arc_l2c_only; 2149 refcnt = 0; 2150 bufcnt = 0; 2151 update_old = B_FALSE; 2152 } 2153 update_new = update_old; 2154 2155 ASSERT(MUTEX_HELD(hash_lock)); 2156 ASSERT3P(new_state, !=, old_state); 2157 ASSERT(!GHOST_STATE(new_state) || bufcnt == 0); 2158 ASSERT(old_state != arc_anon || bufcnt <= 1); 2159 2160 /* 2161 * If this buffer is evictable, transfer it from the 2162 * old state list to the new state list. 2163 */ 2164 if (refcnt == 0) { 2165 if (old_state != arc_anon && old_state != arc_l2c_only) { 2166 ASSERT(HDR_HAS_L1HDR(hdr)); 2167 multilist_remove(old_state->arcs_list[buftype], hdr); 2168 2169 if (GHOST_STATE(old_state)) { 2170 ASSERT0(bufcnt); 2171 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2172 update_old = B_TRUE; 2173 } 2174 arc_evictable_space_decrement(hdr, old_state); 2175 } 2176 if (new_state != arc_anon && new_state != arc_l2c_only) { 2177 2178 /* 2179 * An L1 header always exists here, since if we're 2180 * moving to some L1-cached state (i.e. not l2c_only or 2181 * anonymous), we realloc the header to add an L1hdr 2182 * beforehand. 2183 */ 2184 ASSERT(HDR_HAS_L1HDR(hdr)); 2185 multilist_insert(new_state->arcs_list[buftype], hdr); 2186 2187 if (GHOST_STATE(new_state)) { 2188 ASSERT0(bufcnt); 2189 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2190 update_new = B_TRUE; 2191 } 2192 arc_evictable_space_increment(hdr, new_state); 2193 } 2194 } 2195 2196 ASSERT(!HDR_EMPTY(hdr)); 2197 if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr)) 2198 buf_hash_remove(hdr); 2199 2200 /* adjust state sizes (ignore arc_l2c_only) */ 2201 2202 if (update_new && new_state != arc_l2c_only) { 2203 ASSERT(HDR_HAS_L1HDR(hdr)); 2204 if (GHOST_STATE(new_state)) { 2205 ASSERT0(bufcnt); 2206 2207 /* 2208 * When moving a header to a ghost state, we first 2209 * remove all arc buffers. 
Thus, we'll have a 2210 * bufcnt of zero, and no arc buffer to use for 2211 * the reference. As a result, we use the arc 2212 * header pointer for the reference. 2213 */ 2214 (void) refcount_add_many(&new_state->arcs_size, 2215 HDR_GET_LSIZE(hdr), hdr); 2216 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2217 } else { 2218 uint32_t buffers = 0; 2219 2220 /* 2221 * Each individual buffer holds a unique reference, 2222 * thus we must remove each of these references one 2223 * at a time. 2224 */ 2225 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2226 buf = buf->b_next) { 2227 ASSERT3U(bufcnt, !=, 0); 2228 buffers++; 2229 2230 /* 2231 * When the arc_buf_t is sharing the data 2232 * block with the hdr, the owner of the 2233 * reference belongs to the hdr. Only 2234 * add to the refcount if the arc_buf_t is 2235 * not shared. 2236 */ 2237 if (arc_buf_is_shared(buf)) 2238 continue; 2239 2240 (void) refcount_add_many(&new_state->arcs_size, 2241 arc_buf_size(buf), buf); 2242 } 2243 ASSERT3U(bufcnt, ==, buffers); 2244 2245 if (hdr->b_l1hdr.b_pabd != NULL) { 2246 (void) refcount_add_many(&new_state->arcs_size, 2247 arc_hdr_size(hdr), hdr); 2248 } else { 2249 ASSERT(GHOST_STATE(old_state)); 2250 } 2251 } 2252 } 2253 2254 if (update_old && old_state != arc_l2c_only) { 2255 ASSERT(HDR_HAS_L1HDR(hdr)); 2256 if (GHOST_STATE(old_state)) { 2257 ASSERT0(bufcnt); 2258 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2259 2260 /* 2261 * When moving a header off of a ghost state, 2262 * the header will not contain any arc buffers. 2263 * We use the arc header pointer for the reference 2264 * which is exactly what we did when we put the 2265 * header on the ghost state. 2266 */ 2267 2268 (void) refcount_remove_many(&old_state->arcs_size, 2269 HDR_GET_LSIZE(hdr), hdr); 2270 } else { 2271 uint32_t buffers = 0; 2272 2273 /* 2274 * Each individual buffer holds a unique reference, 2275 * thus we must remove each of these references one 2276 * at a time. 2277 */ 2278 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2279 buf = buf->b_next) { 2280 ASSERT3U(bufcnt, !=, 0); 2281 buffers++; 2282 2283 /* 2284 * When the arc_buf_t is sharing the data 2285 * block with the hdr, the owner of the 2286 * reference belongs to the hdr. Only 2287 * add to the refcount if the arc_buf_t is 2288 * not shared. 2289 */ 2290 if (arc_buf_is_shared(buf)) 2291 continue; 2292 2293 (void) refcount_remove_many( 2294 &old_state->arcs_size, arc_buf_size(buf), 2295 buf); 2296 } 2297 ASSERT3U(bufcnt, ==, buffers); 2298 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 2299 (void) refcount_remove_many( 2300 &old_state->arcs_size, arc_hdr_size(hdr), hdr); 2301 } 2302 } 2303 2304 if (HDR_HAS_L1HDR(hdr)) 2305 hdr->b_l1hdr.b_state = new_state; 2306 2307 /* 2308 * L2 headers should never be on the L2 state list since they don't 2309 * have L1 headers allocated. 
2310 */ 2311 ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DATA]) && 2312 multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_METADATA])); 2313 } 2314 2315 void 2316 arc_space_consume(uint64_t space, arc_space_type_t type) 2317 { 2318 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 2319 2320 switch (type) { 2321 case ARC_SPACE_DATA: 2322 ARCSTAT_INCR(arcstat_data_size, space); 2323 break; 2324 case ARC_SPACE_META: 2325 ARCSTAT_INCR(arcstat_metadata_size, space); 2326 break; 2327 case ARC_SPACE_OTHER: 2328 ARCSTAT_INCR(arcstat_other_size, space); 2329 break; 2330 case ARC_SPACE_HDRS: 2331 ARCSTAT_INCR(arcstat_hdr_size, space); 2332 break; 2333 case ARC_SPACE_L2HDRS: 2334 ARCSTAT_INCR(arcstat_l2_hdr_size, space); 2335 break; 2336 } 2337 2338 if (type != ARC_SPACE_DATA) 2339 ARCSTAT_INCR(arcstat_meta_used, space); 2340 2341 atomic_add_64(&arc_size, space); 2342 } 2343 2344 void 2345 arc_space_return(uint64_t space, arc_space_type_t type) 2346 { 2347 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 2348 2349 switch (type) { 2350 case ARC_SPACE_DATA: 2351 ARCSTAT_INCR(arcstat_data_size, -space); 2352 break; 2353 case ARC_SPACE_META: 2354 ARCSTAT_INCR(arcstat_metadata_size, -space); 2355 break; 2356 case ARC_SPACE_OTHER: 2357 ARCSTAT_INCR(arcstat_other_size, -space); 2358 break; 2359 case ARC_SPACE_HDRS: 2360 ARCSTAT_INCR(arcstat_hdr_size, -space); 2361 break; 2362 case ARC_SPACE_L2HDRS: 2363 ARCSTAT_INCR(arcstat_l2_hdr_size, -space); 2364 break; 2365 } 2366 2367 if (type != ARC_SPACE_DATA) { 2368 ASSERT(arc_meta_used >= space); 2369 if (arc_meta_max < arc_meta_used) 2370 arc_meta_max = arc_meta_used; 2371 ARCSTAT_INCR(arcstat_meta_used, -space); 2372 } 2373 2374 ASSERT(arc_size >= space); 2375 atomic_add_64(&arc_size, -space); 2376 } 2377 2378 /* 2379 * Given a hdr and a buf, returns whether that buf can share its b_data buffer 2380 * with the hdr's b_pabd. 2381 */ 2382 static boolean_t 2383 arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf) 2384 { 2385 /* 2386 * The criteria for sharing a hdr's data are: 2387 * 1. the hdr's compression matches the buf's compression 2388 * 2. the hdr doesn't need to be byteswapped 2389 * 3. the hdr isn't already being shared 2390 * 4. the buf is either compressed or it is the last buf in the hdr list 2391 * 2392 * Criterion #4 maintains the invariant that shared uncompressed 2393 * bufs must be the final buf in the hdr's b_buf list. Reading this, you 2394 * might ask, "if a compressed buf is allocated first, won't that be the 2395 * last thing in the list?", but in that case it's impossible to create 2396 * a shared uncompressed buf anyway (because the hdr must be compressed 2397 * to have the compressed buf). You might also think that #3 is 2398 * sufficient to make this guarantee, however it's possible 2399 * (specifically in the rare L2ARC write race mentioned in 2400 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that 2401 * is sharable, but wasn't at the time of its allocation. Rather than 2402 * allow a new shared uncompressed buf to be created and then shuffle 2403 * the list around to make it the last element, this simply disallows 2404 * sharing if the new buf isn't the first to be added. 
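 *
 * As a concrete illustration: an uncompressed buf allocated against a
 * compressed hdr fails criterion #1 and always gets its own copy of the
 * data, while the first compressed buf allocated against that same hdr
 * satisfies #1 and #4 and may share b_pabd directly (provided #2 and #3
 * hold as well).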
2405 */ 2406 ASSERT3P(buf->b_hdr, ==, hdr); 2407 boolean_t hdr_compressed = HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF; 2408 boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0; 2409 return (buf_compressed == hdr_compressed && 2410 hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS && 2411 !HDR_SHARED_DATA(hdr) && 2412 (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf))); 2413 } 2414 2415 /* 2416 * Allocate a buf for this hdr. If you care about the data that's in the hdr, 2417 * or if you want a compressed buffer, pass those flags in. Returns 0 if the 2418 * copy was made successfully, or an error code otherwise. 2419 */ 2420 static int 2421 arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed, 2422 boolean_t fill, arc_buf_t **ret) 2423 { 2424 arc_buf_t *buf; 2425 2426 ASSERT(HDR_HAS_L1HDR(hdr)); 2427 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0); 2428 VERIFY(hdr->b_type == ARC_BUFC_DATA || 2429 hdr->b_type == ARC_BUFC_METADATA); 2430 ASSERT3P(ret, !=, NULL); 2431 ASSERT3P(*ret, ==, NULL); 2432 2433 buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2434 buf->b_hdr = hdr; 2435 buf->b_data = NULL; 2436 buf->b_next = hdr->b_l1hdr.b_buf; 2437 buf->b_flags = 0; 2438 2439 add_reference(hdr, tag); 2440 2441 /* 2442 * We're about to change the hdr's b_flags. We must either 2443 * hold the hash_lock or be undiscoverable. 2444 */ 2445 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2446 2447 /* 2448 * Only honor requests for compressed bufs if the hdr is actually 2449 * compressed. 2450 */ 2451 if (compressed && HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) 2452 buf->b_flags |= ARC_BUF_FLAG_COMPRESSED; 2453 2454 /* 2455 * If the hdr's data can be shared then we share the data buffer and 2456 * set the appropriate bit in the hdr's b_flags to indicate the hdr is 2457 * sharing it's b_pabd with the arc_buf_t. Otherwise, we allocate a new 2458 * buffer to store the buf's data. 2459 * 2460 * There are two additional restrictions here because we're sharing 2461 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be 2462 * actively involved in an L2ARC write, because if this buf is used by 2463 * an arc_write() then the hdr's data buffer will be released when the 2464 * write completes, even though the L2ARC write might still be using it. 2465 * Second, the hdr's ABD must be linear so that the buf's user doesn't 2466 * need to be ABD-aware. 2467 */ 2468 boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) && 2469 abd_is_linear(hdr->b_l1hdr.b_pabd); 2470 2471 /* Set up b_data and sharing */ 2472 if (can_share) { 2473 buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd); 2474 buf->b_flags |= ARC_BUF_FLAG_SHARED; 2475 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA); 2476 } else { 2477 buf->b_data = 2478 arc_get_data_buf(hdr, arc_buf_size(buf), buf); 2479 ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf)); 2480 } 2481 VERIFY3P(buf->b_data, !=, NULL); 2482 2483 hdr->b_l1hdr.b_buf = buf; 2484 hdr->b_l1hdr.b_bufcnt += 1; 2485 2486 /* 2487 * If the user wants the data from the hdr, we need to either copy or 2488 * decompress the data. 
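 *
 * (Both arc_alloc_buf() and arc_alloc_compressed_buf() below pass
 * fill == B_FALSE, since they hand out a fresh buffer for the caller to
 * populate; callers that need the existing data materialized right away
 * are expected to pass fill == B_TRUE, in which case arc_buf_fill()
 * does the copy or decompression here.)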
2489 */ 2490 if (fill) { 2491 return (arc_buf_fill(buf, ARC_BUF_COMPRESSED(buf) != 0)); 2492 } 2493 2494 return (0); 2495 } 2496 2497 static char *arc_onloan_tag = "onloan"; 2498 2499 static inline void 2500 arc_loaned_bytes_update(int64_t delta) 2501 { 2502 atomic_add_64(&arc_loaned_bytes, delta); 2503 2504 /* assert that it did not wrap around */ 2505 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0); 2506 } 2507 2508 /* 2509 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in 2510 * flight data by arc_tempreserve_space() until they are "returned". Loaned 2511 * buffers must be returned to the arc before they can be used by the DMU or 2512 * freed. 2513 */ 2514 arc_buf_t * 2515 arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size) 2516 { 2517 arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag, 2518 is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size); 2519 2520 arc_loaned_bytes_update(size); 2521 2522 return (buf); 2523 } 2524 2525 arc_buf_t * 2526 arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize, 2527 enum zio_compress compression_type) 2528 { 2529 arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag, 2530 psize, lsize, compression_type); 2531 2532 arc_loaned_bytes_update(psize); 2533 2534 return (buf); 2535 } 2536 2537 2538 /* 2539 * Return a loaned arc buffer to the arc. 2540 */ 2541 void 2542 arc_return_buf(arc_buf_t *buf, void *tag) 2543 { 2544 arc_buf_hdr_t *hdr = buf->b_hdr; 2545 2546 ASSERT3P(buf->b_data, !=, NULL); 2547 ASSERT(HDR_HAS_L1HDR(hdr)); 2548 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag); 2549 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 2550 2551 arc_loaned_bytes_update(-arc_buf_size(buf)); 2552 } 2553 2554 /* Detach an arc_buf from a dbuf (tag) */ 2555 void 2556 arc_loan_inuse_buf(arc_buf_t *buf, void *tag) 2557 { 2558 arc_buf_hdr_t *hdr = buf->b_hdr; 2559 2560 ASSERT3P(buf->b_data, !=, NULL); 2561 ASSERT(HDR_HAS_L1HDR(hdr)); 2562 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 2563 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag); 2564 2565 arc_loaned_bytes_update(arc_buf_size(buf)); 2566 } 2567 2568 static void 2569 l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type) 2570 { 2571 l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP); 2572 2573 df->l2df_abd = abd; 2574 df->l2df_size = size; 2575 df->l2df_type = type; 2576 mutex_enter(&l2arc_free_on_write_mtx); 2577 list_insert_head(l2arc_free_on_write, df); 2578 mutex_exit(&l2arc_free_on_write_mtx); 2579 } 2580 2581 static void 2582 arc_hdr_free_on_write(arc_buf_hdr_t *hdr) 2583 { 2584 arc_state_t *state = hdr->b_l1hdr.b_state; 2585 arc_buf_contents_t type = arc_buf_type(hdr); 2586 uint64_t size = arc_hdr_size(hdr); 2587 2588 /* protected by hash lock, if in the hash table */ 2589 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 2590 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2591 ASSERT(state != arc_anon && state != arc_l2c_only); 2592 2593 (void) refcount_remove_many(&state->arcs_esize[type], 2594 size, hdr); 2595 } 2596 (void) refcount_remove_many(&state->arcs_size, size, hdr); 2597 if (type == ARC_BUFC_METADATA) { 2598 arc_space_return(size, ARC_SPACE_META); 2599 } else { 2600 ASSERT(type == ARC_BUFC_DATA); 2601 arc_space_return(size, ARC_SPACE_DATA); 2602 } 2603 2604 l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type); 2605 } 2606 2607 /* 2608 * Share the arc_buf_t's data with the hdr. 
Whenever we are sharing the 2609 * data buffer, we transfer the refcount ownership to the hdr and update 2610 * the appropriate kstats. 2611 */ 2612 static void 2613 arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf) 2614 { 2615 arc_state_t *state = hdr->b_l1hdr.b_state; 2616 2617 ASSERT(arc_can_share(hdr, buf)); 2618 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2619 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2620 2621 /* 2622 * Start sharing the data buffer. We transfer the 2623 * refcount ownership to the hdr since it always owns 2624 * the refcount whenever an arc_buf_t is shared. 2625 */ 2626 refcount_transfer_ownership(&state->arcs_size, buf, hdr); 2627 hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf)); 2628 abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd, 2629 HDR_ISTYPE_METADATA(hdr)); 2630 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA); 2631 buf->b_flags |= ARC_BUF_FLAG_SHARED; 2632 2633 /* 2634 * Since we've transferred ownership to the hdr we need 2635 * to increment its compressed and uncompressed kstats and 2636 * decrement the overhead size. 2637 */ 2638 ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr)); 2639 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr)); 2640 ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf)); 2641 } 2642 2643 static void 2644 arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf) 2645 { 2646 arc_state_t *state = hdr->b_l1hdr.b_state; 2647 2648 ASSERT(arc_buf_is_shared(buf)); 2649 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 2650 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2651 2652 /* 2653 * We are no longer sharing this buffer so we need 2654 * to transfer its ownership to the rightful owner. 2655 */ 2656 refcount_transfer_ownership(&state->arcs_size, hdr, buf); 2657 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 2658 abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd); 2659 abd_put(hdr->b_l1hdr.b_pabd); 2660 hdr->b_l1hdr.b_pabd = NULL; 2661 buf->b_flags &= ~ARC_BUF_FLAG_SHARED; 2662 2663 /* 2664 * Since the buffer is no longer shared between 2665 * the arc buf and the hdr, count it as overhead. 2666 */ 2667 ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr)); 2668 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr)); 2669 ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf)); 2670 } 2671 2672 /* 2673 * Remove an arc_buf_t from the hdr's buf list and return the last 2674 * arc_buf_t on the list. If no buffers remain on the list then return 2675 * NULL. 2676 */ 2677 static arc_buf_t * 2678 arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf) 2679 { 2680 ASSERT(HDR_HAS_L1HDR(hdr)); 2681 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2682 2683 arc_buf_t **bufp = &hdr->b_l1hdr.b_buf; 2684 arc_buf_t *lastbuf = NULL; 2685 2686 /* 2687 * Remove the buf from the hdr list and locate the last 2688 * remaining buffer on the list. 2689 */ 2690 while (*bufp != NULL) { 2691 if (*bufp == buf) 2692 *bufp = buf->b_next; 2693 2694 /* 2695 * If we've removed a buffer in the middle of 2696 * the list then update the lastbuf and update 2697 * bufp. 
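 *
 * For example, removing B from the list A -> B -> C splices A's b_next
 * over to C and leaves lastbuf pointing at C, the new tail.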
2698 */
2699 if (*bufp != NULL) {
2700 lastbuf = *bufp;
2701 bufp = &(*bufp)->b_next;
2702 }
2703 }
2704 buf->b_next = NULL;
2705 ASSERT3P(lastbuf, !=, buf);
2706 IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
2707 IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
2708 IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
2709
2710 return (lastbuf);
2711 }
2712
2713 /*
2714 * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
2715 * list and free it.
2716 */
2717 static void
2718 arc_buf_destroy_impl(arc_buf_t *buf)
2719 {
2720 arc_buf_hdr_t *hdr = buf->b_hdr;
2721
2722 /*
2723 * Free up the data associated with the buf but only if we're not
2724 * sharing this with the hdr. If we are sharing it with the hdr, the
2725 * hdr is responsible for doing the free.
2726 */
2727 if (buf->b_data != NULL) {
2728 /*
2729 * We're about to change the hdr's b_flags. We must either
2730 * hold the hash_lock or be undiscoverable.
2731 */
2732 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2733
2734 arc_cksum_verify(buf);
2735 arc_buf_unwatch(buf);
2736
2737 if (arc_buf_is_shared(buf)) {
2738 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
2739 } else {
2740 uint64_t size = arc_buf_size(buf);
2741 arc_free_data_buf(hdr, buf->b_data, size, buf);
2742 ARCSTAT_INCR(arcstat_overhead_size, -size);
2743 }
2744 buf->b_data = NULL;
2745
2746 ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
2747 hdr->b_l1hdr.b_bufcnt -= 1;
2748 }
2749
2750 arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
2751
2752 if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
2753 /*
2754 * If the current arc_buf_t is sharing its data buffer with the
2755 * hdr, then reassign the hdr's b_pabd to share it with the new
2756 * buffer at the end of the list. The shared buffer is always
2757 * the last one on the hdr's buffer list.
2758 *
2759 * There is an equivalent case for compressed bufs, but since
2760 * they aren't guaranteed to be the last buf in the list and
2761 * that is an exceedingly rare case, we just allow that space to
2762 * be wasted temporarily.
2763 */
2764 if (lastbuf != NULL) {
2765 /* Only one buf can be shared at once */
2766 VERIFY(!arc_buf_is_shared(lastbuf));
2767 /* hdr is uncompressed so can't have compressed buf */
2768 VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
2769
2770 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
2771 arc_hdr_free_pabd(hdr);
2772
2773 /*
2774 * We must set up a new shared block between the
2775 * last buffer and the hdr. The data would have
2776 * been allocated by the arc buf so we need to transfer
2777 * ownership to the hdr since it's now being shared.
2778 */
2779 arc_share_buf(hdr, lastbuf);
2780 }
2781 } else if (HDR_SHARED_DATA(hdr)) {
2782 /*
2783 * Uncompressed shared buffers are always at the end
2784 * of the list. Compressed buffers don't have the
2785 * same requirements. This makes it hard to
2786 * simply assert that the lastbuf is shared so
2787 * we rely on the hdr's compression flags to determine
2788 * if we have a compressed, shared buffer.
2789 */
2790 ASSERT3P(lastbuf, !=, NULL);
2791 ASSERT(arc_buf_is_shared(lastbuf) ||
2792 HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
2793 }
2794
2795 /*
2796 * Free the checksum if we're removing the last uncompressed buf from
2797 * this hdr.
2798 */ 2799 if (!arc_hdr_has_uncompressed_buf(hdr)) { 2800 arc_cksum_free(hdr); 2801 } 2802 2803 /* clean up the buf */ 2804 buf->b_hdr = NULL; 2805 kmem_cache_free(buf_cache, buf); 2806 } 2807 2808 static void 2809 arc_hdr_alloc_pabd(arc_buf_hdr_t *hdr) 2810 { 2811 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0); 2812 ASSERT(HDR_HAS_L1HDR(hdr)); 2813 ASSERT(!HDR_SHARED_DATA(hdr)); 2814 2815 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2816 hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr); 2817 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 2818 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 2819 2820 ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr)); 2821 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr)); 2822 } 2823 2824 static void 2825 arc_hdr_free_pabd(arc_buf_hdr_t *hdr) 2826 { 2827 ASSERT(HDR_HAS_L1HDR(hdr)); 2828 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 2829 2830 /* 2831 * If the hdr is currently being written to the l2arc then 2832 * we defer freeing the data by adding it to the l2arc_free_on_write 2833 * list. The l2arc will free the data once it's finished 2834 * writing it to the l2arc device. 2835 */ 2836 if (HDR_L2_WRITING(hdr)) { 2837 arc_hdr_free_on_write(hdr); 2838 ARCSTAT_BUMP(arcstat_l2_free_on_write); 2839 } else { 2840 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, 2841 arc_hdr_size(hdr), hdr); 2842 } 2843 hdr->b_l1hdr.b_pabd = NULL; 2844 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 2845 2846 ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr)); 2847 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr)); 2848 } 2849 2850 static arc_buf_hdr_t * 2851 arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize, 2852 enum zio_compress compression_type, arc_buf_contents_t type) 2853 { 2854 arc_buf_hdr_t *hdr; 2855 2856 VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA); 2857 2858 hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE); 2859 ASSERT(HDR_EMPTY(hdr)); 2860 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 2861 ASSERT3P(hdr->b_l1hdr.b_thawed, ==, NULL); 2862 HDR_SET_PSIZE(hdr, psize); 2863 HDR_SET_LSIZE(hdr, lsize); 2864 hdr->b_spa = spa; 2865 hdr->b_type = type; 2866 hdr->b_flags = 0; 2867 arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR); 2868 arc_hdr_set_compress(hdr, compression_type); 2869 2870 hdr->b_l1hdr.b_state = arc_anon; 2871 hdr->b_l1hdr.b_arc_access = 0; 2872 hdr->b_l1hdr.b_bufcnt = 0; 2873 hdr->b_l1hdr.b_buf = NULL; 2874 2875 /* 2876 * Allocate the hdr's buffer. This will contain either 2877 * the compressed or uncompressed data depending on the block 2878 * it references and compressed arc enablement. 2879 */ 2880 arc_hdr_alloc_pabd(hdr); 2881 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2882 2883 return (hdr); 2884 } 2885 2886 /* 2887 * Transition between the two allocation states for the arc_buf_hdr struct. 2888 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without 2889 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller 2890 * version is used when a cache buffer is only in the L2ARC in order to reduce 2891 * memory usage. 
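 *
 * A sketch of the two directions in which this is used (illustrative):
 *
 *   full -> l2only:  arc_evict_hdr() demotes a ghost header that is
 *                    still cached on an L2ARC device, so only the
 *                    l2arc_buf_hdr_t portion needs to survive.
 *   l2only -> full:  arc_read() brings a block back in from the L2ARC
 *                    and must re-attach the L1 fields first.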
2892 */ 2893 static arc_buf_hdr_t * 2894 arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new) 2895 { 2896 ASSERT(HDR_HAS_L2HDR(hdr)); 2897 2898 arc_buf_hdr_t *nhdr; 2899 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 2900 2901 ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) || 2902 (old == hdr_l2only_cache && new == hdr_full_cache)); 2903 2904 nhdr = kmem_cache_alloc(new, KM_PUSHPAGE); 2905 2906 ASSERT(MUTEX_HELD(HDR_LOCK(hdr))); 2907 buf_hash_remove(hdr); 2908 2909 bcopy(hdr, nhdr, HDR_L2ONLY_SIZE); 2910 2911 if (new == hdr_full_cache) { 2912 arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR); 2913 /* 2914 * arc_access and arc_change_state need to be aware that a 2915 * header has just come out of L2ARC, so we set its state to 2916 * l2c_only even though it's about to change. 2917 */ 2918 nhdr->b_l1hdr.b_state = arc_l2c_only; 2919 2920 /* Verify previous threads set to NULL before freeing */ 2921 ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL); 2922 } else { 2923 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2924 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2925 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 2926 2927 /* 2928 * If we've reached here, We must have been called from 2929 * arc_evict_hdr(), as such we should have already been 2930 * removed from any ghost list we were previously on 2931 * (which protects us from racing with arc_evict_state), 2932 * thus no locking is needed during this check. 2933 */ 2934 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 2935 2936 /* 2937 * A buffer must not be moved into the arc_l2c_only 2938 * state if it's not finished being written out to the 2939 * l2arc device. Otherwise, the b_l1hdr.b_pabd field 2940 * might try to be accessed, even though it was removed. 2941 */ 2942 VERIFY(!HDR_L2_WRITING(hdr)); 2943 VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2944 2945 #ifdef ZFS_DEBUG 2946 if (hdr->b_l1hdr.b_thawed != NULL) { 2947 kmem_free(hdr->b_l1hdr.b_thawed, 1); 2948 hdr->b_l1hdr.b_thawed = NULL; 2949 } 2950 #endif 2951 2952 arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR); 2953 } 2954 /* 2955 * The header has been reallocated so we need to re-insert it into any 2956 * lists it was on. 2957 */ 2958 (void) buf_hash_insert(nhdr, NULL); 2959 2960 ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node)); 2961 2962 mutex_enter(&dev->l2ad_mtx); 2963 2964 /* 2965 * We must place the realloc'ed header back into the list at 2966 * the same spot. Otherwise, if it's placed earlier in the list, 2967 * l2arc_write_buffers() could find it during the function's 2968 * write phase, and try to write it out to the l2arc. 2969 */ 2970 list_insert_after(&dev->l2ad_buflist, hdr, nhdr); 2971 list_remove(&dev->l2ad_buflist, hdr); 2972 2973 mutex_exit(&dev->l2ad_mtx); 2974 2975 /* 2976 * Since we're using the pointer address as the tag when 2977 * incrementing and decrementing the l2ad_alloc refcount, we 2978 * must remove the old pointer (that we're about to destroy) and 2979 * add the new pointer to the refcount. Otherwise we'd remove 2980 * the wrong pointer address when calling arc_hdr_destroy() later. 2981 */ 2982 2983 (void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr); 2984 (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr); 2985 2986 buf_discard_identity(hdr); 2987 kmem_cache_free(old, hdr); 2988 2989 return (nhdr); 2990 } 2991 2992 /* 2993 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller. 2994 * The buf is returned thawed since we expect the consumer to modify it. 
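 *
 * A minimal usage sketch (hypothetical caller, for illustration only;
 * 'src', 'size' and 'tag' are the caller's):
 *
 *   arc_buf_t *buf = arc_alloc_buf(spa, tag, ARC_BUFC_DATA, size);
 *   bcopy(src, buf->b_data, size);
 *   ...
 *   arc_buf_destroy(buf, tag);
 *
 * arc_buf_destroy() (further below) is one way to drop the reference
 * taken here; a real consumer would typically hand the buf off to the
 * write path instead of destroying it directly.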
2995 */ 2996 arc_buf_t * 2997 arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size) 2998 { 2999 arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size, 3000 ZIO_COMPRESS_OFF, type); 3001 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr))); 3002 3003 arc_buf_t *buf = NULL; 3004 VERIFY0(arc_buf_alloc_impl(hdr, tag, B_FALSE, B_FALSE, &buf)); 3005 arc_buf_thaw(buf); 3006 3007 return (buf); 3008 } 3009 3010 /* 3011 * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this 3012 * for bufs containing metadata. 3013 */ 3014 arc_buf_t * 3015 arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize, 3016 enum zio_compress compression_type) 3017 { 3018 ASSERT3U(lsize, >, 0); 3019 ASSERT3U(lsize, >=, psize); 3020 ASSERT(compression_type > ZIO_COMPRESS_OFF); 3021 ASSERT(compression_type < ZIO_COMPRESS_FUNCTIONS); 3022 3023 arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, 3024 compression_type, ARC_BUFC_DATA); 3025 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr))); 3026 3027 arc_buf_t *buf = NULL; 3028 VERIFY0(arc_buf_alloc_impl(hdr, tag, B_TRUE, B_FALSE, &buf)); 3029 arc_buf_thaw(buf); 3030 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 3031 3032 if (!arc_buf_is_shared(buf)) { 3033 /* 3034 * To ensure that the hdr has the correct data in it if we call 3035 * arc_decompress() on this buf before it's been written to 3036 * disk, it's easiest if we just set up sharing between the 3037 * buf and the hdr. 3038 */ 3039 ASSERT(!abd_is_linear(hdr->b_l1hdr.b_pabd)); 3040 arc_hdr_free_pabd(hdr); 3041 arc_share_buf(hdr, buf); 3042 } 3043 3044 return (buf); 3045 } 3046 3047 static void 3048 arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr) 3049 { 3050 l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr; 3051 l2arc_dev_t *dev = l2hdr->b_dev; 3052 uint64_t psize = arc_hdr_size(hdr); 3053 3054 ASSERT(MUTEX_HELD(&dev->l2ad_mtx)); 3055 ASSERT(HDR_HAS_L2HDR(hdr)); 3056 3057 list_remove(&dev->l2ad_buflist, hdr); 3058 3059 ARCSTAT_INCR(arcstat_l2_psize, -psize); 3060 ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr)); 3061 3062 vdev_space_update(dev->l2ad_vdev, -psize, 0, 0); 3063 3064 (void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr); 3065 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR); 3066 } 3067 3068 static void 3069 arc_hdr_destroy(arc_buf_hdr_t *hdr) 3070 { 3071 if (HDR_HAS_L1HDR(hdr)) { 3072 ASSERT(hdr->b_l1hdr.b_buf == NULL || 3073 hdr->b_l1hdr.b_bufcnt > 0); 3074 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 3075 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 3076 } 3077 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3078 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 3079 3080 if (!HDR_EMPTY(hdr)) 3081 buf_discard_identity(hdr); 3082 3083 if (HDR_HAS_L2HDR(hdr)) { 3084 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 3085 boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx); 3086 3087 if (!buflist_held) 3088 mutex_enter(&dev->l2ad_mtx); 3089 3090 /* 3091 * Even though we checked this conditional above, we 3092 * need to check this again now that we have the 3093 * l2ad_mtx. This is because we could be racing with 3094 * another thread calling l2arc_evict() which might have 3095 * destroyed this header's L2 portion as we were waiting 3096 * to acquire the l2ad_mtx. If that happens, we don't 3097 * want to re-destroy the header's L2 portion. 
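 *
 * (This is the usual check, lock, re-check pattern: the HDR_HAS_L2HDR()
 * test above only decides whether l2ad_mtx needs to be taken at all,
 * and the test is repeated under the mutex before anything is torn
 * down.)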
3098 */
3099 if (HDR_HAS_L2HDR(hdr))
3100 arc_hdr_l2hdr_destroy(hdr);
3101
3102 if (!buflist_held)
3103 mutex_exit(&dev->l2ad_mtx);
3104 }
3105
3106 if (HDR_HAS_L1HDR(hdr)) {
3107 arc_cksum_free(hdr);
3108
3109 while (hdr->b_l1hdr.b_buf != NULL)
3110 arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
3111
3112 #ifdef ZFS_DEBUG
3113 if (hdr->b_l1hdr.b_thawed != NULL) {
3114 kmem_free(hdr->b_l1hdr.b_thawed, 1);
3115 hdr->b_l1hdr.b_thawed = NULL;
3116 }
3117 #endif
3118
3119 if (hdr->b_l1hdr.b_pabd != NULL) {
3120 arc_hdr_free_pabd(hdr);
3121 }
3122 }
3123
3124 ASSERT3P(hdr->b_hash_next, ==, NULL);
3125 if (HDR_HAS_L1HDR(hdr)) {
3126 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
3127 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
3128 kmem_cache_free(hdr_full_cache, hdr);
3129 } else {
3130 kmem_cache_free(hdr_l2only_cache, hdr);
3131 }
3132 }
3133
3134 void
3135 arc_buf_destroy(arc_buf_t *buf, void* tag)
3136 {
3137 arc_buf_hdr_t *hdr = buf->b_hdr;
3138 kmutex_t *hash_lock = HDR_LOCK(hdr);
3139
3140 if (hdr->b_l1hdr.b_state == arc_anon) {
3141 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
3142 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3143 VERIFY0(remove_reference(hdr, NULL, tag));
3144 arc_hdr_destroy(hdr);
3145 return;
3146 }
3147
3148 mutex_enter(hash_lock);
3149 ASSERT3P(hdr, ==, buf->b_hdr);
3150 ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
3151 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3152 ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
3153 ASSERT3P(buf->b_data, !=, NULL);
3154
3155 (void) remove_reference(hdr, hash_lock, tag);
3156 arc_buf_destroy_impl(buf);
3157 mutex_exit(hash_lock);
3158 }
3159
3160 /*
3161 * Evict the arc_buf_hdr that is provided as a parameter. The resultant
3162 * state of the header is dependent on its state prior to entering this
3163 * function. The following transitions are possible:
3164 *
3165 * - arc_mru -> arc_mru_ghost
3166 * - arc_mfu -> arc_mfu_ghost
3167 * - arc_mru_ghost -> arc_l2c_only
3168 * - arc_mru_ghost -> deleted
3169 * - arc_mfu_ghost -> arc_l2c_only
3170 * - arc_mfu_ghost -> deleted
3171 */
3172 static int64_t
3173 arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
3174 {
3175 arc_state_t *evicted_state, *state;
3176 int64_t bytes_evicted = 0;
3177
3178 ASSERT(MUTEX_HELD(hash_lock));
3179 ASSERT(HDR_HAS_L1HDR(hdr));
3180
3181 state = hdr->b_l1hdr.b_state;
3182 if (GHOST_STATE(state)) {
3183 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3184 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
3185
3186 /*
3187 * l2arc_write_buffers() relies on a header's L1 portion
3188 * (i.e. its b_pabd field) during its write phase.
3189 * Thus, we cannot push a header onto the arc_l2c_only
3190 * state (removing its L1 piece) until the header is
3191 * done being written to the l2arc.
3192 */
3193 if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
3194 ARCSTAT_BUMP(arcstat_evict_l2_skip);
3195 return (bytes_evicted);
3196 }
3197
3198 ARCSTAT_BUMP(arcstat_deleted);
3199 bytes_evicted += HDR_GET_LSIZE(hdr);
3200
3201 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
3202
3203 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
3204 if (HDR_HAS_L2HDR(hdr)) {
3205 /*
3206 * This buffer is cached on the 2nd Level ARC;
3207 * don't destroy the header.
3208 */
3209 arc_change_state(arc_l2c_only, hdr, hash_lock);
3210 /*
3211 * dropping from L1+L2 cached to L2-only,
3212 * realloc to remove the L1 header.
3213 */ 3214 hdr = arc_hdr_realloc(hdr, hdr_full_cache, 3215 hdr_l2only_cache); 3216 } else { 3217 arc_change_state(arc_anon, hdr, hash_lock); 3218 arc_hdr_destroy(hdr); 3219 } 3220 return (bytes_evicted); 3221 } 3222 3223 ASSERT(state == arc_mru || state == arc_mfu); 3224 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 3225 3226 /* prefetch buffers have a minimum lifespan */ 3227 if (HDR_IO_IN_PROGRESS(hdr) || 3228 ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) && 3229 ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access < 3230 arc_min_prefetch_lifespan)) { 3231 ARCSTAT_BUMP(arcstat_evict_skip); 3232 return (bytes_evicted); 3233 } 3234 3235 ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); 3236 while (hdr->b_l1hdr.b_buf) { 3237 arc_buf_t *buf = hdr->b_l1hdr.b_buf; 3238 if (!mutex_tryenter(&buf->b_evict_lock)) { 3239 ARCSTAT_BUMP(arcstat_mutex_miss); 3240 break; 3241 } 3242 if (buf->b_data != NULL) 3243 bytes_evicted += HDR_GET_LSIZE(hdr); 3244 mutex_exit(&buf->b_evict_lock); 3245 arc_buf_destroy_impl(buf); 3246 } 3247 3248 if (HDR_HAS_L2HDR(hdr)) { 3249 ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr)); 3250 } else { 3251 if (l2arc_write_eligible(hdr->b_spa, hdr)) { 3252 ARCSTAT_INCR(arcstat_evict_l2_eligible, 3253 HDR_GET_LSIZE(hdr)); 3254 } else { 3255 ARCSTAT_INCR(arcstat_evict_l2_ineligible, 3256 HDR_GET_LSIZE(hdr)); 3257 } 3258 } 3259 3260 if (hdr->b_l1hdr.b_bufcnt == 0) { 3261 arc_cksum_free(hdr); 3262 3263 bytes_evicted += arc_hdr_size(hdr); 3264 3265 /* 3266 * If this hdr is being evicted and has a compressed 3267 * buffer then we discard it here before we change states. 3268 * This ensures that the accounting is updated correctly 3269 * in arc_free_data_impl(). 3270 */ 3271 arc_hdr_free_pabd(hdr); 3272 3273 arc_change_state(evicted_state, hdr, hash_lock); 3274 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3275 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 3276 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr); 3277 } 3278 3279 return (bytes_evicted); 3280 } 3281 3282 static uint64_t 3283 arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker, 3284 uint64_t spa, int64_t bytes) 3285 { 3286 multilist_sublist_t *mls; 3287 uint64_t bytes_evicted = 0; 3288 arc_buf_hdr_t *hdr; 3289 kmutex_t *hash_lock; 3290 int evict_count = 0; 3291 3292 ASSERT3P(marker, !=, NULL); 3293 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 3294 3295 mls = multilist_sublist_lock(ml, idx); 3296 3297 for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL; 3298 hdr = multilist_sublist_prev(mls, marker)) { 3299 if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) || 3300 (evict_count >= zfs_arc_evict_batch_limit)) 3301 break; 3302 3303 /* 3304 * To keep our iteration location, move the marker 3305 * forward. Since we're not holding hdr's hash lock, we 3306 * must be very careful and not remove 'hdr' from the 3307 * sublist. Otherwise, other consumers might mistake the 3308 * 'hdr' as not being on a sublist when they call the 3309 * multilist_link_active() function (they all rely on 3310 * the hash lock protecting concurrent insertions and 3311 * removals). multilist_sublist_move_forward() was 3312 * specifically implemented to ensure this is the case 3313 * (only 'marker' will be removed and re-inserted). 3314 */ 3315 multilist_sublist_move_forward(mls, marker); 3316 3317 /* 3318 * The only case where the b_spa field should ever be 3319 * zero, is the marker headers inserted by 3320 * arc_evict_state(). 
It's possible for multiple threads 3321 * to be calling arc_evict_state() concurrently (e.g. 3322 * dsl_pool_close() and zio_inject_fault()), so we must 3323 * skip any markers we see from these other threads. 3324 */ 3325 if (hdr->b_spa == 0) 3326 continue; 3327 3328 /* we're only interested in evicting buffers of a certain spa */ 3329 if (spa != 0 && hdr->b_spa != spa) { 3330 ARCSTAT_BUMP(arcstat_evict_skip); 3331 continue; 3332 } 3333 3334 hash_lock = HDR_LOCK(hdr); 3335 3336 /* 3337 * We aren't calling this function from any code path 3338 * that would already be holding a hash lock, so we're 3339 * asserting on this assumption to be defensive in case 3340 * this ever changes. Without this check, it would be 3341 * possible to incorrectly increment arcstat_mutex_miss 3342 * below (e.g. if the code changed such that we called 3343 * this function with a hash lock held). 3344 */ 3345 ASSERT(!MUTEX_HELD(hash_lock)); 3346 3347 if (mutex_tryenter(hash_lock)) { 3348 uint64_t evicted = arc_evict_hdr(hdr, hash_lock); 3349 mutex_exit(hash_lock); 3350 3351 bytes_evicted += evicted; 3352 3353 /* 3354 * If evicted is zero, arc_evict_hdr() must have 3355 * decided to skip this header, don't increment 3356 * evict_count in this case. 3357 */ 3358 if (evicted != 0) 3359 evict_count++; 3360 3361 /* 3362 * If arc_size isn't overflowing, signal any 3363 * threads that might happen to be waiting. 3364 * 3365 * For each header evicted, we wake up a single 3366 * thread. If we used cv_broadcast, we could 3367 * wake up "too many" threads causing arc_size 3368 * to significantly overflow arc_c; since 3369 * arc_get_data_impl() doesn't check for overflow 3370 * when it's woken up (it doesn't because it's 3371 * possible for the ARC to be overflowing while 3372 * full of un-evictable buffers, and the 3373 * function should proceed in this case). 3374 * 3375 * If threads are left sleeping, due to not 3376 * using cv_broadcast, they will be woken up 3377 * just before arc_reclaim_thread() sleeps. 3378 */ 3379 mutex_enter(&arc_reclaim_lock); 3380 if (!arc_is_overflowing()) 3381 cv_signal(&arc_reclaim_waiters_cv); 3382 mutex_exit(&arc_reclaim_lock); 3383 } else { 3384 ARCSTAT_BUMP(arcstat_mutex_miss); 3385 } 3386 } 3387 3388 multilist_sublist_unlock(mls); 3389 3390 return (bytes_evicted); 3391 } 3392 3393 /* 3394 * Evict buffers from the given arc state, until we've removed the 3395 * specified number of bytes. Move the removed buffers to the 3396 * appropriate evict state. 3397 * 3398 * This function makes a "best effort". It skips over any buffers 3399 * it can't get a hash_lock on, and so, may not catch all candidates. 3400 * It may also return without evicting as much space as requested. 3401 * 3402 * If bytes is specified using the special value ARC_EVICT_ALL, this 3403 * will evict all available (i.e. unlocked and evictable) buffers from 3404 * the given arc state; which is used by arc_flush(). 3405 */ 3406 static uint64_t 3407 arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes, 3408 arc_buf_contents_t type) 3409 { 3410 uint64_t total_evicted = 0; 3411 multilist_t *ml = state->arcs_list[type]; 3412 int num_sublists; 3413 arc_buf_hdr_t **markers; 3414 3415 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 3416 3417 num_sublists = multilist_get_num_sublists(ml); 3418 3419 /* 3420 * If we've tried to evict from each sublist, made some 3421 * progress, but still have not hit the target number of bytes 3422 * to evict, we want to keep trying. 
The markers allow us to 3423 * pick up where we left off for each individual sublist, rather 3424 * than starting from the tail each time. 3425 */ 3426 markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP); 3427 for (int i = 0; i < num_sublists; i++) { 3428 markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP); 3429 3430 /* 3431 * A b_spa of 0 is used to indicate that this header is 3432 * a marker. This fact is used in arc_adjust_type() and 3433 * arc_evict_state_impl(). 3434 */ 3435 markers[i]->b_spa = 0; 3436 3437 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 3438 multilist_sublist_insert_tail(mls, markers[i]); 3439 multilist_sublist_unlock(mls); 3440 } 3441 3442 /* 3443 * While we haven't hit our target number of bytes to evict, or 3444 * we're evicting all available buffers. 3445 */ 3446 while (total_evicted < bytes || bytes == ARC_EVICT_ALL) { 3447 /* 3448 * Start eviction using a randomly selected sublist, 3449 * this is to try and evenly balance eviction across all 3450 * sublists. Always starting at the same sublist 3451 * (e.g. index 0) would cause evictions to favor certain 3452 * sublists over others. 3453 */ 3454 int sublist_idx = multilist_get_random_index(ml); 3455 uint64_t scan_evicted = 0; 3456 3457 for (int i = 0; i < num_sublists; i++) { 3458 uint64_t bytes_remaining; 3459 uint64_t bytes_evicted; 3460 3461 if (bytes == ARC_EVICT_ALL) 3462 bytes_remaining = ARC_EVICT_ALL; 3463 else if (total_evicted < bytes) 3464 bytes_remaining = bytes - total_evicted; 3465 else 3466 break; 3467 3468 bytes_evicted = arc_evict_state_impl(ml, sublist_idx, 3469 markers[sublist_idx], spa, bytes_remaining); 3470 3471 scan_evicted += bytes_evicted; 3472 total_evicted += bytes_evicted; 3473 3474 /* we've reached the end, wrap to the beginning */ 3475 if (++sublist_idx >= num_sublists) 3476 sublist_idx = 0; 3477 } 3478 3479 /* 3480 * If we didn't evict anything during this scan, we have 3481 * no reason to believe we'll evict more during another 3482 * scan, so break the loop. 3483 */ 3484 if (scan_evicted == 0) { 3485 /* This isn't possible, let's make that obvious */ 3486 ASSERT3S(bytes, !=, 0); 3487 3488 /* 3489 * When bytes is ARC_EVICT_ALL, the only way to 3490 * break the loop is when scan_evicted is zero. 3491 * In that case, we actually have evicted enough, 3492 * so we don't want to increment the kstat. 3493 */ 3494 if (bytes != ARC_EVICT_ALL) { 3495 ASSERT3S(total_evicted, <, bytes); 3496 ARCSTAT_BUMP(arcstat_evict_not_enough); 3497 } 3498 3499 break; 3500 } 3501 } 3502 3503 for (int i = 0; i < num_sublists; i++) { 3504 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 3505 multilist_sublist_remove(mls, markers[i]); 3506 multilist_sublist_unlock(mls); 3507 3508 kmem_cache_free(hdr_full_cache, markers[i]); 3509 } 3510 kmem_free(markers, sizeof (*markers) * num_sublists); 3511 3512 return (total_evicted); 3513 } 3514 3515 /* 3516 * Flush all "evictable" data of the given type from the arc state 3517 * specified. This will not evict any "active" buffers (i.e. referenced). 3518 * 3519 * When 'retry' is set to B_FALSE, the function will make a single pass 3520 * over the state and evict any buffers that it can. Since it doesn't 3521 * continually retry the eviction, it might end up leaving some buffers 3522 * in the ARC due to lock misses. 3523 * 3524 * When 'retry' is set to B_TRUE, the function will continually retry the 3525 * eviction until *all* evictable buffers have been removed from the 3526 * state. 
As a result, if concurrent insertions into the state are 3527 * allowed (e.g. if the ARC isn't shutting down), this function might 3528 * wind up in an infinite loop, continually trying to evict buffers. 3529 */ 3530 static uint64_t 3531 arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type, 3532 boolean_t retry) 3533 { 3534 uint64_t evicted = 0; 3535 3536 while (refcount_count(&state->arcs_esize[type]) != 0) { 3537 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type); 3538 3539 if (!retry) 3540 break; 3541 } 3542 3543 return (evicted); 3544 } 3545 3546 /* 3547 * Evict the specified number of bytes from the state specified, 3548 * restricting eviction to the spa and type given. This function 3549 * prevents us from trying to evict more from a state's list than 3550 * is "evictable", and to skip evicting altogether when passed a 3551 * negative value for "bytes". In contrast, arc_evict_state() will 3552 * evict everything it can, when passed a negative value for "bytes". 3553 */ 3554 static uint64_t 3555 arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes, 3556 arc_buf_contents_t type) 3557 { 3558 int64_t delta; 3559 3560 if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) { 3561 delta = MIN(refcount_count(&state->arcs_esize[type]), bytes); 3562 return (arc_evict_state(state, spa, delta, type)); 3563 } 3564 3565 return (0); 3566 } 3567 3568 /* 3569 * Evict metadata buffers from the cache, such that arc_meta_used is 3570 * capped by the arc_meta_limit tunable. 3571 */ 3572 static uint64_t 3573 arc_adjust_meta(void) 3574 { 3575 uint64_t total_evicted = 0; 3576 int64_t target; 3577 3578 /* 3579 * If we're over the meta limit, we want to evict enough 3580 * metadata to get back under the meta limit. We don't want to 3581 * evict so much that we drop the MRU below arc_p, though. If 3582 * we're over the meta limit more than we're over arc_p, we 3583 * evict some from the MRU here, and some from the MFU below. 3584 */ 3585 target = MIN((int64_t)(arc_meta_used - arc_meta_limit), 3586 (int64_t)(refcount_count(&arc_anon->arcs_size) + 3587 refcount_count(&arc_mru->arcs_size) - arc_p)); 3588 3589 total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 3590 3591 /* 3592 * Similar to the above, we want to evict enough bytes to get us 3593 * below the meta limit, but not so much as to drop us below the 3594 * space allotted to the MFU (which is defined as arc_c - arc_p). 3595 */ 3596 target = MIN((int64_t)(arc_meta_used - arc_meta_limit), 3597 (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p))); 3598 3599 total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 3600 3601 return (total_evicted); 3602 } 3603 3604 /* 3605 * Return the type of the oldest buffer in the given arc state 3606 * 3607 * This function will select a random sublist of type ARC_BUFC_DATA and 3608 * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist 3609 * is compared, and the type which contains the "older" buffer will be 3610 * returned. 
3611 */ 3612 static arc_buf_contents_t 3613 arc_adjust_type(arc_state_t *state) 3614 { 3615 multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA]; 3616 multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA]; 3617 int data_idx = multilist_get_random_index(data_ml); 3618 int meta_idx = multilist_get_random_index(meta_ml); 3619 multilist_sublist_t *data_mls; 3620 multilist_sublist_t *meta_mls; 3621 arc_buf_contents_t type; 3622 arc_buf_hdr_t *data_hdr; 3623 arc_buf_hdr_t *meta_hdr; 3624 3625 /* 3626 * We keep the sublist lock until we're finished, to prevent 3627 * the headers from being destroyed via arc_evict_state(). 3628 */ 3629 data_mls = multilist_sublist_lock(data_ml, data_idx); 3630 meta_mls = multilist_sublist_lock(meta_ml, meta_idx); 3631 3632 /* 3633 * These two loops are to ensure we skip any markers that 3634 * might be at the tail of the lists due to arc_evict_state(). 3635 */ 3636 3637 for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL; 3638 data_hdr = multilist_sublist_prev(data_mls, data_hdr)) { 3639 if (data_hdr->b_spa != 0) 3640 break; 3641 } 3642 3643 for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL; 3644 meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) { 3645 if (meta_hdr->b_spa != 0) 3646 break; 3647 } 3648 3649 if (data_hdr == NULL && meta_hdr == NULL) { 3650 type = ARC_BUFC_DATA; 3651 } else if (data_hdr == NULL) { 3652 ASSERT3P(meta_hdr, !=, NULL); 3653 type = ARC_BUFC_METADATA; 3654 } else if (meta_hdr == NULL) { 3655 ASSERT3P(data_hdr, !=, NULL); 3656 type = ARC_BUFC_DATA; 3657 } else { 3658 ASSERT3P(data_hdr, !=, NULL); 3659 ASSERT3P(meta_hdr, !=, NULL); 3660 3661 /* The headers can't be on the sublist without an L1 header */ 3662 ASSERT(HDR_HAS_L1HDR(data_hdr)); 3663 ASSERT(HDR_HAS_L1HDR(meta_hdr)); 3664 3665 if (data_hdr->b_l1hdr.b_arc_access < 3666 meta_hdr->b_l1hdr.b_arc_access) { 3667 type = ARC_BUFC_DATA; 3668 } else { 3669 type = ARC_BUFC_METADATA; 3670 } 3671 } 3672 3673 multilist_sublist_unlock(meta_mls); 3674 multilist_sublist_unlock(data_mls); 3675 3676 return (type); 3677 } 3678 3679 /* 3680 * Evict buffers from the cache, such that arc_size is capped by arc_c. 3681 */ 3682 static uint64_t 3683 arc_adjust(void) 3684 { 3685 uint64_t total_evicted = 0; 3686 uint64_t bytes; 3687 int64_t target; 3688 3689 /* 3690 * If we're over arc_meta_limit, we want to correct that before 3691 * potentially evicting data buffers below. 3692 */ 3693 total_evicted += arc_adjust_meta(); 3694 3695 /* 3696 * Adjust MRU size 3697 * 3698 * If we're over the target cache size, we want to evict enough 3699 * from the list to get back to our target size. We don't want 3700 * to evict too much from the MRU, such that it drops below 3701 * arc_p. So, if we're over our target cache size more than 3702 * the MRU is over arc_p, we'll evict enough to get back to 3703 * arc_p here, and then evict more from the MFU below. 3704 */ 3705 target = MIN((int64_t)(arc_size - arc_c), 3706 (int64_t)(refcount_count(&arc_anon->arcs_size) + 3707 refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p)); 3708 3709 /* 3710 * If we're below arc_meta_min, always prefer to evict data. 3711 * Otherwise, try to satisfy the requested number of bytes to 3712 * evict from the type which contains older buffers; in an 3713 * effort to keep newer buffers in the cache regardless of their 3714 * type. If we cannot satisfy the number of bytes from this 3715 * type, spill over into the next type. 
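 *
 * For example (hypothetical numbers): with a 100M target and older
 * metadata buffers, if evicting metadata only yields 60M, the remaining
 * 40M of the target is carried over and requested from the data list by
 * the second arc_adjust_impl() call below.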
3716 */ 3717 if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA && 3718 arc_meta_used > arc_meta_min) { 3719 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 3720 total_evicted += bytes; 3721 3722 /* 3723 * If we couldn't evict our target number of bytes from 3724 * metadata, we try to get the rest from data. 3725 */ 3726 target -= bytes; 3727 3728 total_evicted += 3729 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); 3730 } else { 3731 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); 3732 total_evicted += bytes; 3733 3734 /* 3735 * If we couldn't evict our target number of bytes from 3736 * data, we try to get the rest from metadata. 3737 */ 3738 target -= bytes; 3739 3740 total_evicted += 3741 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 3742 } 3743 3744 /* 3745 * Adjust MFU size 3746 * 3747 * Now that we've tried to evict enough from the MRU to get its 3748 * size back to arc_p, if we're still above the target cache 3749 * size, we evict the rest from the MFU. 3750 */ 3751 target = arc_size - arc_c; 3752 3753 if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA && 3754 arc_meta_used > arc_meta_min) { 3755 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 3756 total_evicted += bytes; 3757 3758 /* 3759 * If we couldn't evict our target number of bytes from 3760 * metadata, we try to get the rest from data. 3761 */ 3762 target -= bytes; 3763 3764 total_evicted += 3765 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); 3766 } else { 3767 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); 3768 total_evicted += bytes; 3769 3770 /* 3771 * If we couldn't evict our target number of bytes from 3772 * data, we try to get the rest from metadata. 3773 */ 3774 target -= bytes; 3775 3776 total_evicted += 3777 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 3778 } 3779 3780 /* 3781 * Adjust ghost lists 3782 * 3783 * In addition to the above, the ARC also defines target values 3784 * for the ghost lists. The sum of the mru list and mru ghost 3785 * list should never exceed the target size of the cache, and 3786 * the sum of the mru list, mfu list, mru ghost list, and mfu 3787 * ghost list should never exceed twice the target size of the 3788 * cache. The following logic enforces these limits on the ghost 3789 * caches, and evicts from them as needed.
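 *
 * To make the sign convention concrete, a small standalone sketch of
 * the two ghost targets computed below (the sizes are hypothetical; a
 * non-positive target simply means that ghost list needs no trimming):
 *
 *        #include <stdint.h>
 *        #include <stdio.h>
 *
 *        int
 *        main(void)
 *        {
 *                int64_t c = 1000;                 // arc_c stand-in
 *                int64_t mru = 600, mru_ghost = 500, mfu_ghost = 300;
 *
 *                // mru + mru ghost may not exceed c
 *                int64_t t1 = mru + mru_ghost - c; // 100 over target
 *                if (t1 > 0)
 *                        mru_ghost -= t1;          // trimmed to 400
 *
 *                // mru ghost + mfu ghost may not exceed c
 *                int64_t t2 = mru_ghost + mfu_ghost - c; // 400 + 300 - 1000
 *                printf("%lld %lld\n", (long long)t1, (long long)t2);
 *                return (0);
 *        }
 *
 * Here t2 is negative, so only the mru ghost list is trimmed;
 * arc_adjust_impl() skips eviction entirely when handed a non-positive
 * byte count.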
3790 */ 3791 target = refcount_count(&arc_mru->arcs_size) + 3792 refcount_count(&arc_mru_ghost->arcs_size) - arc_c; 3793 3794 bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA); 3795 total_evicted += bytes; 3796 3797 target -= bytes; 3798 3799 total_evicted += 3800 arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA); 3801 3802 /* 3803 * We assume the sum of the mru list and mfu list is less than 3804 * or equal to arc_c (we enforced this above), which means we 3805 * can use the simpler of the two equations below: 3806 * 3807 * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c 3808 * mru ghost + mfu ghost <= arc_c 3809 */ 3810 target = refcount_count(&arc_mru_ghost->arcs_size) + 3811 refcount_count(&arc_mfu_ghost->arcs_size) - arc_c; 3812 3813 bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA); 3814 total_evicted += bytes; 3815 3816 target -= bytes; 3817 3818 total_evicted += 3819 arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA); 3820 3821 return (total_evicted); 3822 } 3823 3824 void 3825 arc_flush(spa_t *spa, boolean_t retry) 3826 { 3827 uint64_t guid = 0; 3828 3829 /* 3830 * If retry is B_TRUE, a spa must not be specified since we have 3831 * no good way to determine if all of a spa's buffers have been 3832 * evicted from an arc state. 3833 */ 3834 ASSERT(!retry || spa == 0); 3835 3836 if (spa != NULL) 3837 guid = spa_load_guid(spa); 3838 3839 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry); 3840 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry); 3841 3842 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry); 3843 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry); 3844 3845 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry); 3846 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry); 3847 3848 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry); 3849 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry); 3850 } 3851 3852 void 3853 arc_shrink(int64_t to_free) 3854 { 3855 if (arc_c > arc_c_min) { 3856 3857 if (arc_c > arc_c_min + to_free) 3858 atomic_add_64(&arc_c, -to_free); 3859 else 3860 arc_c = arc_c_min; 3861 3862 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 3863 if (arc_c > arc_size) 3864 arc_c = MAX(arc_size, arc_c_min); 3865 if (arc_p > arc_c) 3866 arc_p = (arc_c >> 1); 3867 ASSERT(arc_c >= arc_c_min); 3868 ASSERT((int64_t)arc_p >= 0); 3869 } 3870 3871 if (arc_size > arc_c) 3872 (void) arc_adjust(); 3873 } 3874 3875 typedef enum free_memory_reason_t { 3876 FMR_UNKNOWN, 3877 FMR_NEEDFREE, 3878 FMR_LOTSFREE, 3879 FMR_SWAPFS_MINFREE, 3880 FMR_PAGES_PP_MAXIMUM, 3881 FMR_HEAP_ARENA, 3882 FMR_ZIO_ARENA, 3883 } free_memory_reason_t; 3884 3885 int64_t last_free_memory; 3886 free_memory_reason_t last_free_reason; 3887 3888 /* 3889 * Additional reserve of pages for pp_reserve. 3890 */ 3891 int64_t arc_pages_pp_reserve = 64; 3892 3893 /* 3894 * Additional reserve of pages for swapfs. 3895 */ 3896 int64_t arc_swapfs_reserve = 64; 3897 3898 /* 3899 * Return the amount of memory that can be consumed before reclaim will be 3900 * needed. Positive if there is sufficient free memory, negative indicates 3901 * the amount of memory that needs to be freed up. 
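 *
 * The body below repeats one small pattern: compute the headroom left
 * by each constraint, keep the smallest value seen so far, and record
 * which constraint produced it. A stripped-down sketch of just that
 * pattern (the two inputs are hypothetical stand-ins for the real
 * freemem/availrmem checks):
 *
 *        #include <stdint.h>
 *
 *        typedef enum { R_UNKNOWN, R_PAGEOUT, R_SWAPFS } reason_t;
 *
 *        static int64_t
 *        lowest_headroom(int64_t pageout_room, int64_t swapfs_room,
 *            reason_t *why)
 *        {
 *                int64_t lowest = INT64_MAX;
 *                reason_t r = R_UNKNOWN;
 *
 *                if (pageout_room < lowest) {
 *                        lowest = pageout_room;
 *                        r = R_PAGEOUT;
 *                }
 *                if (swapfs_room < lowest) {
 *                        lowest = swapfs_room;
 *                        r = R_SWAPFS;
 *                }
 *                *why = r;
 *                return (lowest);  // negative means reclaim is needed
 *        }
 *
 * The real function records the winning value and reason in
 * last_free_memory and last_free_reason for later inspection.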
3902 */ 3903 static int64_t 3904 arc_available_memory(void) 3905 { 3906 int64_t lowest = INT64_MAX; 3907 int64_t n; 3908 free_memory_reason_t r = FMR_UNKNOWN; 3909 3910 #ifdef _KERNEL 3911 if (needfree > 0) { 3912 n = PAGESIZE * (-needfree); 3913 if (n < lowest) { 3914 lowest = n; 3915 r = FMR_NEEDFREE; 3916 } 3917 } 3918 3919 /* 3920 * check that we're out of range of the pageout scanner. It starts to 3921 * schedule paging if freemem is less than lotsfree and needfree. 3922 * lotsfree is the high-water mark for pageout, and needfree is the 3923 * number of needed free pages. We add extra pages here to make sure 3924 * the scanner doesn't start up while we're freeing memory. 3925 */ 3926 n = PAGESIZE * (freemem - lotsfree - needfree - desfree); 3927 if (n < lowest) { 3928 lowest = n; 3929 r = FMR_LOTSFREE; 3930 } 3931 3932 /* 3933 * check to make sure that swapfs has enough space so that anon 3934 * reservations can still succeed. anon_resvmem() checks that the 3935 * availrmem is greater than swapfs_minfree, and the number of reserved 3936 * swap pages. We also add a bit of extra here just to prevent 3937 * circumstances from getting really dire. 3938 */ 3939 n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve - 3940 desfree - arc_swapfs_reserve); 3941 if (n < lowest) { 3942 lowest = n; 3943 r = FMR_SWAPFS_MINFREE; 3944 } 3945 3946 3947 /* 3948 * Check that we have enough availrmem that memory locking (e.g., via 3949 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum 3950 * stores the number of pages that cannot be locked; when availrmem 3951 * drops below pages_pp_maximum, page locking mechanisms such as 3952 * page_pp_lock() will fail.) 3953 */ 3954 n = PAGESIZE * (availrmem - pages_pp_maximum - 3955 arc_pages_pp_reserve); 3956 if (n < lowest) { 3957 lowest = n; 3958 r = FMR_PAGES_PP_MAXIMUM; 3959 } 3960 3961 #if defined(__i386) 3962 /* 3963 * If we're on an i386 platform, it's possible that we'll exhaust the 3964 * kernel heap space before we ever run out of available physical 3965 * memory. Most checks of the size of the heap_area compare against 3966 * tune.t_minarmem, which is the minimum available real memory that we 3967 * can have in the system. However, this is generally fixed at 25 pages 3968 * which is so low that it's useless. In this comparison, we seek to 3969 * calculate the total heap-size, and reclaim if more than 3/4ths of the 3970 * heap is allocated. (Or, in the calculation, if less than 1/4th is 3971 * free) 3972 */ 3973 n = (int64_t)vmem_size(heap_arena, VMEM_FREE) - 3974 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2); 3975 if (n < lowest) { 3976 lowest = n; 3977 r = FMR_HEAP_ARENA; 3978 } 3979 #endif 3980 3981 /* 3982 * If zio data pages are being allocated out of a separate heap segment, 3983 * then enforce that the size of available vmem for this arena remains 3984 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free. 3985 * 3986 * Note that reducing the arc_zio_arena_free_shift keeps more virtual 3987 * memory (in the zio_arena) free, which can avoid memory 3988 * fragmentation issues. 
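 *
 * For example (hypothetical sizes): with 60G allocated from the
 * zio_arena and a shift of 2, roughly 15G of free vmem is demanded; if
 * only 4G is free, n = 4G - 15G is negative, and if that is the
 * smallest headroom seen, FMR_ZIO_ARENA is recorded as the reclaim
 * reason.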
3989 */ 3990 if (zio_arena != NULL) { 3991 n = (int64_t)vmem_size(zio_arena, VMEM_FREE) - 3992 (vmem_size(zio_arena, VMEM_ALLOC) >> 3993 arc_zio_arena_free_shift); 3994 if (n < lowest) { 3995 lowest = n; 3996 r = FMR_ZIO_ARENA; 3997 } 3998 } 3999 #else 4000 /* Every 100 calls, free a small amount */ 4001 if (spa_get_random(100) == 0) 4002 lowest = -1024; 4003 #endif 4004 4005 last_free_memory = lowest; 4006 last_free_reason = r; 4007 4008 return (lowest); 4009 } 4010 4011 4012 /* 4013 * Determine if the system is under memory pressure and is asking 4014 * to reclaim memory. A return value of B_TRUE indicates that the system 4015 * is under memory pressure and that the arc should adjust accordingly. 4016 */ 4017 static boolean_t 4018 arc_reclaim_needed(void) 4019 { 4020 return (arc_available_memory() < 0); 4021 } 4022 4023 static void 4024 arc_kmem_reap_now(void) 4025 { 4026 size_t i; 4027 kmem_cache_t *prev_cache = NULL; 4028 kmem_cache_t *prev_data_cache = NULL; 4029 extern kmem_cache_t *zio_buf_cache[]; 4030 extern kmem_cache_t *zio_data_buf_cache[]; 4031 extern kmem_cache_t *range_seg_cache; 4032 extern kmem_cache_t *abd_chunk_cache; 4033 4034 #ifdef _KERNEL 4035 if (arc_meta_used >= arc_meta_limit) { 4036 /* 4037 * We are exceeding our meta-data cache limit. 4038 * Purge some DNLC entries to release holds on meta-data. 4039 */ 4040 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 4041 } 4042 #if defined(__i386) 4043 /* 4044 * Reclaim unused memory from all kmem caches. 4045 */ 4046 kmem_reap(); 4047 #endif 4048 #endif 4049 4050 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 4051 if (zio_buf_cache[i] != prev_cache) { 4052 prev_cache = zio_buf_cache[i]; 4053 kmem_cache_reap_now(zio_buf_cache[i]); 4054 } 4055 if (zio_data_buf_cache[i] != prev_data_cache) { 4056 prev_data_cache = zio_data_buf_cache[i]; 4057 kmem_cache_reap_now(zio_data_buf_cache[i]); 4058 } 4059 } 4060 kmem_cache_reap_now(abd_chunk_cache); 4061 kmem_cache_reap_now(buf_cache); 4062 kmem_cache_reap_now(hdr_full_cache); 4063 kmem_cache_reap_now(hdr_l2only_cache); 4064 kmem_cache_reap_now(range_seg_cache); 4065 4066 if (zio_arena != NULL) { 4067 /* 4068 * Ask the vmem arena to reclaim unused memory from its 4069 * quantum caches. 4070 */ 4071 vmem_qcache_reap(zio_arena); 4072 } 4073 } 4074 4075 /* 4076 * Threads can block in arc_get_data_impl() waiting for this thread to evict 4077 * enough data and signal them to proceed. When this happens, the threads in 4078 * arc_get_data_impl() are sleeping while holding the hash lock for their 4079 * particular arc header. Thus, we must be careful to never sleep on a 4080 * hash lock in this thread. This is to prevent the following deadlock: 4081 * 4082 * - Thread A sleeps on CV in arc_get_data_impl() holding hash lock "L", 4083 * waiting for the reclaim thread to signal it. 4084 * 4085 * - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter, 4086 * fails, and goes to sleep forever. 4087 * 4088 * This possible deadlock is avoided by always acquiring a hash lock 4089 * using mutex_tryenter() from arc_reclaim_thread(). 
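 *
 * The same rule can be illustrated with plain pthreads: a reclaimer
 * that must never block on a lock whose owner may itself be waiting on
 * the reclaimer takes the lock with trylock and skips the entry on
 * contention (a standalone sketch; the names are hypothetical):
 *
 *        #include <pthread.h>
 *
 *        static pthread_mutex_t entry_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *        // Reclaim-side helper: never sleeps on entry_lock.
 *        static int
 *        try_reclaim_entry(void (*reclaim_cb)(void))
 *        {
 *                if (pthread_mutex_trylock(&entry_lock) != 0)
 *                        return (0);  // skip it; the owner may be
 *                                     // waiting for us to make space
 *                reclaim_cb();
 *                pthread_mutex_unlock(&entry_lock);
 *                return (1);
 *        }
 *
 * A skipped entry is exactly what arcstat_mutex_miss counts in
 * arc_evict_state_impl() above.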
4090 */ 4091 /* ARGSUSED */ 4092 static void 4093 arc_reclaim_thread(void *unused) 4094 { 4095 hrtime_t growtime = 0; 4096 callb_cpr_t cpr; 4097 4098 CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG); 4099 4100 mutex_enter(&arc_reclaim_lock); 4101 while (!arc_reclaim_thread_exit) { 4102 uint64_t evicted = 0; 4103 4104 /* 4105 * This is necessary in order for the mdb ::arc dcmd to 4106 * show up-to-date information. Since the ::arc command 4107 * does not call the kstat's update function, without 4108 * this call, the command may show stale stats for the 4109 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even 4110 * with this change, the data might be up to 1 second 4111 * out of date; but that should suffice. The arc_state_t 4112 * structures can be queried directly if more accurate 4113 * information is needed. 4114 */ 4115 if (arc_ksp != NULL) 4116 arc_ksp->ks_update(arc_ksp, KSTAT_READ); 4117 4118 mutex_exit(&arc_reclaim_lock); 4119 4120 /* 4121 * We call arc_adjust() before (possibly) calling 4122 * arc_kmem_reap_now(), so that we can wake up 4123 * arc_get_data_impl() sooner. 4124 */ 4125 evicted = arc_adjust(); 4126 4127 int64_t free_memory = arc_available_memory(); 4128 if (free_memory < 0) { 4129 4130 arc_no_grow = B_TRUE; 4131 arc_warm = B_TRUE; 4132 4133 /* 4134 * Wait at least zfs_grow_retry (default 60) seconds 4135 * before considering growing. 4136 */ 4137 growtime = gethrtime() + SEC2NSEC(arc_grow_retry); 4138 4139 arc_kmem_reap_now(); 4140 4141 /* 4142 * If we are still low on memory, shrink the ARC 4143 * so that we have arc_shrink_min free space. 4144 */ 4145 free_memory = arc_available_memory(); 4146 4147 int64_t to_free = 4148 (arc_c >> arc_shrink_shift) - free_memory; 4149 if (to_free > 0) { 4150 #ifdef _KERNEL 4151 to_free = MAX(to_free, ptob(needfree)); 4152 #endif 4153 arc_shrink(to_free); 4154 } 4155 } else if (free_memory < arc_c >> arc_no_grow_shift) { 4156 arc_no_grow = B_TRUE; 4157 } else if (gethrtime() >= growtime) { 4158 arc_no_grow = B_FALSE; 4159 } 4160 4161 mutex_enter(&arc_reclaim_lock); 4162 4163 /* 4164 * If evicted is zero, we couldn't evict anything via 4165 * arc_adjust(). This could be due to hash lock 4166 * collisions, but more likely due to the majority of 4167 * arc buffers being unevictable. Therefore, even if 4168 * arc_size is above arc_c, another pass is unlikely to 4169 * be helpful and could potentially cause us to enter an 4170 * infinite loop. 4171 */ 4172 if (arc_size <= arc_c || evicted == 0) { 4173 /* 4174 * We're either no longer overflowing, or we 4175 * can't evict anything more, so we should wake 4176 * up any threads before we go to sleep. 4177 */ 4178 cv_broadcast(&arc_reclaim_waiters_cv); 4179 4180 /* 4181 * Block until signaled, or after one second (we 4182 * might need to perform arc_kmem_reap_now() 4183 * even if we aren't being signalled) 4184 */ 4185 CALLB_CPR_SAFE_BEGIN(&cpr); 4186 (void) cv_timedwait_hires(&arc_reclaim_thread_cv, 4187 &arc_reclaim_lock, SEC2NSEC(1), MSEC2NSEC(1), 0); 4188 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock); 4189 } 4190 } 4191 4192 arc_reclaim_thread_exit = B_FALSE; 4193 cv_broadcast(&arc_reclaim_thread_cv); 4194 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_lock */ 4195 thread_exit(); 4196 } 4197 4198 /* 4199 * Adapt arc info given the number of bytes we are trying to add and 4200 * the state that we are coming from. This function is only called 4201 * when we are adding new content to the cache.
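 *
 * A short worked example of the adjustment below, with hypothetical
 * sizes: suppose arc_p = 400, the mru ghost list holds 100 bytes, the
 * mfu ghost list holds 300 bytes, and we take a 10 byte hit in the mru
 * ghost list. The mfu ghost list is three times larger, so
 * mult = 300 / 100 = 3 (clamped at 10), and arc_p grows by
 * bytes * mult = 30 to 430, capped at arc_c - arc_p_min. A hit in the
 * mfu ghost list moves arc_p down by the symmetric amount, with
 * arc_p_min as the floor.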
4202 */ 4203 static void 4204 arc_adapt(int bytes, arc_state_t *state) 4205 { 4206 int mult; 4207 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 4208 int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size); 4209 int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size); 4210 4211 if (state == arc_l2c_only) 4212 return; 4213 4214 ASSERT(bytes > 0); 4215 /* 4216 * Adapt the target size of the MRU list: 4217 * - if we just hit in the MRU ghost list, then increase 4218 * the target size of the MRU list. 4219 * - if we just hit in the MFU ghost list, then increase 4220 * the target size of the MFU list by decreasing the 4221 * target size of the MRU list. 4222 */ 4223 if (state == arc_mru_ghost) { 4224 mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size); 4225 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 4226 4227 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 4228 } else if (state == arc_mfu_ghost) { 4229 uint64_t delta; 4230 4231 mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size); 4232 mult = MIN(mult, 10); 4233 4234 delta = MIN(bytes * mult, arc_p); 4235 arc_p = MAX(arc_p_min, arc_p - delta); 4236 } 4237 ASSERT((int64_t)arc_p >= 0); 4238 4239 if (arc_reclaim_needed()) { 4240 cv_signal(&arc_reclaim_thread_cv); 4241 return; 4242 } 4243 4244 if (arc_no_grow) 4245 return; 4246 4247 if (arc_c >= arc_c_max) 4248 return; 4249 4250 /* 4251 * If we're within (2 * maxblocksize) bytes of the target 4252 * cache size, increment the target cache size 4253 */ 4254 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 4255 atomic_add_64(&arc_c, (int64_t)bytes); 4256 if (arc_c > arc_c_max) 4257 arc_c = arc_c_max; 4258 else if (state == arc_anon) 4259 atomic_add_64(&arc_p, (int64_t)bytes); 4260 if (arc_p > arc_c) 4261 arc_p = arc_c; 4262 } 4263 ASSERT((int64_t)arc_p >= 0); 4264 } 4265 4266 /* 4267 * Check if arc_size has grown past our upper threshold, determined by 4268 * zfs_arc_overflow_shift. 4269 */ 4270 static boolean_t 4271 arc_is_overflowing(void) 4272 { 4273 /* Always allow at least one block of overflow */ 4274 uint64_t overflow = MAX(SPA_MAXBLOCKSIZE, 4275 arc_c >> zfs_arc_overflow_shift); 4276 4277 return (arc_size >= arc_c + overflow); 4278 } 4279 4280 static abd_t * 4281 arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 4282 { 4283 arc_buf_contents_t type = arc_buf_type(hdr); 4284 4285 arc_get_data_impl(hdr, size, tag); 4286 if (type == ARC_BUFC_METADATA) { 4287 return (abd_alloc(size, B_TRUE)); 4288 } else { 4289 ASSERT(type == ARC_BUFC_DATA); 4290 return (abd_alloc(size, B_FALSE)); 4291 } 4292 } 4293 4294 static void * 4295 arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 4296 { 4297 arc_buf_contents_t type = arc_buf_type(hdr); 4298 4299 arc_get_data_impl(hdr, size, tag); 4300 if (type == ARC_BUFC_METADATA) { 4301 return (zio_buf_alloc(size)); 4302 } else { 4303 ASSERT(type == ARC_BUFC_DATA); 4304 return (zio_data_buf_alloc(size)); 4305 } 4306 } 4307 4308 /* 4309 * Allocate a block and return it to the caller. If we are hitting the 4310 * hard limit for the cache size, we must sleep, waiting for the eviction 4311 * thread to catch up. If we're past the target size but below the hard 4312 * limit, we'll only signal the reclaim thread and continue on. 
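 *
 * A minimal sketch of the overflow test used for that decision (the
 * block size and shift here are hypothetical placeholders, not the
 * actual tunables):
 *
 *        #include <stdint.h>
 *
 *        #define EX_MAXBLOCK (16ULL << 20)  // stand-in block size
 *
 *        static int
 *        is_overflowing(uint64_t size, uint64_t c, int overflow_shift)
 *        {
 *                // always allow at least one maximum-sized block of slack
 *                uint64_t slack = c >> overflow_shift;
 *                if (slack < EX_MAXBLOCK)
 *                        slack = EX_MAXBLOCK;
 *                return (size >= c + slack);
 *        }
 *
 * With c = 8G and a shift of 8 the slack is 32M, so callers only block
 * here once arc_size has grown at least 32M past arc_c.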
4313 */ 4314 static void 4315 arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 4316 { 4317 arc_state_t *state = hdr->b_l1hdr.b_state; 4318 arc_buf_contents_t type = arc_buf_type(hdr); 4319 4320 arc_adapt(size, state); 4321 4322 /* 4323 * If arc_size is currently overflowing, and has grown past our 4324 * upper limit, we must be adding data faster than the evict 4325 * thread can evict. Thus, to ensure we don't compound the 4326 * problem by adding more data and forcing arc_size to grow even 4327 * further past its target size, we halt and wait for the 4328 * eviction thread to catch up. 4329 * 4330 * It's also possible that the reclaim thread is unable to evict 4331 * enough buffers to get arc_size below the overflow limit (e.g. 4332 * due to buffers being un-evictable, or hash lock collisions). 4333 * In this case, we want to proceed regardless of whether we're 4334 * still overflowing; thus we don't use a while loop here. 4335 */ 4336 if (arc_is_overflowing()) { 4337 mutex_enter(&arc_reclaim_lock); 4338 4339 /* 4340 * Now that we've acquired the lock, we may no longer be 4341 * over the overflow limit, so let's check. 4342 * 4343 * We're ignoring the case of spurious wake ups. If that 4344 * were to happen, it'd let this thread consume an ARC 4345 * buffer before it should have (i.e. before we're under 4346 * the overflow limit and were signalled by the reclaim 4347 * thread). As long as that is a rare occurrence, it 4348 * shouldn't cause any harm. 4349 */ 4350 if (arc_is_overflowing()) { 4351 cv_signal(&arc_reclaim_thread_cv); 4352 cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock); 4353 } 4354 4355 mutex_exit(&arc_reclaim_lock); 4356 } 4357 4358 VERIFY3U(hdr->b_type, ==, type); 4359 if (type == ARC_BUFC_METADATA) { 4360 arc_space_consume(size, ARC_SPACE_META); 4361 } else { 4362 arc_space_consume(size, ARC_SPACE_DATA); 4363 } 4364 4365 /* 4366 * Update the state size. Note that ghost states have a 4367 * "ghost size" and so don't need to be updated. 4368 */ 4369 if (!GHOST_STATE(state)) { 4370 4371 (void) refcount_add_many(&state->arcs_size, size, tag); 4372 4373 /* 4374 * If this is reached via arc_read, the link is 4375 * protected by the hash lock. If reached via 4376 * arc_buf_alloc, the header should not be accessed by 4377 * any other thread. And, if reached via arc_read_done, 4378 * the hash lock will protect it if it's found in the 4379 * hash table; otherwise no other thread should be 4380 * trying to [add|remove]_reference it.
4381 */ 4382 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 4383 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4384 (void) refcount_add_many(&state->arcs_esize[type], 4385 size, tag); 4386 } 4387 4388 /* 4389 * If we are growing the cache, and we are adding anonymous 4390 * data, and we have outgrown arc_p, update arc_p 4391 */ 4392 if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon && 4393 (refcount_count(&arc_anon->arcs_size) + 4394 refcount_count(&arc_mru->arcs_size) > arc_p)) 4395 arc_p = MIN(arc_c, arc_p + size); 4396 } 4397 } 4398 4399 static void 4400 arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag) 4401 { 4402 arc_free_data_impl(hdr, size, tag); 4403 abd_free(abd); 4404 } 4405 4406 static void 4407 arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag) 4408 { 4409 arc_buf_contents_t type = arc_buf_type(hdr); 4410 4411 arc_free_data_impl(hdr, size, tag); 4412 if (type == ARC_BUFC_METADATA) { 4413 zio_buf_free(buf, size); 4414 } else { 4415 ASSERT(type == ARC_BUFC_DATA); 4416 zio_data_buf_free(buf, size); 4417 } 4418 } 4419 4420 /* 4421 * Free the arc data buffer. 4422 */ 4423 static void 4424 arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 4425 { 4426 arc_state_t *state = hdr->b_l1hdr.b_state; 4427 arc_buf_contents_t type = arc_buf_type(hdr); 4428 4429 /* protected by hash lock, if in the hash table */ 4430 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 4431 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4432 ASSERT(state != arc_anon && state != arc_l2c_only); 4433 4434 (void) refcount_remove_many(&state->arcs_esize[type], 4435 size, tag); 4436 } 4437 (void) refcount_remove_many(&state->arcs_size, size, tag); 4438 4439 VERIFY3U(hdr->b_type, ==, type); 4440 if (type == ARC_BUFC_METADATA) { 4441 arc_space_return(size, ARC_SPACE_META); 4442 } else { 4443 ASSERT(type == ARC_BUFC_DATA); 4444 arc_space_return(size, ARC_SPACE_DATA); 4445 } 4446 } 4447 4448 /* 4449 * This routine is called whenever a buffer is accessed. 4450 * NOTE: the hash lock is dropped in this function. 4451 */ 4452 static void 4453 arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 4454 { 4455 clock_t now; 4456 4457 ASSERT(MUTEX_HELD(hash_lock)); 4458 ASSERT(HDR_HAS_L1HDR(hdr)); 4459 4460 if (hdr->b_l1hdr.b_state == arc_anon) { 4461 /* 4462 * This buffer is not in the cache, and does not 4463 * appear in our "ghost" list. Add the new buffer 4464 * to the MRU state. 4465 */ 4466 4467 ASSERT0(hdr->b_l1hdr.b_arc_access); 4468 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4469 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 4470 arc_change_state(arc_mru, hdr, hash_lock); 4471 4472 } else if (hdr->b_l1hdr.b_state == arc_mru) { 4473 now = ddi_get_lbolt(); 4474 4475 /* 4476 * If this buffer is here because of a prefetch, then either: 4477 * - clear the flag if this is a "referencing" read 4478 * (any subsequent access will bump this into the MFU state). 4479 * or 4480 * - move the buffer to the head of the list if this is 4481 * another prefetch (to make it less likely to be evicted). 
4482 */ 4483 if (HDR_PREFETCH(hdr)) { 4484 if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 4485 /* link protected by hash lock */ 4486 ASSERT(multilist_link_active( 4487 &hdr->b_l1hdr.b_arc_node)); 4488 } else { 4489 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); 4490 ARCSTAT_BUMP(arcstat_mru_hits); 4491 } 4492 hdr->b_l1hdr.b_arc_access = now; 4493 return; 4494 } 4495 4496 /* 4497 * This buffer has been "accessed" only once so far, 4498 * but it is still in the cache. Move it to the MFU 4499 * state. 4500 */ 4501 if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) { 4502 /* 4503 * More than 125ms have passed since we 4504 * instantiated this buffer. Move it to the 4505 * most frequently used state. 4506 */ 4507 hdr->b_l1hdr.b_arc_access = now; 4508 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 4509 arc_change_state(arc_mfu, hdr, hash_lock); 4510 } 4511 ARCSTAT_BUMP(arcstat_mru_hits); 4512 } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) { 4513 arc_state_t *new_state; 4514 /* 4515 * This buffer has been "accessed" recently, but 4516 * was evicted from the cache. Move it to the 4517 * MFU state. 4518 */ 4519 4520 if (HDR_PREFETCH(hdr)) { 4521 new_state = arc_mru; 4522 if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) 4523 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); 4524 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 4525 } else { 4526 new_state = arc_mfu; 4527 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 4528 } 4529 4530 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4531 arc_change_state(new_state, hdr, hash_lock); 4532 4533 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 4534 } else if (hdr->b_l1hdr.b_state == arc_mfu) { 4535 /* 4536 * This buffer has been accessed more than once and is 4537 * still in the cache. Keep it in the MFU state. 4538 * 4539 * NOTE: an add_reference() that occurred when we did 4540 * the arc_read() will have kicked this off the list. 4541 * If it was a prefetch, we will explicitly move it to 4542 * the head of the list now. 4543 */ 4544 if ((HDR_PREFETCH(hdr)) != 0) { 4545 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4546 /* link protected by hash_lock */ 4547 ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 4548 } 4549 ARCSTAT_BUMP(arcstat_mfu_hits); 4550 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4551 } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) { 4552 arc_state_t *new_state = arc_mfu; 4553 /* 4554 * This buffer has been accessed more than once but has 4555 * been evicted from the cache. Move it back to the 4556 * MFU state. 4557 */ 4558 4559 if (HDR_PREFETCH(hdr)) { 4560 /* 4561 * This is a prefetch access... 4562 * move this block back to the MRU state. 4563 */ 4564 ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); 4565 new_state = arc_mru; 4566 } 4567 4568 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4569 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 4570 arc_change_state(new_state, hdr, hash_lock); 4571 4572 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 4573 } else if (hdr->b_l1hdr.b_state == arc_l2c_only) { 4574 /* 4575 * This buffer is on the 2nd Level ARC. 
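 *
 * Taken together, the branches of arc_access() amount to a small state
 * machine. A condensed standalone sketch (prefetch handling and the
 * reference counting are reduced to two booleans, and ARC_MINTIME
 * stands in for the 125ms threshold used above):
 *
 *        typedef enum {
 *                S_ANON, S_MRU, S_MRU_GHOST, S_MFU, S_MFU_GHOST, S_L2C_ONLY
 *        } ex_state_t;
 *
 *        static ex_state_t
 *        on_access(ex_state_t cur, int is_prefetch, int aged_past_mintime)
 *        {
 *                switch (cur) {
 *                case S_ANON:
 *                        return (S_MRU);
 *                case S_MRU:
 *                        if (is_prefetch)
 *                                return (S_MRU);
 *                        return (aged_past_mintime ? S_MFU : S_MRU);
 *                case S_MRU_GHOST:
 *                        return (is_prefetch ? S_MRU : S_MFU);
 *                case S_MFU:
 *                        return (S_MFU);
 *                case S_MFU_GHOST:
 *                        return (is_prefetch ? S_MRU : S_MFU);
 *                case S_L2C_ONLY:
 *                        return (S_MFU);
 *                }
 *                return (cur);
 *        }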
4576 */ 4577 4578 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4579 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 4580 arc_change_state(arc_mfu, hdr, hash_lock); 4581 } else { 4582 ASSERT(!"invalid arc state"); 4583 } 4584 } 4585 4586 /* a generic arc_done_func_t which you can use */ 4587 /* ARGSUSED */ 4588 void 4589 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 4590 { 4591 if (zio == NULL || zio->io_error == 0) 4592 bcopy(buf->b_data, arg, arc_buf_size(buf)); 4593 arc_buf_destroy(buf, arg); 4594 } 4595 4596 /* a generic arc_done_func_t */ 4597 void 4598 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 4599 { 4600 arc_buf_t **bufp = arg; 4601 if (zio && zio->io_error) { 4602 arc_buf_destroy(buf, arg); 4603 *bufp = NULL; 4604 } else { 4605 *bufp = buf; 4606 ASSERT(buf->b_data); 4607 } 4608 } 4609 4610 static void 4611 arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp) 4612 { 4613 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) { 4614 ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0); 4615 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 4616 } else { 4617 if (HDR_COMPRESSION_ENABLED(hdr)) { 4618 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, 4619 BP_GET_COMPRESS(bp)); 4620 } 4621 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp)); 4622 ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp)); 4623 } 4624 } 4625 4626 static void 4627 arc_read_done(zio_t *zio) 4628 { 4629 arc_buf_hdr_t *hdr = zio->io_private; 4630 kmutex_t *hash_lock = NULL; 4631 arc_callback_t *callback_list; 4632 arc_callback_t *acb; 4633 boolean_t freeable = B_FALSE; 4634 boolean_t no_zio_error = (zio->io_error == 0); 4635 4636 /* 4637 * The hdr was inserted into hash-table and removed from lists 4638 * prior to starting I/O. We should find this header, since 4639 * it's in the hash table, and it should be legit since it's 4640 * not possible to evict it during the I/O. The only possible 4641 * reason for it not to be found is if we were freed during the 4642 * read. 4643 */ 4644 if (HDR_IN_HASH_TABLE(hdr)) { 4645 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); 4646 ASSERT3U(hdr->b_dva.dva_word[0], ==, 4647 BP_IDENTITY(zio->io_bp)->dva_word[0]); 4648 ASSERT3U(hdr->b_dva.dva_word[1], ==, 4649 BP_IDENTITY(zio->io_bp)->dva_word[1]); 4650 4651 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp, 4652 &hash_lock); 4653 4654 ASSERT((found == hdr && 4655 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 4656 (found == hdr && HDR_L2_READING(hdr))); 4657 ASSERT3P(hash_lock, !=, NULL); 4658 } 4659 4660 if (no_zio_error) { 4661 /* byteswap if necessary */ 4662 if (BP_SHOULD_BYTESWAP(zio->io_bp)) { 4663 if (BP_GET_LEVEL(zio->io_bp) > 0) { 4664 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64; 4665 } else { 4666 hdr->b_l1hdr.b_byteswap = 4667 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 4668 } 4669 } else { 4670 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 4671 } 4672 } 4673 4674 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED); 4675 if (l2arc_noprefetch && HDR_PREFETCH(hdr)) 4676 arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE); 4677 4678 callback_list = hdr->b_l1hdr.b_acb; 4679 ASSERT3P(callback_list, !=, NULL); 4680 4681 if (hash_lock && no_zio_error && hdr->b_l1hdr.b_state == arc_anon) { 4682 /* 4683 * Only call arc_access on anonymous buffers. This is because 4684 * if we've issued an I/O for an evicted buffer, we've already 4685 * called arc_access (to prevent any simultaneous readers from 4686 * getting confused). 4687 */ 4688 arc_access(hdr, hash_lock); 4689 } 4690 4691 /* 4692 * If a read request has a callback (i.e. 
acb_done is not NULL), then we 4693 * make a buf containing the data according to the parameters which were 4694 * passed in. The implementation of arc_buf_alloc_impl() ensures that we 4695 * aren't needlessly decompressing the data multiple times. 4696 */ 4697 int callback_cnt = 0; 4698 for (acb = callback_list; acb != NULL; acb = acb->acb_next) { 4699 if (!acb->acb_done) 4700 continue; 4701 4702 /* This is a demand read since prefetches don't use callbacks */ 4703 callback_cnt++; 4704 4705 int error = arc_buf_alloc_impl(hdr, acb->acb_private, 4706 acb->acb_compressed, no_zio_error, &acb->acb_buf); 4707 if (no_zio_error) { 4708 zio->io_error = error; 4709 } 4710 } 4711 hdr->b_l1hdr.b_acb = NULL; 4712 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 4713 if (callback_cnt == 0) { 4714 ASSERT(HDR_PREFETCH(hdr)); 4715 ASSERT0(hdr->b_l1hdr.b_bufcnt); 4716 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 4717 } 4718 4719 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) || 4720 callback_list != NULL); 4721 4722 if (no_zio_error) { 4723 arc_hdr_verify(hdr, zio->io_bp); 4724 } else { 4725 arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); 4726 if (hdr->b_l1hdr.b_state != arc_anon) 4727 arc_change_state(arc_anon, hdr, hash_lock); 4728 if (HDR_IN_HASH_TABLE(hdr)) 4729 buf_hash_remove(hdr); 4730 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 4731 } 4732 4733 /* 4734 * Broadcast before we drop the hash_lock to avoid the possibility 4735 * that the hdr (and hence the cv) might be freed before we get to 4736 * the cv_broadcast(). 4737 */ 4738 cv_broadcast(&hdr->b_l1hdr.b_cv); 4739 4740 if (hash_lock != NULL) { 4741 mutex_exit(hash_lock); 4742 } else { 4743 /* 4744 * This block was freed while we waited for the read to 4745 * complete. It has been removed from the hash table and 4746 * moved to the anonymous state (so that it won't show up 4747 * in the cache). 4748 */ 4749 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 4750 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 4751 } 4752 4753 /* execute each callback and free its structure */ 4754 while ((acb = callback_list) != NULL) { 4755 if (acb->acb_done) 4756 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 4757 4758 if (acb->acb_zio_dummy != NULL) { 4759 acb->acb_zio_dummy->io_error = zio->io_error; 4760 zio_nowait(acb->acb_zio_dummy); 4761 } 4762 4763 callback_list = acb->acb_next; 4764 kmem_free(acb, sizeof (arc_callback_t)); 4765 } 4766 4767 if (freeable) 4768 arc_hdr_destroy(hdr); 4769 } 4770 4771 /* 4772 * "Read" the block at the specified DVA (in bp) via the 4773 * cache. If the block is found in the cache, invoke the provided 4774 * callback immediately and return. Note that the `zio' parameter 4775 * in the callback will be NULL in this case, since no IO was 4776 * required. If the block is not in the cache pass the read request 4777 * on to the spa with a substitute callback function, so that the 4778 * requested block will be added to the cache. 4779 * 4780 * If a read request arrives for a block that has a read in-progress, 4781 * either wait for the in-progress read to complete (and return the 4782 * results); or, if this is a read with a "done" func, add a record 4783 * to the read to invoke the "done" func when the read completes, 4784 * and return; or just return. 4785 * 4786 * arc_read_done() will invoke all the requested "done" functions 4787 * for readers of this block. 
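 *
 * A minimal sketch of how a caller might drive this interface
 * (my_read_done() and consume() are hypothetical names, the spa, bp,
 * bookmark and tag are assumed to already be in hand, and error
 * handling is omitted):
 *
 *        static void
 *        my_read_done(zio_t *zio, arc_buf_t *buf, void *arg)
 *        {
 *                // zio is NULL on a cache hit; otherwise check io_error
 *                if (zio == NULL || zio->io_error == 0)
 *                        consume(buf->b_data, arc_buf_size(buf));
 *                arc_buf_destroy(buf, arg);
 *        }
 *
 *        static void
 *        read_one_block(spa_t *spa, const blkptr_t *bp,
 *            const zbookmark_phys_t *zb, void *tag)
 *        {
 *                arc_flags_t aflags = ARC_FLAG_WAIT;
 *
 *                (void) arc_read(NULL, spa, bp, my_read_done, tag,
 *                    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *        }
 *
 * With ARC_FLAG_WAIT the call returns only after the "done" function
 * has run; with ARC_FLAG_NOWAIT it returns immediately and the
 * callback runs from the I/O completion path.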
4788 */ 4789 int 4790 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done, 4791 void *private, zio_priority_t priority, int zio_flags, 4792 arc_flags_t *arc_flags, const zbookmark_phys_t *zb) 4793 { 4794 arc_buf_hdr_t *hdr = NULL; 4795 kmutex_t *hash_lock = NULL; 4796 zio_t *rzio; 4797 uint64_t guid = spa_load_guid(spa); 4798 boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW) != 0; 4799 4800 ASSERT(!BP_IS_EMBEDDED(bp) || 4801 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); 4802 4803 top: 4804 if (!BP_IS_EMBEDDED(bp)) { 4805 /* 4806 * Embedded BP's have no DVA and require no I/O to "read". 4807 * Create an anonymous arc buf to back it. 4808 */ 4809 hdr = buf_hash_find(guid, bp, &hash_lock); 4810 } 4811 4812 if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_pabd != NULL) { 4813 arc_buf_t *buf = NULL; 4814 *arc_flags |= ARC_FLAG_CACHED; 4815 4816 if (HDR_IO_IN_PROGRESS(hdr)) { 4817 4818 if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) && 4819 priority == ZIO_PRIORITY_SYNC_READ) { 4820 /* 4821 * This sync read must wait for an 4822 * in-progress async read (e.g. a predictive 4823 * prefetch). Async reads are queued 4824 * separately at the vdev_queue layer, so 4825 * this is a form of priority inversion. 4826 * Ideally, we would "inherit" the demand 4827 * i/o's priority by moving the i/o from 4828 * the async queue to the synchronous queue, 4829 * but there is currently no mechanism to do 4830 * so. Track this so that we can evaluate 4831 * the magnitude of this potential performance 4832 * problem. 4833 * 4834 * Note that if the prefetch i/o is already 4835 * active (has been issued to the device), 4836 * the prefetch improved performance, because 4837 * we issued it sooner than we would have 4838 * without the prefetch. 4839 */ 4840 DTRACE_PROBE1(arc__sync__wait__for__async, 4841 arc_buf_hdr_t *, hdr); 4842 ARCSTAT_BUMP(arcstat_sync_wait_for_async); 4843 } 4844 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { 4845 arc_hdr_clear_flags(hdr, 4846 ARC_FLAG_PREDICTIVE_PREFETCH); 4847 } 4848 4849 if (*arc_flags & ARC_FLAG_WAIT) { 4850 cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); 4851 mutex_exit(hash_lock); 4852 goto top; 4853 } 4854 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 4855 4856 if (done) { 4857 arc_callback_t *acb = NULL; 4858 4859 acb = kmem_zalloc(sizeof (arc_callback_t), 4860 KM_SLEEP); 4861 acb->acb_done = done; 4862 acb->acb_private = private; 4863 acb->acb_compressed = compressed_read; 4864 if (pio != NULL) 4865 acb->acb_zio_dummy = zio_null(pio, 4866 spa, NULL, NULL, NULL, zio_flags); 4867 4868 ASSERT3P(acb->acb_done, !=, NULL); 4869 acb->acb_next = hdr->b_l1hdr.b_acb; 4870 hdr->b_l1hdr.b_acb = acb; 4871 mutex_exit(hash_lock); 4872 return (0); 4873 } 4874 mutex_exit(hash_lock); 4875 return (0); 4876 } 4877 4878 ASSERT(hdr->b_l1hdr.b_state == arc_mru || 4879 hdr->b_l1hdr.b_state == arc_mfu); 4880 4881 if (done) { 4882 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { 4883 /* 4884 * This is a demand read which does not have to 4885 * wait for i/o because we did a predictive 4886 * prefetch i/o for it, which has completed. 4887 */ 4888 DTRACE_PROBE1( 4889 arc__demand__hit__predictive__prefetch, 4890 arc_buf_hdr_t *, hdr); 4891 ARCSTAT_BUMP( 4892 arcstat_demand_hit_predictive_prefetch); 4893 arc_hdr_clear_flags(hdr, 4894 ARC_FLAG_PREDICTIVE_PREFETCH); 4895 } 4896 ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp)); 4897 4898 /* Get a buf with the desired data in it. 
*/ 4899 VERIFY0(arc_buf_alloc_impl(hdr, private, 4900 compressed_read, B_TRUE, &buf)); 4901 } else if (*arc_flags & ARC_FLAG_PREFETCH && 4902 refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 4903 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); 4904 } 4905 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 4906 arc_access(hdr, hash_lock); 4907 if (*arc_flags & ARC_FLAG_L2CACHE) 4908 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 4909 mutex_exit(hash_lock); 4910 ARCSTAT_BUMP(arcstat_hits); 4911 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 4912 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 4913 data, metadata, hits); 4914 4915 if (done) 4916 done(NULL, buf, private); 4917 } else { 4918 uint64_t lsize = BP_GET_LSIZE(bp); 4919 uint64_t psize = BP_GET_PSIZE(bp); 4920 arc_callback_t *acb; 4921 vdev_t *vd = NULL; 4922 uint64_t addr = 0; 4923 boolean_t devw = B_FALSE; 4924 uint64_t size; 4925 4926 if (hdr == NULL) { 4927 /* this block is not in the cache */ 4928 arc_buf_hdr_t *exists = NULL; 4929 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 4930 hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, 4931 BP_GET_COMPRESS(bp), type); 4932 4933 if (!BP_IS_EMBEDDED(bp)) { 4934 hdr->b_dva = *BP_IDENTITY(bp); 4935 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 4936 exists = buf_hash_insert(hdr, &hash_lock); 4937 } 4938 if (exists != NULL) { 4939 /* somebody beat us to the hash insert */ 4940 mutex_exit(hash_lock); 4941 buf_discard_identity(hdr); 4942 arc_hdr_destroy(hdr); 4943 goto top; /* restart the IO request */ 4944 } 4945 } else { 4946 /* 4947 * This block is in the ghost cache. If it was L2-only 4948 * (and thus didn't have an L1 hdr), we realloc the 4949 * header to add an L1 hdr. 4950 */ 4951 if (!HDR_HAS_L1HDR(hdr)) { 4952 hdr = arc_hdr_realloc(hdr, hdr_l2only_cache, 4953 hdr_full_cache); 4954 } 4955 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 4956 ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state)); 4957 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 4958 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4959 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 4960 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 4961 4962 /* 4963 * This is a delicate dance that we play here. 4964 * This hdr is in the ghost list so we access it 4965 * to move it out of the ghost list before we 4966 * initiate the read. If it's a prefetch then 4967 * it won't have a callback so we'll remove the 4968 * reference that arc_buf_alloc_impl() created. We 4969 * do this after we've called arc_access() to 4970 * avoid hitting an assert in remove_reference(). 4971 */ 4972 arc_access(hdr, hash_lock); 4973 arc_hdr_alloc_pabd(hdr); 4974 } 4975 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 4976 size = arc_hdr_size(hdr); 4977 4978 /* 4979 * If compression is enabled on the hdr, then we will do 4980 * RAW I/O and will store the compressed data in the hdr's 4981 * data block. Otherwise, the hdr's data block will contain 4982 * the uncompressed data.
4983 */ 4984 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) { 4985 zio_flags |= ZIO_FLAG_RAW; 4986 } 4987 4988 if (*arc_flags & ARC_FLAG_PREFETCH) 4989 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); 4990 if (*arc_flags & ARC_FLAG_L2CACHE) 4991 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 4992 if (BP_GET_LEVEL(bp) > 0) 4993 arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT); 4994 if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH) 4995 arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH); 4996 ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state)); 4997 4998 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 4999 acb->acb_done = done; 5000 acb->acb_private = private; 5001 acb->acb_compressed = compressed_read; 5002 5003 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 5004 hdr->b_l1hdr.b_acb = acb; 5005 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5006 5007 if (HDR_HAS_L2HDR(hdr) && 5008 (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) { 5009 devw = hdr->b_l2hdr.b_dev->l2ad_writing; 5010 addr = hdr->b_l2hdr.b_daddr; 5011 /* 5012 * Lock out L2ARC device removal. 5013 */ 5014 if (vdev_is_dead(vd) || 5015 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 5016 vd = NULL; 5017 } 5018 5019 if (priority == ZIO_PRIORITY_ASYNC_READ) 5020 arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); 5021 else 5022 arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); 5023 5024 if (hash_lock != NULL) 5025 mutex_exit(hash_lock); 5026 5027 /* 5028 * At this point, we have a level 1 cache miss. Try again in 5029 * L2ARC if possible. 5030 */ 5031 ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize); 5032 5033 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 5034 uint64_t, lsize, zbookmark_phys_t *, zb); 5035 ARCSTAT_BUMP(arcstat_misses); 5036 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 5037 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 5038 data, metadata, misses); 5039 5040 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 5041 /* 5042 * Read from the L2ARC if the following are true: 5043 * 1. The L2ARC vdev was previously cached. 5044 * 2. This buffer still has L2ARC metadata. 5045 * 3. This buffer isn't currently writing to the L2ARC. 5046 * 4. The L2ARC entry wasn't evicted, which may 5047 * also have invalidated the vdev. 5048 * 5. This isn't prefetch and l2arc_noprefetch is set. 5049 */ 5050 if (HDR_HAS_L2HDR(hdr) && 5051 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 5052 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 5053 l2arc_read_callback_t *cb; 5054 abd_t *abd; 5055 uint64_t asize; 5056 5057 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 5058 ARCSTAT_BUMP(arcstat_l2_hits); 5059 5060 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 5061 KM_SLEEP); 5062 cb->l2rcb_hdr = hdr; 5063 cb->l2rcb_bp = *bp; 5064 cb->l2rcb_zb = *zb; 5065 cb->l2rcb_flags = zio_flags; 5066 5067 asize = vdev_psize_to_asize(vd, size); 5068 if (asize != size) { 5069 abd = abd_alloc_for_io(asize, 5070 HDR_ISTYPE_METADATA(hdr)); 5071 cb->l2rcb_abd = abd; 5072 } else { 5073 abd = hdr->b_l1hdr.b_pabd; 5074 } 5075 5076 ASSERT(addr >= VDEV_LABEL_START_SIZE && 5077 addr + asize <= vd->vdev_psize - 5078 VDEV_LABEL_END_SIZE); 5079 5080 /* 5081 * l2arc read. The SCL_L2ARC lock will be 5082 * released by l2arc_read_done(). 5083 * Issue a null zio if the underlying buffer 5084 * was squashed to zero size by compression. 
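 *
 * A small worked example of the asize adjustment made above, assuming
 * a hypothetical device that allocates in 8K units: a 5.5K physical
 * block rounds up to 8K, so a separate padded buffer is allocated for
 * the device read and remembered in l2rcb_abd for the completion path:
 *
 *        #include <stdint.h>
 *
 *        static uint64_t
 *        round_to_alloc_size(uint64_t psize, uint64_t alloc_unit)
 *        {
 *                // smallest multiple of alloc_unit that covers psize
 *                return ((psize + alloc_unit - 1) / alloc_unit * alloc_unit);
 *        }
 *
 *        // round_to_alloc_size(5632, 8192) == 8192 != 5632, so the read
 *        // cannot land directly in the hdr's b_pabd.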
5085 */ 5086 ASSERT3U(HDR_GET_COMPRESS(hdr), !=, 5087 ZIO_COMPRESS_EMPTY); 5088 rzio = zio_read_phys(pio, vd, addr, 5089 asize, abd, 5090 ZIO_CHECKSUM_OFF, 5091 l2arc_read_done, cb, priority, 5092 zio_flags | ZIO_FLAG_DONT_CACHE | 5093 ZIO_FLAG_CANFAIL | 5094 ZIO_FLAG_DONT_PROPAGATE | 5095 ZIO_FLAG_DONT_RETRY, B_FALSE); 5096 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 5097 zio_t *, rzio); 5098 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 5099 5100 if (*arc_flags & ARC_FLAG_NOWAIT) { 5101 zio_nowait(rzio); 5102 return (0); 5103 } 5104 5105 ASSERT(*arc_flags & ARC_FLAG_WAIT); 5106 if (zio_wait(rzio) == 0) 5107 return (0); 5108 5109 /* l2arc read error; goto zio_read() */ 5110 } else { 5111 DTRACE_PROBE1(l2arc__miss, 5112 arc_buf_hdr_t *, hdr); 5113 ARCSTAT_BUMP(arcstat_l2_misses); 5114 if (HDR_L2_WRITING(hdr)) 5115 ARCSTAT_BUMP(arcstat_l2_rw_clash); 5116 spa_config_exit(spa, SCL_L2ARC, vd); 5117 } 5118 } else { 5119 if (vd != NULL) 5120 spa_config_exit(spa, SCL_L2ARC, vd); 5121 if (l2arc_ndev != 0) { 5122 DTRACE_PROBE1(l2arc__miss, 5123 arc_buf_hdr_t *, hdr); 5124 ARCSTAT_BUMP(arcstat_l2_misses); 5125 } 5126 } 5127 5128 rzio = zio_read(pio, spa, bp, hdr->b_l1hdr.b_pabd, size, 5129 arc_read_done, hdr, priority, zio_flags, zb); 5130 5131 if (*arc_flags & ARC_FLAG_WAIT) 5132 return (zio_wait(rzio)); 5133 5134 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 5135 zio_nowait(rzio); 5136 } 5137 return (0); 5138 } 5139 5140 /* 5141 * Notify the arc that a block was freed, and thus will never be used again. 5142 */ 5143 void 5144 arc_freed(spa_t *spa, const blkptr_t *bp) 5145 { 5146 arc_buf_hdr_t *hdr; 5147 kmutex_t *hash_lock; 5148 uint64_t guid = spa_load_guid(spa); 5149 5150 ASSERT(!BP_IS_EMBEDDED(bp)); 5151 5152 hdr = buf_hash_find(guid, bp, &hash_lock); 5153 if (hdr == NULL) 5154 return; 5155 5156 /* 5157 * We might be trying to free a block that is still doing I/O 5158 * (i.e. prefetch) or has a reference (i.e. a dedup-ed, 5159 * dmu_sync-ed block). If this block is being prefetched, then it 5160 * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr 5161 * until the I/O completes. A block may also have a reference if it is 5162 * part of a dedup-ed, dmu_synced write. The dmu_sync() function would 5163 * have written the new block to its final resting place on disk but 5164 * without the dedup flag set. This would have left the hdr in the MRU 5165 * state and discoverable. When the txg finally syncs it detects that 5166 * the block was overridden in open context and issues an override I/O. 5167 * Since this is a dedup block, the override I/O will determine if the 5168 * block is already in the DDT. If so, then it will replace the io_bp 5169 * with the bp from the DDT and allow the I/O to finish. When the I/O 5170 * reaches the done callback, dbuf_write_override_done, it will 5171 * check to see if the io_bp and io_bp_override are identical. 5172 * If they are not, then it indicates that the bp was replaced with 5173 * the bp in the DDT and the override bp is freed. This allows 5174 * us to arrive here with a reference on a block that is being 5175 * freed. So if we have an I/O in progress, or a reference to 5176 * this hdr, then we don't destroy the hdr. 
5177 */ 5178 if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) && 5179 refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) { 5180 arc_change_state(arc_anon, hdr, hash_lock); 5181 arc_hdr_destroy(hdr); 5182 mutex_exit(hash_lock); 5183 } else { 5184 mutex_exit(hash_lock); 5185 } 5186 5187 } 5188 5189 /* 5190 * Release this buffer from the cache, making it an anonymous buffer. This 5191 * must be done after a read and prior to modifying the buffer contents. 5192 * If the buffer has more than one reference, we must make 5193 * a new hdr for the buffer. 5194 */ 5195 void 5196 arc_release(arc_buf_t *buf, void *tag) 5197 { 5198 arc_buf_hdr_t *hdr = buf->b_hdr; 5199 5200 /* 5201 * It would be nice to assert that if it's DMU metadata (level > 5202 * 0 || it's the dnode file), then it must be syncing context. 5203 * But we don't know that information at this level. 5204 */ 5205 5206 mutex_enter(&buf->b_evict_lock); 5207 5208 ASSERT(HDR_HAS_L1HDR(hdr)); 5209 5210 /* 5211 * We don't grab the hash lock prior to this check, because if 5212 * the buffer's header is in the arc_anon state, it won't be 5213 * linked into the hash table. 5214 */ 5215 if (hdr->b_l1hdr.b_state == arc_anon) { 5216 mutex_exit(&buf->b_evict_lock); 5217 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 5218 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 5219 ASSERT(!HDR_HAS_L2HDR(hdr)); 5220 ASSERT(HDR_EMPTY(hdr)); 5221 5222 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 5223 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1); 5224 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node)); 5225 5226 hdr->b_l1hdr.b_arc_access = 0; 5227 5228 /* 5229 * If the buf is being overridden then it may already 5230 * have a hdr that is not empty. 5231 */ 5232 buf_discard_identity(hdr); 5233 arc_buf_thaw(buf); 5234 5235 return; 5236 } 5237 5238 kmutex_t *hash_lock = HDR_LOCK(hdr); 5239 mutex_enter(hash_lock); 5240 5241 /* 5242 * This assignment is only valid as long as the hash_lock is 5243 * held, we must be careful not to reference state or the 5244 * b_state field after dropping the lock. 5245 */ 5246 arc_state_t *state = hdr->b_l1hdr.b_state; 5247 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 5248 ASSERT3P(state, !=, arc_anon); 5249 5250 /* this buffer is not on any list */ 5251 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0); 5252 5253 if (HDR_HAS_L2HDR(hdr)) { 5254 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx); 5255 5256 /* 5257 * We have to recheck this conditional again now that 5258 * we're holding the l2ad_mtx to prevent a race with 5259 * another thread which might be concurrently calling 5260 * l2arc_evict(). In that case, l2arc_evict() might have 5261 * destroyed the header's L2 portion as we were waiting 5262 * to acquire the l2ad_mtx. 5263 */ 5264 if (HDR_HAS_L2HDR(hdr)) 5265 arc_hdr_l2hdr_destroy(hdr); 5266 5267 mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx); 5268 } 5269 5270 /* 5271 * Do we have more than one buf? 
5272 */ 5273 if (hdr->b_l1hdr.b_bufcnt > 1) { 5274 arc_buf_hdr_t *nhdr; 5275 uint64_t spa = hdr->b_spa; 5276 uint64_t psize = HDR_GET_PSIZE(hdr); 5277 uint64_t lsize = HDR_GET_LSIZE(hdr); 5278 enum zio_compress compress = HDR_GET_COMPRESS(hdr); 5279 arc_buf_contents_t type = arc_buf_type(hdr); 5280 VERIFY3U(hdr->b_type, ==, type); 5281 5282 ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL); 5283 (void) remove_reference(hdr, hash_lock, tag); 5284 5285 if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) { 5286 ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); 5287 ASSERT(ARC_BUF_LAST(buf)); 5288 } 5289 5290 /* 5291 * Pull the data off of this hdr and attach it to 5292 * a new anonymous hdr. Also find the last buffer 5293 * in the hdr's buffer list. 5294 */ 5295 arc_buf_t *lastbuf = arc_buf_remove(hdr, buf); 5296 ASSERT3P(lastbuf, !=, NULL); 5297 5298 /* 5299 * If the current arc_buf_t and the hdr are sharing their data 5300 * buffer, then we must stop sharing that block. 5301 */ 5302 if (arc_buf_is_shared(buf)) { 5303 VERIFY(!arc_buf_is_shared(lastbuf)); 5304 5305 /* 5306 * First, sever the block sharing relationship between 5307 * buf and the arc_buf_hdr_t. 5308 */ 5309 arc_unshare_buf(hdr, buf); 5310 5311 /* 5312 * Now we need to recreate the hdr's b_pabd. Since we 5313 * have lastbuf handy, we try to share with it, but if 5314 * we can't then we allocate a new b_pabd and copy the 5315 * data from buf into it. 5316 */ 5317 if (arc_can_share(hdr, lastbuf)) { 5318 arc_share_buf(hdr, lastbuf); 5319 } else { 5320 arc_hdr_alloc_pabd(hdr); 5321 abd_copy_from_buf(hdr->b_l1hdr.b_pabd, 5322 buf->b_data, psize); 5323 } 5324 VERIFY3P(lastbuf->b_data, !=, NULL); 5325 } else if (HDR_SHARED_DATA(hdr)) { 5326 /* 5327 * Uncompressed shared buffers are always at the end 5328 * of the list. Compressed buffers don't have the 5329 * same requirements. This makes it hard to 5330 * simply assert that the lastbuf is shared so 5331 * we rely on the hdr's compression flags to determine 5332 * if we have a compressed, shared buffer. 5333 */ 5334 ASSERT(arc_buf_is_shared(lastbuf) || 5335 HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF); 5336 ASSERT(!ARC_BUF_SHARED(buf)); 5337 } 5338 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 5339 ASSERT3P(state, !=, arc_l2c_only); 5340 5341 (void) refcount_remove_many(&state->arcs_size, 5342 arc_buf_size(buf), buf); 5343 5344 if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { 5345 ASSERT3P(state, !=, arc_l2c_only); 5346 (void) refcount_remove_many(&state->arcs_esize[type], 5347 arc_buf_size(buf), buf); 5348 } 5349 5350 hdr->b_l1hdr.b_bufcnt -= 1; 5351 arc_cksum_verify(buf); 5352 arc_buf_unwatch(buf); 5353 5354 mutex_exit(hash_lock); 5355 5356 /* 5357 * Allocate a new hdr. The new hdr will contain a b_pabd 5358 * buffer which will be freed in arc_write(). 
5359 */ 5360 nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type); 5361 ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL); 5362 ASSERT0(nhdr->b_l1hdr.b_bufcnt); 5363 ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt)); 5364 VERIFY3U(nhdr->b_type, ==, type); 5365 ASSERT(!HDR_SHARED_DATA(nhdr)); 5366 5367 nhdr->b_l1hdr.b_buf = buf; 5368 nhdr->b_l1hdr.b_bufcnt = 1; 5369 (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag); 5370 buf->b_hdr = nhdr; 5371 5372 mutex_exit(&buf->b_evict_lock); 5373 (void) refcount_add_many(&arc_anon->arcs_size, 5374 arc_buf_size(buf), buf); 5375 } else { 5376 mutex_exit(&buf->b_evict_lock); 5377 ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1); 5378 /* protected by hash lock, or hdr is on arc_anon */ 5379 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 5380 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 5381 arc_change_state(arc_anon, hdr, hash_lock); 5382 hdr->b_l1hdr.b_arc_access = 0; 5383 mutex_exit(hash_lock); 5384 5385 buf_discard_identity(hdr); 5386 arc_buf_thaw(buf); 5387 } 5388 } 5389 5390 int 5391 arc_released(arc_buf_t *buf) 5392 { 5393 int released; 5394 5395 mutex_enter(&buf->b_evict_lock); 5396 released = (buf->b_data != NULL && 5397 buf->b_hdr->b_l1hdr.b_state == arc_anon); 5398 mutex_exit(&buf->b_evict_lock); 5399 return (released); 5400 } 5401 5402 #ifdef ZFS_DEBUG 5403 int 5404 arc_referenced(arc_buf_t *buf) 5405 { 5406 int referenced; 5407 5408 mutex_enter(&buf->b_evict_lock); 5409 referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt)); 5410 mutex_exit(&buf->b_evict_lock); 5411 return (referenced); 5412 } 5413 #endif 5414 5415 static void 5416 arc_write_ready(zio_t *zio) 5417 { 5418 arc_write_callback_t *callback = zio->io_private; 5419 arc_buf_t *buf = callback->awcb_buf; 5420 arc_buf_hdr_t *hdr = buf->b_hdr; 5421 uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp); 5422 5423 ASSERT(HDR_HAS_L1HDR(hdr)); 5424 ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt)); 5425 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 5426 5427 /* 5428 * If we're reexecuting this zio because the pool suspended, then 5429 * cleanup any state that was previously set the first time the 5430 * callback was invoked. 5431 */ 5432 if (zio->io_flags & ZIO_FLAG_REEXECUTED) { 5433 arc_cksum_free(hdr); 5434 arc_buf_unwatch(buf); 5435 if (hdr->b_l1hdr.b_pabd != NULL) { 5436 if (arc_buf_is_shared(buf)) { 5437 arc_unshare_buf(hdr, buf); 5438 } else { 5439 arc_hdr_free_pabd(hdr); 5440 } 5441 } 5442 } 5443 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 5444 ASSERT(!HDR_SHARED_DATA(hdr)); 5445 ASSERT(!arc_buf_is_shared(buf)); 5446 5447 callback->awcb_ready(zio, buf, callback->awcb_private); 5448 5449 if (HDR_IO_IN_PROGRESS(hdr)) 5450 ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED); 5451 5452 arc_cksum_compute(buf); 5453 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5454 5455 enum zio_compress compress; 5456 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 5457 compress = ZIO_COMPRESS_OFF; 5458 } else { 5459 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(zio->io_bp)); 5460 compress = BP_GET_COMPRESS(zio->io_bp); 5461 } 5462 HDR_SET_PSIZE(hdr, psize); 5463 arc_hdr_set_compress(hdr, compress); 5464 5465 5466 /* 5467 * Fill the hdr with data. If the hdr is compressed, the data we want 5468 * is available from the zio, otherwise we can take it from the buf. 5469 * 5470 * We might be able to share the buf's data with the hdr here. However, 5471 * doing so would cause the ARC to be full of linear ABDs if we write a 5472 * lot of shareable data. 
As a compromise, we check whether scattered 5473 * ABDs are allowed, and assume that if they are then the user wants 5474 * the ARC to be primarily filled with them regardless of the data being 5475 * written. Therefore, if they're allowed then we allocate one and copy 5476 * the data into it; otherwise, we share the data directly if we can. 5477 */ 5478 if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) { 5479 arc_hdr_alloc_pabd(hdr); 5480 5481 /* 5482 * Ideally, we would always copy the io_abd into b_pabd, but the 5483 * user may have disabled compressed ARC, thus we must check the 5484 * hdr's compression setting rather than the io_bp's. 5485 */ 5486 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) { 5487 ASSERT3U(BP_GET_COMPRESS(zio->io_bp), !=, 5488 ZIO_COMPRESS_OFF); 5489 ASSERT3U(psize, >, 0); 5490 5491 abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize); 5492 } else { 5493 ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr)); 5494 5495 abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data, 5496 arc_buf_size(buf)); 5497 } 5498 } else { 5499 ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd)); 5500 ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf)); 5501 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 5502 5503 arc_share_buf(hdr, buf); 5504 } 5505 5506 arc_hdr_verify(hdr, zio->io_bp); 5507 } 5508 5509 static void 5510 arc_write_children_ready(zio_t *zio) 5511 { 5512 arc_write_callback_t *callback = zio->io_private; 5513 arc_buf_t *buf = callback->awcb_buf; 5514 5515 callback->awcb_children_ready(zio, buf, callback->awcb_private); 5516 } 5517 5518 /* 5519 * The SPA calls this callback for each physical write that happens on behalf 5520 * of a logical write. See the comment in dbuf_write_physdone() for details. 5521 */ 5522 static void 5523 arc_write_physdone(zio_t *zio) 5524 { 5525 arc_write_callback_t *cb = zio->io_private; 5526 if (cb->awcb_physdone != NULL) 5527 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); 5528 } 5529 5530 static void 5531 arc_write_done(zio_t *zio) 5532 { 5533 arc_write_callback_t *callback = zio->io_private; 5534 arc_buf_t *buf = callback->awcb_buf; 5535 arc_buf_hdr_t *hdr = buf->b_hdr; 5536 5537 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 5538 5539 if (zio->io_error == 0) { 5540 arc_hdr_verify(hdr, zio->io_bp); 5541 5542 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 5543 buf_discard_identity(hdr); 5544 } else { 5545 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 5546 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 5547 } 5548 } else { 5549 ASSERT(HDR_EMPTY(hdr)); 5550 } 5551 5552 /* 5553 * If the block to be written was all-zero or compressed enough to be 5554 * embedded in the BP, no write was performed so there will be no 5555 * dva/birth/checksum. The buffer must therefore remain anonymous 5556 * (and uncached). 5557 */ 5558 if (!HDR_EMPTY(hdr)) { 5559 arc_buf_hdr_t *exists; 5560 kmutex_t *hash_lock; 5561 5562 ASSERT3U(zio->io_error, ==, 0); 5563 5564 arc_cksum_verify(buf); 5565 5566 exists = buf_hash_insert(hdr, &hash_lock); 5567 if (exists != NULL) { 5568 /* 5569 * This can only happen if we overwrite for 5570 * sync-to-convergence, because we remove 5571 * buffers from the hash table when we arc_free(). 
5572 */ 5573 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 5574 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 5575 panic("bad overwrite, hdr=%p exists=%p", 5576 (void *)hdr, (void *)exists); 5577 ASSERT(refcount_is_zero( 5578 &exists->b_l1hdr.b_refcnt)); 5579 arc_change_state(arc_anon, exists, hash_lock); 5580 mutex_exit(hash_lock); 5581 arc_hdr_destroy(exists); 5582 exists = buf_hash_insert(hdr, &hash_lock); 5583 ASSERT3P(exists, ==, NULL); 5584 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { 5585 /* nopwrite */ 5586 ASSERT(zio->io_prop.zp_nopwrite); 5587 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 5588 panic("bad nopwrite, hdr=%p exists=%p", 5589 (void *)hdr, (void *)exists); 5590 } else { 5591 /* Dedup */ 5592 ASSERT(hdr->b_l1hdr.b_bufcnt == 1); 5593 ASSERT(hdr->b_l1hdr.b_state == arc_anon); 5594 ASSERT(BP_GET_DEDUP(zio->io_bp)); 5595 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 5596 } 5597 } 5598 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5599 /* if it's not anon, we are doing a scrub */ 5600 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon) 5601 arc_access(hdr, hash_lock); 5602 mutex_exit(hash_lock); 5603 } else { 5604 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5605 } 5606 5607 ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 5608 callback->awcb_done(zio, buf, callback->awcb_private); 5609 5610 abd_put(zio->io_abd); 5611 kmem_free(callback, sizeof (arc_write_callback_t)); 5612 } 5613 5614 zio_t * 5615 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 5616 boolean_t l2arc, const zio_prop_t *zp, arc_done_func_t *ready, 5617 arc_done_func_t *children_ready, arc_done_func_t *physdone, 5618 arc_done_func_t *done, void *private, zio_priority_t priority, 5619 int zio_flags, const zbookmark_phys_t *zb) 5620 { 5621 arc_buf_hdr_t *hdr = buf->b_hdr; 5622 arc_write_callback_t *callback; 5623 zio_t *zio; 5624 zio_prop_t localprop = *zp; 5625 5626 ASSERT3P(ready, !=, NULL); 5627 ASSERT3P(done, !=, NULL); 5628 ASSERT(!HDR_IO_ERROR(hdr)); 5629 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 5630 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 5631 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); 5632 if (l2arc) 5633 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 5634 if (ARC_BUF_COMPRESSED(buf)) { 5635 /* 5636 * We're writing a pre-compressed buffer. Make the 5637 * compression algorithm requested by the zio_prop_t match 5638 * the pre-compressed buffer's compression algorithm. 5639 */ 5640 localprop.zp_compress = HDR_GET_COMPRESS(hdr); 5641 5642 ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf)); 5643 zio_flags |= ZIO_FLAG_RAW; 5644 } 5645 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 5646 callback->awcb_ready = ready; 5647 callback->awcb_children_ready = children_ready; 5648 callback->awcb_physdone = physdone; 5649 callback->awcb_done = done; 5650 callback->awcb_private = private; 5651 callback->awcb_buf = buf; 5652 5653 /* 5654 * The hdr's b_pabd is now stale, free it now. A new data block 5655 * will be allocated when the zio pipeline calls arc_write_ready(). 5656 */ 5657 if (hdr->b_l1hdr.b_pabd != NULL) { 5658 /* 5659 * If the buf is currently sharing the data block with 5660 * the hdr then we need to break that relationship here. 5661 * The hdr will remain with a NULL data pointer and the 5662 * buf will take sole ownership of the block. 
5663 */ 5664 if (arc_buf_is_shared(buf)) { 5665 arc_unshare_buf(hdr, buf); 5666 } else { 5667 arc_hdr_free_pabd(hdr); 5668 } 5669 VERIFY3P(buf->b_data, !=, NULL); 5670 arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF); 5671 } 5672 ASSERT(!arc_buf_is_shared(buf)); 5673 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 5674 5675 zio = zio_write(pio, spa, txg, bp, 5676 abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)), 5677 HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready, 5678 (children_ready != NULL) ? arc_write_children_ready : NULL, 5679 arc_write_physdone, arc_write_done, callback, 5680 priority, zio_flags, zb); 5681 5682 return (zio); 5683 } 5684 5685 static int 5686 arc_memory_throttle(uint64_t reserve, uint64_t txg) 5687 { 5688 #ifdef _KERNEL 5689 uint64_t available_memory = ptob(freemem); 5690 static uint64_t page_load = 0; 5691 static uint64_t last_txg = 0; 5692 5693 #if defined(__i386) 5694 available_memory = 5695 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 5696 #endif 5697 5698 if (freemem > physmem * arc_lotsfree_percent / 100) 5699 return (0); 5700 5701 if (txg > last_txg) { 5702 last_txg = txg; 5703 page_load = 0; 5704 } 5705 /* 5706 * If we are in pageout, we know that memory is already tight, 5707 * the arc is already going to be evicting, so we just want to 5708 * continue to let page writes occur as quickly as possible. 5709 */ 5710 if (curproc == proc_pageout) { 5711 if (page_load > MAX(ptob(minfree), available_memory) / 4) 5712 return (SET_ERROR(ERESTART)); 5713 /* Note: reserve is inflated, so we deflate */ 5714 page_load += reserve / 8; 5715 return (0); 5716 } else if (page_load > 0 && arc_reclaim_needed()) { 5717 /* memory is low, delay before restarting */ 5718 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 5719 return (SET_ERROR(EAGAIN)); 5720 } 5721 page_load = 0; 5722 #endif 5723 return (0); 5724 } 5725 5726 void 5727 arc_tempreserve_clear(uint64_t reserve) 5728 { 5729 atomic_add_64(&arc_tempreserve, -reserve); 5730 ASSERT((int64_t)arc_tempreserve >= 0); 5731 } 5732 5733 int 5734 arc_tempreserve_space(uint64_t reserve, uint64_t txg) 5735 { 5736 int error; 5737 uint64_t anon_size; 5738 5739 if (reserve > arc_c/4 && !arc_no_grow) 5740 arc_c = MIN(arc_c_max, reserve * 4); 5741 if (reserve > arc_c) 5742 return (SET_ERROR(ENOMEM)); 5743 5744 /* 5745 * Don't count loaned bufs as in flight dirty data to prevent long 5746 * network delays from blocking transactions that are ready to be 5747 * assigned to a txg. 5748 */ 5749 5750 /* assert that it has not wrapped around */ 5751 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0); 5752 5753 anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) - 5754 arc_loaned_bytes), 0); 5755 5756 /* 5757 * Writes will, almost always, require additional memory allocations 5758 * in order to compress/encrypt/etc the data. We therefore need to 5759 * make sure that there is sufficient available memory for this. 5760 */ 5761 error = arc_memory_throttle(reserve, txg); 5762 if (error != 0) 5763 return (error); 5764 5765 /* 5766 * Throttle writes when the amount of dirty data in the cache 5767 * gets too large. We try to keep the cache less than half full 5768 * of dirty blocks so that our sync times don't grow too large. 5769 * Note: if two requests come in concurrently, we might let them 5770 * both succeed, when one of them should fail. Not a huge deal. 
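 *
 * As a rough illustration (the figures are hypothetical, not tuned
 * defaults): with arc_c at 4 GB, the check below fails a reservation
 * with ERESTART once reserve + arc_tempreserve + anon_size exceeds
 * 2 GB (arc_c / 2) while anonymous, not-yet-synced data alone exceeds
 * 1 GB (arc_c / 4).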
5771 */ 5772 5773 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 5774 anon_size > arc_c / 4) { 5775 uint64_t meta_esize = 5776 refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 5777 uint64_t data_esize = 5778 refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 5779 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 5780 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 5781 arc_tempreserve >> 10, meta_esize >> 10, 5782 data_esize >> 10, reserve >> 10, arc_c >> 10); 5783 return (SET_ERROR(ERESTART)); 5784 } 5785 atomic_add_64(&arc_tempreserve, reserve); 5786 return (0); 5787 } 5788 5789 static void 5790 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size, 5791 kstat_named_t *evict_data, kstat_named_t *evict_metadata) 5792 { 5793 size->value.ui64 = refcount_count(&state->arcs_size); 5794 evict_data->value.ui64 = 5795 refcount_count(&state->arcs_esize[ARC_BUFC_DATA]); 5796 evict_metadata->value.ui64 = 5797 refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]); 5798 } 5799 5800 static int 5801 arc_kstat_update(kstat_t *ksp, int rw) 5802 { 5803 arc_stats_t *as = ksp->ks_data; 5804 5805 if (rw == KSTAT_WRITE) { 5806 return (EACCES); 5807 } else { 5808 arc_kstat_update_state(arc_anon, 5809 &as->arcstat_anon_size, 5810 &as->arcstat_anon_evictable_data, 5811 &as->arcstat_anon_evictable_metadata); 5812 arc_kstat_update_state(arc_mru, 5813 &as->arcstat_mru_size, 5814 &as->arcstat_mru_evictable_data, 5815 &as->arcstat_mru_evictable_metadata); 5816 arc_kstat_update_state(arc_mru_ghost, 5817 &as->arcstat_mru_ghost_size, 5818 &as->arcstat_mru_ghost_evictable_data, 5819 &as->arcstat_mru_ghost_evictable_metadata); 5820 arc_kstat_update_state(arc_mfu, 5821 &as->arcstat_mfu_size, 5822 &as->arcstat_mfu_evictable_data, 5823 &as->arcstat_mfu_evictable_metadata); 5824 arc_kstat_update_state(arc_mfu_ghost, 5825 &as->arcstat_mfu_ghost_size, 5826 &as->arcstat_mfu_ghost_evictable_data, 5827 &as->arcstat_mfu_ghost_evictable_metadata); 5828 } 5829 5830 return (0); 5831 } 5832 5833 /* 5834 * This function *must* return indices evenly distributed between all 5835 * sublists of the multilist. This is needed due to how the ARC eviction 5836 * code is laid out; arc_evict_state() assumes ARC buffers are evenly 5837 * distributed between all sublists and uses this assumption when 5838 * deciding which sublist to evict from and how much to evict from it. 5839 */ 5840 unsigned int 5841 arc_state_multilist_index_func(multilist_t *ml, void *obj) 5842 { 5843 arc_buf_hdr_t *hdr = obj; 5844 5845 /* 5846 * We rely on b_dva to generate evenly distributed index 5847 * numbers using buf_hash below. So, as an added precaution, 5848 * let's make sure we never add empty buffers to the arc lists. 5849 */ 5850 ASSERT(!HDR_EMPTY(hdr)); 5851 5852 /* 5853 * The assumption here is that the hash value for a given 5854 * arc_buf_hdr_t will remain constant throughout its lifetime 5855 * (i.e. its b_spa, b_dva, and b_birth fields don't change). 5856 * Thus, we don't need to store the header's sublist index 5857 * on insertion, as this index can be recalculated on removal. 5858 * 5859 * Also, the low order bits of the hash value are thought to be 5860 * distributed evenly. Otherwise, in the case that the multilist 5861 * has a power of two number of sublists, each sublist's usage 5862 * would not be evenly distributed.
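 *
 * A minimal sketch of the mapping computed below (the sublist count is
 * illustrative only):
 *
 *   idx = buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
 *       multilist_get_num_sublists(ml);
 *
 * With 64 sublists, a header whose hash value is 0x1234567 maps to
 * sublist 0x1234567 % 64 = 39, and it maps to that same sublist again
 * on removal because b_spa, b_dva and b_birth do not change.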
5863 */ 5864 return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) % 5865 multilist_get_num_sublists(ml)); 5866 } 5867 5868 static void 5869 arc_state_init(void) 5870 { 5871 arc_anon = &ARC_anon; 5872 arc_mru = &ARC_mru; 5873 arc_mru_ghost = &ARC_mru_ghost; 5874 arc_mfu = &ARC_mfu; 5875 arc_mfu_ghost = &ARC_mfu_ghost; 5876 arc_l2c_only = &ARC_l2c_only; 5877 5878 arc_mru->arcs_list[ARC_BUFC_METADATA] = 5879 multilist_create(sizeof (arc_buf_hdr_t), 5880 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5881 arc_state_multilist_index_func); 5882 arc_mru->arcs_list[ARC_BUFC_DATA] = 5883 multilist_create(sizeof (arc_buf_hdr_t), 5884 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5885 arc_state_multilist_index_func); 5886 arc_mru_ghost->arcs_list[ARC_BUFC_METADATA] = 5887 multilist_create(sizeof (arc_buf_hdr_t), 5888 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5889 arc_state_multilist_index_func); 5890 arc_mru_ghost->arcs_list[ARC_BUFC_DATA] = 5891 multilist_create(sizeof (arc_buf_hdr_t), 5892 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5893 arc_state_multilist_index_func); 5894 arc_mfu->arcs_list[ARC_BUFC_METADATA] = 5895 multilist_create(sizeof (arc_buf_hdr_t), 5896 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5897 arc_state_multilist_index_func); 5898 arc_mfu->arcs_list[ARC_BUFC_DATA] = 5899 multilist_create(sizeof (arc_buf_hdr_t), 5900 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5901 arc_state_multilist_index_func); 5902 arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA] = 5903 multilist_create(sizeof (arc_buf_hdr_t), 5904 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5905 arc_state_multilist_index_func); 5906 arc_mfu_ghost->arcs_list[ARC_BUFC_DATA] = 5907 multilist_create(sizeof (arc_buf_hdr_t), 5908 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5909 arc_state_multilist_index_func); 5910 arc_l2c_only->arcs_list[ARC_BUFC_METADATA] = 5911 multilist_create(sizeof (arc_buf_hdr_t), 5912 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5913 arc_state_multilist_index_func); 5914 arc_l2c_only->arcs_list[ARC_BUFC_DATA] = 5915 multilist_create(sizeof (arc_buf_hdr_t), 5916 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5917 arc_state_multilist_index_func); 5918 5919 refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 5920 refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 5921 refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); 5922 refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]); 5923 refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); 5924 refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); 5925 refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); 5926 refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); 5927 refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); 5928 refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); 5929 refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); 5930 refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); 5931 5932 refcount_create(&arc_anon->arcs_size); 5933 refcount_create(&arc_mru->arcs_size); 5934 refcount_create(&arc_mru_ghost->arcs_size); 5935 refcount_create(&arc_mfu->arcs_size); 5936 refcount_create(&arc_mfu_ghost->arcs_size); 5937 refcount_create(&arc_l2c_only->arcs_size); 5938 } 5939 5940 static void 5941 arc_state_fini(void) 5942 { 5943 refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 5944 refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 5945 refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); 5946 refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]); 5947 
refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); 5948 refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); 5949 refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); 5950 refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); 5951 refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); 5952 refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); 5953 refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); 5954 refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); 5955 5956 refcount_destroy(&arc_anon->arcs_size); 5957 refcount_destroy(&arc_mru->arcs_size); 5958 refcount_destroy(&arc_mru_ghost->arcs_size); 5959 refcount_destroy(&arc_mfu->arcs_size); 5960 refcount_destroy(&arc_mfu_ghost->arcs_size); 5961 refcount_destroy(&arc_l2c_only->arcs_size); 5962 5963 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]); 5964 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 5965 multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_METADATA]); 5966 multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 5967 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_DATA]); 5968 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 5969 multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_DATA]); 5970 multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 5971 } 5972 5973 uint64_t 5974 arc_max_bytes(void) 5975 { 5976 return (arc_c_max); 5977 } 5978 5979 void 5980 arc_init(void) 5981 { 5982 /* 5983 * allmem is "all memory that we could possibly use". 5984 */ 5985 #ifdef _KERNEL 5986 uint64_t allmem = ptob(physmem - swapfs_minfree); 5987 #else 5988 uint64_t allmem = (physmem * PAGESIZE) / 2; 5989 #endif 5990 5991 mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL); 5992 cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL); 5993 cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL); 5994 5995 /* Convert seconds to clock ticks */ 5996 arc_min_prefetch_lifespan = 1 * hz; 5997 5998 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 5999 arc_c_min = MAX(allmem / 32, 64 << 20); 6000 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 6001 if (allmem >= 1 << 30) 6002 arc_c_max = allmem - (1 << 30); 6003 else 6004 arc_c_max = arc_c_min; 6005 arc_c_max = MAX(allmem * 3 / 4, arc_c_max); 6006 6007 /* 6008 * In userland, there's only the memory pressure that we artificially 6009 * create (see arc_available_memory()). Don't let arc_c get too 6010 * small, because it can cause transactions to be larger than 6011 * arc_c, causing arc_tempreserve_space() to fail. 6012 */ 6013 #ifndef _KERNEL 6014 arc_c_min = arc_c_max / 2; 6015 #endif 6016 6017 /* 6018 * Allow the tunables to override our calculations if they are 6019 * reasonable (ie. over 64MB) 6020 */ 6021 if (zfs_arc_max > 64 << 20 && zfs_arc_max < allmem) { 6022 arc_c_max = zfs_arc_max; 6023 arc_c_min = MIN(arc_c_min, arc_c_max); 6024 } 6025 if (zfs_arc_min > 64 << 20 && zfs_arc_min <= arc_c_max) 6026 arc_c_min = zfs_arc_min; 6027 6028 arc_c = arc_c_max; 6029 arc_p = (arc_c >> 1); 6030 arc_size = 0; 6031 6032 /* limit meta-data to 1/4 of the arc capacity */ 6033 arc_meta_limit = arc_c_max / 4; 6034 6035 #ifdef _KERNEL 6036 /* 6037 * Metadata is stored in the kernel's heap. Don't let us 6038 * use more than half the heap for the ARC. 
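 *
 * For example (heap size hypothetical): with a 16 GB kernel heap,
 * arc_meta_limit is clamped to at most 8 GB by the MIN() below, even
 * if the arc_c_max / 4 value computed above is larger.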
6039 */ 6040 arc_meta_limit = MIN(arc_meta_limit, 6041 vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 2); 6042 #endif 6043 6044 /* Allow the tunable to override if it is reasonable */ 6045 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 6046 arc_meta_limit = zfs_arc_meta_limit; 6047 6048 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 6049 arc_c_min = arc_meta_limit / 2; 6050 6051 if (zfs_arc_meta_min > 0) { 6052 arc_meta_min = zfs_arc_meta_min; 6053 } else { 6054 arc_meta_min = arc_c_min / 2; 6055 } 6056 6057 if (zfs_arc_grow_retry > 0) 6058 arc_grow_retry = zfs_arc_grow_retry; 6059 6060 if (zfs_arc_shrink_shift > 0) 6061 arc_shrink_shift = zfs_arc_shrink_shift; 6062 6063 /* 6064 * Ensure that arc_no_grow_shift is less than arc_shrink_shift. 6065 */ 6066 if (arc_no_grow_shift >= arc_shrink_shift) 6067 arc_no_grow_shift = arc_shrink_shift - 1; 6068 6069 if (zfs_arc_p_min_shift > 0) 6070 arc_p_min_shift = zfs_arc_p_min_shift; 6071 6072 /* if kmem_flags are set, lets try to use less memory */ 6073 if (kmem_debugging()) 6074 arc_c = arc_c / 2; 6075 if (arc_c < arc_c_min) 6076 arc_c = arc_c_min; 6077 6078 arc_state_init(); 6079 buf_init(); 6080 6081 arc_reclaim_thread_exit = B_FALSE; 6082 6083 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 6084 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 6085 6086 if (arc_ksp != NULL) { 6087 arc_ksp->ks_data = &arc_stats; 6088 arc_ksp->ks_update = arc_kstat_update; 6089 kstat_install(arc_ksp); 6090 } 6091 6092 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 6093 TS_RUN, minclsyspri); 6094 6095 arc_dead = B_FALSE; 6096 arc_warm = B_FALSE; 6097 6098 /* 6099 * Calculate maximum amount of dirty data per pool. 6100 * 6101 * If it has been set by /etc/system, take that. 6102 * Otherwise, use a percentage of physical memory defined by 6103 * zfs_dirty_data_max_percent (default 10%) with a cap at 6104 * zfs_dirty_data_max_max (default 4GB). 6105 */ 6106 if (zfs_dirty_data_max == 0) { 6107 zfs_dirty_data_max = physmem * PAGESIZE * 6108 zfs_dirty_data_max_percent / 100; 6109 zfs_dirty_data_max = MIN(zfs_dirty_data_max, 6110 zfs_dirty_data_max_max); 6111 } 6112 } 6113 6114 void 6115 arc_fini(void) 6116 { 6117 mutex_enter(&arc_reclaim_lock); 6118 arc_reclaim_thread_exit = B_TRUE; 6119 /* 6120 * The reclaim thread will set arc_reclaim_thread_exit back to 6121 * B_FALSE when it is finished exiting; we're waiting for that. 6122 */ 6123 while (arc_reclaim_thread_exit) { 6124 cv_signal(&arc_reclaim_thread_cv); 6125 cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock); 6126 } 6127 mutex_exit(&arc_reclaim_lock); 6128 6129 /* Use B_TRUE to ensure *all* buffers are evicted */ 6130 arc_flush(NULL, B_TRUE); 6131 6132 arc_dead = B_TRUE; 6133 6134 if (arc_ksp != NULL) { 6135 kstat_delete(arc_ksp); 6136 arc_ksp = NULL; 6137 } 6138 6139 mutex_destroy(&arc_reclaim_lock); 6140 cv_destroy(&arc_reclaim_thread_cv); 6141 cv_destroy(&arc_reclaim_waiters_cv); 6142 6143 arc_state_fini(); 6144 buf_fini(); 6145 6146 ASSERT0(arc_loaned_bytes); 6147 } 6148 6149 /* 6150 * Level 2 ARC 6151 * 6152 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 6153 * It uses dedicated storage devices to hold cached data, which are populated 6154 * using large infrequent writes. The main role of this cache is to boost 6155 * the performance of random read workloads. 
The intended L2ARC devices 6156 * include short-stroked disks, solid state disks, and other media with 6157 * substantially faster read latency than disk. 6158 * 6159 * +-----------------------+ 6160 * | ARC | 6161 * +-----------------------+ 6162 * | ^ ^ 6163 * | | | 6164 * l2arc_feed_thread() arc_read() 6165 * | | | 6166 * | l2arc read | 6167 * V | | 6168 * +---------------+ | 6169 * | L2ARC | | 6170 * +---------------+ | 6171 * | ^ | 6172 * l2arc_write() | | 6173 * | | | 6174 * V | | 6175 * +-------+ +-------+ 6176 * | vdev | | vdev | 6177 * | cache | | cache | 6178 * +-------+ +-------+ 6179 * +=========+ .-----. 6180 * : L2ARC : |-_____-| 6181 * : devices : | Disks | 6182 * +=========+ `-_____-' 6183 * 6184 * Read requests are satisfied from the following sources, in order: 6185 * 6186 * 1) ARC 6187 * 2) vdev cache of L2ARC devices 6188 * 3) L2ARC devices 6189 * 4) vdev cache of disks 6190 * 5) disks 6191 * 6192 * Some L2ARC device types exhibit extremely slow write performance. 6193 * To accommodate for this there are some significant differences between 6194 * the L2ARC and traditional cache design: 6195 * 6196 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 6197 * the ARC behave as usual, freeing buffers and placing headers on ghost 6198 * lists. The ARC does not send buffers to the L2ARC during eviction as 6199 * this would add inflated write latencies for all ARC memory pressure. 6200 * 6201 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 6202 * It does this by periodically scanning buffers from the eviction-end of 6203 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 6204 * not already there. It scans until a headroom of buffers is satisfied, 6205 * which itself is a buffer for ARC eviction. If a compressible buffer is 6206 * found during scanning and selected for writing to an L2ARC device, we 6207 * temporarily boost scanning headroom during the next scan cycle to make 6208 * sure we adapt to compression effects (which might significantly reduce 6209 * the data volume we write to L2ARC). The thread that does this is 6210 * l2arc_feed_thread(), illustrated below; example sizes are included to 6211 * provide a better sense of ratio than this diagram: 6212 * 6213 * head --> tail 6214 * +---------------------+----------+ 6215 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 6216 * +---------------------+----------+ | o L2ARC eligible 6217 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 6218 * +---------------------+----------+ | 6219 * 15.9 Gbytes ^ 32 Mbytes | 6220 * headroom | 6221 * l2arc_feed_thread() 6222 * | 6223 * l2arc write hand <--[oooo]--' 6224 * | 8 Mbyte 6225 * | write max 6226 * V 6227 * +==============================+ 6228 * L2ARC dev |####|#|###|###| |####| ... | 6229 * +==============================+ 6230 * 32 Gbytes 6231 * 6232 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 6233 * evicted, then the L2ARC has cached a buffer much sooner than it probably 6234 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 6235 * safe to say that this is an uncommon case, since buffers at the end of 6236 * the ARC lists have moved there due to inactivity. 6237 * 6238 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 6239 * then the L2ARC simply misses copying some buffers. 
This serves as a 6240 * pressure valve to prevent heavy read workloads from both stalling the ARC 6241 * with waits and clogging the L2ARC with writes. This also helps prevent 6242 * the potential for the L2ARC to churn if it attempts to cache content too 6243 * quickly, such as during backups of the entire pool. 6244 * 6245 * 5. After system boot and before the ARC has filled main memory, there are 6246 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 6247 * lists can remain mostly static. Instead of searching from tail of these 6248 * lists as pictured, the l2arc_feed_thread() will search from the list heads 6249 * for eligible buffers, greatly increasing its chance of finding them. 6250 * 6251 * The L2ARC device write speed is also boosted during this time so that 6252 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 6253 * there are no L2ARC reads, and no fear of degrading read performance 6254 * through increased writes. 6255 * 6256 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 6257 * the vdev queue can aggregate them into larger and fewer writes. Each 6258 * device is written to in a rotor fashion, sweeping writes through 6259 * available space then repeating. 6260 * 6261 * 7. The L2ARC does not store dirty content. It never needs to flush 6262 * write buffers back to disk based storage. 6263 * 6264 * 8. If an ARC buffer is written (and dirtied) which also exists in the 6265 * L2ARC, the now stale L2ARC buffer is immediately dropped. 6266 * 6267 * The performance of the L2ARC can be tweaked by a number of tunables, which 6268 * may be necessary for different workloads: 6269 * 6270 * l2arc_write_max max write bytes per interval 6271 * l2arc_write_boost extra write bytes during device warmup 6272 * l2arc_noprefetch skip caching prefetched buffers 6273 * l2arc_headroom number of max device writes to precache 6274 * l2arc_headroom_boost when we find compressed buffers during ARC 6275 * scanning, we multiply headroom by this 6276 * percentage factor for the next scan cycle, 6277 * since more compressed buffers are likely to 6278 * be present 6279 * l2arc_feed_secs seconds between L2ARC writing 6280 * 6281 * Tunables may be removed or added as future performance improvements are 6282 * integrated, and also may become zpool properties. 6283 * 6284 * There are three key functions that control how the L2ARC warms up: 6285 * 6286 * l2arc_write_eligible() check if a buffer is eligible to cache 6287 * l2arc_write_size() calculate how much to write 6288 * l2arc_write_interval() calculate sleep delay between writes 6289 * 6290 * These three functions determine what to write, how much, and how quickly 6291 * to send writes. 6292 */ 6293 6294 static boolean_t 6295 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr) 6296 { 6297 /* 6298 * A buffer is *not* eligible for the L2ARC if it: 6299 * 1. belongs to a different spa. 6300 * 2. is already cached on the L2ARC. 6301 * 3. has an I/O in progress (it may be an incomplete read). 6302 * 4. is flagged not eligible (zfs property). 6303 */ 6304 if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) || 6305 HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr)) 6306 return (B_FALSE); 6307 6308 return (B_TRUE); 6309 } 6310 6311 static uint64_t 6312 l2arc_write_size(void) 6313 { 6314 uint64_t size; 6315 6316 /* 6317 * Make sure our globals have meaningful values in case the user 6318 * altered them. 
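 *
 * As an example (assuming the stock 8 MB defaults for l2arc_write_max
 * and l2arc_write_boost have not been tuned), this returns 8 MB per
 * feed interval once the ARC is warm, or 16 MB while arc_warm is still
 * B_FALSE and the boost is added below.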
6319 */ 6320 size = l2arc_write_max; 6321 if (size == 0) { 6322 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must " 6323 "be greater than zero, resetting it to the default (%d)", 6324 L2ARC_WRITE_SIZE); 6325 size = l2arc_write_max = L2ARC_WRITE_SIZE; 6326 } 6327 6328 if (arc_warm == B_FALSE) 6329 size += l2arc_write_boost; 6330 6331 return (size); 6332 6333 } 6334 6335 static clock_t 6336 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 6337 { 6338 clock_t interval, next, now; 6339 6340 /* 6341 * If the ARC lists are busy, increase our write rate; if the 6342 * lists are stale, idle back. This is achieved by checking 6343 * how much we previously wrote - if it was more than half of 6344 * what we wanted, schedule the next write much sooner. 6345 */ 6346 if (l2arc_feed_again && wrote > (wanted / 2)) 6347 interval = (hz * l2arc_feed_min_ms) / 1000; 6348 else 6349 interval = hz * l2arc_feed_secs; 6350 6351 now = ddi_get_lbolt(); 6352 next = MAX(now, MIN(now + interval, began + interval)); 6353 6354 return (next); 6355 } 6356 6357 /* 6358 * Cycle through L2ARC devices. This is how L2ARC load balances. 6359 * If a device is returned, this also returns holding the spa config lock. 6360 */ 6361 static l2arc_dev_t * 6362 l2arc_dev_get_next(void) 6363 { 6364 l2arc_dev_t *first, *next = NULL; 6365 6366 /* 6367 * Lock out the removal of spas (spa_namespace_lock), then removal 6368 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 6369 * both locks will be dropped and a spa config lock held instead. 6370 */ 6371 mutex_enter(&spa_namespace_lock); 6372 mutex_enter(&l2arc_dev_mtx); 6373 6374 /* if there are no vdevs, there is nothing to do */ 6375 if (l2arc_ndev == 0) 6376 goto out; 6377 6378 first = NULL; 6379 next = l2arc_dev_last; 6380 do { 6381 /* loop around the list looking for a non-faulted vdev */ 6382 if (next == NULL) { 6383 next = list_head(l2arc_dev_list); 6384 } else { 6385 next = list_next(l2arc_dev_list, next); 6386 if (next == NULL) 6387 next = list_head(l2arc_dev_list); 6388 } 6389 6390 /* if we have come back to the start, bail out */ 6391 if (first == NULL) 6392 first = next; 6393 else if (next == first) 6394 break; 6395 6396 } while (vdev_is_dead(next->l2ad_vdev)); 6397 6398 /* if we were unable to find any usable vdevs, return NULL */ 6399 if (vdev_is_dead(next->l2ad_vdev)) 6400 next = NULL; 6401 6402 l2arc_dev_last = next; 6403 6404 out: 6405 mutex_exit(&l2arc_dev_mtx); 6406 6407 /* 6408 * Grab the config lock to prevent the 'next' device from being 6409 * removed while we are writing to it. 6410 */ 6411 if (next != NULL) 6412 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 6413 mutex_exit(&spa_namespace_lock); 6414 6415 return (next); 6416 } 6417 6418 /* 6419 * Free buffers that were tagged for destruction. 6420 */ 6421 static void 6422 l2arc_do_free_on_write() 6423 { 6424 list_t *buflist; 6425 l2arc_data_free_t *df, *df_prev; 6426 6427 mutex_enter(&l2arc_free_on_write_mtx); 6428 buflist = l2arc_free_on_write; 6429 6430 for (df = list_tail(buflist); df; df = df_prev) { 6431 df_prev = list_prev(buflist, df); 6432 ASSERT3P(df->l2df_abd, !=, NULL); 6433 abd_free(df->l2df_abd); 6434 list_remove(buflist, df); 6435 kmem_free(df, sizeof (l2arc_data_free_t)); 6436 } 6437 6438 mutex_exit(&l2arc_free_on_write_mtx); 6439 } 6440 6441 /* 6442 * A write to a cache device has completed. Update all headers to allow 6443 * reads from these buffers to begin. 
6444 */ 6445 static void 6446 l2arc_write_done(zio_t *zio) 6447 { 6448 l2arc_write_callback_t *cb; 6449 l2arc_dev_t *dev; 6450 list_t *buflist; 6451 arc_buf_hdr_t *head, *hdr, *hdr_prev; 6452 kmutex_t *hash_lock; 6453 int64_t bytes_dropped = 0; 6454 6455 cb = zio->io_private; 6456 ASSERT3P(cb, !=, NULL); 6457 dev = cb->l2wcb_dev; 6458 ASSERT3P(dev, !=, NULL); 6459 head = cb->l2wcb_head; 6460 ASSERT3P(head, !=, NULL); 6461 buflist = &dev->l2ad_buflist; 6462 ASSERT3P(buflist, !=, NULL); 6463 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 6464 l2arc_write_callback_t *, cb); 6465 6466 if (zio->io_error != 0) 6467 ARCSTAT_BUMP(arcstat_l2_writes_error); 6468 6469 /* 6470 * All writes completed, or an error was hit. 6471 */ 6472 top: 6473 mutex_enter(&dev->l2ad_mtx); 6474 for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) { 6475 hdr_prev = list_prev(buflist, hdr); 6476 6477 hash_lock = HDR_LOCK(hdr); 6478 6479 /* 6480 * We cannot use mutex_enter or else we can deadlock 6481 * with l2arc_write_buffers (due to swapping the order 6482 * the hash lock and l2ad_mtx are taken). 6483 */ 6484 if (!mutex_tryenter(hash_lock)) { 6485 /* 6486 * Missed the hash lock. We must retry so we 6487 * don't leave the ARC_FLAG_L2_WRITING bit set. 6488 */ 6489 ARCSTAT_BUMP(arcstat_l2_writes_lock_retry); 6490 6491 /* 6492 * We don't want to rescan the headers we've 6493 * already marked as having been written out, so 6494 * we reinsert the head node so we can pick up 6495 * where we left off. 6496 */ 6497 list_remove(buflist, head); 6498 list_insert_after(buflist, hdr, head); 6499 6500 mutex_exit(&dev->l2ad_mtx); 6501 6502 /* 6503 * We wait for the hash lock to become available 6504 * to try and prevent busy waiting, and increase 6505 * the chance we'll be able to acquire the lock 6506 * the next time around. 6507 */ 6508 mutex_enter(hash_lock); 6509 mutex_exit(hash_lock); 6510 goto top; 6511 } 6512 6513 /* 6514 * We could not have been moved into the arc_l2c_only 6515 * state while in-flight due to our ARC_FLAG_L2_WRITING 6516 * bit being set. Let's just ensure that's being enforced. 6517 */ 6518 ASSERT(HDR_HAS_L1HDR(hdr)); 6519 6520 if (zio->io_error != 0) { 6521 /* 6522 * Error - drop L2ARC entry. 6523 */ 6524 list_remove(buflist, hdr); 6525 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR); 6526 6527 ARCSTAT_INCR(arcstat_l2_psize, -arc_hdr_size(hdr)); 6528 ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr)); 6529 6530 bytes_dropped += arc_hdr_size(hdr); 6531 (void) refcount_remove_many(&dev->l2ad_alloc, 6532 arc_hdr_size(hdr), hdr); 6533 } 6534 6535 /* 6536 * Allow ARC to begin reads and ghost list evictions to 6537 * this L2ARC entry. 6538 */ 6539 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING); 6540 6541 mutex_exit(hash_lock); 6542 } 6543 6544 atomic_inc_64(&l2arc_writes_done); 6545 list_remove(buflist, head); 6546 ASSERT(!HDR_HAS_L1HDR(head)); 6547 kmem_cache_free(hdr_l2only_cache, head); 6548 mutex_exit(&dev->l2ad_mtx); 6549 6550 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0); 6551 6552 l2arc_do_free_on_write(); 6553 6554 kmem_free(cb, sizeof (l2arc_write_callback_t)); 6555 } 6556 6557 /* 6558 * A read to a cache device completed. Validate buffer contents before 6559 * handing over to the regular ARC routines. 
6560 */ 6561 static void 6562 l2arc_read_done(zio_t *zio) 6563 { 6564 l2arc_read_callback_t *cb; 6565 arc_buf_hdr_t *hdr; 6566 kmutex_t *hash_lock; 6567 boolean_t valid_cksum; 6568 6569 ASSERT3P(zio->io_vd, !=, NULL); 6570 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 6571 6572 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 6573 6574 cb = zio->io_private; 6575 ASSERT3P(cb, !=, NULL); 6576 hdr = cb->l2rcb_hdr; 6577 ASSERT3P(hdr, !=, NULL); 6578 6579 hash_lock = HDR_LOCK(hdr); 6580 mutex_enter(hash_lock); 6581 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 6582 6583 /* 6584 * If the data was read into a temporary buffer, 6585 * move it and free the buffer. 6586 */ 6587 if (cb->l2rcb_abd != NULL) { 6588 ASSERT3U(arc_hdr_size(hdr), <, zio->io_size); 6589 if (zio->io_error == 0) { 6590 abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd, 6591 arc_hdr_size(hdr)); 6592 } 6593 6594 /* 6595 * The following must be done regardless of whether 6596 * there was an error: 6597 * - free the temporary buffer 6598 * - point zio to the real ARC buffer 6599 * - set zio size accordingly 6600 * These are required because zio is either re-used for 6601 * an I/O of the block in the case of the error 6602 * or the zio is passed to arc_read_done() and it 6603 * needs real data. 6604 */ 6605 abd_free(cb->l2rcb_abd); 6606 zio->io_size = zio->io_orig_size = arc_hdr_size(hdr); 6607 zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd; 6608 } 6609 6610 ASSERT3P(zio->io_abd, !=, NULL); 6611 6612 /* 6613 * Check this survived the L2ARC journey. 6614 */ 6615 ASSERT3P(zio->io_abd, ==, hdr->b_l1hdr.b_pabd); 6616 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 6617 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 6618 6619 valid_cksum = arc_cksum_is_equal(hdr, zio); 6620 if (valid_cksum && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 6621 mutex_exit(hash_lock); 6622 zio->io_private = hdr; 6623 arc_read_done(zio); 6624 } else { 6625 mutex_exit(hash_lock); 6626 /* 6627 * Buffer didn't survive caching. Increment stats and 6628 * reissue to the original storage device. 6629 */ 6630 if (zio->io_error != 0) { 6631 ARCSTAT_BUMP(arcstat_l2_io_error); 6632 } else { 6633 zio->io_error = SET_ERROR(EIO); 6634 } 6635 if (!valid_cksum) 6636 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 6637 6638 /* 6639 * If there's no waiter, issue an async i/o to the primary 6640 * storage now. If there *is* a waiter, the caller must 6641 * issue the i/o in a context where it's OK to block. 6642 */ 6643 if (zio->io_waiter == NULL) { 6644 zio_t *pio = zio_unique_parent(zio); 6645 6646 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 6647 6648 zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp, 6649 hdr->b_l1hdr.b_pabd, zio->io_size, arc_read_done, 6650 hdr, zio->io_priority, cb->l2rcb_flags, 6651 &cb->l2rcb_zb)); 6652 } 6653 } 6654 6655 kmem_free(cb, sizeof (l2arc_read_callback_t)); 6656 } 6657 6658 /* 6659 * This is the list priority from which the L2ARC will search for pages to 6660 * cache. This is used within loops (0..3) to cycle through lists in the 6661 * desired order. This order can have a significant effect on cache 6662 * performance. 6663 * 6664 * Currently the metadata lists are hit first, MFU then MRU, followed by 6665 * the data lists. This function returns a locked list, and also returns 6666 * the lock pointer. 
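 *
 * Concretely, mirroring the switch below: list_num 0 selects MFU
 * metadata, 1 MRU metadata, 2 MFU data, and 3 MRU data.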
6667 */ 6668 static multilist_sublist_t * 6669 l2arc_sublist_lock(int list_num) 6670 { 6671 multilist_t *ml = NULL; 6672 unsigned int idx; 6673 6674 ASSERT(list_num >= 0 && list_num <= 3); 6675 6676 switch (list_num) { 6677 case 0: 6678 ml = arc_mfu->arcs_list[ARC_BUFC_METADATA]; 6679 break; 6680 case 1: 6681 ml = arc_mru->arcs_list[ARC_BUFC_METADATA]; 6682 break; 6683 case 2: 6684 ml = arc_mfu->arcs_list[ARC_BUFC_DATA]; 6685 break; 6686 case 3: 6687 ml = arc_mru->arcs_list[ARC_BUFC_DATA]; 6688 break; 6689 } 6690 6691 /* 6692 * Return a randomly-selected sublist. This is acceptable 6693 * because the caller feeds only a little bit of data for each 6694 * call (8MB). Subsequent calls will result in different 6695 * sublists being selected. 6696 */ 6697 idx = multilist_get_random_index(ml); 6698 return (multilist_sublist_lock(ml, idx)); 6699 } 6700 6701 /* 6702 * Evict buffers from the device write hand to the distance specified in 6703 * bytes. This distance may span populated buffers, it may span nothing. 6704 * This is clearing a region on the L2ARC device ready for writing. 6705 * If the 'all' boolean is set, every buffer is evicted. 6706 */ 6707 static void 6708 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 6709 { 6710 list_t *buflist; 6711 arc_buf_hdr_t *hdr, *hdr_prev; 6712 kmutex_t *hash_lock; 6713 uint64_t taddr; 6714 6715 buflist = &dev->l2ad_buflist; 6716 6717 if (!all && dev->l2ad_first) { 6718 /* 6719 * This is the first sweep through the device. There is 6720 * nothing to evict. 6721 */ 6722 return; 6723 } 6724 6725 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 6726 /* 6727 * When nearing the end of the device, evict to the end 6728 * before the device write hand jumps to the start. 6729 */ 6730 taddr = dev->l2ad_end; 6731 } else { 6732 taddr = dev->l2ad_hand + distance; 6733 } 6734 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 6735 uint64_t, taddr, boolean_t, all); 6736 6737 top: 6738 mutex_enter(&dev->l2ad_mtx); 6739 for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) { 6740 hdr_prev = list_prev(buflist, hdr); 6741 6742 hash_lock = HDR_LOCK(hdr); 6743 6744 /* 6745 * We cannot use mutex_enter or else we can deadlock 6746 * with l2arc_write_buffers (due to swapping the order 6747 * the hash lock and l2ad_mtx are taken). 6748 */ 6749 if (!mutex_tryenter(hash_lock)) { 6750 /* 6751 * Missed the hash lock. Retry. 6752 */ 6753 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 6754 mutex_exit(&dev->l2ad_mtx); 6755 mutex_enter(hash_lock); 6756 mutex_exit(hash_lock); 6757 goto top; 6758 } 6759 6760 /* 6761 * A header can't be on this list if it doesn't have L2 header. 6762 */ 6763 ASSERT(HDR_HAS_L2HDR(hdr)); 6764 6765 /* Ensure this header has finished being written. */ 6766 ASSERT(!HDR_L2_WRITING(hdr)); 6767 ASSERT(!HDR_L2_WRITE_HEAD(hdr)); 6768 6769 if (!all && (hdr->b_l2hdr.b_daddr >= taddr || 6770 hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) { 6771 /* 6772 * We've evicted to the target address, 6773 * or the end of the device. 6774 */ 6775 mutex_exit(hash_lock); 6776 break; 6777 } 6778 6779 if (!HDR_HAS_L1HDR(hdr)) { 6780 ASSERT(!HDR_L2_READING(hdr)); 6781 /* 6782 * This doesn't exist in the ARC. Destroy. 6783 * arc_hdr_destroy() will call list_remove() 6784 * and decrement arcstat_l2_lsize. 
6785 */ 6786 arc_change_state(arc_anon, hdr, hash_lock); 6787 arc_hdr_destroy(hdr); 6788 } else { 6789 ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only); 6790 ARCSTAT_BUMP(arcstat_l2_evict_l1cached); 6791 /* 6792 * Invalidate issued or about to be issued 6793 * reads, since we may be about to write 6794 * over this location. 6795 */ 6796 if (HDR_L2_READING(hdr)) { 6797 ARCSTAT_BUMP(arcstat_l2_evict_reading); 6798 arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED); 6799 } 6800 6801 arc_hdr_l2hdr_destroy(hdr); 6802 } 6803 mutex_exit(hash_lock); 6804 } 6805 mutex_exit(&dev->l2ad_mtx); 6806 } 6807 6808 /* 6809 * Find and write ARC buffers to the L2ARC device. 6810 * 6811 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid 6812 * for reading until they have completed writing. 6813 * The headroom_boost is an in-out parameter used to maintain headroom boost 6814 * state between calls to this function. 6815 * 6816 * Returns the number of bytes actually written (which may be smaller than 6817 * the delta by which the device hand has changed due to alignment). 6818 */ 6819 static uint64_t 6820 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 6821 { 6822 arc_buf_hdr_t *hdr, *hdr_prev, *head; 6823 uint64_t write_asize, write_psize, write_lsize, headroom; 6824 boolean_t full; 6825 l2arc_write_callback_t *cb; 6826 zio_t *pio, *wzio; 6827 uint64_t guid = spa_load_guid(spa); 6828 6829 ASSERT3P(dev->l2ad_vdev, !=, NULL); 6830 6831 pio = NULL; 6832 write_lsize = write_asize = write_psize = 0; 6833 full = B_FALSE; 6834 head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE); 6835 arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR); 6836 6837 /* 6838 * Copy buffers for L2ARC writing. 6839 */ 6840 for (int try = 0; try <= 3; try++) { 6841 multilist_sublist_t *mls = l2arc_sublist_lock(try); 6842 uint64_t passed_sz = 0; 6843 6844 /* 6845 * L2ARC fast warmup. 6846 * 6847 * Until the ARC is warm and starts to evict, read from the 6848 * head of the ARC lists rather than the tail. 6849 */ 6850 if (arc_warm == B_FALSE) 6851 hdr = multilist_sublist_head(mls); 6852 else 6853 hdr = multilist_sublist_tail(mls); 6854 6855 headroom = target_sz * l2arc_headroom; 6856 if (zfs_compressed_arc_enabled) 6857 headroom = (headroom * l2arc_headroom_boost) / 100; 6858 6859 for (; hdr; hdr = hdr_prev) { 6860 kmutex_t *hash_lock; 6861 6862 if (arc_warm == B_FALSE) 6863 hdr_prev = multilist_sublist_next(mls, hdr); 6864 else 6865 hdr_prev = multilist_sublist_prev(mls, hdr); 6866 6867 hash_lock = HDR_LOCK(hdr); 6868 if (!mutex_tryenter(hash_lock)) { 6869 /* 6870 * Skip this buffer rather than waiting. 6871 */ 6872 continue; 6873 } 6874 6875 passed_sz += HDR_GET_LSIZE(hdr); 6876 if (passed_sz > headroom) { 6877 /* 6878 * Searched too far. 6879 */ 6880 mutex_exit(hash_lock); 6881 break; 6882 } 6883 6884 if (!l2arc_write_eligible(guid, hdr)) { 6885 mutex_exit(hash_lock); 6886 continue; 6887 } 6888 6889 /* 6890 * We rely on the L1 portion of the header below, so 6891 * it's invalid for this header to have been evicted out 6892 * of the ghost cache, prior to being written out. The 6893 * ARC_FLAG_L2_WRITING bit ensures this won't happen. 
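 *
 * The size accounting that follows uses that L1 data. As a sketch
 * (ashift hypothetical): with an ashift of 12, a 5120-byte compressed
 * header payload gives psize = 5120 but asize = 8192 from
 * vdev_psize_to_asize(), and the shortfall is zero-padded before the
 * write is issued.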
6894 */ 6895 ASSERT(HDR_HAS_L1HDR(hdr)); 6896 6897 ASSERT3U(HDR_GET_PSIZE(hdr), >, 0); 6898 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 6899 ASSERT3U(arc_hdr_size(hdr), >, 0); 6900 uint64_t psize = arc_hdr_size(hdr); 6901 uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, 6902 psize); 6903 6904 if ((write_asize + asize) > target_sz) { 6905 full = B_TRUE; 6906 mutex_exit(hash_lock); 6907 break; 6908 } 6909 6910 if (pio == NULL) { 6911 /* 6912 * Insert a dummy header on the buflist so 6913 * l2arc_write_done() can find where the 6914 * write buffers begin without searching. 6915 */ 6916 mutex_enter(&dev->l2ad_mtx); 6917 list_insert_head(&dev->l2ad_buflist, head); 6918 mutex_exit(&dev->l2ad_mtx); 6919 6920 cb = kmem_alloc( 6921 sizeof (l2arc_write_callback_t), KM_SLEEP); 6922 cb->l2wcb_dev = dev; 6923 cb->l2wcb_head = head; 6924 pio = zio_root(spa, l2arc_write_done, cb, 6925 ZIO_FLAG_CANFAIL); 6926 } 6927 6928 hdr->b_l2hdr.b_dev = dev; 6929 hdr->b_l2hdr.b_daddr = dev->l2ad_hand; 6930 arc_hdr_set_flags(hdr, 6931 ARC_FLAG_L2_WRITING | ARC_FLAG_HAS_L2HDR); 6932 6933 mutex_enter(&dev->l2ad_mtx); 6934 list_insert_head(&dev->l2ad_buflist, hdr); 6935 mutex_exit(&dev->l2ad_mtx); 6936 6937 (void) refcount_add_many(&dev->l2ad_alloc, psize, hdr); 6938 6939 /* 6940 * Normally the L2ARC can use the hdr's data, but if 6941 * we're sharing data between the hdr and one of its 6942 * bufs, L2ARC needs its own copy of the data so that 6943 * the ZIO below can't race with the buf consumer. 6944 * Another case where we need to create a copy of the 6945 * data is when the buffer size is not device-aligned 6946 * and we need to pad the block to make it such. 6947 * That also keeps the clock hand suitably aligned. 6948 * 6949 * To ensure that the copy will be available for the 6950 * lifetime of the ZIO and be cleaned up afterwards, we 6951 * add it to the l2arc_free_on_write queue. 6952 */ 6953 abd_t *to_write; 6954 if (!HDR_SHARED_DATA(hdr) && psize == asize) { 6955 to_write = hdr->b_l1hdr.b_pabd; 6956 } else { 6957 to_write = abd_alloc_for_io(asize, 6958 HDR_ISTYPE_METADATA(hdr)); 6959 abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize); 6960 if (asize != psize) { 6961 abd_zero_off(to_write, psize, 6962 asize - psize); 6963 } 6964 l2arc_free_abd_on_write(to_write, asize, 6965 arc_buf_type(hdr)); 6966 } 6967 wzio = zio_write_phys(pio, dev->l2ad_vdev, 6968 hdr->b_l2hdr.b_daddr, asize, to_write, 6969 ZIO_CHECKSUM_OFF, NULL, hdr, 6970 ZIO_PRIORITY_ASYNC_WRITE, 6971 ZIO_FLAG_CANFAIL, B_FALSE); 6972 6973 write_lsize += HDR_GET_LSIZE(hdr); 6974 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 6975 zio_t *, wzio); 6976 6977 write_psize += psize; 6978 write_asize += asize; 6979 dev->l2ad_hand += asize; 6980 6981 mutex_exit(hash_lock); 6982 6983 (void) zio_nowait(wzio); 6984 } 6985 6986 multilist_sublist_unlock(mls); 6987 6988 if (full == B_TRUE) 6989 break; 6990 } 6991 6992 /* No buffers selected for writing? */ 6993 if (pio == NULL) { 6994 ASSERT0(write_lsize); 6995 ASSERT(!HDR_HAS_L1HDR(head)); 6996 kmem_cache_free(hdr_l2only_cache, head); 6997 return (0); 6998 } 6999 7000 ASSERT3U(write_asize, <=, target_sz); 7001 ARCSTAT_BUMP(arcstat_l2_writes_sent); 7002 ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize); 7003 ARCSTAT_INCR(arcstat_l2_lsize, write_lsize); 7004 ARCSTAT_INCR(arcstat_l2_psize, write_psize); 7005 vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0); 7006 7007 /* 7008 * Bump device hand to the device start if it is approaching the end. 7009 * l2arc_evict() will already have evicted ahead for this case. 
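 *
 * E.g. (sizes hypothetical): with l2ad_start = 4 MB, l2ad_end = 32 GB
 * and target_sz = 8 MB, a hand sitting 6 MB short of l2ad_end wraps
 * back to 4 MB here instead of writing past the end of the device.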
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_asize);
}

/*
 * This thread feeds the L2ARC at regular intervals. This is the beating
 * heart of the L2ARC.
 */
/* ARGSUSED */
static void
l2arc_feed_thread(void *unused)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt();

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    next);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa. This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal. l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT3P(spa, !=, NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size();

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}

boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}

/*
 * Add a vdev for use by the L2ARC. By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
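	/*
	 * The region managed by the L2ARC skips the labels at the front of
	 * the device (VDEV_LABEL_START_SIZE); l2ad_start and l2ad_end bound
	 * the space the write hand cycles through.
	 */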
	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;

	mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));

	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
	refcount_create(&adddev->l2ad_alloc);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT3P(remdev, !=, NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references. L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(&remdev->l2ad_buflist);
	mutex_destroy(&remdev->l2ad_mtx);
	refcount_destroy(&remdev->l2ad_alloc);
	kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}

void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */
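
	/*
	 * Drain anything still queued on the free-on-write list from the
	 * final L2ARC writes before the locks and lists are torn down.
	 */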
	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}