/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
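
/*
 * Illustrative sketch of the variable-size eviction described in (3) above.
 * Pseudocode only, not a function in this file (the real work is done by
 * the arc_evict_state() family below); "bytes_needed" and "bytes_evicted"
 * are hypothetical names:
 *
 *      bytes_evicted = 0;
 *      while (bytes_evicted < bytes_needed && evictable headers remain) {
 *              pick the oldest header with no active references;
 *              bytes_evicted += its block size;
 *              evict it;
 *      }
 */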

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal ARC algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * ARC list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each ARC state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an ARC list lock you
 * must use mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2ad_mtx on each vdev for the following:
 *
 *      - L2ARC buflist creation
 *      - L2ARC buflist eviction
 *      - L2ARC write completion, which walks L2ARC buflists
 *      - ARC header destruction, as it removes from L2ARC buflists
 *      - ARC header release, as it removes from L2ARC buflists
 */
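
/*
 * Illustrative sketch (not a function in this file) of the lookup pattern
 * implied by the rules above, using buf_hash_find() as defined later in
 * this file:
 *
 *      kmutex_t *hash_lock;
 *      arc_buf_hdr_t *hdr = buf_hash_find(spa, bp, &hash_lock);
 *      if (hdr != NULL) {
 *              ... use hdr's fields while hash_lock is held ...
 *              mutex_exit(hash_lock);
 *      }
 *
 * hash_lock is set to NULL if the header was not found.
 */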

/*
 * ARC operation:
 *
 * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
 * This structure can point either to a block that is still in the cache or to
 * one that is only accessible in an L2 ARC device, or it can provide
 * information about a block that was recently evicted. If a block is
 * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
 * information to retrieve it from the L2ARC device. This information is
 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
 * that is in this state cannot access the data directly.
 *
 * Blocks that are actively being referenced or have not been evicted
 * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
 * the arc_buf_hdr_t that will point to the data block in memory. A block can
 * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
 * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
 * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
 *
 * The L1ARC's data pointer may or may not be uncompressed. The ARC has the
 * ability to store the physical data (b_pabd) associated with the DVA of the
 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
 * it will match its on-disk compression characteristics. This behavior can be
 * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
 * compressed ARC functionality is disabled, the b_pabd will point to an
 * uncompressed version of the on-disk data.
 *
 * Data in the L1ARC is not accessed by consumers of the ARC directly. Each
 * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
 * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
 * consumer. The ARC will provide references to this data and will keep it
 * cached until it is no longer in use. The ARC caches only the L1ARC's physical
 * data block and will evict any arc_buf_t that is no longer referenced. The
 * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
 * "overhead_size" kstat.
 *
 * Depending on the consumer, an arc_buf_t can be requested in uncompressed or
 * compressed form. The typical case is that consumers will want uncompressed
 * data, and when that happens a new data buffer is allocated where the data is
 * decompressed for them to use. Currently the only consumer who wants
 * compressed arc_buf_t's is "zfs send", when it streams data exactly as it
 * exists on disk. When this happens, the arc_buf_t's data buffer is shared
 * with the arc_buf_hdr_t.
 *
 * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
 * first one is owned by a compressed send consumer (and therefore references
 * the same compressed data buffer as the arc_buf_hdr_t) and the second could be
 * used by any other consumer (and has its own uncompressed copy of the data
 * buffer).
 *
 *   arc_buf_hdr_t
 *   +-----------+
 *   | fields    |
 *   | common to |
 *   | L1- and   |
 *   | L2ARC     |
 *   +-----------+
 *   | l2arc_buf_hdr_t
 *   |           |
 *   +-----------+
 *   | l1arc_buf_hdr_t
 *   |           |              arc_buf_t
 *   | b_buf     +------------>+-----------+      arc_buf_t
 *   | b_pabd    +-+           |b_next     +---->+-----------+
 *   +-----------+ |           |-----------|     |b_next     +-->NULL
 *                 |           |b_comp = T |     +-----------+
 *                 |           |b_data     +-+   |b_comp = F |
 *                 |           +-----------+ |   |b_data     +-+
 *                 +->+------+               |   +-----------+ |
 *        compressed  |      |               |                 |
 *           data     |      |<--------------+                 | uncompressed
 *                    +------+  compressed,                    |     data
 *                                shared                +-->+------+
 *                                 data                 |   |      |
 *                                                      |   |      |
 *                                                      +---+------+
 *
 * When a consumer reads a block, the ARC must first look to see if the
 * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
 * arc_buf_t and either copies uncompressed data into a new data buffer from an
 * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
 * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
 * hdr is compressed and the desired compression characteristics of the
 * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
 * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
 * the last buffer in the hdr's b_buf list, however a shared compressed buf can
 * be anywhere in the hdr's list.
 *
 * The diagram below shows an example of an uncompressed ARC hdr that is
 * sharing its data with an arc_buf_t (note that the shared uncompressed buf is
 * the last element in the buf list):
 *
 *                arc_buf_hdr_t
 *                +-----------+
 *                |           |
 *                |           |
 *                |           |
 *                +-----------+
 * l2arc_buf_hdr_t|           |
 *                |           |
 *                +-----------+
 * l1arc_buf_hdr_t|           |
 *                |           |                 arc_buf_t    (shared)
 *                |    b_buf  +------------>+---------+      arc_buf_t
 *                |           |             |b_next   +---->+---------+
 *                |  b_pabd   +-+           |---------|     |b_next   +-->NULL
 *                +-----------+ |           |         |     +---------+
 *                              |           |b_data   +-+   |         |
 *                              |           +---------+ |   |b_data   +-+
 *                              +->+------+             |   +---------+ |
 *                                 |      |             |               |
 *                   uncompressed  |      |             |               |
 *                        data     +------+             |               |
 *                                    ^                 +->+------+     |
 *                                    |       uncompressed |      |     |
 *                                    |           data     |      |     |
 *                                    |                    +------+     |
 *                                    +---------------------------------+
 *
 * Writing to the ARC requires that the ARC first discard the hdr's b_pabd
 * since the physical block is about to be rewritten. The new data contents
 * will be contained in the arc_buf_t. As the I/O pipeline performs the write,
 * it may compress the data before writing it to disk. The ARC will be called
 * with the transformed data and will bcopy the transformed on-disk block into
 * a newly allocated b_pabd. Writes are always done into buffers which have
 * either been loaned (and hence are new and don't have other readers) or
 * buffers which have been released (and hence have their own hdr, if there
 * were originally other readers of the buf's original hdr). This ensures that
 * the ARC only needs to update a single buf and its hdr after a write occurs.
 *
 * When the L2ARC is in use, it will also take advantage of the b_pabd. The
 * L2ARC will always write the contents of b_pabd to the L2ARC. This means
 * that when compressed ARC is enabled that the L2ARC blocks are identical
 * to the on-disk block in the main data pool. This provides a significant
 * advantage since the ARC can leverage the bp's checksum when reading from the
 * L2ARC to determine if the contents are valid. However, if the compressed
 * ARC is disabled, then the L2ARC's block must be transformed to look
 * like the physical block in the main data pool before comparing the
 * checksum and determining its validity.
 *
 * The L1ARC has a slightly different system for storing encrypted data.
 * Raw (encrypted + possibly compressed) data has a few subtle differences from
 * data that is just compressed. The biggest difference is that it is not
 * possible to decrypt encrypted data (or vice versa) if the keys aren't loaded.
 * The other difference is that encryption cannot be treated as a suggestion.
 * If a caller would prefer compressed data, but they actually wind up with
 * uncompressed data, the worst thing that could happen is a performance hit.
 * If the caller requests encrypted data, however, we must be
 * sure they actually get it or else secret information could be leaked. Raw
 * data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
 * may have both an encrypted version and a decrypted version of its data at
 * once. When a caller needs a raw arc_buf_t, it is allocated and the data is
 * copied out of this header. To avoid complications with b_pabd, raw buffers
 * cannot be shared.
 */
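
/*
 * The read-time choices above, condensed into pseudocode (a sketch only;
 * the real logic lives in the ARC read path later in this file):
 *
 *      if (consumer wants compressed data && hdr's b_pabd is compressed)
 *              share b_pabd with the new arc_buf_t;
 *      else if (an existing arc_buf_t already holds uncompressed data)
 *              copy it into a new buffer;
 *      else
 *              decompress b_pabd into a new buffer (sharing b_pabd instead
 *              when the hdr is uncompressed and this is the last buf);
 */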

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/aggsum.h>
#include <sys/cityhash.h>

#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
int arc_procfd;
#endif

/*
 * This thread's job is to keep enough free memory in the system by
 * calling arc_kmem_reap_now() plus arc_shrink(), which improves
 * arc_available_memory().
 */
static zthr_t *arc_reap_zthr;

/*
 * This thread's job is to keep arc_size under arc_c, by calling
 * arc_adjust(), which improves arc_is_overflowing().
 */
static zthr_t *arc_adjust_zthr;

static kmutex_t arc_adjust_lock;
static kcondvar_t arc_adjust_waiters_cv;
static boolean_t arc_adjust_needed = B_FALSE;

uint_t arc_reduce_dnlc_percent = 3;

/*
 * The number of headers to evict in arc_evict_state_impl() before
 * dropping the sublist lock and evicting from another sublist. A lower
 * value means we're more likely to evict the "correct" header (i.e. the
 * oldest header in the arc state), but comes with higher overhead
 * (i.e. more invocations of arc_evict_state_impl()).
 */
int zfs_arc_evict_batch_limit = 10;

/* number of seconds before growing cache again */
int arc_grow_retry = 60;

/*
 * Minimum time between calls to arc_kmem_reap_soon(). Note that this will
 * be converted to ticks, so with the default hz=100, a setting of 15 ms
 * will actually wait 2 ticks, or 20 ms.
 */
int arc_kmem_cache_reap_retry_ms = 1000;

/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
int zfs_arc_overflow_shift = 8;

/* shift of arc_c for calculating both min and max arc_p */
int arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
int arc_shrink_shift = 7;

/*
 * log2(fraction of ARC which must be free to allow growing).
 * I.e. if there is less than arc_c >> arc_no_grow_shift free memory,
 * when reading a new block into the ARC, we will evict an equal-sized block
 * from the ARC.
 *
 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
 * we will still not allow it to grow.
 */
int arc_no_grow_shift = 5;


/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int zfs_arc_min_prefetch_ms = 1;
static int zfs_arc_min_prescient_prefetch_ms = 6;

/*
 * If this percent of memory is free, don't throttle.
 */
int arc_lotsfree_percent = 10;

static boolean_t arc_initialized;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;
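
/*
 * Worked example of the shift tunables above (illustrative numbers): with
 * arc_c at 4 GB, arc_shrink_shift = 7 makes each shrink pass target
 * arc_c >> 7 = 32 MB, while arc_no_grow_shift = 5 requires arc_c >> 5 =
 * 128 MB of free memory before the ARC is allowed to grow.
 */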

/*
 * log2 fraction of the zio arena to keep free.
 */
int arc_zio_arena_free_shift = 2;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
uint64_t zfs_arc_meta_min = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */

/*
 * ARC dirty data constraints for arc_tempreserve_space() throttle
 */
uint_t zfs_arc_dirty_limit_percent = 50;        /* total dirty data limit */
uint_t zfs_arc_anon_limit_percent = 25;         /* anon block dirty limit */
uint_t zfs_arc_pool_dirty_percent = 20;         /* each pool's anon allowance */

boolean_t zfs_compressed_arc_enabled = B_TRUE;

/*
 * Note that buffers can be in one of 6 states:
 *      ARC_anon        - anonymous (discussed below)
 *      ARC_mru         - recently used, currently cached
 *      ARC_mru_ghost   - recently used, no longer in cache
 *      ARC_mfu         - frequently used, currently cached
 *      ARC_mfu_ghost   - frequently used, no longer in cache
 *      ARC_l2c_only    - exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are
 * the only buffers that can be evicted or deleted. Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
        /*
         * list of evictable buffers
         */
        multilist_t *arcs_list[ARC_BUFC_NUMTYPES];
        /*
         * total amount of evictable data in this state
         */
        zfs_refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
        /*
         * total amount of data in this state; this includes: evictable,
         * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
         */
        zfs_refcount_t arcs_size;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
        kstat_named_t arcstat_hits;
        kstat_named_t arcstat_misses;
        kstat_named_t arcstat_demand_data_hits;
        kstat_named_t arcstat_demand_data_misses;
        kstat_named_t arcstat_demand_metadata_hits;
        kstat_named_t arcstat_demand_metadata_misses;
        kstat_named_t arcstat_prefetch_data_hits;
        kstat_named_t arcstat_prefetch_data_misses;
        kstat_named_t arcstat_prefetch_metadata_hits;
        kstat_named_t arcstat_prefetch_metadata_misses;
        kstat_named_t arcstat_mru_hits;
        kstat_named_t arcstat_mru_ghost_hits;
        kstat_named_t arcstat_mfu_hits;
        kstat_named_t arcstat_mfu_ghost_hits;
        kstat_named_t arcstat_deleted;
        /*
         * Number of buffers that could not be evicted because the hash lock
         * was held by another thread. The lock may not necessarily be held
         * by something using the same buffer, since hash locks are shared
         * by multiple buffers.
         */
        kstat_named_t arcstat_mutex_miss;
        /*
         * Number of buffers skipped when updating the access state due to the
         * header having already been released after acquiring the hash lock.
         */
        kstat_named_t arcstat_access_skip;
        /*
         * Number of buffers skipped because they have I/O in progress, are
         * indirect prefetch buffers that have not lived long enough, or are
         * not from the spa we're trying to evict from.
         */
        kstat_named_t arcstat_evict_skip;
        /*
         * Number of times arc_evict_state() was unable to evict enough
         * buffers to reach its target amount.
         */
        kstat_named_t arcstat_evict_not_enough;
        kstat_named_t arcstat_evict_l2_cached;
        kstat_named_t arcstat_evict_l2_eligible;
        kstat_named_t arcstat_evict_l2_ineligible;
        kstat_named_t arcstat_evict_l2_skip;
        kstat_named_t arcstat_hash_elements;
        kstat_named_t arcstat_hash_elements_max;
        kstat_named_t arcstat_hash_collisions;
        kstat_named_t arcstat_hash_chains;
        kstat_named_t arcstat_hash_chain_max;
        kstat_named_t arcstat_p;
        kstat_named_t arcstat_c;
        kstat_named_t arcstat_c_min;
        kstat_named_t arcstat_c_max;
        /* Not updated directly; only synced in arc_kstat_update. */
        kstat_named_t arcstat_size;
        /*
         * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
         * Note that the compressed bytes may match the uncompressed bytes
         * if the block is either not compressed or compressed arc is disabled.
         */
        kstat_named_t arcstat_compressed_size;
        /*
         * Uncompressed size of the data stored in b_pabd. If compressed
         * arc is disabled then this value will be identical to the stat
         * above.
         */
        kstat_named_t arcstat_uncompressed_size;
        /*
         * Number of bytes stored in all the arc_buf_t's. This is classified
         * as "overhead" since this data is typically short-lived and will
         * be evicted from the arc when it becomes unreferenced unless the
         * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
         * values have been set (see comment in dbuf.c for more information).
         */
        kstat_named_t arcstat_overhead_size;
        /*
         * Number of bytes consumed by internal ARC structures necessary
         * for tracking purposes; these structures are not actually
         * backed by ARC buffers. This includes arc_buf_hdr_t structures
         * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
         * caches), and arc_buf_t structures (allocated via arc_buf_t
         * cache).
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_hdr_size;
        /*
         * Number of bytes consumed by ARC buffers of type equal to
         * ARC_BUFC_DATA. This is generally consumed by buffers backing
         * on disk user data (e.g. plain file contents).
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_data_size;
        /*
         * Number of bytes consumed by ARC buffers of type equal to
         * ARC_BUFC_METADATA. This is generally consumed by buffers
         * backing on disk data that is used for internal ZFS
         * structures (e.g. ZAP, dnode, indirect blocks, etc).
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_metadata_size;
        /*
         * Number of bytes consumed by various buffers and structures
         * not actually backed with ARC buffers. This includes bonus
         * buffers (allocated directly via zio_buf_* functions),
         * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
         * cache), and dnode_t structures (allocated via dnode_t cache).
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_other_size;
        /*
         * Total number of bytes consumed by ARC buffers residing in the
         * arc_anon state. This includes *all* buffers in the arc_anon
         * state; e.g. data, metadata, evictable, and unevictable buffers
         * are all included in this value.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_anon_size;
        /*
         * Number of bytes consumed by ARC buffers that meet the
         * following criteria: backing buffers of type ARC_BUFC_DATA,
         * residing in the arc_anon state, and are eligible for eviction
         * (e.g. have no outstanding holds on the buffer).
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_anon_evictable_data;
        /*
         * Number of bytes consumed by ARC buffers that meet the
         * following criteria: backing buffers of type ARC_BUFC_METADATA,
         * residing in the arc_anon state, and are eligible for eviction
         * (e.g. have no outstanding holds on the buffer).
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_anon_evictable_metadata;
        /*
         * Total number of bytes consumed by ARC buffers residing in the
         * arc_mru state. This includes *all* buffers in the arc_mru
         * state; e.g. data, metadata, evictable, and unevictable buffers
         * are all included in this value.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mru_size;
        /*
         * Number of bytes consumed by ARC buffers that meet the
         * following criteria: backing buffers of type ARC_BUFC_DATA,
         * residing in the arc_mru state, and are eligible for eviction
         * (e.g. have no outstanding holds on the buffer).
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mru_evictable_data;
        /*
         * Number of bytes consumed by ARC buffers that meet the
         * following criteria: backing buffers of type ARC_BUFC_METADATA,
         * residing in the arc_mru state, and are eligible for eviction
         * (e.g. have no outstanding holds on the buffer).
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mru_evictable_metadata;
        /*
         * Total number of bytes that *would have been* consumed by ARC
         * buffers in the arc_mru_ghost state. The key thing to note
         * here is that this size doesn't actually indicate RAM
         * consumption. The ghost lists only consist of headers and
         * don't actually have ARC buffers linked off of these headers.
         * Thus, *if* the headers had associated ARC buffers, these
         * buffers *would have* consumed this number of bytes.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mru_ghost_size;
        /*
         * Number of bytes that *would have been* consumed by ARC
         * buffers that are eligible for eviction, of type
         * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mru_ghost_evictable_data;
        /*
         * Number of bytes that *would have been* consumed by ARC
         * buffers that are eligible for eviction, of type
         * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mru_ghost_evictable_metadata;
        /*
         * Total number of bytes consumed by ARC buffers residing in the
         * arc_mfu state. This includes *all* buffers in the arc_mfu
         * state; e.g. data, metadata, evictable, and unevictable buffers
         * are all included in this value.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mfu_size;
        /*
         * Number of bytes consumed by ARC buffers that are eligible for
         * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
         * state.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mfu_evictable_data;
        /*
         * Number of bytes consumed by ARC buffers that are eligible for
         * eviction, of type ARC_BUFC_METADATA, and reside in the
         * arc_mfu state.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mfu_evictable_metadata;
        /*
         * Total number of bytes that *would have been* consumed by ARC
         * buffers in the arc_mfu_ghost state. See the comment above
         * arcstat_mru_ghost_size for more details.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mfu_ghost_size;
        /*
         * Number of bytes that *would have been* consumed by ARC
         * buffers that are eligible for eviction, of type
         * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mfu_ghost_evictable_data;
        /*
         * Number of bytes that *would have been* consumed by ARC
         * buffers that are eligible for eviction, of type
         * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
         * Not updated directly; only synced in arc_kstat_update.
         */
        kstat_named_t arcstat_mfu_ghost_evictable_metadata;
        kstat_named_t arcstat_l2_hits;
        kstat_named_t arcstat_l2_misses;
        kstat_named_t arcstat_l2_feeds;
        kstat_named_t arcstat_l2_rw_clash;
        kstat_named_t arcstat_l2_read_bytes;
        kstat_named_t arcstat_l2_write_bytes;
        kstat_named_t arcstat_l2_writes_sent;
        kstat_named_t arcstat_l2_writes_done;
        kstat_named_t arcstat_l2_writes_error;
        kstat_named_t arcstat_l2_writes_lock_retry;
        kstat_named_t arcstat_l2_evict_lock_retry;
        kstat_named_t arcstat_l2_evict_reading;
        kstat_named_t arcstat_l2_evict_l1cached;
        kstat_named_t arcstat_l2_free_on_write;
        kstat_named_t arcstat_l2_abort_lowmem;
        kstat_named_t arcstat_l2_cksum_bad;
        kstat_named_t arcstat_l2_io_error;
        kstat_named_t arcstat_l2_lsize;
        kstat_named_t arcstat_l2_psize;
        /* Not updated directly; only synced in arc_kstat_update. */
        kstat_named_t arcstat_l2_hdr_size;
        kstat_named_t arcstat_memory_throttle_count;
        /* Not updated directly; only synced in arc_kstat_update. */
        kstat_named_t arcstat_meta_used;
        kstat_named_t arcstat_meta_limit;
        kstat_named_t arcstat_meta_max;
        kstat_named_t arcstat_meta_min;
        kstat_named_t arcstat_async_upgrade_sync;
        kstat_named_t arcstat_demand_hit_predictive_prefetch;
        kstat_named_t arcstat_demand_hit_prescient_prefetch;
} arc_stats_t;

static arc_stats_t arc_stats = {
        { "hits",                       KSTAT_DATA_UINT64 },
        { "misses",                     KSTAT_DATA_UINT64 },
        { "demand_data_hits",           KSTAT_DATA_UINT64 },
        { "demand_data_misses",         KSTAT_DATA_UINT64 },
        { "demand_metadata_hits",       KSTAT_DATA_UINT64 },
        { "demand_metadata_misses",     KSTAT_DATA_UINT64 },
        { "prefetch_data_hits",         KSTAT_DATA_UINT64 },
        { "prefetch_data_misses",       KSTAT_DATA_UINT64 },
        { "prefetch_metadata_hits",     KSTAT_DATA_UINT64 },
        { "prefetch_metadata_misses",   KSTAT_DATA_UINT64 },
        { "mru_hits",                   KSTAT_DATA_UINT64 },
        { "mru_ghost_hits",             KSTAT_DATA_UINT64 },
        { "mfu_hits",                   KSTAT_DATA_UINT64 },
        { "mfu_ghost_hits",             KSTAT_DATA_UINT64 },
        { "deleted",                    KSTAT_DATA_UINT64 },
        { "mutex_miss",                 KSTAT_DATA_UINT64 },
        { "access_skip",                KSTAT_DATA_UINT64 },
        { "evict_skip",                 KSTAT_DATA_UINT64 },
        { "evict_not_enough",           KSTAT_DATA_UINT64 },
        { "evict_l2_cached",            KSTAT_DATA_UINT64 },
        { "evict_l2_eligible",          KSTAT_DATA_UINT64 },
        { "evict_l2_ineligible",        KSTAT_DATA_UINT64 },
        { "evict_l2_skip",              KSTAT_DATA_UINT64 },
        { "hash_elements",              KSTAT_DATA_UINT64 },
        { "hash_elements_max",          KSTAT_DATA_UINT64 },
        { "hash_collisions",            KSTAT_DATA_UINT64 },
        { "hash_chains",                KSTAT_DATA_UINT64 },
        { "hash_chain_max",             KSTAT_DATA_UINT64 },
        { "p",                          KSTAT_DATA_UINT64 },
        { "c",                          KSTAT_DATA_UINT64 },
        { "c_min",                      KSTAT_DATA_UINT64 },
        { "c_max",                      KSTAT_DATA_UINT64 },
        { "size",                       KSTAT_DATA_UINT64 },
        { "compressed_size",            KSTAT_DATA_UINT64 },
        { "uncompressed_size",          KSTAT_DATA_UINT64 },
        { "overhead_size",              KSTAT_DATA_UINT64 },
        { "hdr_size",                   KSTAT_DATA_UINT64 },
        { "data_size",                  KSTAT_DATA_UINT64 },
        { "metadata_size",              KSTAT_DATA_UINT64 },
        { "other_size",                 KSTAT_DATA_UINT64 },
        { "anon_size",                  KSTAT_DATA_UINT64 },
        { "anon_evictable_data",        KSTAT_DATA_UINT64 },
        { "anon_evictable_metadata",    KSTAT_DATA_UINT64 },
        { "mru_size",                   KSTAT_DATA_UINT64 },
        { "mru_evictable_data",         KSTAT_DATA_UINT64 },
        { "mru_evictable_metadata",     KSTAT_DATA_UINT64 },
        { "mru_ghost_size",             KSTAT_DATA_UINT64 },
        { "mru_ghost_evictable_data",   KSTAT_DATA_UINT64 },
        { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
        { "mfu_size",                   KSTAT_DATA_UINT64 },
        { "mfu_evictable_data",         KSTAT_DATA_UINT64 },
        { "mfu_evictable_metadata",     KSTAT_DATA_UINT64 },
        { "mfu_ghost_size",             KSTAT_DATA_UINT64 },
        { "mfu_ghost_evictable_data",   KSTAT_DATA_UINT64 },
        { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
        { "l2_hits",                    KSTAT_DATA_UINT64 },
        { "l2_misses",                  KSTAT_DATA_UINT64 },
        { "l2_feeds",                   KSTAT_DATA_UINT64 },
        { "l2_rw_clash",                KSTAT_DATA_UINT64 },
        { "l2_read_bytes",              KSTAT_DATA_UINT64 },
        { "l2_write_bytes",             KSTAT_DATA_UINT64 },
        { "l2_writes_sent",             KSTAT_DATA_UINT64 },
        { "l2_writes_done",             KSTAT_DATA_UINT64 },
        { "l2_writes_error",            KSTAT_DATA_UINT64 },
        { "l2_writes_lock_retry",       KSTAT_DATA_UINT64 },
        { "l2_evict_lock_retry",        KSTAT_DATA_UINT64 },
        { "l2_evict_reading",           KSTAT_DATA_UINT64 },
        { "l2_evict_l1cached",          KSTAT_DATA_UINT64 },
        { "l2_free_on_write",           KSTAT_DATA_UINT64 },
        { "l2_abort_lowmem",            KSTAT_DATA_UINT64 },
        { "l2_cksum_bad",               KSTAT_DATA_UINT64 },
        { "l2_io_error",                KSTAT_DATA_UINT64 },
        { "l2_size",                    KSTAT_DATA_UINT64 },
        { "l2_asize",                   KSTAT_DATA_UINT64 },
        { "l2_hdr_size",                KSTAT_DATA_UINT64 },
        { "memory_throttle_count",      KSTAT_DATA_UINT64 },
        { "arc_meta_used",              KSTAT_DATA_UINT64 },
        { "arc_meta_limit",             KSTAT_DATA_UINT64 },
        { "arc_meta_max",               KSTAT_DATA_UINT64 },
        { "arc_meta_min",               KSTAT_DATA_UINT64 },
        { "async_upgrade_sync",         KSTAT_DATA_UINT64 },
        { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
        { "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
};

#define ARCSTAT(stat)   (arc_stats.stat.value.ui64)

#define ARCSTAT_INCR(stat, val) \
        atomic_add_64(&arc_stats.stat.value.ui64, (val))

#define ARCSTAT_BUMP(stat)      ARCSTAT_INCR(stat, 1)
#define ARCSTAT_BUMPDOWN(stat)  ARCSTAT_INCR(stat, -1)

#define ARCSTAT_MAX(stat, val) {                                        \
        uint64_t m;                                                     \
        while ((val) > (m = arc_stats.stat.value.ui64) &&               \
            (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
                continue;                                               \
}

#define ARCSTAT_MAXSTAT(stat) \
        ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
        if (cond1) {                                                    \
                if (cond2) {                                            \
                        ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
                } else {                                                \
                        ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
                }                                                       \
        } else {                                                        \
                if (cond2) {                                            \
                        ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
                } else {                                                \
                        ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
                }                                                       \
        }
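
/*
 * Illustrative use of ARCSTAT_CONDSTAT (a sketch; the real call sites
 * appear later in this file): counting a hit, broken down by demand vs.
 * prefetch and metadata vs. data:
 *
 *      ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *          HDR_ISTYPE_METADATA(hdr), metadata, data, hits);
 *
 * For a demand metadata hit this expands to
 * ARCSTAT_BUMP(arcstat_demand_metadata_hits).
 */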

kstat_t *arc_ksp;
static arc_state_t *arc_anon;
static arc_state_t *arc_mru;
static arc_state_t *arc_mru_ghost;
static arc_state_t *arc_mfu;
static arc_state_t *arc_mfu_ghost;
static arc_state_t *arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define arc_p           ARCSTAT(arcstat_p)      /* target size of MRU */
#define arc_c           ARCSTAT(arcstat_c)      /* target size of cache */
#define arc_c_min       ARCSTAT(arcstat_c_min)  /* min target cache size */
#define arc_c_max       ARCSTAT(arcstat_c_max)  /* max target cache size */
#define arc_meta_limit  ARCSTAT(arcstat_meta_limit) /* max size for metadata */
#define arc_meta_min    ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define arc_meta_max    ARCSTAT(arcstat_meta_max) /* max size of metadata */

/* compressed size of entire arc */
#define arc_compressed_size     ARCSTAT(arcstat_compressed_size)
/* uncompressed size of entire arc */
#define arc_uncompressed_size   ARCSTAT(arcstat_uncompressed_size)
/* number of bytes in the arc from arc_buf_t's */
#define arc_overhead_size       ARCSTAT(arcstat_overhead_size)

/*
 * There are also some ARC variables that we want to export, but that are
 * updated so often that having the canonical representation be the statistic
 * variable causes a performance bottleneck. We want to use aggsum_t's for these
 * instead, but still be able to export the kstat in the same way as before.
 * The solution is to always use the aggsum version, except in the kstat update
 * callback.
 */
aggsum_t arc_size;
aggsum_t arc_meta_used;
aggsum_t astat_data_size;
aggsum_t astat_metadata_size;
aggsum_t astat_hdr_size;
aggsum_t astat_other_size;
aggsum_t astat_l2_hdr_size;

static int arc_no_grow;         /* Don't try to grow cache size */
static hrtime_t arc_growtime;
static uint64_t arc_tempreserve;
static uint64_t arc_loaned_bytes;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
        void                    *acb_private;
        arc_read_done_func_t    *acb_done;
        arc_buf_t               *acb_buf;
        boolean_t               acb_encrypted;
        boolean_t               acb_compressed;
        boolean_t               acb_noauth;
        zbookmark_phys_t        acb_zb;
        zio_t                   *acb_zio_dummy;
        zio_t                   *acb_zio_head;
        arc_callback_t          *acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
        void                    *awcb_private;
        arc_write_done_func_t   *awcb_ready;
        arc_write_done_func_t   *awcb_children_ready;
        arc_write_done_func_t   *awcb_physdone;
        arc_write_done_func_t   *awcb_done;
        arc_buf_t               *awcb_buf;
};

/*
 * ARC buffers are separated into multiple structs as a memory saving measure:
 *   - Common fields struct, always defined, and embedded within it:
 *       - L2-only fields, always allocated but undefined when not in L2ARC
 *       - L1-only fields, only allocated when in L1ARC
 *
 *           Buffer in L1                     Buffer only in L2
 *    +------------------------+          +------------------------+
 *    | arc_buf_hdr_t          |          | arc_buf_hdr_t          |
 *    |                        |          |                        |
 *    |                        |          |                        |
 *    |                        |          |                        |
 *    +------------------------+          +------------------------+
 *    | l2arc_buf_hdr_t        |          | l2arc_buf_hdr_t        |
 *    | (undefined if L1-only) |          |                        |
 *    +------------------------+          +------------------------+
 *    | l1arc_buf_hdr_t        |
 *    |                        |
 *    |                        |
 *    |                        |
 *    |                        |
 *    +------------------------+
 *
 * Because it's possible for the L2ARC to become extremely large, we can wind
 * up eating a lot of memory in L2ARC buffer headers, so the size of a header
 * is minimized by only allocating the fields necessary for an L1-cached buffer
 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
 * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple
 * words in pointers. arc_hdr_realloc() is used to switch a header between
 * these two allocation states.
 */
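
/*
 * In terms of the kmem caches set up in buf_init() below, the two
 * allocation states map as follows (HDR_L2ONLY_SIZE is
 * offsetof(arc_buf_hdr_t, b_l1hdr), so the trailing L1-only fields are
 * simply not allocated for an L2-only header):
 *
 *      Buffer in L1      -> "arc_buf_hdr_t_full"   (HDR_FULL_SIZE)
 *      Buffer only in L2 -> "arc_buf_hdr_t_l2only" (HDR_L2ONLY_SIZE)
 */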
typedef struct l1arc_buf_hdr {
        kmutex_t                b_freeze_lock;
        zio_cksum_t             *b_freeze_cksum;
#ifdef ZFS_DEBUG
        /*
         * Used for debugging with kmem_flags - by allocating and freeing
         * b_thawed when the buffer is thawed, we get a record of the stack
         * trace that thawed it.
         */
        void                    *b_thawed;
#endif

        arc_buf_t               *b_buf;
        uint32_t                b_bufcnt;
        /* for waiting on writes to complete */
        kcondvar_t              b_cv;
        uint8_t                 b_byteswap;

        /* protected by arc state mutex */
        arc_state_t             *b_state;
        multilist_node_t        b_arc_node;

        /* updated atomically */
        clock_t                 b_arc_access;

        /* self protecting */
        zfs_refcount_t          b_refcnt;

        arc_callback_t          *b_acb;
        abd_t                   *b_pabd;
} l1arc_buf_hdr_t;

/*
 * Encrypted blocks will need to be stored encrypted on the L2ARC
 * disk as they appear in the main pool. In order for this to work we
 * need to pass around the encryption parameters so they can be used
 * to write data to the L2ARC. This struct is only defined in the
 * arc_buf_hdr_t if the L1 header is defined and has the ARC_FLAG_ENCRYPTED
 * flag set.
 */
typedef struct arc_buf_hdr_crypt {
        abd_t                   *b_rabd;        /* raw encrypted data */
        dmu_object_type_t       b_ot;           /* object type */
        uint32_t                b_ebufcnt;      /* number of encrypted buffers */

        /* dsobj for looking up encryption key for l2arc encryption */
        uint64_t                b_dsobj;        /* for looking up key */

        /* encryption parameters */
        uint8_t                 b_salt[ZIO_DATA_SALT_LEN];
        uint8_t                 b_iv[ZIO_DATA_IV_LEN];

        /*
         * Technically this could be removed since we will always be able to
         * get the mac from the bp when we need it. However, it is inconvenient
         * for callers of arc code to have to pass a bp in all the time. This
         * also allows us to assert that L2ARC data is properly encrypted to
         * match the data in the main storage pool.
         */
        uint8_t                 b_mac[ZIO_DATA_MAC_LEN];
} arc_buf_hdr_crypt_t;

typedef struct l2arc_dev l2arc_dev_t;

typedef struct l2arc_buf_hdr {
        /* protected by arc_buf_hdr mutex */
        l2arc_dev_t             *b_dev;         /* L2ARC device */
        uint64_t                b_daddr;        /* disk address, offset byte */

        list_node_t             b_l2node;
} l2arc_buf_hdr_t;

struct arc_buf_hdr {
        /* protected by hash lock */
        dva_t                   b_dva;
        uint64_t                b_birth;

        arc_buf_contents_t      b_type;
        arc_buf_hdr_t           *b_hash_next;
        arc_flags_t             b_flags;

        /*
         * This field stores the size of the data buffer after
         * compression, and is set in the arc's zio completion handlers.
         * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes).
         *
         * While the block pointers can store up to 32MB in their psize
         * field, we can only store up to 32MB minus 512B. This is due
         * to the bp using a bias of 1, whereas we use a bias of 0 (i.e.
         * a field of zeros represents 512B in the bp). We can't use a
         * bias of 1 since we need to reserve a psize of zero, here, to
         * represent holes and embedded blocks.
         *
         * This isn't a problem in practice, since the maximum size of a
         * buffer is limited to 16MB, so we never need to store 32MB in
         * this field. Even in the upstream illumos code base, the
         * maximum size of a buffer is limited to 16MB.
         */
        uint16_t                b_psize;

        /*
         * This field stores the size of the data buffer before
         * compression, and cannot change once set. It is in units
         * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes).
         */
        uint16_t                b_lsize;        /* immutable */
        uint64_t                b_spa;          /* immutable */

        /* L2ARC fields. Undefined when not in L2ARC. */
        l2arc_buf_hdr_t         b_l2hdr;
        /* L1ARC fields. Undefined when in l2arc_only state */
        l1arc_buf_hdr_t         b_l1hdr;
        /*
         * Encryption parameters. Defined only when ARC_FLAG_ENCRYPTED
         * is set and the L1 header exists.
         */
        arc_buf_hdr_crypt_t     b_crypt_hdr;
};
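
/*
 * Worked example of the size encodings above: a block with a 4K logical
 * size that compressed to 1.5K on disk is stored with b_lsize == 8 and
 * b_psize == 3, both in units of SPA_MINBLOCKSIZE (512 bytes).
 */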

#define GHOST_STATE(state)      \
        ((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||        \
        (state) == arc_l2c_only)

#define HDR_IN_HASH_TABLE(hdr)  ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr)       ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr)       ((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_PRESCIENT_PREFETCH(hdr)     \
        ((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr)    \
        ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)

#define HDR_L2CACHE(hdr)        ((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_L2_READING(hdr)     \
        (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&  \
        ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr)     ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr)     ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr)  ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define HDR_PROTECTED(hdr)      ((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define HDR_NOAUTH(hdr)         ((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define HDR_SHARED_DATA(hdr)    ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)

#define HDR_ISTYPE_METADATA(hdr)        \
        ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr)    (!HDR_ISTYPE_METADATA(hdr))

#define HDR_HAS_L1HDR(hdr)      ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr)      ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define HDR_HAS_RABD(hdr)       \
        (HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) &&    \
        (hdr)->b_crypt_hdr.b_rabd != NULL)
#define HDR_ENCRYPTED(hdr)      \
        (HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define HDR_AUTHENTICATED(hdr)  \
        (HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))

/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET     (highbit64(ARC_FLAG_COMPRESS_0) - 1)

#define HDR_GET_COMPRESS(hdr)   ((enum zio_compress)BF32_GET((hdr)->b_flags, \
        HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
        HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));

#define ARC_BUF_LAST(buf)       ((buf)->b_next == NULL)
#define ARC_BUF_SHARED(buf)     ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define ARC_BUF_ENCRYPTED(buf)  ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)

/*
 * Other sizes
 */

#define HDR_FULL_CRYPT_SIZE     ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_FULL_SIZE           ((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
#define HDR_L2ONLY_SIZE         ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))

/*
 * Hash table routines
 */

#define HT_LOCK_PAD     64

struct ht_lock {
        kmutex_t        ht_lock;
#ifdef _KERNEL
        unsigned char   pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define BUF_LOCKS 256
typedef struct buf_hash_table {
        uint64_t ht_mask;
        arc_buf_hdr_t **ht_table;
        struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define BUF_HASH_INDEX(spa, dva, birth) \
        (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define BUF_HASH_LOCK(idx)      (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define HDR_LOCK(hdr) \
        (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define L2ARC_WRITE_SIZE        (8 * 1024 * 1024)       /* initial write max */
#define L2ARC_HEADROOM          2                       /* num of writes */
/*
 * If we discover during ARC scan any buffers to be compressed, we boost
 * our headroom for the next scanning cycle by this percentage multiple.
 */
#define L2ARC_HEADROOM_BOOST    200
#define L2ARC_FEED_SECS         1               /* caching interval secs */
#define L2ARC_FEED_MIN_MS       200             /* min caching interval ms */

#define l2arc_writes_sent       ARCSTAT(arcstat_l2_writes_sent)
#define l2arc_writes_done       ARCSTAT(arcstat_l2_writes_done)

/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;    /* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;  /* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;       /* number of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;     /* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;            /* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;            /* turbo warmup */
boolean_t l2arc_norw = B_TRUE;                  /* no reads during writes */

/*
 * L2ARC Internals
 */
struct l2arc_dev {
        vdev_t                  *l2ad_vdev;     /* vdev */
        spa_t                   *l2ad_spa;      /* spa */
        uint64_t                l2ad_hand;      /* next write location */
        uint64_t                l2ad_start;     /* first addr on device */
        uint64_t                l2ad_end;       /* last addr on device */
        boolean_t               l2ad_first;     /* first sweep through */
        boolean_t               l2ad_writing;   /* currently writing */
        kmutex_t                l2ad_mtx;       /* lock for buffer list */
        list_t                  l2ad_buflist;   /* buffer list */
        list_node_t             l2ad_node;      /* device list node */
        zfs_refcount_t          l2ad_alloc;     /* allocated bytes */
};

static list_t L2ARC_dev_list;                   /* device list */
static list_t *l2arc_dev_list;                  /* device list pointer */
static kmutex_t l2arc_dev_mtx;                  /* device list mutex */
static l2arc_dev_t *l2arc_dev_last;             /* last device used */
static list_t L2ARC_free_on_write;              /* free after write buf list */
static list_t *l2arc_free_on_write;             /* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;        /* mutex for list */
static uint64_t l2arc_ndev;                     /* number of devices */

typedef struct l2arc_read_callback {
        arc_buf_hdr_t           *l2rcb_hdr;     /* read header */
        blkptr_t                l2rcb_bp;       /* original blkptr */
        zbookmark_phys_t        l2rcb_zb;       /* original bookmark */
        int                     l2rcb_flags;    /* original flags */
        abd_t                   *l2rcb_abd;     /* temporary buffer */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
        l2arc_dev_t     *l2wcb_dev;             /* device info */
        arc_buf_hdr_t   *l2wcb_head;            /* head of write buflist */
} l2arc_write_callback_t;

typedef struct l2arc_data_free {
        /* protected by l2arc_free_on_write_mtx */
        abd_t           *l2df_abd;
        size_t          l2df_size;
        arc_buf_contents_t l2df_type;
        list_node_t     l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);

typedef enum arc_fill_flags {
        ARC_FILL_LOCKED         = 1 << 0, /* hdr lock is held */
        ARC_FILL_COMPRESSED     = 1 << 1, /* fill with compressed data */
        ARC_FILL_ENCRYPTED      = 1 << 2, /* fill with encrypted data */
        ARC_FILL_NOAUTH         = 1 << 3, /* don't attempt to authenticate */
        ARC_FILL_IN_PLACE       = 1 << 4  /* fill in place (special case) */
} arc_fill_flags_t;

static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
static void arc_hdr_free_pabd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_pabd(arc_buf_hdr_t *, boolean_t);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static boolean_t arc_is_overflowing();
static void arc_buf_watch(arc_buf_t *);

static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);

static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
        return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}

#define HDR_EMPTY(hdr)                                          \
        ((hdr)->b_dva.dva_word[0] == 0 &&                       \
        (hdr)->b_dva.dva_word[1] == 0)

#define HDR_EQUAL(spa, dva, birth, hdr)                         \
        ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&     \
        ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&     \
        ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)

static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
        hdr->b_dva.dva_word[0] = 0;
        hdr->b_dva.dva_word[1] = 0;
        hdr->b_birth = 0;
}

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
        const dva_t *dva = BP_IDENTITY(bp);
        uint64_t birth = BP_PHYSICAL_BIRTH(bp);
        uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
        kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
        arc_buf_hdr_t *hdr;

        mutex_enter(hash_lock);
        for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
            hdr = hdr->b_hash_next) {
                if (HDR_EQUAL(spa, dva, birth, hdr)) {
                        *lockp = hash_lock;
                        return (hdr);
                }
        }
        mutex_exit(hash_lock);
        *lockp = NULL;
        return (NULL);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 * If lockp == NULL, the caller is assumed to already hold the hash lock.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
        uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
        kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
        arc_buf_hdr_t *fhdr;
        uint32_t i;

        ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
        ASSERT(hdr->b_birth != 0);
        ASSERT(!HDR_IN_HASH_TABLE(hdr));

        if (lockp != NULL) {
                *lockp = hash_lock;
                mutex_enter(hash_lock);
        } else {
                ASSERT(MUTEX_HELD(hash_lock));
        }

        for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
            fhdr = fhdr->b_hash_next, i++) {
                if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
                        return (fhdr);
        }

        hdr->b_hash_next = buf_hash_table.ht_table[idx];
        buf_hash_table.ht_table[idx] = hdr;
        arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);

        /* collect some hash table performance data */
        if (i > 0) {
                ARCSTAT_BUMP(arcstat_hash_collisions);
                if (i == 1)
                        ARCSTAT_BUMP(arcstat_hash_chains);

                ARCSTAT_MAX(arcstat_hash_chain_max, i);
        }

        ARCSTAT_BUMP(arcstat_hash_elements);
        ARCSTAT_MAXSTAT(arcstat_hash_elements);

        return (NULL);
}
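
/*
 * Illustrative sketch (not a function in this file) of the insertion
 * pattern implied by the contract above; the hash lock is held on return
 * whether or not an equal header already existed:
 *
 *      kmutex_t *hash_lock;
 *      arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *      if (exists != NULL) {
 *              ... somebody inserted an equal header first; use
 *              ... "exists" instead of "hdr"
 *      }
 *      mutex_exit(hash_lock);
 */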

static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
        arc_buf_hdr_t *fhdr, **hdrp;
        uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);

        ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
        ASSERT(HDR_IN_HASH_TABLE(hdr));

        hdrp = &buf_hash_table.ht_table[idx];
        while ((fhdr = *hdrp) != hdr) {
                ASSERT3P(fhdr, !=, NULL);
                hdrp = &fhdr->b_hash_next;
        }
        *hdrp = hdr->b_hash_next;
        hdr->b_hash_next = NULL;
        arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);

        /* collect some hash table performance data */
        ARCSTAT_BUMPDOWN(arcstat_hash_elements);

        if (buf_hash_table.ht_table[idx] &&
            buf_hash_table.ht_table[idx]->b_hash_next == NULL)
                ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */

static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_full_crypt_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
        int i;

        kmem_free(buf_hash_table.ht_table,
            (buf_hash_table.ht_mask + 1) * sizeof (void *));
        for (i = 0; i < BUF_LOCKS; i++)
                mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
        kmem_cache_destroy(hdr_full_cache);
        kmem_cache_destroy(hdr_full_crypt_cache);
        kmem_cache_destroy(hdr_l2only_cache);
        kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
        arc_buf_hdr_t *hdr = vbuf;

        bzero(hdr, HDR_FULL_SIZE);
        hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
        cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
        zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
        mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
        multilist_link_init(&hdr->b_l1hdr.b_arc_node);
        arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);

        return (0);
}

/* ARGSUSED */
static int
hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
{
        arc_buf_hdr_t *hdr = vbuf;

        (void) hdr_full_cons(vbuf, unused, kmflag);
        bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr));
        arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);

        return (0);
}

/* ARGSUSED */
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
        arc_buf_hdr_t *hdr = vbuf;

        bzero(hdr, HDR_L2ONLY_SIZE);
        arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);

        return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
        arc_buf_t *buf = vbuf;

        bzero(buf, sizeof (arc_buf_t));
        mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
        arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

        return (0);
}
1483 */ 1484 /* ARGSUSED */ 1485 static void 1486 hdr_full_dest(void *vbuf, void *unused) 1487 { 1488 arc_buf_hdr_t *hdr = vbuf; 1489 1490 ASSERT(HDR_EMPTY(hdr)); 1491 cv_destroy(&hdr->b_l1hdr.b_cv); 1492 zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt); 1493 mutex_destroy(&hdr->b_l1hdr.b_freeze_lock); 1494 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 1495 arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS); 1496 } 1497 1498 /* ARGSUSED */ 1499 static void 1500 hdr_full_crypt_dest(void *vbuf, void *unused) 1501 { 1502 arc_buf_hdr_t *hdr = vbuf; 1503 1504 hdr_full_dest(hdr, unused); 1505 arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS); 1506 } 1507 1508 /* ARGSUSED */ 1509 static void 1510 hdr_l2only_dest(void *vbuf, void *unused) 1511 { 1512 arc_buf_hdr_t *hdr = vbuf; 1513 1514 ASSERT(HDR_EMPTY(hdr)); 1515 arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); 1516 } 1517 1518 /* ARGSUSED */ 1519 static void 1520 buf_dest(void *vbuf, void *unused) 1521 { 1522 arc_buf_t *buf = vbuf; 1523 1524 mutex_destroy(&buf->b_evict_lock); 1525 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 1526 } 1527 1528 /* 1529 * Reclaim callback -- invoked when memory is low. 1530 */ 1531 /* ARGSUSED */ 1532 static void 1533 hdr_recl(void *unused) 1534 { 1535 dprintf("hdr_recl called\n"); 1536 /* 1537 * umem calls the reclaim func when we destroy the buf cache, 1538 * which is after we do arc_fini(). 1539 */ 1540 if (arc_initialized) 1541 zthr_wakeup(arc_reap_zthr); 1542 } 1543 1544 static void 1545 buf_init(void) 1546 { 1547 uint64_t *ct; 1548 uint64_t hsize = 1ULL << 12; 1549 int i, j; 1550 1551 /* 1552 * The hash table is big enough to fill all of physical memory 1553 * with an average block size of zfs_arc_average_blocksize (default 8K). 1554 * By default, the table will take up 1555 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers). 1556 */ 1557 while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE) 1558 hsize <<= 1; 1559 retry: 1560 buf_hash_table.ht_mask = hsize - 1; 1561 buf_hash_table.ht_table = 1562 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 1563 if (buf_hash_table.ht_table == NULL) { 1564 ASSERT(hsize > (1ULL << 8)); 1565 hsize >>= 1; 1566 goto retry; 1567 } 1568 1569 hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE, 1570 0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0); 1571 hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt", 1572 HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest, 1573 hdr_recl, NULL, NULL, 0); 1574 hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only", 1575 HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl, 1576 NULL, NULL, 0); 1577 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 1578 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 1579 1580 for (i = 0; i < 256; i++) 1581 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 1582 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 1583 1584 for (i = 0; i < BUF_LOCKS; i++) { 1585 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 1586 NULL, MUTEX_DEFAULT, NULL); 1587 } 1588 } 1589 1590 /* 1591 * This is the size that the buf occupies in memory. If the buf is compressed, 1592 * it will correspond to the compressed size. You should use this method of 1593 * getting the buf size unless you explicitly need the logical size. 1594 */ 1595 int32_t 1596 arc_buf_size(arc_buf_t *buf) 1597 { 1598 return (ARC_BUF_COMPRESSED(buf) ? 
1599 HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr)); 1600 } 1601 1602 int32_t 1603 arc_buf_lsize(arc_buf_t *buf) 1604 { 1605 return (HDR_GET_LSIZE(buf->b_hdr)); 1606 } 1607 1608 /* 1609 * This function will return B_TRUE if the buffer is encrypted in memory. 1610 * This buffer can be decrypted by calling arc_untransform(). 1611 */ 1612 boolean_t 1613 arc_is_encrypted(arc_buf_t *buf) 1614 { 1615 return (ARC_BUF_ENCRYPTED(buf) != 0); 1616 } 1617 1618 /* 1619 * Returns B_TRUE if the buffer represents data that has not had its MAC 1620 * verified yet. 1621 */ 1622 boolean_t 1623 arc_is_unauthenticated(arc_buf_t *buf) 1624 { 1625 return (HDR_NOAUTH(buf->b_hdr) != 0); 1626 } 1627 1628 void 1629 arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt, 1630 uint8_t *iv, uint8_t *mac) 1631 { 1632 arc_buf_hdr_t *hdr = buf->b_hdr; 1633 1634 ASSERT(HDR_PROTECTED(hdr)); 1635 1636 bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN); 1637 bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN); 1638 bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN); 1639 *byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ? 1640 /* CONSTCOND */ 1641 ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER; 1642 } 1643 1644 /* 1645 * Indicates how this buffer is compressed in memory. If it is not compressed 1646 * the value will be ZIO_COMPRESS_OFF. It can be made normally readable with 1647 * arc_untransform() as long as it is also unencrypted. 1648 */ 1649 enum zio_compress 1650 arc_get_compression(arc_buf_t *buf) 1651 { 1652 return (ARC_BUF_COMPRESSED(buf) ? 1653 HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF); 1654 } 1655 1656 #define ARC_MINTIME (hz>>4) /* 62 ms */ 1657 1658 /* 1659 * Return the compression algorithm used to store this data in the ARC. If ARC 1660 * compression is enabled or this is an encrypted block, this will be the same 1661 * as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF. 1662 */ 1663 static inline enum zio_compress 1664 arc_hdr_get_compress(arc_buf_hdr_t *hdr) 1665 { 1666 return (HDR_COMPRESSION_ENABLED(hdr) ? 1667 HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF); 1668 } 1669 1670 static inline boolean_t 1671 arc_buf_is_shared(arc_buf_t *buf) 1672 { 1673 boolean_t shared = (buf->b_data != NULL && 1674 buf->b_hdr->b_l1hdr.b_pabd != NULL && 1675 abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) && 1676 buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd)); 1677 IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr)); 1678 IMPLY(shared, ARC_BUF_SHARED(buf)); 1679 IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf)); 1680 1681 /* 1682 * It would be nice to assert arc_can_share() too, but the "hdr isn't 1683 * already being shared" requirement prevents us from doing that. 1684 */ 1685 1686 return (shared); 1687 } 1688 1689 /* 1690 * Free the checksum associated with this header. If there is no checksum, this 1691 * is a no-op. 1692 */ 1693 static inline void 1694 arc_cksum_free(arc_buf_hdr_t *hdr) 1695 { 1696 ASSERT(HDR_HAS_L1HDR(hdr)); 1697 1698 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1699 if (hdr->b_l1hdr.b_freeze_cksum != NULL) { 1700 kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t)); 1701 hdr->b_l1hdr.b_freeze_cksum = NULL; 1702 } 1703 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1704 } 1705 1706 /* 1707 * Return true iff at least one of the bufs on hdr is not compressed. 1708 * Encrypted buffers count as compressed. 
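 *
 * A worked example (hypothetical buf list): if the hdr's b_buf chain
 * holds a compressed buf, an encrypted buf (which counts as
 * compressed), and one plain uncompressed buf, this returns B_TRUE
 * solely because of the last entry; remove it and the result becomes
 * B_FALSE.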
1709 */ 1710 static boolean_t 1711 arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr) 1712 { 1713 for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) { 1714 if (!ARC_BUF_COMPRESSED(b)) { 1715 return (B_TRUE); 1716 } 1717 } 1718 return (B_FALSE); 1719 } 1720 1721 /* 1722 * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data 1723 * matches the checksum that is stored in the hdr. If there is no checksum, 1724 * or if the buf is compressed, this is a no-op. 1725 */ 1726 static void 1727 arc_cksum_verify(arc_buf_t *buf) 1728 { 1729 arc_buf_hdr_t *hdr = buf->b_hdr; 1730 zio_cksum_t zc; 1731 1732 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1733 return; 1734 1735 if (ARC_BUF_COMPRESSED(buf)) { 1736 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || 1737 arc_hdr_has_uncompressed_buf(hdr)); 1738 return; 1739 } 1740 1741 ASSERT(HDR_HAS_L1HDR(hdr)); 1742 1743 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1744 if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) { 1745 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1746 return; 1747 } 1748 1749 fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc); 1750 if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc)) 1751 panic("buffer modified while frozen!"); 1752 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1753 } 1754 1755 /* 1756 * This function makes the assumption that data stored in the L2ARC 1757 * will be transformed exactly as it is in the main pool. Because of 1758 * this we can verify the checksum against the reading process's bp. 1759 */ 1760 static boolean_t 1761 arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio) 1762 { 1763 enum zio_compress compress = BP_GET_COMPRESS(zio->io_bp); 1764 boolean_t valid_cksum; 1765 1766 ASSERT(!BP_IS_EMBEDDED(zio->io_bp)); 1767 VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr)); 1768 1769 /* 1770 * We rely on the blkptr's checksum to determine if the block 1771 * is valid or not. When compressed arc is enabled, the l2arc 1772 * writes the block to the l2arc just as it appears in the pool. 1773 * This allows us to use the blkptr's checksum to validate the 1774 * data that we just read off of the l2arc without having to store 1775 * a separate checksum in the arc_buf_hdr_t. However, if compressed 1776 * arc is disabled, then the data written to the l2arc is always 1777 * uncompressed and won't match the block as it exists in the main 1778 * pool. When this is the case, we must first compress it if it is 1779 * compressed on the main pool before we can validate the checksum. 1780 */ 1781 if (!HDR_COMPRESSION_ENABLED(hdr) && compress != ZIO_COMPRESS_OFF) { 1782 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 1783 uint64_t lsize = HDR_GET_LSIZE(hdr); 1784 uint64_t csize; 1785 1786 abd_t *cdata = abd_alloc_linear(HDR_GET_PSIZE(hdr), B_TRUE); 1787 csize = zio_compress_data(compress, zio->io_abd, 1788 abd_to_buf(cdata), lsize); 1789 1790 ASSERT3U(csize, <=, HDR_GET_PSIZE(hdr)); 1791 if (csize < HDR_GET_PSIZE(hdr)) { 1792 /* 1793 * Compressed blocks are always a multiple of the 1794 * smallest ashift in the pool. Ideally, we would 1795 * like to round up the csize to the next 1796 * spa_min_ashift but that value may have changed 1797 * since the block was last written. Instead, 1798 * we rely on the fact that the hdr's psize 1799 * was set to the psize of the block when it was 1800 * last written. We set the csize to that value 1801 * and zero out any part that should not contain 1802 * data. 
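 *
 * A worked example with hypothetical numbers: say lsize is 128K, the
 * recompression below yields csize = 33280, but the block was written
 * with psize = 36864 (rounded up to a 4K ashift at write time). We
 * zero cdata bytes [33280, 36864) and present csize = 36864, so the
 * checksum comparison sees the same bytes that were originally
 * written to the pool.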
1803 */ 1804 abd_zero_off(cdata, csize, HDR_GET_PSIZE(hdr) - csize); 1805 csize = HDR_GET_PSIZE(hdr); 1806 } 1807 zio_push_transform(zio, cdata, csize, HDR_GET_PSIZE(hdr), NULL); 1808 } 1809 1810 /* 1811 * Block pointers always store the checksum for the logical data. 1812 * If the block pointer has the gang bit set, then the checksum 1813 * it represents is for the reconstituted data and not for an 1814 * individual gang member. The zio pipeline, however, must be able to 1815 * determine the checksum of each of the gang constituents so it 1816 * treats the checksum comparison differently than what we need 1817 * for l2arc blocks. This prevents us from using the 1818 * zio_checksum_error() interface directly. Instead we must call the 1819 * zio_checksum_error_impl() so that we can ensure the checksum is 1820 * generated using the correct checksum algorithm and accounts for the 1821 * logical I/O size and not just a gang fragment. 1822 */ 1823 valid_cksum = (zio_checksum_error_impl(zio->io_spa, zio->io_bp, 1824 BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size, 1825 zio->io_offset, NULL) == 0); 1826 zio_pop_transforms(zio); 1827 return (valid_cksum); 1828 } 1829 1830 /* 1831 * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a 1832 * checksum and attaches it to the buf's hdr so that we can ensure that the buf 1833 * isn't modified later on. If buf is compressed or there is already a checksum 1834 * on the hdr, this is a no-op (we only checksum uncompressed bufs). 1835 */ 1836 static void 1837 arc_cksum_compute(arc_buf_t *buf) 1838 { 1839 arc_buf_hdr_t *hdr = buf->b_hdr; 1840 1841 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1842 return; 1843 1844 ASSERT(HDR_HAS_L1HDR(hdr)); 1845 1846 mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1847 if (hdr->b_l1hdr.b_freeze_cksum != NULL) { 1848 ASSERT(arc_hdr_has_uncompressed_buf(hdr)); 1849 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1850 return; 1851 } else if (ARC_BUF_COMPRESSED(buf)) { 1852 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1853 return; 1854 } 1855 1856 ASSERT(!ARC_BUF_ENCRYPTED(buf)); 1857 ASSERT(!ARC_BUF_COMPRESSED(buf)); 1858 hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), 1859 KM_SLEEP); 1860 fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, 1861 hdr->b_l1hdr.b_freeze_cksum); 1862 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1863 arc_buf_watch(buf); 1864 } 1865 1866 #ifndef _KERNEL 1867 typedef struct procctl { 1868 long cmd; 1869 prwatch_t prwatch; 1870 } procctl_t; 1871 #endif 1872 1873 /* ARGSUSED */ 1874 static void 1875 arc_buf_unwatch(arc_buf_t *buf) 1876 { 1877 #ifndef _KERNEL 1878 if (arc_watch) { 1879 int result; 1880 procctl_t ctl; 1881 ctl.cmd = PCWATCH; 1882 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1883 ctl.prwatch.pr_size = 0; 1884 ctl.prwatch.pr_wflags = 0; 1885 result = write(arc_procfd, &ctl, sizeof (ctl)); 1886 ASSERT3U(result, ==, sizeof (ctl)); 1887 } 1888 #endif 1889 } 1890 1891 /* ARGSUSED */ 1892 static void 1893 arc_buf_watch(arc_buf_t *buf) 1894 { 1895 #ifndef _KERNEL 1896 if (arc_watch) { 1897 int result; 1898 procctl_t ctl; 1899 ctl.cmd = PCWATCH; 1900 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1901 ctl.prwatch.pr_size = arc_buf_size(buf); 1902 ctl.prwatch.pr_wflags = WA_WRITE; 1903 result = write(arc_procfd, &ctl, sizeof (ctl)); 1904 ASSERT3U(result, ==, sizeof (ctl)); 1905 } 1906 #endif 1907 } 1908 1909 static arc_buf_contents_t 1910 arc_buf_type(arc_buf_hdr_t *hdr) 1911 { 1912 arc_buf_contents_t type; 1913 if (HDR_ISTYPE_METADATA(hdr)) { 1914 type = 
ARC_BUFC_METADATA; 1915 } else { 1916 type = ARC_BUFC_DATA; 1917 } 1918 VERIFY3U(hdr->b_type, ==, type); 1919 return (type); 1920 } 1921 1922 boolean_t 1923 arc_is_metadata(arc_buf_t *buf) 1924 { 1925 return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0); 1926 } 1927 1928 static uint32_t 1929 arc_bufc_to_flags(arc_buf_contents_t type) 1930 { 1931 switch (type) { 1932 case ARC_BUFC_DATA: 1933 /* metadata field is 0 if buffer contains normal data */ 1934 return (0); 1935 case ARC_BUFC_METADATA: 1936 return (ARC_FLAG_BUFC_METADATA); 1937 default: 1938 break; 1939 } 1940 panic("undefined ARC buffer type!"); 1941 return ((uint32_t)-1); 1942 } 1943 1944 void 1945 arc_buf_thaw(arc_buf_t *buf) 1946 { 1947 arc_buf_hdr_t *hdr = buf->b_hdr; 1948 1949 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 1950 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1951 1952 arc_cksum_verify(buf); 1953 1954 /* 1955 * Compressed buffers do not manipulate the b_freeze_cksum or 1956 * allocate b_thawed. 1957 */ 1958 if (ARC_BUF_COMPRESSED(buf)) { 1959 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || 1960 arc_hdr_has_uncompressed_buf(hdr)); 1961 return; 1962 } 1963 1964 ASSERT(HDR_HAS_L1HDR(hdr)); 1965 arc_cksum_free(hdr); 1966 1967 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1968 #ifdef ZFS_DEBUG 1969 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1970 if (hdr->b_l1hdr.b_thawed != NULL) 1971 kmem_free(hdr->b_l1hdr.b_thawed, 1); 1972 hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP); 1973 } 1974 #endif 1975 1976 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1977 1978 arc_buf_unwatch(buf); 1979 } 1980 1981 void 1982 arc_buf_freeze(arc_buf_t *buf) 1983 { 1984 arc_buf_hdr_t *hdr = buf->b_hdr; 1985 kmutex_t *hash_lock; 1986 1987 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1988 return; 1989 1990 if (ARC_BUF_COMPRESSED(buf)) { 1991 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || 1992 arc_hdr_has_uncompressed_buf(hdr)); 1993 return; 1994 } 1995 1996 hash_lock = HDR_LOCK(hdr); 1997 mutex_enter(hash_lock); 1998 1999 ASSERT(HDR_HAS_L1HDR(hdr)); 2000 ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL || 2001 hdr->b_l1hdr.b_state == arc_anon); 2002 arc_cksum_compute(buf); 2003 mutex_exit(hash_lock); 2004 } 2005 2006 /* 2007 * The arc_buf_hdr_t's b_flags should never be modified directly. Instead, 2008 * the following functions should be used to ensure that the flags are 2009 * updated in a thread-safe way. When manipulating the flags, either 2010 * the hash_lock must be held or the hdr must be undiscoverable. This 2011 * ensures that we're not racing with any other threads when updating 2012 * the flags. 2013 */ 2014 static inline void 2015 arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) 2016 { 2017 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2018 hdr->b_flags |= flags; 2019 } 2020 2021 static inline void 2022 arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) 2023 { 2024 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2025 hdr->b_flags &= ~flags; 2026 } 2027 2028 /* 2029 * Setting the compression bits in the arc_buf_hdr_t's b_flags is 2030 * done in a special way since we have to clear and set bits 2031 * at the same time. Consumers that wish to set the compression bits 2032 * must use this function to ensure that the flags are updated in a 2033 * thread-safe manner.
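 *
 * A minimal sketch of the intended call pattern (hypothetical caller;
 * the compression value is arbitrary), under the same "hash lock held
 * or undiscoverable" rule that arc_hdr_set_flags() asserts:
 *
 *	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
 *	arc_hdr_set_compress(hdr, ZIO_COMPRESS_LZ4);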
2034 */ 2035 static void 2036 arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp) 2037 { 2038 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2039 2040 /* 2041 * Holes and embedded blocks will always have a psize = 0 so 2042 * we ignore the compression of the blkptr and set the 2043 * arc_buf_hdr_t's compression to ZIO_COMPRESS_OFF. 2044 * Holes and embedded blocks remain anonymous so we don't 2045 * want to uncompress them. Mark them as uncompressed. 2046 */ 2047 if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) { 2048 arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC); 2049 ASSERT(!HDR_COMPRESSION_ENABLED(hdr)); 2050 } else { 2051 arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC); 2052 ASSERT(HDR_COMPRESSION_ENABLED(hdr)); 2053 } 2054 2055 HDR_SET_COMPRESS(hdr, cmp); 2056 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp); 2057 } 2058 2059 /* 2060 * Looks for another buf on the same hdr which has the data decompressed, copies 2061 * from it, and returns true. If no such buf exists, returns false. 2062 */ 2063 static boolean_t 2064 arc_buf_try_copy_decompressed_data(arc_buf_t *buf) 2065 { 2066 arc_buf_hdr_t *hdr = buf->b_hdr; 2067 boolean_t copied = B_FALSE; 2068 2069 ASSERT(HDR_HAS_L1HDR(hdr)); 2070 ASSERT3P(buf->b_data, !=, NULL); 2071 ASSERT(!ARC_BUF_COMPRESSED(buf)); 2072 2073 for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL; 2074 from = from->b_next) { 2075 /* can't use our own data buffer */ 2076 if (from == buf) { 2077 continue; 2078 } 2079 2080 if (!ARC_BUF_COMPRESSED(from)) { 2081 bcopy(from->b_data, buf->b_data, arc_buf_size(buf)); 2082 copied = B_TRUE; 2083 break; 2084 } 2085 } 2086 2087 /* 2088 * Note: With encryption support, the following assertion is no longer 2089 * necessarily valid. If we receive two back to back raw snapshots 2090 * (send -w), the second receive can use a hdr with a cksum already 2091 * calculated. This happens via: 2092 * dmu_recv_stream() -> receive_read_record() -> arc_loan_raw_buf() 2093 * The rsend/send_mixed_raw test case exercises this code path. 2094 * 2095 * There were no decompressed bufs, so there should not be a 2096 * checksum on the hdr either. 2097 * EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL); 2098 */ 2099 2100 return (copied); 2101 } 2102 2103 /* 2104 * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t. 2105 */ 2106 static uint64_t 2107 arc_hdr_size(arc_buf_hdr_t *hdr) 2108 { 2109 uint64_t size; 2110 2111 if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF && 2112 HDR_GET_PSIZE(hdr) > 0) { 2113 size = HDR_GET_PSIZE(hdr); 2114 } else { 2115 ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0); 2116 size = HDR_GET_LSIZE(hdr); 2117 } 2118 return (size); 2119 } 2120 2121 static int 2122 arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj) 2123 { 2124 int ret; 2125 uint64_t csize; 2126 uint64_t lsize = HDR_GET_LSIZE(hdr); 2127 uint64_t psize = HDR_GET_PSIZE(hdr); 2128 void *tmpbuf = NULL; 2129 abd_t *abd = hdr->b_l1hdr.b_pabd; 2130 2131 ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr))); 2132 ASSERT(HDR_AUTHENTICATED(hdr)); 2133 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 2134 2135 /* 2136 * The MAC is calculated on the compressed data that is stored on disk. 2137 * However, if compressed arc is disabled we will only have the 2138 * decompressed data available to us now. Compress it into a temporary 2139 * abd so we can verify the MAC. 
The performance overhead of this will 2140 * be relatively low, since most objects in an encrypted objset will 2141 * be encrypted (instead of authenticated) anyway. 2142 */ 2143 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && 2144 !HDR_COMPRESSION_ENABLED(hdr)) { 2145 tmpbuf = zio_buf_alloc(lsize); 2146 abd = abd_get_from_buf(tmpbuf, lsize); 2147 abd_take_ownership_of_buf(abd, B_TRUE); 2148 2149 csize = zio_compress_data(HDR_GET_COMPRESS(hdr), 2150 hdr->b_l1hdr.b_pabd, tmpbuf, lsize); 2151 ASSERT3U(csize, <=, psize); 2152 abd_zero_off(abd, csize, psize - csize); 2153 } 2154 2155 /* 2156 * Authentication is best effort. We authenticate whenever the key is 2157 * available. If we succeed we clear ARC_FLAG_NOAUTH. 2158 */ 2159 if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) { 2160 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 2161 ASSERT3U(lsize, ==, psize); 2162 ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd, 2163 psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); 2164 } else { 2165 ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize, 2166 hdr->b_crypt_hdr.b_mac); 2167 } 2168 2169 if (ret == 0) 2170 arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH); 2171 else if (ret != ENOENT) 2172 goto error; 2173 2174 if (tmpbuf != NULL) 2175 abd_free(abd); 2176 2177 return (0); 2178 2179 error: 2180 if (tmpbuf != NULL) 2181 abd_free(abd); 2182 2183 return (ret); 2184 } 2185 2186 /* 2187 * This function will take a header that only has raw encrypted data in 2188 * b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in 2189 * b_l1hdr.b_pabd. If designated in the header flags, this function will 2190 * also decompress the data. 2191 */ 2192 static int 2193 arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb) 2194 { 2195 int ret; 2196 abd_t *cabd = NULL; 2197 void *tmp = NULL; 2198 boolean_t no_crypt = B_FALSE; 2199 boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); 2200 2201 ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr))); 2202 ASSERT(HDR_ENCRYPTED(hdr)); 2203 2204 arc_hdr_alloc_pabd(hdr, B_FALSE); 2205 2206 ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot, 2207 B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv, 2208 hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd, 2209 hdr->b_crypt_hdr.b_rabd, &no_crypt); 2210 if (ret != 0) 2211 goto error; 2212 2213 if (no_crypt) { 2214 abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd, 2215 HDR_GET_PSIZE(hdr)); 2216 } 2217 2218 /* 2219 * If this header has disabled arc compression but the b_pabd is 2220 * compressed after decrypting it, we need to decompress the newly 2221 * decrypted data. 2222 */ 2223 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && 2224 !HDR_COMPRESSION_ENABLED(hdr)) { 2225 /* 2226 * We want to make sure that we are correctly honoring the 2227 * zfs_abd_scatter_enabled setting, so we allocate an abd here 2228 * and then loan a buffer from it, rather than allocating a 2229 * linear buffer and wrapping it in an abd later. 
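 *
 * The borrow/return idiom below deserves a note (editorial, based on
 * the ABD interfaces as this file uses them): abd_borrow_buf() hands
 * back a linear staging buffer to decompress into,
 * abd_return_buf_copy() copies the staged bytes back into the
 * (possibly scattered) abd before releasing it, and abd_return_buf()
 * releases it without the copy-back, which is why the error path uses
 * the latter.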
2230 */ 2231 cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr); 2232 tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr)); 2233 2234 ret = zio_decompress_data(HDR_GET_COMPRESS(hdr), 2235 hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr), 2236 HDR_GET_LSIZE(hdr)); 2237 if (ret != 0) { 2238 abd_return_buf(cabd, tmp, arc_hdr_size(hdr)); 2239 goto error; 2240 } 2241 2242 abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr)); 2243 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, 2244 arc_hdr_size(hdr), hdr); 2245 hdr->b_l1hdr.b_pabd = cabd; 2246 } 2247 2248 return (0); 2249 2250 error: 2251 arc_hdr_free_pabd(hdr, B_FALSE); 2252 if (cabd != NULL) 2253 arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr); 2254 2255 return (ret); 2256 } 2257 2258 /* 2259 * This function is called during arc_buf_fill() to prepare the header's 2260 * abd plaintext pointer for use. This involves authenticating protected 2261 * data and decrypting encrypted data into the plaintext abd. 2262 */ 2263 static int 2264 arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa, 2265 const zbookmark_phys_t *zb, boolean_t noauth) 2266 { 2267 int ret; 2268 2269 ASSERT(HDR_PROTECTED(hdr)); 2270 2271 if (hash_lock != NULL) 2272 mutex_enter(hash_lock); 2273 2274 if (HDR_NOAUTH(hdr) && !noauth) { 2275 /* 2276 * The caller requested authenticated data but our data has 2277 * not been authenticated yet. Verify the MAC now if we can. 2278 */ 2279 ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset); 2280 if (ret != 0) 2281 goto error; 2282 } else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) { 2283 /* 2284 * If we only have the encrypted version of the data, but the 2285 * unencrypted version was requested, we take this opportunity 2286 * to store the decrypted version in the header for future use. 2287 */ 2288 ret = arc_hdr_decrypt(hdr, spa, zb); 2289 if (ret != 0) 2290 goto error; 2291 } 2292 2293 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 2294 2295 if (hash_lock != NULL) 2296 mutex_exit(hash_lock); 2297 2298 return (0); 2299 2300 error: 2301 if (hash_lock != NULL) 2302 mutex_exit(hash_lock); 2303 2304 return (ret); 2305 } 2306 2307 /* 2308 * This function is used by the dbuf code to decrypt bonus buffers in place. 2309 * The dbuf code itself doesn't have any locking for decrypting a shared dnode 2310 * block, so we use the hash lock here to protect against concurrent calls to 2311 * arc_buf_fill(). 2312 */ 2313 /* ARGSUSED */ 2314 static void 2315 arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock) 2316 { 2317 arc_buf_hdr_t *hdr = buf->b_hdr; 2318 2319 ASSERT(HDR_ENCRYPTED(hdr)); 2320 ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE); 2321 ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr))); 2322 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 2323 2324 zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data, 2325 arc_buf_size(buf)); 2326 buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED; 2327 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; 2328 hdr->b_crypt_hdr.b_ebufcnt -= 1; 2329 } 2330 2331 /* 2332 * Given a buf that has a data buffer attached to it, this function will 2333 * efficiently fill the buf with data of the specified compression setting from 2334 * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr 2335 * are already sharing a data buf, no copy is performed. 2336 * 2337 * If the buf is marked as compressed but uncompressed data was requested, this 2338 * will allocate a new data buffer for the buf, remove that flag, and fill the 2339 * buf with uncompressed data.
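 *
 * As a quick reference, the combinations handled below can be
 * summarized like this (an editorial table derived from the code):
 *
 *	hdr's b_pabd	buf requested	action
 *	------------	-------------	-------------------------------
 *	compressed	compressed	copy b_pabd (no-op if shared)
 *	compressed	uncompressed	decompress b_pabd into the buf
 *	uncompressed	uncompressed	copy b_pabd (no-op if shared)
 *
 * The remaining combination is rejected outright: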
You can't request a compressed buf on a hdr with 2340 * uncompressed data, and (since we haven't added support for it yet) if you 2341 * want compressed data your buf must already be marked as compressed and have 2342 * the correct-sized data buffer. 2343 */ 2344 static int 2345 arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb, 2346 arc_fill_flags_t flags) 2347 { 2348 int error = 0; 2349 arc_buf_hdr_t *hdr = buf->b_hdr; 2350 boolean_t hdr_compressed = 2351 (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF); 2352 boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0; 2353 boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0; 2354 dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap; 2355 kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr); 2356 2357 ASSERT3P(buf->b_data, !=, NULL); 2358 IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf)); 2359 IMPLY(compressed, ARC_BUF_COMPRESSED(buf)); 2360 IMPLY(encrypted, HDR_ENCRYPTED(hdr)); 2361 IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf)); 2362 IMPLY(encrypted, ARC_BUF_COMPRESSED(buf)); 2363 IMPLY(encrypted, !ARC_BUF_SHARED(buf)); 2364 2365 /* 2366 * If the caller wanted encrypted data we just need to copy it from 2367 * b_rabd and potentially byteswap it. We won't be able to do any 2368 * further transforms on it. 2369 */ 2370 if (encrypted) { 2371 ASSERT(HDR_HAS_RABD(hdr)); 2372 abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd, 2373 HDR_GET_PSIZE(hdr)); 2374 goto byteswap; 2375 } 2376 2377 /* 2378 * Adjust encrypted and authenticated headers to accommodate 2379 * the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are 2380 * allowed to fail decryption due to keys not being loaded, 2381 * without being marked as an IO error. 2382 */ 2383 if (HDR_PROTECTED(hdr)) { 2384 error = arc_fill_hdr_crypt(hdr, hash_lock, spa, 2385 zb, !!(flags & ARC_FILL_NOAUTH)); 2386 if (error == EACCES && (flags & ARC_FILL_IN_PLACE) != 0) { 2387 return (error); 2388 } else if (error != 0) { 2389 if (hash_lock != NULL) 2390 mutex_enter(hash_lock); 2391 arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); 2392 if (hash_lock != NULL) 2393 mutex_exit(hash_lock); 2394 return (error); 2395 } 2396 } 2397 2398 /* 2399 * There is a special case here for dnode blocks which are 2400 * decrypting their bonus buffers. These blocks may request to 2401 * be decrypted in-place. This is necessary because there may 2402 * be many dnodes pointing into this buffer and there is 2403 * currently no method to synchronize replacing the backing 2404 * b_data buffer and updating all of the pointers. Here we use 2405 * the hash lock to ensure there are no races. If the need 2406 * arises for other types to be decrypted in-place, they must 2407 * add handling here as well.
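 *
 * For reference, the dbuf code reaches this path through the in-place
 * flavor of the public wrapper, roughly as sketched here (the
 * surrounding dbuf locking is elided):
 *
 *	error = arc_untransform(db->db_buf, spa, &zb, B_TRUE);
 *
 * where in_place == B_TRUE turns into ARC_FILL_IN_PLACE below.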
2408 */ 2409 if ((flags & ARC_FILL_IN_PLACE) != 0) { 2410 ASSERT(!hdr_compressed); 2411 ASSERT(!compressed); 2412 ASSERT(!encrypted); 2413 2414 if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) { 2415 ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE); 2416 2417 if (hash_lock != NULL) 2418 mutex_enter(hash_lock); 2419 arc_buf_untransform_in_place(buf, hash_lock); 2420 if (hash_lock != NULL) 2421 mutex_exit(hash_lock); 2422 2423 /* Compute the hdr's checksum if necessary */ 2424 arc_cksum_compute(buf); 2425 } 2426 2427 return (0); 2428 } 2429 2430 if (hdr_compressed == compressed) { 2431 if (!arc_buf_is_shared(buf)) { 2432 abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd, 2433 arc_buf_size(buf)); 2434 } 2435 } else { 2436 ASSERT(hdr_compressed); 2437 ASSERT(!compressed); 2438 ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr)); 2439 2440 /* 2441 * If the buf is sharing its data with the hdr, unlink it and 2442 * allocate a new data buffer for the buf. 2443 */ 2444 if (arc_buf_is_shared(buf)) { 2445 ASSERT(ARC_BUF_COMPRESSED(buf)); 2446 2447 /* We need to give the buf its own b_data */ 2448 buf->b_flags &= ~ARC_BUF_FLAG_SHARED; 2449 buf->b_data = 2450 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); 2451 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 2452 2453 /* Previously overhead was 0; just add new overhead */ 2454 ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr)); 2455 } else if (ARC_BUF_COMPRESSED(buf)) { 2456 /* We need to reallocate the buf's b_data */ 2457 arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr), 2458 buf); 2459 buf->b_data = 2460 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); 2461 2462 /* We increased the size of b_data; update overhead */ 2463 ARCSTAT_INCR(arcstat_overhead_size, 2464 HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr)); 2465 } 2466 2467 /* 2468 * Regardless of the buf's previous compression settings, it 2469 * should not be compressed at the end of this function. 2470 */ 2471 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; 2472 2473 /* 2474 * Try copying the data from another buf which already has a 2475 * decompressed version. If that's not possible, it's time to 2476 * bite the bullet and decompress the data from the hdr. 2477 */ 2478 if (arc_buf_try_copy_decompressed_data(buf)) { 2479 /* Skip byteswapping and checksumming (already done) */ 2480 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, !=, NULL); 2481 return (0); 2482 } else { 2483 error = zio_decompress_data(HDR_GET_COMPRESS(hdr), 2484 hdr->b_l1hdr.b_pabd, buf->b_data, 2485 HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr)); 2486 2487 /* 2488 * Absent hardware errors or software bugs, this should 2489 * be impossible, but log it anyway so we can debug it. 
2490 */ 2491 if (error != 0) { 2492 zfs_dbgmsg( 2493 "hdr %p, compress %d, psize %d, lsize %d", 2494 hdr, arc_hdr_get_compress(hdr), 2495 HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr)); 2496 if (hash_lock != NULL) 2497 mutex_enter(hash_lock); 2498 arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); 2499 if (hash_lock != NULL) 2500 mutex_exit(hash_lock); 2501 return (SET_ERROR(EIO)); 2502 } 2503 } 2504 } 2505 2506 byteswap: 2507 /* Byteswap the buf's data if necessary */ 2508 if (bswap != DMU_BSWAP_NUMFUNCS) { 2509 ASSERT(!HDR_SHARED_DATA(hdr)); 2510 ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS); 2511 dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr)); 2512 } 2513 2514 /* Compute the hdr's checksum if necessary */ 2515 arc_cksum_compute(buf); 2516 2517 return (0); 2518 } 2519 2520 /* 2521 * If this function is being called to decrypt an encrypted buffer or verify an 2522 * authenticated one, the key must be loaded and a mapping must be made 2523 * available in the keystore via spa_keystore_create_mapping() or one of its 2524 * callers. 2525 */ 2526 int 2527 arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb, 2528 boolean_t in_place) 2529 { 2530 int ret; 2531 arc_fill_flags_t flags = 0; 2532 2533 if (in_place) 2534 flags |= ARC_FILL_IN_PLACE; 2535 2536 ret = arc_buf_fill(buf, spa, zb, flags); 2537 if (ret == ECKSUM) { 2538 /* 2539 * Convert authentication and decryption errors to EIO 2540 * (and generate an ereport) before leaving the ARC. 2541 */ 2542 ret = SET_ERROR(EIO); 2543 spa_log_error(spa, zb); 2544 zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION, 2545 spa, NULL, zb, NULL, 0, 0); 2546 } 2547 2548 return (ret); 2549 } 2550 2551 /* 2552 * Increment the amount of evictable space in the arc_state_t's refcount. 2553 * We account for the space used by the hdr and the arc buf individually 2554 * so that we can add and remove them from the refcount individually. 2555 */ 2556 static void 2557 arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state) 2558 { 2559 arc_buf_contents_t type = arc_buf_type(hdr); 2560 2561 ASSERT(HDR_HAS_L1HDR(hdr)); 2562 2563 if (GHOST_STATE(state)) { 2564 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2565 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2566 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2567 ASSERT(!HDR_HAS_RABD(hdr)); 2568 (void) zfs_refcount_add_many(&state->arcs_esize[type], 2569 HDR_GET_LSIZE(hdr), hdr); 2570 return; 2571 } 2572 2573 ASSERT(!GHOST_STATE(state)); 2574 if (hdr->b_l1hdr.b_pabd != NULL) { 2575 (void) zfs_refcount_add_many(&state->arcs_esize[type], 2576 arc_hdr_size(hdr), hdr); 2577 } 2578 if (HDR_HAS_RABD(hdr)) { 2579 (void) zfs_refcount_add_many(&state->arcs_esize[type], 2580 HDR_GET_PSIZE(hdr), hdr); 2581 } 2582 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2583 buf = buf->b_next) { 2584 if (arc_buf_is_shared(buf)) 2585 continue; 2586 (void) zfs_refcount_add_many(&state->arcs_esize[type], 2587 arc_buf_size(buf), buf); 2588 } 2589 } 2590 2591 /* 2592 * Decrement the amount of evictable space in the arc_state_t's refcount. 2593 * We account for the space used by the hdr and the arc buf individually 2594 * so that we can add and remove them from the refcount individually. 
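 *
 * A worked example of the accounting (hypothetical sizes): a hdr in a
 * non-ghost state with a 16K compressed b_pabd and two bufs -- one
 * 128K unshared, one sharing the hdr's data -- contributes 16K
 * (arc_hdr_size) plus 128K (the unshared buf) to arcs_esize; the
 * shared buf adds nothing, since its b_data is the hdr's own b_pabd.
 * A ghost-state hdr contributes just HDR_GET_LSIZE(hdr).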
2595 */ 2596 static void 2597 arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state) 2598 { 2599 arc_buf_contents_t type = arc_buf_type(hdr); 2600 2601 ASSERT(HDR_HAS_L1HDR(hdr)); 2602 2603 if (GHOST_STATE(state)) { 2604 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2605 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2606 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2607 ASSERT(!HDR_HAS_RABD(hdr)); 2608 (void) zfs_refcount_remove_many(&state->arcs_esize[type], 2609 HDR_GET_LSIZE(hdr), hdr); 2610 return; 2611 } 2612 2613 ASSERT(!GHOST_STATE(state)); 2614 if (hdr->b_l1hdr.b_pabd != NULL) { 2615 (void) zfs_refcount_remove_many(&state->arcs_esize[type], 2616 arc_hdr_size(hdr), hdr); 2617 } 2618 if (HDR_HAS_RABD(hdr)) { 2619 (void) zfs_refcount_remove_many(&state->arcs_esize[type], 2620 HDR_GET_PSIZE(hdr), hdr); 2621 } 2622 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2623 buf = buf->b_next) { 2624 if (arc_buf_is_shared(buf)) 2625 continue; 2626 (void) zfs_refcount_remove_many(&state->arcs_esize[type], 2627 arc_buf_size(buf), buf); 2628 } 2629 } 2630 2631 /* 2632 * Add a reference to this hdr indicating that someone is actively 2633 * referencing that memory. When the refcount transitions from 0 to 1, 2634 * we remove it from the respective arc_state_t list to indicate that 2635 * it is not evictable. 2636 */ 2637 static void 2638 add_reference(arc_buf_hdr_t *hdr, void *tag) 2639 { 2640 ASSERT(HDR_HAS_L1HDR(hdr)); 2641 if (!MUTEX_HELD(HDR_LOCK(hdr))) { 2642 ASSERT(hdr->b_l1hdr.b_state == arc_anon); 2643 ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2644 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2645 } 2646 2647 arc_state_t *state = hdr->b_l1hdr.b_state; 2648 2649 if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) && 2650 (state != arc_anon)) { 2651 /* We don't use the L2-only state list. */ 2652 if (state != arc_l2c_only) { 2653 multilist_remove(state->arcs_list[arc_buf_type(hdr)], 2654 hdr); 2655 arc_evictable_space_decrement(hdr, state); 2656 } 2657 /* remove the prefetch flag if we get a reference */ 2658 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); 2659 } 2660 } 2661 2662 /* 2663 * Remove a reference from this hdr. When the reference transitions from 2664 * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's 2665 * list making it eligible for eviction. 2666 */ 2667 static int 2668 remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) 2669 { 2670 int cnt; 2671 arc_state_t *state = hdr->b_l1hdr.b_state; 2672 2673 ASSERT(HDR_HAS_L1HDR(hdr)); 2674 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 2675 ASSERT(!GHOST_STATE(state)); 2676 2677 /* 2678 * arc_l2c_only counts as a ghost state so we don't need to explicitly 2679 * check to prevent usage of the arc_l2c_only list. 2680 */ 2681 if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) && 2682 (state != arc_anon)) { 2683 multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr); 2684 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); 2685 arc_evictable_space_increment(hdr, state); 2686 } 2687 return (cnt); 2688 } 2689 2690 /* 2691 * Move the supplied buffer to the indicated state. The hash lock 2692 * for the buffer must be held by the caller. 
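 *
 * For context (the callers are outside this excerpt): arc_access()
 * uses this to promote a hdr, e.g. from arc_mru to arc_mfu on a
 * repeat hit, and the read and eviction paths use it to move hdrs
 * into and out of the ghost states; all of them call in with the
 * hash lock held, as asserted below.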
2693 */ 2694 static void 2695 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr, 2696 kmutex_t *hash_lock) 2697 { 2698 arc_state_t *old_state; 2699 int64_t refcnt; 2700 uint32_t bufcnt; 2701 boolean_t update_old, update_new; 2702 arc_buf_contents_t buftype = arc_buf_type(hdr); 2703 2704 /* 2705 * We almost always have an L1 hdr here, since we call arc_hdr_realloc() 2706 * in arc_read() when bringing a buffer out of the L2ARC. However, the 2707 * L1 hdr doesn't always exist when we change state to arc_anon before 2708 * destroying a header, in which case reallocating to add the L1 hdr is 2709 * pointless. 2710 */ 2711 if (HDR_HAS_L1HDR(hdr)) { 2712 old_state = hdr->b_l1hdr.b_state; 2713 refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt); 2714 bufcnt = hdr->b_l1hdr.b_bufcnt; 2715 2716 update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL || 2717 HDR_HAS_RABD(hdr)); 2718 } else { 2719 old_state = arc_l2c_only; 2720 refcnt = 0; 2721 bufcnt = 0; 2722 update_old = B_FALSE; 2723 } 2724 update_new = update_old; 2725 2726 ASSERT(MUTEX_HELD(hash_lock)); 2727 ASSERT3P(new_state, !=, old_state); 2728 ASSERT(!GHOST_STATE(new_state) || bufcnt == 0); 2729 ASSERT(old_state != arc_anon || bufcnt <= 1); 2730 2731 /* 2732 * If this buffer is evictable, transfer it from the 2733 * old state list to the new state list. 2734 */ 2735 if (refcnt == 0) { 2736 if (old_state != arc_anon && old_state != arc_l2c_only) { 2737 ASSERT(HDR_HAS_L1HDR(hdr)); 2738 multilist_remove(old_state->arcs_list[buftype], hdr); 2739 2740 if (GHOST_STATE(old_state)) { 2741 ASSERT0(bufcnt); 2742 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2743 update_old = B_TRUE; 2744 } 2745 arc_evictable_space_decrement(hdr, old_state); 2746 } 2747 if (new_state != arc_anon && new_state != arc_l2c_only) { 2748 2749 /* 2750 * An L1 header always exists here, since if we're 2751 * moving to some L1-cached state (i.e. not l2c_only or 2752 * anonymous), we realloc the header to add an L1hdr 2753 * beforehand. 2754 */ 2755 ASSERT(HDR_HAS_L1HDR(hdr)); 2756 multilist_insert(new_state->arcs_list[buftype], hdr); 2757 2758 if (GHOST_STATE(new_state)) { 2759 ASSERT0(bufcnt); 2760 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2761 update_new = B_TRUE; 2762 } 2763 arc_evictable_space_increment(hdr, new_state); 2764 } 2765 } 2766 2767 ASSERT(!HDR_EMPTY(hdr)); 2768 if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr)) 2769 buf_hash_remove(hdr); 2770 2771 /* adjust state sizes (ignore arc_l2c_only) */ 2772 2773 if (update_new && new_state != arc_l2c_only) { 2774 ASSERT(HDR_HAS_L1HDR(hdr)); 2775 if (GHOST_STATE(new_state)) { 2776 ASSERT0(bufcnt); 2777 2778 /* 2779 * When moving a header to a ghost state, we first 2780 * remove all arc buffers. Thus, we'll have a 2781 * bufcnt of zero, and no arc buffer to use for 2782 * the reference. As a result, we use the arc 2783 * header pointer for the reference. 2784 */ 2785 (void) zfs_refcount_add_many(&new_state->arcs_size, 2786 HDR_GET_LSIZE(hdr), hdr); 2787 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2788 ASSERT(!HDR_HAS_RABD(hdr)); 2789 } else { 2790 uint32_t buffers = 0; 2791 2792 /* 2793 * Each individual buffer holds a unique reference, 2794 * thus we must remove each of these references one 2795 * at a time. 2796 */ 2797 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2798 buf = buf->b_next) { 2799 ASSERT3U(bufcnt, !=, 0); 2800 buffers++; 2801 2802 /* 2803 * When the arc_buf_t is sharing the data 2804 * block with the hdr, the owner of the 2805 * reference belongs to the hdr. 
Only 2806 * add to the refcount if the arc_buf_t is 2807 * not shared. 2808 */ 2809 if (arc_buf_is_shared(buf)) 2810 continue; 2811 2812 (void) zfs_refcount_add_many( 2813 &new_state->arcs_size, 2814 arc_buf_size(buf), buf); 2815 } 2816 ASSERT3U(bufcnt, ==, buffers); 2817 2818 if (hdr->b_l1hdr.b_pabd != NULL) { 2819 (void) zfs_refcount_add_many( 2820 &new_state->arcs_size, 2821 arc_hdr_size(hdr), hdr); 2822 } 2823 2824 if (HDR_HAS_RABD(hdr)) { 2825 (void) zfs_refcount_add_many( 2826 &new_state->arcs_size, 2827 HDR_GET_PSIZE(hdr), hdr); 2828 } 2829 } 2830 } 2831 2832 if (update_old && old_state != arc_l2c_only) { 2833 ASSERT(HDR_HAS_L1HDR(hdr)); 2834 if (GHOST_STATE(old_state)) { 2835 ASSERT0(bufcnt); 2836 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2837 ASSERT(!HDR_HAS_RABD(hdr)); 2838 2839 /* 2840 * When moving a header off of a ghost state, 2841 * the header will not contain any arc buffers. 2842 * We use the arc header pointer for the reference 2843 * which is exactly what we did when we put the 2844 * header on the ghost state. 2845 */ 2846 2847 (void) zfs_refcount_remove_many(&old_state->arcs_size, 2848 HDR_GET_LSIZE(hdr), hdr); 2849 } else { 2850 uint32_t buffers = 0; 2851 2852 /* 2853 * Each individual buffer holds a unique reference, 2854 * thus we must remove each of these references one 2855 * at a time. 2856 */ 2857 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2858 buf = buf->b_next) { 2859 ASSERT3U(bufcnt, !=, 0); 2860 buffers++; 2861 2862 /* 2863 * When the arc_buf_t is sharing the data 2864 * block with the hdr, the owner of the 2865 * reference belongs to the hdr. Only 2866 * add to the refcount if the arc_buf_t is 2867 * not shared. 2868 */ 2869 if (arc_buf_is_shared(buf)) 2870 continue; 2871 2872 (void) zfs_refcount_remove_many( 2873 &old_state->arcs_size, arc_buf_size(buf), 2874 buf); 2875 } 2876 ASSERT3U(bufcnt, ==, buffers); 2877 ASSERT(hdr->b_l1hdr.b_pabd != NULL || 2878 HDR_HAS_RABD(hdr)); 2879 2880 if (hdr->b_l1hdr.b_pabd != NULL) { 2881 (void) zfs_refcount_remove_many( 2882 &old_state->arcs_size, arc_hdr_size(hdr), 2883 hdr); 2884 } 2885 2886 if (HDR_HAS_RABD(hdr)) { 2887 (void) zfs_refcount_remove_many( 2888 &old_state->arcs_size, HDR_GET_PSIZE(hdr), 2889 hdr); 2890 } 2891 } 2892 } 2893 2894 if (HDR_HAS_L1HDR(hdr)) 2895 hdr->b_l1hdr.b_state = new_state; 2896 2897 /* 2898 * L2 headers should never be on the L2 state list since they don't 2899 * have L1 headers allocated. 
2900 */ 2901 ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DATA]) && 2902 multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_METADATA])); 2903 } 2904 2905 void 2906 arc_space_consume(uint64_t space, arc_space_type_t type) 2907 { 2908 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 2909 2910 switch (type) { 2911 case ARC_SPACE_DATA: 2912 aggsum_add(&astat_data_size, space); 2913 break; 2914 case ARC_SPACE_META: 2915 aggsum_add(&astat_metadata_size, space); 2916 break; 2917 case ARC_SPACE_OTHER: 2918 aggsum_add(&astat_other_size, space); 2919 break; 2920 case ARC_SPACE_HDRS: 2921 aggsum_add(&astat_hdr_size, space); 2922 break; 2923 case ARC_SPACE_L2HDRS: 2924 aggsum_add(&astat_l2_hdr_size, space); 2925 break; 2926 } 2927 2928 if (type != ARC_SPACE_DATA) 2929 aggsum_add(&arc_meta_used, space); 2930 2931 aggsum_add(&arc_size, space); 2932 } 2933 2934 void 2935 arc_space_return(uint64_t space, arc_space_type_t type) 2936 { 2937 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 2938 2939 switch (type) { 2940 case ARC_SPACE_DATA: 2941 aggsum_add(&astat_data_size, -space); 2942 break; 2943 case ARC_SPACE_META: 2944 aggsum_add(&astat_metadata_size, -space); 2945 break; 2946 case ARC_SPACE_OTHER: 2947 aggsum_add(&astat_other_size, -space); 2948 break; 2949 case ARC_SPACE_HDRS: 2950 aggsum_add(&astat_hdr_size, -space); 2951 break; 2952 case ARC_SPACE_L2HDRS: 2953 aggsum_add(&astat_l2_hdr_size, -space); 2954 break; 2955 } 2956 2957 if (type != ARC_SPACE_DATA) { 2958 ASSERT(aggsum_compare(&arc_meta_used, space) >= 0); 2959 /* 2960 * We use the upper bound here rather than the precise value 2961 * because the arc_meta_max value doesn't need to be 2962 * precise. It's only consumed by humans via arcstats. 2963 */ 2964 if (arc_meta_max < aggsum_upper_bound(&arc_meta_used)) 2965 arc_meta_max = aggsum_upper_bound(&arc_meta_used); 2966 aggsum_add(&arc_meta_used, -space); 2967 } 2968 2969 ASSERT(aggsum_compare(&arc_size, space) >= 0); 2970 aggsum_add(&arc_size, -space); 2971 } 2972 2973 /* 2974 * Given a hdr and a buf, returns whether that buf can share its b_data buffer 2975 * with the hdr's b_pabd. 2976 */ 2977 static boolean_t 2978 arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf) 2979 { 2980 /* 2981 * The criteria for sharing a hdr's data are: 2982 * 1. the buffer is not encrypted 2983 * 2. the hdr's compression matches the buf's compression 2984 * 3. the hdr doesn't need to be byteswapped 2985 * 4. the hdr isn't already being shared 2986 * 5. the buf is either compressed or it is the last buf in the hdr list 2987 * 2988 * Criterion #5 maintains the invariant that shared uncompressed 2989 * bufs must be the final buf in the hdr's b_buf list. Reading this, you 2990 * might ask, "if a compressed buf is allocated first, won't that be the 2991 * last thing in the list?", but in that case it's impossible to create 2992 * a shared uncompressed buf anyway (because the hdr must be compressed 2993 * to have the compressed buf). You might also think that #3 is 2994 * sufficient to make this guarantee, however it's possible 2995 * (specifically in the rare L2ARC write race mentioned in 2996 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that 2997 * is sharable, but wasn't at the time of its allocation. Rather than 2998 * allow a new shared uncompressed buf to be created and then shuffle 2999 * the list around to make it the last element, this simply disallows 3000 * sharing if the new buf isn't the first to be added. 
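 *
 * A concrete (hypothetical) reading of the criteria: a non-byteswapped,
 * LZ4-compressed hdr that isn't already sharing may share with a
 * compressed buf at any position in its list; an uncompressed buf can
 * share only with an uncompressed hdr, and only when that buf is the
 * last one; and an encrypted buf can never share, per criterion #1.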
3001 */ 3002 ASSERT3P(buf->b_hdr, ==, hdr); 3003 boolean_t hdr_compressed = arc_hdr_get_compress(hdr) != 3004 ZIO_COMPRESS_OFF; 3005 boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0; 3006 return (!ARC_BUF_ENCRYPTED(buf) && 3007 buf_compressed == hdr_compressed && 3008 hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS && 3009 !HDR_SHARED_DATA(hdr) && 3010 (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf))); 3011 } 3012 3013 /* 3014 * Allocate a buf for this hdr. If you care about the data that's in the hdr, 3015 * or if you want a compressed buffer, pass those flags in. Returns 0 if the 3016 * copy was made successfully, or an error code otherwise. 3017 */ 3018 static int 3019 arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb, 3020 void *tag, boolean_t encrypted, boolean_t compressed, boolean_t noauth, 3021 boolean_t fill, arc_buf_t **ret) 3022 { 3023 arc_buf_t *buf; 3024 arc_fill_flags_t flags = ARC_FILL_LOCKED; 3025 3026 ASSERT(HDR_HAS_L1HDR(hdr)); 3027 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0); 3028 VERIFY(hdr->b_type == ARC_BUFC_DATA || 3029 hdr->b_type == ARC_BUFC_METADATA); 3030 ASSERT3P(ret, !=, NULL); 3031 ASSERT3P(*ret, ==, NULL); 3032 IMPLY(encrypted, compressed); 3033 3034 buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 3035 buf->b_hdr = hdr; 3036 buf->b_data = NULL; 3037 buf->b_next = hdr->b_l1hdr.b_buf; 3038 buf->b_flags = 0; 3039 3040 add_reference(hdr, tag); 3041 3042 /* 3043 * We're about to change the hdr's b_flags. We must either 3044 * hold the hash_lock or be undiscoverable. 3045 */ 3046 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 3047 3048 /* 3049 * Only honor requests for compressed bufs if the hdr is actually 3050 * compressed. This must be overridden if the buffer is encrypted since 3051 * encrypted buffers cannot be decompressed. 3052 */ 3053 if (encrypted) { 3054 buf->b_flags |= ARC_BUF_FLAG_COMPRESSED; 3055 buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED; 3056 flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED; 3057 } else if (compressed && 3058 arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) { 3059 buf->b_flags |= ARC_BUF_FLAG_COMPRESSED; 3060 flags |= ARC_FILL_COMPRESSED; 3061 } 3062 3063 if (noauth) { 3064 ASSERT0(encrypted); 3065 flags |= ARC_FILL_NOAUTH; 3066 } 3067 3068 /* 3069 * If the hdr's data can be shared then we share the data buffer and set 3070 * the appropriate bit in the hdr's b_flags to indicate the hdr is sharing 3071 * its data with the buf; otherwise, we allocate a new buffer to store the buf's data. 3072 * 3073 * There are two additional restrictions here because we're sharing 3074 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be 3075 * actively involved in an L2ARC write, because if this buf is used by 3076 * an arc_write() then the hdr's data buffer will be released when the 3077 * write completes, even though the L2ARC write might still be using it. 3078 * Second, the hdr's ABD must be linear so that the buf's user doesn't 3079 * need to be ABD-aware.
3080 */ 3081 boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) && 3082 hdr->b_l1hdr.b_pabd != NULL && abd_is_linear(hdr->b_l1hdr.b_pabd); 3083 3084 /* Set up b_data and sharing */ 3085 if (can_share) { 3086 buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd); 3087 buf->b_flags |= ARC_BUF_FLAG_SHARED; 3088 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA); 3089 } else { 3090 buf->b_data = 3091 arc_get_data_buf(hdr, arc_buf_size(buf), buf); 3092 ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf)); 3093 } 3094 VERIFY3P(buf->b_data, !=, NULL); 3095 3096 hdr->b_l1hdr.b_buf = buf; 3097 hdr->b_l1hdr.b_bufcnt += 1; 3098 if (encrypted) 3099 hdr->b_crypt_hdr.b_ebufcnt += 1; 3100 3101 /* 3102 * If the user wants the data from the hdr, we need to either copy or 3103 * decompress the data. 3104 */ 3105 if (fill) { 3106 ASSERT3P(zb, !=, NULL); 3107 return (arc_buf_fill(buf, spa, zb, flags)); 3108 } 3109 3110 return (0); 3111 } 3112 3113 static char *arc_onloan_tag = "onloan"; 3114 3115 static inline void 3116 arc_loaned_bytes_update(int64_t delta) 3117 { 3118 atomic_add_64(&arc_loaned_bytes, delta); 3119 3120 /* assert that it did not wrap around */ 3121 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0); 3122 } 3123 3124 /* 3125 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in 3126 * flight data by arc_tempreserve_space() until they are "returned". Loaned 3127 * buffers must be returned to the arc before they can be used by the DMU or 3128 * freed. 3129 */ 3130 arc_buf_t * 3131 arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size) 3132 { 3133 arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag, 3134 is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size); 3135 3136 arc_loaned_bytes_update(arc_buf_size(buf)); 3137 3138 return (buf); 3139 } 3140 3141 arc_buf_t * 3142 arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize, 3143 enum zio_compress compression_type) 3144 { 3145 arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag, 3146 psize, lsize, compression_type); 3147 3148 arc_loaned_bytes_update(arc_buf_size(buf)); 3149 3150 return (buf); 3151 } 3152 3153 arc_buf_t * 3154 arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder, 3155 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, 3156 dmu_object_type_t ot, uint64_t psize, uint64_t lsize, 3157 enum zio_compress compression_type) 3158 { 3159 arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj, 3160 byteorder, salt, iv, mac, ot, psize, lsize, compression_type); 3161 3162 atomic_add_64(&arc_loaned_bytes, psize); 3163 return (buf); 3164 } 3165 3166 3167 /* 3168 * Return a loaned arc buffer to the arc. 
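 *
 * A hedged usage sketch of the loan lifecycle (consumer-side code;
 * the size and data source are hypothetical): the DMU borrows an
 * anonymous buf, fills it, and either returns it as below or hands it
 * back through a write path such as dmu_assign_arcbuf():
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, B_FALSE, 131072);
 *	bcopy(src, abuf->b_data, 131072);
 *	arc_return_buf(abuf, FTAG);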
3169 */ 3170 void 3171 arc_return_buf(arc_buf_t *buf, void *tag) 3172 { 3173 arc_buf_hdr_t *hdr = buf->b_hdr; 3174 3175 ASSERT3P(buf->b_data, !=, NULL); 3176 ASSERT(HDR_HAS_L1HDR(hdr)); 3177 (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag); 3178 (void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 3179 3180 arc_loaned_bytes_update(-arc_buf_size(buf)); 3181 } 3182 3183 /* Detach an arc_buf from a dbuf (tag) */ 3184 void 3185 arc_loan_inuse_buf(arc_buf_t *buf, void *tag) 3186 { 3187 arc_buf_hdr_t *hdr = buf->b_hdr; 3188 3189 ASSERT3P(buf->b_data, !=, NULL); 3190 ASSERT(HDR_HAS_L1HDR(hdr)); 3191 (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 3192 (void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag); 3193 3194 arc_loaned_bytes_update(arc_buf_size(buf)); 3195 } 3196 3197 static void 3198 l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type) 3199 { 3200 l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP); 3201 3202 df->l2df_abd = abd; 3203 df->l2df_size = size; 3204 df->l2df_type = type; 3205 mutex_enter(&l2arc_free_on_write_mtx); 3206 list_insert_head(l2arc_free_on_write, df); 3207 mutex_exit(&l2arc_free_on_write_mtx); 3208 } 3209 3210 static void 3211 arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata) 3212 { 3213 arc_state_t *state = hdr->b_l1hdr.b_state; 3214 arc_buf_contents_t type = arc_buf_type(hdr); 3215 uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr); 3216 3217 /* protected by hash lock, if in the hash table */ 3218 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 3219 ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 3220 ASSERT(state != arc_anon && state != arc_l2c_only); 3221 3222 (void) zfs_refcount_remove_many(&state->arcs_esize[type], 3223 size, hdr); 3224 } 3225 (void) zfs_refcount_remove_many(&state->arcs_size, size, hdr); 3226 if (type == ARC_BUFC_METADATA) { 3227 arc_space_return(size, ARC_SPACE_META); 3228 } else { 3229 ASSERT(type == ARC_BUFC_DATA); 3230 arc_space_return(size, ARC_SPACE_DATA); 3231 } 3232 3233 if (free_rdata) { 3234 l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type); 3235 } else { 3236 l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type); 3237 } 3238 } 3239 3240 /* 3241 * Share the arc_buf_t's data with the hdr. Whenever we are sharing the 3242 * data buffer, we transfer the refcount ownership to the hdr and update 3243 * the appropriate kstats. 3244 */ 3245 static void 3246 arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf) 3247 { 3248 /* LINTED */ 3249 arc_state_t *state = hdr->b_l1hdr.b_state; 3250 3251 ASSERT(arc_can_share(hdr, buf)); 3252 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 3253 ASSERT(!ARC_BUF_ENCRYPTED(buf)); 3254 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 3255 3256 /* 3257 * Start sharing the data buffer. We transfer the 3258 * refcount ownership to the hdr since it always owns 3259 * the refcount whenever an arc_buf_t is shared. 3260 */ 3261 zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size, 3262 arc_hdr_size(hdr), buf, hdr); 3263 hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf)); 3264 abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd, 3265 HDR_ISTYPE_METADATA(hdr)); 3266 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA); 3267 buf->b_flags |= ARC_BUF_FLAG_SHARED; 3268 3269 /* 3270 * Since we've transferred ownership to the hdr we need 3271 * to increment its compressed and uncompressed kstats and 3272 * decrement the overhead size. 
3273 */ 3274 ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr)); 3275 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr)); 3276 ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf)); 3277 } 3278 3279 static void 3280 arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf) 3281 { 3282 /* LINTED */ 3283 arc_state_t *state = hdr->b_l1hdr.b_state; 3284 3285 ASSERT(arc_buf_is_shared(buf)); 3286 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 3287 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 3288 3289 /* 3290 * We are no longer sharing this buffer so we need 3291 * to transfer its ownership to the rightful owner. 3292 */ 3293 zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size, 3294 arc_hdr_size(hdr), hdr, buf); 3295 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 3296 abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd); 3297 abd_put(hdr->b_l1hdr.b_pabd); 3298 hdr->b_l1hdr.b_pabd = NULL; 3299 buf->b_flags &= ~ARC_BUF_FLAG_SHARED; 3300 3301 /* 3302 * Since the buffer is no longer shared between 3303 * the arc buf and the hdr, count it as overhead. 3304 */ 3305 ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr)); 3306 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr)); 3307 ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf)); 3308 } 3309 3310 /* 3311 * Remove an arc_buf_t from the hdr's buf list and return the last 3312 * arc_buf_t on the list. If no buffers remain on the list then return 3313 * NULL. 3314 */ 3315 static arc_buf_t * 3316 arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf) 3317 { 3318 arc_buf_t **bufp = &hdr->b_l1hdr.b_buf; 3319 arc_buf_t *lastbuf = NULL; 3320 3321 ASSERT(HDR_HAS_L1HDR(hdr)); 3322 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 3323 3324 /* 3325 * Remove the buf from the hdr list and locate the last 3326 * remaining buffer on the list. 3327 */ 3328 while (*bufp != NULL) { 3329 if (*bufp == buf) 3330 *bufp = buf->b_next; 3331 3332 /* 3333 * If we've removed a buffer in the middle of 3334 * the list then update lastbuf and 3335 * bufp. 3336 */ 3337 if (*bufp != NULL) { 3338 lastbuf = *bufp; 3339 bufp = &(*bufp)->b_next; 3340 } 3341 } 3342 buf->b_next = NULL; 3343 ASSERT3P(lastbuf, !=, buf); 3344 IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL); 3345 IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL); 3346 IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf)); 3347 3348 return (lastbuf); 3349 } 3350 3351 /* 3352 * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's 3353 * list and free it. 3354 */ 3355 static void 3356 arc_buf_destroy_impl(arc_buf_t *buf) 3357 { 3358 arc_buf_hdr_t *hdr = buf->b_hdr; 3359 3360 /* 3361 * Free up the data associated with the buf but only if we're not 3362 * sharing this with the hdr. If we are sharing it with the hdr, the 3363 * hdr is responsible for doing the free. 3364 */ 3365 if (buf->b_data != NULL) { 3366 /* 3367 * We're about to change the hdr's b_flags. We must either 3368 * hold the hash_lock or be undiscoverable.
3369 */ 3370 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 3371 3372 arc_cksum_verify(buf); 3373 arc_buf_unwatch(buf); 3374 3375 if (arc_buf_is_shared(buf)) { 3376 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 3377 } else { 3378 uint64_t size = arc_buf_size(buf); 3379 arc_free_data_buf(hdr, buf->b_data, size, buf); 3380 ARCSTAT_INCR(arcstat_overhead_size, -size); 3381 } 3382 buf->b_data = NULL; 3383 3384 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 3385 hdr->b_l1hdr.b_bufcnt -= 1; 3386 3387 if (ARC_BUF_ENCRYPTED(buf)) { 3388 hdr->b_crypt_hdr.b_ebufcnt -= 1; 3389 3390 /* 3391 * If we have no more encrypted buffers and we've 3392 * already gotten a copy of the decrypted data we can 3393 * free b_rabd to save some space. 3394 */ 3395 if (hdr->b_crypt_hdr.b_ebufcnt == 0 && 3396 HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd != NULL && 3397 !HDR_IO_IN_PROGRESS(hdr)) { 3398 arc_hdr_free_pabd(hdr, B_TRUE); 3399 } 3400 } 3401 } 3402 3403 arc_buf_t *lastbuf = arc_buf_remove(hdr, buf); 3404 3405 if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) { 3406 /* 3407 * If the current arc_buf_t is sharing its data buffer with the 3408 * hdr, then reassign the hdr's b_pabd to share it with the new 3409 * buffer at the end of the list. The shared buffer is always 3410 * the last one on the hdr's buffer list. 3411 * 3412 * There is an equivalent case for compressed bufs, but since 3413 * they aren't guaranteed to be the last buf in the list and 3414 * that is an exceedingly rare case, we just allow that space be 3415 * wasted temporarily. We must also be careful not to share 3416 * encrypted buffers, since they cannot be shared. 3417 */ 3418 if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) { 3419 /* Only one buf can be shared at once */ 3420 VERIFY(!arc_buf_is_shared(lastbuf)); 3421 /* hdr is uncompressed so can't have compressed buf */ 3422 VERIFY(!ARC_BUF_COMPRESSED(lastbuf)); 3423 3424 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 3425 arc_hdr_free_pabd(hdr, B_FALSE); 3426 3427 /* 3428 * We must setup a new shared block between the 3429 * last buffer and the hdr. The data would have 3430 * been allocated by the arc buf so we need to transfer 3431 * ownership to the hdr since it's now being shared. 3432 */ 3433 arc_share_buf(hdr, lastbuf); 3434 } 3435 } else if (HDR_SHARED_DATA(hdr)) { 3436 /* 3437 * Uncompressed shared buffers are always at the end 3438 * of the list. Compressed buffers don't have the 3439 * same requirements. This makes it hard to 3440 * simply assert that the lastbuf is shared so 3441 * we rely on the hdr's compression flags to determine 3442 * if we have a compressed, shared buffer. 3443 */ 3444 ASSERT3P(lastbuf, !=, NULL); 3445 ASSERT(arc_buf_is_shared(lastbuf) || 3446 arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF); 3447 } 3448 3449 /* 3450 * Free the checksum if we're removing the last uncompressed buf from 3451 * this hdr. 
3452 */ 3453 if (!arc_hdr_has_uncompressed_buf(hdr)) { 3454 arc_cksum_free(hdr); 3455 } 3456 3457 /* clean up the buf */ 3458 buf->b_hdr = NULL; 3459 kmem_cache_free(buf_cache, buf); 3460 } 3461 3462 static void 3463 arc_hdr_alloc_pabd(arc_buf_hdr_t *hdr, boolean_t alloc_rdata) 3464 { 3465 uint64_t size; 3466 3467 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0); 3468 ASSERT(HDR_HAS_L1HDR(hdr)); 3469 ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata); 3470 IMPLY(alloc_rdata, HDR_PROTECTED(hdr)); 3471 3472 if (alloc_rdata) { 3473 size = HDR_GET_PSIZE(hdr); 3474 ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL); 3475 hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr); 3476 ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL); 3477 } else { 3478 size = arc_hdr_size(hdr); 3479 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 3480 hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr); 3481 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 3482 } 3483 3484 ARCSTAT_INCR(arcstat_compressed_size, size); 3485 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr)); 3486 } 3487 3488 static void 3489 arc_hdr_free_pabd(arc_buf_hdr_t *hdr, boolean_t free_rdata) 3490 { 3491 uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr); 3492 3493 ASSERT(HDR_HAS_L1HDR(hdr)); 3494 ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr)); 3495 IMPLY(free_rdata, HDR_HAS_RABD(hdr)); 3496 3497 3498 /* 3499 * If the hdr is currently being written to the l2arc then 3500 * we defer freeing the data by adding it to the l2arc_free_on_write 3501 * list. The l2arc will free the data once it's finished 3502 * writing it to the l2arc device. 3503 */ 3504 if (HDR_L2_WRITING(hdr)) { 3505 arc_hdr_free_on_write(hdr, free_rdata); 3506 ARCSTAT_BUMP(arcstat_l2_free_on_write); 3507 } else if (free_rdata) { 3508 arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr); 3509 } else { 3510 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, 3511 size, hdr); 3512 } 3513 3514 if (free_rdata) { 3515 hdr->b_crypt_hdr.b_rabd = NULL; 3516 } else { 3517 hdr->b_l1hdr.b_pabd = NULL; 3518 } 3519 3520 if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr)) 3521 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 3522 3523 ARCSTAT_INCR(arcstat_compressed_size, -size); 3524 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr)); 3525 } 3526 3527 static arc_buf_hdr_t * 3528 arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize, 3529 boolean_t protected, enum zio_compress compression_type, 3530 arc_buf_contents_t type, boolean_t alloc_rdata) 3531 { 3532 arc_buf_hdr_t *hdr; 3533 3534 VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA); 3535 if (protected) { 3536 hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE); 3537 } else { 3538 hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE); 3539 } 3540 ASSERT(HDR_EMPTY(hdr)); 3541 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 3542 ASSERT3P(hdr->b_l1hdr.b_thawed, ==, NULL); 3543 HDR_SET_PSIZE(hdr, psize); 3544 HDR_SET_LSIZE(hdr, lsize); 3545 hdr->b_spa = spa; 3546 hdr->b_type = type; 3547 hdr->b_flags = 0; 3548 arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR); 3549 arc_hdr_set_compress(hdr, compression_type); 3550 if (protected) 3551 arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED); 3552 3553 hdr->b_l1hdr.b_state = arc_anon; 3554 hdr->b_l1hdr.b_arc_access = 0; 3555 hdr->b_l1hdr.b_bufcnt = 0; 3556 hdr->b_l1hdr.b_buf = NULL; 3557 3558 /* 3559 * Allocate the hdr's buffer. 
This will contain either 3560 * the compressed or uncompressed data depending on the block 3561 * it references and compressed arc enablement. 3562 */ 3563 arc_hdr_alloc_pabd(hdr, alloc_rdata); 3564 ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 3565 3566 return (hdr); 3567 } 3568 3569 /* 3570 * Transition between the two allocation states for the arc_buf_hdr struct. 3571 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without 3572 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller 3573 * version is used when a cache buffer is only in the L2ARC in order to reduce 3574 * memory usage. 3575 */ 3576 static arc_buf_hdr_t * 3577 arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new) 3578 { 3579 ASSERT(HDR_HAS_L2HDR(hdr)); 3580 3581 arc_buf_hdr_t *nhdr; 3582 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 3583 3584 ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) || 3585 (old == hdr_l2only_cache && new == hdr_full_cache)); 3586 3587 /* 3588 * If the caller wanted a new full header and the header is to be 3589 * encrypted, we will actually allocate the header from the full crypt 3590 * cache instead. The same applies to freeing from the old cache. 3591 */ 3592 if (HDR_PROTECTED(hdr) && new == hdr_full_cache) 3593 new = hdr_full_crypt_cache; 3594 if (HDR_PROTECTED(hdr) && old == hdr_full_cache) 3595 old = hdr_full_crypt_cache; 3596 3597 nhdr = kmem_cache_alloc(new, KM_PUSHPAGE); 3598 3599 ASSERT(MUTEX_HELD(HDR_LOCK(hdr))); 3600 buf_hash_remove(hdr); 3601 3602 bcopy(hdr, nhdr, HDR_L2ONLY_SIZE); 3603 3604 if (new == hdr_full_cache || new == hdr_full_crypt_cache) { 3605 arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR); 3606 /* 3607 * arc_access and arc_change_state need to be aware that a 3608 * header has just come out of L2ARC, so we set its state to 3609 * l2c_only even though it's about to change. 3610 */ 3611 nhdr->b_l1hdr.b_state = arc_l2c_only; 3612 3613 /* Verify previous threads set to NULL before freeing */ 3614 ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL); 3615 ASSERT(!HDR_HAS_RABD(hdr)); 3616 } else { 3617 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 3618 ASSERT0(hdr->b_l1hdr.b_bufcnt); 3619 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 3620 3621 /* 3622 * If we've reached here, we must have been called from 3623 * arc_evict_hdr(), as such we should have already been 3624 * removed from any ghost list we were previously on 3625 * (which protects us from racing with arc_evict_state), 3626 * thus no locking is needed during this check. 3627 */ 3628 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 3629 3630 /* 3631 * A buffer must not be moved into the arc_l2c_only 3632 * state if it's not finished being written out to the 3633 * l2arc device. Otherwise, the b_l1hdr.b_pabd field 3634 * might be accessed, even though it was removed. 3635 */ 3636 VERIFY(!HDR_L2_WRITING(hdr)); 3637 VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL); 3638 ASSERT(!HDR_HAS_RABD(hdr)); 3639 3640 #ifdef ZFS_DEBUG 3641 if (hdr->b_l1hdr.b_thawed != NULL) { 3642 kmem_free(hdr->b_l1hdr.b_thawed, 1); 3643 hdr->b_l1hdr.b_thawed = NULL; 3644 } 3645 #endif 3646 3647 arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR); 3648 } 3649 /* 3650 * The header has been reallocated so we need to re-insert it into any 3651 * lists it was on.
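 *
 * Concretely, "lists" means the buf hash table (re-inserted just
 * below) and the owning l2arc device's l2ad_buflist (spliced back in
 * place further down, under l2ad_mtx).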
3652 */ 3653 (void) buf_hash_insert(nhdr, NULL); 3654 3655 ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node)); 3656 3657 mutex_enter(&dev->l2ad_mtx); 3658 3659 /* 3660 * We must place the realloc'ed header back into the list at 3661 * the same spot. Otherwise, if it's placed earlier in the list, 3662 * l2arc_write_buffers() could find it during the function's 3663 * write phase, and try to write it out to the l2arc. 3664 */ 3665 list_insert_after(&dev->l2ad_buflist, hdr, nhdr); 3666 list_remove(&dev->l2ad_buflist, hdr); 3667 3668 mutex_exit(&dev->l2ad_mtx); 3669 3670 /* 3671 * Since we're using the pointer address as the tag when 3672 * incrementing and decrementing the l2ad_alloc refcount, we 3673 * must remove the old pointer (that we're about to destroy) and 3674 * add the new pointer to the refcount. Otherwise we'd remove 3675 * the wrong pointer address when calling arc_hdr_destroy() later. 3676 */ 3677 3678 (void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), 3679 hdr); 3680 (void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), 3681 nhdr); 3682 3683 buf_discard_identity(hdr); 3684 kmem_cache_free(old, hdr); 3685 3686 return (nhdr); 3687 } 3688 3689 /* 3690 * This function allows an L1 header to be reallocated as a crypt 3691 * header and vice versa. If we are going to a crypt header, the 3692 * new fields will be zeroed out. 3693 */ 3694 static arc_buf_hdr_t * 3695 arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt) 3696 { 3697 arc_buf_hdr_t *nhdr; 3698 arc_buf_t *buf; 3699 kmem_cache_t *ncache, *ocache; 3700 3701 ASSERT(HDR_HAS_L1HDR(hdr)); 3702 ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt); 3703 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 3704 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 3705 ASSERT(!list_link_active(&hdr->b_l2hdr.b_l2node)); 3706 ASSERT3P(hdr->b_hash_next, ==, NULL); 3707 3708 if (need_crypt) { 3709 ncache = hdr_full_crypt_cache; 3710 ocache = hdr_full_cache; 3711 } else { 3712 ncache = hdr_full_cache; 3713 ocache = hdr_full_crypt_cache; 3714 } 3715 3716 nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE); 3717 3718 /* 3719 * Copy all members that aren't locks or condvars to the new header. 3720 * No lists are pointing to us (as we asserted above), so we don't 3721 * need to worry about the list nodes. 3722 */ 3723 nhdr->b_dva = hdr->b_dva; 3724 nhdr->b_birth = hdr->b_birth; 3725 nhdr->b_type = hdr->b_type; 3726 nhdr->b_flags = hdr->b_flags; 3727 nhdr->b_psize = hdr->b_psize; 3728 nhdr->b_lsize = hdr->b_lsize; 3729 nhdr->b_spa = hdr->b_spa; 3730 nhdr->b_l2hdr.b_dev = hdr->b_l2hdr.b_dev; 3731 nhdr->b_l2hdr.b_daddr = hdr->b_l2hdr.b_daddr; 3732 nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum; 3733 nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt; 3734 nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap; 3735 nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state; 3736 nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access; 3737 nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb; 3738 nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd; 3739 #ifdef ZFS_DEBUG 3740 if (hdr->b_l1hdr.b_thawed != NULL) { 3741 nhdr->b_l1hdr.b_thawed = hdr->b_l1hdr.b_thawed; 3742 hdr->b_l1hdr.b_thawed = NULL; 3743 } 3744 #endif 3745 3746 /* 3747 * This refcount_add() exists only to ensure that the individual 3748 * arc buffers always point to a header that is referenced, avoiding 3749 * a small race condition that could trigger ASSERTs. 
3750 */ 3751 (void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG); 3752 nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf; 3753 for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) { 3754 mutex_enter(&buf->b_evict_lock); 3755 buf->b_hdr = nhdr; 3756 mutex_exit(&buf->b_evict_lock); 3757 } 3758 zfs_refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt); 3759 (void) zfs_refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG); 3760 ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt)); 3761 3762 if (need_crypt) { 3763 arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED); 3764 } else { 3765 arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED); 3766 } 3767 3768 /* unset all members of the original hdr */ 3769 bzero(&hdr->b_dva, sizeof (dva_t)); 3770 hdr->b_birth = 0; 3771 hdr->b_type = ARC_BUFC_INVALID; 3772 hdr->b_flags = 0; 3773 hdr->b_psize = 0; 3774 hdr->b_lsize = 0; 3775 hdr->b_spa = 0; 3776 hdr->b_l2hdr.b_dev = NULL; 3777 hdr->b_l2hdr.b_daddr = 0; 3778 hdr->b_l1hdr.b_freeze_cksum = NULL; 3779 hdr->b_l1hdr.b_buf = NULL; 3780 hdr->b_l1hdr.b_bufcnt = 0; 3781 hdr->b_l1hdr.b_byteswap = 0; 3782 hdr->b_l1hdr.b_state = NULL; 3783 hdr->b_l1hdr.b_arc_access = 0; 3784 hdr->b_l1hdr.b_acb = NULL; 3785 hdr->b_l1hdr.b_pabd = NULL; 3786 3787 if (ocache == hdr_full_crypt_cache) { 3788 ASSERT(!HDR_HAS_RABD(hdr)); 3789 hdr->b_crypt_hdr.b_ot = DMU_OT_NONE; 3790 hdr->b_crypt_hdr.b_ebufcnt = 0; 3791 hdr->b_crypt_hdr.b_dsobj = 0; 3792 bzero(hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN); 3793 bzero(hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN); 3794 bzero(hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN); 3795 } 3796 3797 buf_discard_identity(hdr); 3798 kmem_cache_free(ocache, hdr); 3799 3800 return (nhdr); 3801 } 3802 3803 /* 3804 * This function is used by the send / receive code to convert a newly 3805 * allocated arc_buf_t to one that is suitable for a raw encrypted write. It 3806 * is also used to allow the root objset block to be updated without altering 3807 * its embedded MACs. Both block types will always be uncompressed so we do not 3808 * have to worry about compression type or psize. 3809 */ 3810 void 3811 arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder, 3812 dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv, 3813 const uint8_t *mac) 3814 { 3815 arc_buf_hdr_t *hdr = buf->b_hdr; 3816 3817 ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET); 3818 ASSERT(HDR_HAS_L1HDR(hdr)); 3819 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 3820 3821 buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED); 3822 if (!HDR_PROTECTED(hdr)) 3823 hdr = arc_hdr_realloc_crypt(hdr, B_TRUE); 3824 hdr->b_crypt_hdr.b_dsobj = dsobj; 3825 hdr->b_crypt_hdr.b_ot = ot; 3826 hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ? 3827 DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot); 3828 if (!arc_hdr_has_uncompressed_buf(hdr)) 3829 arc_cksum_free(hdr); 3830 3831 if (salt != NULL) 3832 bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN); 3833 if (iv != NULL) 3834 bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN); 3835 if (mac != NULL) 3836 bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN); 3837 } 3838 3839 /* 3840 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller. 3841 * The buf is returned thawed since we expect the consumer to modify it.
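 *
 * A minimal usage sketch (the tag, source and size are illustrative
 * only):
 *
 *	arc_buf_t *buf = arc_alloc_buf(spa, FTAG, ARC_BUFC_DATA, 4096);
 *	bcopy(src, buf->b_data, 4096);
 *	...
 *	arc_buf_destroy(buf, FTAG);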
3842 */ 3843 arc_buf_t * 3844 arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size) 3845 { 3846 arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size, 3847 B_FALSE, ZIO_COMPRESS_OFF, type, B_FALSE); 3848 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr))); 3849 3850 arc_buf_t *buf = NULL; 3851 VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_FALSE, 3852 B_FALSE, B_FALSE, &buf)); 3853 arc_buf_thaw(buf); 3854 3855 return (buf); 3856 } 3857 3858 /* 3859 * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this 3860 * for bufs containing metadata. 3861 */ 3862 arc_buf_t * 3863 arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize, 3864 enum zio_compress compression_type) 3865 { 3866 ASSERT3U(lsize, >, 0); 3867 ASSERT3U(lsize, >=, psize); 3868 ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF); 3869 ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS); 3870 3871 arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, 3872 B_FALSE, compression_type, ARC_BUFC_DATA, B_FALSE); 3873 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr))); 3874 3875 arc_buf_t *buf = NULL; 3876 VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, 3877 B_TRUE, B_FALSE, B_FALSE, &buf)); 3878 arc_buf_thaw(buf); 3879 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 3880 3881 if (!arc_buf_is_shared(buf)) { 3882 /* 3883 * To ensure that the hdr has the correct data in it if we call 3884 * arc_untransform() on this buf before it's been written to 3885 * disk, it's easiest if we just set up sharing between the 3886 * buf and the hdr. 3887 */ 3888 ASSERT(!abd_is_linear(hdr->b_l1hdr.b_pabd)); 3889 arc_hdr_free_pabd(hdr, B_FALSE); 3890 arc_share_buf(hdr, buf); 3891 } 3892 3893 return (buf); 3894 } 3895 3896 arc_buf_t * 3897 arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder, 3898 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, 3899 dmu_object_type_t ot, uint64_t psize, uint64_t lsize, 3900 enum zio_compress compression_type) 3901 { 3902 arc_buf_hdr_t *hdr; 3903 arc_buf_t *buf; 3904 arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ? 3905 ARC_BUFC_METADATA : ARC_BUFC_DATA; 3906 3907 ASSERT3U(lsize, >, 0); 3908 ASSERT3U(lsize, >=, psize); 3909 ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF); 3910 ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS); 3911 3912 hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE, 3913 compression_type, type, B_TRUE); 3914 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr))); 3915 3916 hdr->b_crypt_hdr.b_dsobj = dsobj; 3917 hdr->b_crypt_hdr.b_ot = ot; 3918 hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ? 3919 DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot); 3920 bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN); 3921 bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN); 3922 bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN); 3923 3924 /* 3925 * This buffer will be considered encrypted even if the ot is not an 3926 * encrypted type. It will become authenticated instead in 3927 * arc_write_ready(). 
3928 */ 3929 buf = NULL; 3930 VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_TRUE, B_TRUE, 3931 B_FALSE, B_FALSE, &buf)); 3932 arc_buf_thaw(buf); 3933 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 3934 3935 return (buf); 3936 } 3937 3938 static void 3939 arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr) 3940 { 3941 l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr; 3942 l2arc_dev_t *dev = l2hdr->b_dev; 3943 uint64_t psize = HDR_GET_PSIZE(hdr); 3944 uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize); 3945 3946 ASSERT(MUTEX_HELD(&dev->l2ad_mtx)); 3947 ASSERT(HDR_HAS_L2HDR(hdr)); 3948 3949 list_remove(&dev->l2ad_buflist, hdr); 3950 3951 ARCSTAT_INCR(arcstat_l2_psize, -psize); 3952 ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr)); 3953 3954 vdev_space_update(dev->l2ad_vdev, -asize, 0, 0); 3955 3956 (void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), 3957 hdr); 3958 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR); 3959 } 3960 3961 static void 3962 arc_hdr_destroy(arc_buf_hdr_t *hdr) 3963 { 3964 if (HDR_HAS_L1HDR(hdr)) { 3965 ASSERT(hdr->b_l1hdr.b_buf == NULL || 3966 hdr->b_l1hdr.b_bufcnt > 0); 3967 ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 3968 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 3969 } 3970 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3971 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 3972 3973 if (!HDR_EMPTY(hdr)) 3974 buf_discard_identity(hdr); 3975 3976 if (HDR_HAS_L2HDR(hdr)) { 3977 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 3978 boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx); 3979 3980 if (!buflist_held) 3981 mutex_enter(&dev->l2ad_mtx); 3982 3983 /* 3984 * Even though we checked this conditional above, we 3985 * need to check this again now that we have the 3986 * l2ad_mtx. This is because we could be racing with 3987 * another thread calling l2arc_evict() which might have 3988 * destroyed this header's L2 portion as we were waiting 3989 * to acquire the l2ad_mtx. If that happens, we don't 3990 * want to re-destroy the header's L2 portion. 
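 *
 * This is the familiar check / lock / re-check pattern, in outline:
 *
 *	if (HDR_HAS_L2HDR(hdr)) {		unlocked peek (above)
 *		mutex_enter(&dev->l2ad_mtx);
 *		if (HDR_HAS_L2HDR(hdr))		decide again under the lock
 *			arc_hdr_l2hdr_destroy(hdr);
 *		mutex_exit(&dev->l2ad_mtx);
 *	}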
3991 */ 3992 if (HDR_HAS_L2HDR(hdr)) 3993 arc_hdr_l2hdr_destroy(hdr); 3994 3995 if (!buflist_held) 3996 mutex_exit(&dev->l2ad_mtx); 3997 } 3998 3999 if (HDR_HAS_L1HDR(hdr)) { 4000 arc_cksum_free(hdr); 4001 4002 while (hdr->b_l1hdr.b_buf != NULL) 4003 arc_buf_destroy_impl(hdr->b_l1hdr.b_buf); 4004 4005 #ifdef ZFS_DEBUG 4006 if (hdr->b_l1hdr.b_thawed != NULL) { 4007 kmem_free(hdr->b_l1hdr.b_thawed, 1); 4008 hdr->b_l1hdr.b_thawed = NULL; 4009 } 4010 #endif 4011 4012 if (hdr->b_l1hdr.b_pabd != NULL) { 4013 arc_hdr_free_pabd(hdr, B_FALSE); 4014 } 4015 4016 if (HDR_HAS_RABD(hdr)) 4017 arc_hdr_free_pabd(hdr, B_TRUE); 4018 } 4019 4020 ASSERT3P(hdr->b_hash_next, ==, NULL); 4021 if (HDR_HAS_L1HDR(hdr)) { 4022 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 4023 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 4024 4025 if (!HDR_PROTECTED(hdr)) { 4026 kmem_cache_free(hdr_full_cache, hdr); 4027 } else { 4028 kmem_cache_free(hdr_full_crypt_cache, hdr); 4029 } 4030 } else { 4031 kmem_cache_free(hdr_l2only_cache, hdr); 4032 } 4033 } 4034 4035 void 4036 arc_buf_destroy(arc_buf_t *buf, void* tag) 4037 { 4038 arc_buf_hdr_t *hdr = buf->b_hdr; 4039 kmutex_t *hash_lock = HDR_LOCK(hdr); 4040 4041 if (hdr->b_l1hdr.b_state == arc_anon) { 4042 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 4043 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 4044 VERIFY0(remove_reference(hdr, NULL, tag)); 4045 arc_hdr_destroy(hdr); 4046 return; 4047 } 4048 4049 mutex_enter(hash_lock); 4050 ASSERT3P(hdr, ==, buf->b_hdr); 4051 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 4052 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4053 ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon); 4054 ASSERT3P(buf->b_data, !=, NULL); 4055 4056 (void) remove_reference(hdr, hash_lock, tag); 4057 arc_buf_destroy_impl(buf); 4058 mutex_exit(hash_lock); 4059 } 4060 4061 /* 4062 * Evict the arc_buf_hdr that is provided as a parameter. The resultant 4063 * state of the header is dependent on its state prior to entering this 4064 * function. The following transitions are possible: 4065 * 4066 * - arc_mru -> arc_mru_ghost 4067 * - arc_mfu -> arc_mfu_ghost 4068 * - arc_mru_ghost -> arc_l2c_only 4069 * - arc_mru_ghost -> deleted 4070 * - arc_mfu_ghost -> arc_l2c_only 4071 * - arc_mfu_ghost -> deleted 4072 */ 4073 static int64_t 4074 arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 4075 { 4076 arc_state_t *evicted_state, *state; 4077 int64_t bytes_evicted = 0; 4078 int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ? 4079 zfs_arc_min_prescient_prefetch_ms : zfs_arc_min_prefetch_ms; 4080 4081 ASSERT(MUTEX_HELD(hash_lock)); 4082 ASSERT(HDR_HAS_L1HDR(hdr)); 4083 4084 state = hdr->b_l1hdr.b_state; 4085 if (GHOST_STATE(state)) { 4086 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 4087 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 4088 4089 /* 4090 * l2arc_write_buffers() relies on a header's L1 portion 4091 * (i.e. its b_pabd field) during its write phase. 4092 * Thus, we cannot push a header onto the arc_l2c_only 4093 * state (removing its L1 piece) until the header is 4094 * done being written to the l2arc. 4095 */ 4096 if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) { 4097 ARCSTAT_BUMP(arcstat_evict_l2_skip); 4098 return (bytes_evicted); 4099 } 4100 4101 ARCSTAT_BUMP(arcstat_deleted); 4102 bytes_evicted += HDR_GET_LSIZE(hdr); 4103 4104 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr); 4105 4106 if (HDR_HAS_L2HDR(hdr)) { 4107 ASSERT(hdr->b_l1hdr.b_pabd == NULL); 4108 ASSERT(!HDR_HAS_RABD(hdr)); 4109 /* 4110 * This buffer is cached on the 2nd Level ARC; 4111 * don't destroy the header. 
4112 */ 4113 arc_change_state(arc_l2c_only, hdr, hash_lock); 4114 /* 4115 * dropping from L1+L2 cached to L2-only, 4116 * realloc to remove the L1 header. 4117 */ 4118 hdr = arc_hdr_realloc(hdr, hdr_full_cache, 4119 hdr_l2only_cache); 4120 } else { 4121 arc_change_state(arc_anon, hdr, hash_lock); 4122 arc_hdr_destroy(hdr); 4123 } 4124 return (bytes_evicted); 4125 } 4126 4127 ASSERT(state == arc_mru || state == arc_mfu); 4128 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 4129 4130 /* prefetch buffers have a minimum lifespan */ 4131 if (HDR_IO_IN_PROGRESS(hdr) || 4132 ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) && 4133 ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access < min_lifetime * hz)) { 4134 ARCSTAT_BUMP(arcstat_evict_skip); 4135 return (bytes_evicted); 4136 } 4137 4138 ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt)); 4139 while (hdr->b_l1hdr.b_buf) { 4140 arc_buf_t *buf = hdr->b_l1hdr.b_buf; 4141 if (!mutex_tryenter(&buf->b_evict_lock)) { 4142 ARCSTAT_BUMP(arcstat_mutex_miss); 4143 break; 4144 } 4145 if (buf->b_data != NULL) 4146 bytes_evicted += HDR_GET_LSIZE(hdr); 4147 mutex_exit(&buf->b_evict_lock); 4148 arc_buf_destroy_impl(buf); 4149 } 4150 4151 if (HDR_HAS_L2HDR(hdr)) { 4152 ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr)); 4153 } else { 4154 if (l2arc_write_eligible(hdr->b_spa, hdr)) { 4155 ARCSTAT_INCR(arcstat_evict_l2_eligible, 4156 HDR_GET_LSIZE(hdr)); 4157 } else { 4158 ARCSTAT_INCR(arcstat_evict_l2_ineligible, 4159 HDR_GET_LSIZE(hdr)); 4160 } 4161 } 4162 4163 if (hdr->b_l1hdr.b_bufcnt == 0) { 4164 arc_cksum_free(hdr); 4165 4166 bytes_evicted += arc_hdr_size(hdr); 4167 4168 /* 4169 * If this hdr is being evicted and has a compressed 4170 * buffer then we discard it here before we change states. 4171 * This ensures that the accounting is updated correctly 4172 * in arc_free_data_impl(). 4173 */ 4174 if (hdr->b_l1hdr.b_pabd != NULL) 4175 arc_hdr_free_pabd(hdr, B_FALSE); 4176 4177 if (HDR_HAS_RABD(hdr)) 4178 arc_hdr_free_pabd(hdr, B_TRUE); 4179 4180 arc_change_state(evicted_state, hdr, hash_lock); 4181 ASSERT(HDR_IN_HASH_TABLE(hdr)); 4182 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 4183 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr); 4184 } 4185 4186 return (bytes_evicted); 4187 } 4188 4189 static uint64_t 4190 arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker, 4191 uint64_t spa, int64_t bytes) 4192 { 4193 multilist_sublist_t *mls; 4194 uint64_t bytes_evicted = 0; 4195 arc_buf_hdr_t *hdr; 4196 kmutex_t *hash_lock; 4197 int evict_count = 0; 4198 4199 ASSERT3P(marker, !=, NULL); 4200 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 4201 4202 mls = multilist_sublist_lock(ml, idx); 4203 4204 for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL; 4205 hdr = multilist_sublist_prev(mls, marker)) { 4206 if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) || 4207 (evict_count >= zfs_arc_evict_batch_limit)) 4208 break; 4209 4210 /* 4211 * To keep our iteration location, move the marker 4212 * forward. Since we're not holding hdr's hash lock, we 4213 * must be very careful and not remove 'hdr' from the 4214 * sublist. Otherwise, other consumers might mistake the 4215 * 'hdr' as not being on a sublist when they call the 4216 * multilist_link_active() function (they all rely on 4217 * the hash lock protecting concurrent insertions and 4218 * removals). multilist_sublist_move_forward() was 4219 * specifically implemented to ensure this is the case 4220 * (only 'marker' will be removed and re-inserted). 
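 *
 * Pictorially, for one illustrative sublist (eviction candidates at
 * the tail, where the marker starts out):
 *
 *	head ... [ hdrB ] [ hdrA ] [ marker ]
 *
 * After hdrA is processed, multilist_sublist_move_forward() gives
 *
 *	head ... [ hdrB ] [ marker ] [ hdrA ]
 *
 * so the next multilist_sublist_prev(mls, marker) call visits hdrB;
 * only the marker itself is ever removed and re-inserted.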
4221 */ 4222 multilist_sublist_move_forward(mls, marker); 4223 4224 /* 4225 * The only case where the b_spa field should ever be 4226 * zero, is the marker headers inserted by 4227 * arc_evict_state(). It's possible for multiple threads 4228 * to be calling arc_evict_state() concurrently (e.g. 4229 * dsl_pool_close() and zio_inject_fault()), so we must 4230 * skip any markers we see from these other threads. 4231 */ 4232 if (hdr->b_spa == 0) 4233 continue; 4234 4235 /* we're only interested in evicting buffers of a certain spa */ 4236 if (spa != 0 && hdr->b_spa != spa) { 4237 ARCSTAT_BUMP(arcstat_evict_skip); 4238 continue; 4239 } 4240 4241 hash_lock = HDR_LOCK(hdr); 4242 4243 /* 4244 * We aren't calling this function from any code path 4245 * that would already be holding a hash lock, so we're 4246 * asserting on this assumption to be defensive in case 4247 * this ever changes. Without this check, it would be 4248 * possible to incorrectly increment arcstat_mutex_miss 4249 * below (e.g. if the code changed such that we called 4250 * this function with a hash lock held). 4251 */ 4252 ASSERT(!MUTEX_HELD(hash_lock)); 4253 4254 if (mutex_tryenter(hash_lock)) { 4255 uint64_t evicted = arc_evict_hdr(hdr, hash_lock); 4256 mutex_exit(hash_lock); 4257 4258 bytes_evicted += evicted; 4259 4260 /* 4261 * If evicted is zero, arc_evict_hdr() must have 4262 * decided to skip this header, don't increment 4263 * evict_count in this case. 4264 */ 4265 if (evicted != 0) 4266 evict_count++; 4267 4268 /* 4269 * If arc_size isn't overflowing, signal any 4270 * threads that might happen to be waiting. 4271 * 4272 * For each header evicted, we wake up a single 4273 * thread. If we used cv_broadcast, we could 4274 * wake up "too many" threads causing arc_size 4275 * to significantly overflow arc_c; since 4276 * arc_get_data_impl() doesn't check for overflow 4277 * when it's woken up (it doesn't because it's 4278 * possible for the ARC to be overflowing while 4279 * full of un-evictable buffers, and the 4280 * function should proceed in this case). 4281 * 4282 * If threads are left sleeping, due to not 4283 * using cv_broadcast here, they will be woken 4284 * up via cv_broadcast in arc_adjust_cb() just 4285 * before arc_adjust_zthr sleeps. 4286 */ 4287 mutex_enter(&arc_adjust_lock); 4288 if (!arc_is_overflowing()) 4289 cv_signal(&arc_adjust_waiters_cv); 4290 mutex_exit(&arc_adjust_lock); 4291 } else { 4292 ARCSTAT_BUMP(arcstat_mutex_miss); 4293 } 4294 } 4295 4296 multilist_sublist_unlock(mls); 4297 4298 return (bytes_evicted); 4299 } 4300 4301 /* 4302 * Evict buffers from the given arc state, until we've removed the 4303 * specified number of bytes. Move the removed buffers to the 4304 * appropriate evict state. 4305 * 4306 * This function makes a "best effort". It skips over any buffers 4307 * it can't get a hash_lock on, and so, may not catch all candidates. 4308 * It may also return without evicting as much space as requested. 4309 * 4310 * If bytes is specified using the special value ARC_EVICT_ALL, this 4311 * will evict all available (i.e. unlocked and evictable) buffers from 4312 * the given arc state; which is used by arc_flush(). 
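 *
 * For example (illustrative only), arc_flush_state() drains a whole
 * state with a call like:
 *
 *	arc_evict_state(arc_mru, 0, ARC_EVICT_ALL, ARC_BUFC_DATA);
 *
 * where the spa of 0 means "buffers belonging to any pool".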
4313 */ 4314 static uint64_t 4315 arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes, 4316 arc_buf_contents_t type) 4317 { 4318 uint64_t total_evicted = 0; 4319 multilist_t *ml = state->arcs_list[type]; 4320 int num_sublists; 4321 arc_buf_hdr_t **markers; 4322 4323 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 4324 4325 num_sublists = multilist_get_num_sublists(ml); 4326 4327 /* 4328 * If we've tried to evict from each sublist, made some 4329 * progress, but still have not hit the target number of bytes 4330 * to evict, we want to keep trying. The markers allow us to 4331 * pick up where we left off for each individual sublist, rather 4332 * than starting from the tail each time. 4333 */ 4334 markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP); 4335 for (int i = 0; i < num_sublists; i++) { 4336 markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP); 4337 4338 /* 4339 * A b_spa of 0 is used to indicate that this header is 4340 * a marker. This fact is used in arc_adjust_type() and 4341 * arc_evict_state_impl(). 4342 */ 4343 markers[i]->b_spa = 0; 4344 4345 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 4346 multilist_sublist_insert_tail(mls, markers[i]); 4347 multilist_sublist_unlock(mls); 4348 } 4349 4350 /* 4351 * While we haven't hit our target number of bytes to evict, or 4352 * we're evicting all available buffers. 4353 */ 4354 while (total_evicted < bytes || bytes == ARC_EVICT_ALL) { 4355 /* 4356 * Start eviction using a randomly selected sublist, 4357 * this is to try and evenly balance eviction across all 4358 * sublists. Always starting at the same sublist 4359 * (e.g. index 0) would cause evictions to favor certain 4360 * sublists over others. 4361 */ 4362 int sublist_idx = multilist_get_random_index(ml); 4363 uint64_t scan_evicted = 0; 4364 4365 for (int i = 0; i < num_sublists; i++) { 4366 uint64_t bytes_remaining; 4367 uint64_t bytes_evicted; 4368 4369 if (bytes == ARC_EVICT_ALL) 4370 bytes_remaining = ARC_EVICT_ALL; 4371 else if (total_evicted < bytes) 4372 bytes_remaining = bytes - total_evicted; 4373 else 4374 break; 4375 4376 bytes_evicted = arc_evict_state_impl(ml, sublist_idx, 4377 markers[sublist_idx], spa, bytes_remaining); 4378 4379 scan_evicted += bytes_evicted; 4380 total_evicted += bytes_evicted; 4381 4382 /* we've reached the end, wrap to the beginning */ 4383 if (++sublist_idx >= num_sublists) 4384 sublist_idx = 0; 4385 } 4386 4387 /* 4388 * If we didn't evict anything during this scan, we have 4389 * no reason to believe we'll evict more during another 4390 * scan, so break the loop. 4391 */ 4392 if (scan_evicted == 0) { 4393 /* This isn't possible, let's make that obvious */ 4394 ASSERT3S(bytes, !=, 0); 4395 4396 /* 4397 * When bytes is ARC_EVICT_ALL, the only way to 4398 * break the loop is when scan_evicted is zero. 4399 * In that case, we actually have evicted enough, 4400 * so we don't want to increment the kstat. 
4401 */ 4402 if (bytes != ARC_EVICT_ALL) { 4403 ASSERT3S(total_evicted, <, bytes); 4404 ARCSTAT_BUMP(arcstat_evict_not_enough); 4405 } 4406 4407 break; 4408 } 4409 } 4410 4411 for (int i = 0; i < num_sublists; i++) { 4412 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 4413 multilist_sublist_remove(mls, markers[i]); 4414 multilist_sublist_unlock(mls); 4415 4416 kmem_cache_free(hdr_full_cache, markers[i]); 4417 } 4418 kmem_free(markers, sizeof (*markers) * num_sublists); 4419 4420 return (total_evicted); 4421 } 4422 4423 /* 4424 * Flush all "evictable" data of the given type from the arc state 4425 * specified. This will not evict any "active" buffers (i.e. referenced). 4426 * 4427 * When 'retry' is set to B_FALSE, the function will make a single pass 4428 * over the state and evict any buffers that it can. Since it doesn't 4429 * continually retry the eviction, it might end up leaving some buffers 4430 * in the ARC due to lock misses. 4431 * 4432 * When 'retry' is set to B_TRUE, the function will continually retry the 4433 * eviction until *all* evictable buffers have been removed from the 4434 * state. As a result, if concurrent insertions into the state are 4435 * allowed (e.g. if the ARC isn't shutting down), this function might 4436 * wind up in an infinite loop, continually trying to evict buffers. 4437 */ 4438 static uint64_t 4439 arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type, 4440 boolean_t retry) 4441 { 4442 uint64_t evicted = 0; 4443 4444 while (zfs_refcount_count(&state->arcs_esize[type]) != 0) { 4445 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type); 4446 4447 if (!retry) 4448 break; 4449 } 4450 4451 return (evicted); 4452 } 4453 4454 /* 4455 * Evict the specified number of bytes from the state specified, 4456 * restricting eviction to the spa and type given. This function 4457 * prevents us from trying to evict more from a state's list than 4458 * is "evictable", and skips eviction altogether when passed a 4459 * negative value for "bytes". In contrast, arc_evict_state() will 4460 * evict everything it can, when passed a negative value for "bytes". 4461 */ 4462 static uint64_t 4463 arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes, 4464 arc_buf_contents_t type) 4465 { 4466 int64_t delta; 4467 4468 if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) { 4469 delta = MIN(zfs_refcount_count(&state->arcs_esize[type]), 4470 bytes); 4471 return (arc_evict_state(state, spa, delta, type)); 4472 } 4473 4474 return (0); 4475 } 4476 4477 /* 4478 * Evict metadata buffers from the cache, such that arc_meta_used is 4479 * capped by the arc_meta_limit tunable. 4480 */ 4481 static uint64_t 4482 arc_adjust_meta(uint64_t meta_used) 4483 { 4484 uint64_t total_evicted = 0; 4485 int64_t target; 4486 4487 /* 4488 * If we're over the meta limit, we want to evict enough 4489 * metadata to get back under the meta limit. We don't want to 4490 * evict so much that we drop the MRU below arc_p, though. If 4491 * we're over the meta limit more than we're over arc_p, we 4492 * evict some from the MRU here, and some from the MFU below.
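 *
 * A worked example with made-up numbers: if meta_used is 600M with
 * arc_meta_limit at 500M (100M over the limit), and anon + mru sum to
 * 400M with arc_p at 350M (50M over), the MIN() below picks target =
 * 50M, so only 50M of metadata is evicted from the MRU and the rest
 * is left for the MFU pass.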
4493 */ 4494 target = MIN((int64_t)(meta_used - arc_meta_limit), 4495 (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) + 4496 zfs_refcount_count(&arc_mru->arcs_size) - arc_p)); 4497 4498 total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 4499 4500 /* 4501 * Similar to the above, we want to evict enough bytes to get us 4502 * below the meta limit, but not so much as to drop us below the 4503 * space allotted to the MFU (which is defined as arc_c - arc_p). 4504 */ 4505 target = MIN((int64_t)(meta_used - arc_meta_limit), 4506 (int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) - 4507 (arc_c - arc_p))); 4508 4509 total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 4510 4511 return (total_evicted); 4512 } 4513 4514 /* 4515 * Return the type of the oldest buffer in the given arc state 4516 * 4517 * This function will select a random sublist of type ARC_BUFC_DATA and 4518 * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist 4519 * is compared, and the type which contains the "older" buffer will be 4520 * returned. 4521 */ 4522 static arc_buf_contents_t 4523 arc_adjust_type(arc_state_t *state) 4524 { 4525 multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA]; 4526 multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA]; 4527 int data_idx = multilist_get_random_index(data_ml); 4528 int meta_idx = multilist_get_random_index(meta_ml); 4529 multilist_sublist_t *data_mls; 4530 multilist_sublist_t *meta_mls; 4531 arc_buf_contents_t type; 4532 arc_buf_hdr_t *data_hdr; 4533 arc_buf_hdr_t *meta_hdr; 4534 4535 /* 4536 * We keep the sublist lock until we're finished, to prevent 4537 * the headers from being destroyed via arc_evict_state(). 4538 */ 4539 data_mls = multilist_sublist_lock(data_ml, data_idx); 4540 meta_mls = multilist_sublist_lock(meta_ml, meta_idx); 4541 4542 /* 4543 * These two loops are to ensure we skip any markers that 4544 * might be at the tail of the lists due to arc_evict_state(). 4545 */ 4546 4547 for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL; 4548 data_hdr = multilist_sublist_prev(data_mls, data_hdr)) { 4549 if (data_hdr->b_spa != 0) 4550 break; 4551 } 4552 4553 for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL; 4554 meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) { 4555 if (meta_hdr->b_spa != 0) 4556 break; 4557 } 4558 4559 if (data_hdr == NULL && meta_hdr == NULL) { 4560 type = ARC_BUFC_DATA; 4561 } else if (data_hdr == NULL) { 4562 ASSERT3P(meta_hdr, !=, NULL); 4563 type = ARC_BUFC_METADATA; 4564 } else if (meta_hdr == NULL) { 4565 ASSERT3P(data_hdr, !=, NULL); 4566 type = ARC_BUFC_DATA; 4567 } else { 4568 ASSERT3P(data_hdr, !=, NULL); 4569 ASSERT3P(meta_hdr, !=, NULL); 4570 4571 /* The headers can't be on the sublist without an L1 header */ 4572 ASSERT(HDR_HAS_L1HDR(data_hdr)); 4573 ASSERT(HDR_HAS_L1HDR(meta_hdr)); 4574 4575 if (data_hdr->b_l1hdr.b_arc_access < 4576 meta_hdr->b_l1hdr.b_arc_access) { 4577 type = ARC_BUFC_DATA; 4578 } else { 4579 type = ARC_BUFC_METADATA; 4580 } 4581 } 4582 4583 multilist_sublist_unlock(meta_mls); 4584 multilist_sublist_unlock(data_mls); 4585 4586 return (type); 4587 } 4588 4589 /* 4590 * Evict buffers from the cache, such that arc_size is capped by arc_c. 
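 *
 * The passes below run in a fixed order: metadata over
 * arc_meta_limit first, then the MRU down toward arc_p, then the MFU
 * for anything still over arc_c, and finally the two ghost lists to
 * hold their target sizes (see the comments at each step).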
4591 */ 4592 static uint64_t 4593 arc_adjust(void) 4594 { 4595 uint64_t total_evicted = 0; 4596 uint64_t bytes; 4597 int64_t target; 4598 uint64_t asize = aggsum_value(&arc_size); 4599 uint64_t ameta = aggsum_value(&arc_meta_used); 4600 4601 /* 4602 * If we're over arc_meta_limit, we want to correct that before 4603 * potentially evicting data buffers below. 4604 */ 4605 total_evicted += arc_adjust_meta(ameta); 4606 4607 /* 4608 * Adjust MRU size 4609 * 4610 * If we're over the target cache size, we want to evict enough 4611 * from the list to get back to our target size. We don't want 4612 * to evict too much from the MRU, such that it drops below 4613 * arc_p. So, if we're over our target cache size more than 4614 * the MRU is over arc_p, we'll evict enough to get back to 4615 * arc_p here, and then evict more from the MFU below. 4616 */ 4617 target = MIN((int64_t)(asize - arc_c), 4618 (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) + 4619 zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p)); 4620 4621 /* 4622 * If we're below arc_meta_min, always prefer to evict data. 4623 * Otherwise, try to satisfy the requested number of bytes to 4624 * evict from the type which contains older buffers; in an 4625 * effort to keep newer buffers in the cache regardless of their 4626 * type. If we cannot satisfy the number of bytes from this 4627 * type, spill over into the next type. 4628 */ 4629 if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA && 4630 ameta > arc_meta_min) { 4631 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 4632 total_evicted += bytes; 4633 4634 /* 4635 * If we couldn't evict our target number of bytes from 4636 * metadata, we try to get the rest from data. 4637 */ 4638 target -= bytes; 4639 4640 total_evicted += 4641 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); 4642 } else { 4643 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); 4644 total_evicted += bytes; 4645 4646 /* 4647 * If we couldn't evict our target number of bytes from 4648 * data, we try to get the rest from metadata. 4649 */ 4650 target -= bytes; 4651 4652 total_evicted += 4653 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 4654 } 4655 4656 /* 4657 * Adjust MFU size 4658 * 4659 * Now that we've tried to evict enough from the MRU to get its 4660 * size back to arc_p, if we're still above the target cache 4661 * size, we evict the rest from the MFU. 4662 */ 4663 target = asize - arc_c; 4664 4665 if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA && 4666 ameta > arc_meta_min) { 4667 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 4668 total_evicted += bytes; 4669 4670 /* 4671 * If we couldn't evict our target number of bytes from 4672 * metadata, we try to get the rest from data. 4673 */ 4674 target -= bytes; 4675 4676 total_evicted += 4677 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); 4678 } else { 4679 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); 4680 total_evicted += bytes; 4681 4682 /* 4683 * If we couldn't evict our target number of bytes from 4684 * data, we try to get the rest from metadata. 4685 */ 4686 target -= bytes; 4687 4688 total_evicted += 4689 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 4690 } 4691 4692 /* 4693 * Adjust ghost lists 4694 * 4695 * In addition to the above, the ARC also defines target values 4696 * for the ghost lists.
The sum of the mru list and mru ghost 4697 * list should never exceed the target size of the cache, and 4698 * the sum of the mru list, mfu list, mru ghost list, and mfu 4699 * ghost list should never exceed twice the target size of the 4700 * cache. The following logic enforces these limits on the ghost 4701 * caches, and evicts from them as needed. 4702 */ 4703 target = zfs_refcount_count(&arc_mru->arcs_size) + 4704 zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c; 4705 4706 bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA); 4707 total_evicted += bytes; 4708 4709 target -= bytes; 4710 4711 total_evicted += 4712 arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA); 4713 4714 /* 4715 * We assume the sum of the mru list and mfu list is less than 4716 * or equal to arc_c (we enforced this above), which means we 4717 * can use the simpler of the two equations below: 4718 * 4719 * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c 4720 * mru ghost + mfu ghost <= arc_c 4721 */ 4722 target = zfs_refcount_count(&arc_mru_ghost->arcs_size) + 4723 zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c; 4724 4725 bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA); 4726 total_evicted += bytes; 4727 4728 target -= bytes; 4729 4730 total_evicted += 4731 arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA); 4732 4733 return (total_evicted); 4734 } 4735 4736 void 4737 arc_flush(spa_t *spa, boolean_t retry) 4738 { 4739 uint64_t guid = 0; 4740 4741 /* 4742 * If retry is B_TRUE, a spa must not be specified since we have 4743 * no good way to determine if all of a spa's buffers have been 4744 * evicted from an arc state. 4745 */ 4746 ASSERT(!retry || spa == 0); 4747 4748 if (spa != NULL) 4749 guid = spa_load_guid(spa); 4750 4751 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry); 4752 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry); 4753 4754 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry); 4755 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry); 4756 4757 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry); 4758 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry); 4759 4760 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry); 4761 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry); 4762 } 4763 4764 static void 4765 arc_reduce_target_size(int64_t to_free) 4766 { 4767 uint64_t asize = aggsum_value(&arc_size); 4768 if (arc_c > arc_c_min) { 4769 4770 if (arc_c > arc_c_min + to_free) 4771 atomic_add_64(&arc_c, -to_free); 4772 else 4773 arc_c = arc_c_min; 4774 4775 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 4776 if (asize < arc_c) 4777 arc_c = MAX(asize, arc_c_min); 4778 if (arc_p > arc_c) 4779 arc_p = (arc_c >> 1); 4780 ASSERT(arc_c >= arc_c_min); 4781 ASSERT((int64_t)arc_p >= 0); 4782 } 4783 4784 if (asize > arc_c) { 4785 /* See comment in arc_adjust_cb_check() on why lock+flag */ 4786 mutex_enter(&arc_adjust_lock); 4787 arc_adjust_needed = B_TRUE; 4788 mutex_exit(&arc_adjust_lock); 4789 zthr_wakeup(arc_adjust_zthr); 4790 } 4791 } 4792 4793 typedef enum free_memory_reason_t { 4794 FMR_UNKNOWN, 4795 FMR_NEEDFREE, 4796 FMR_LOTSFREE, 4797 FMR_SWAPFS_MINFREE, 4798 FMR_PAGES_PP_MAXIMUM, 4799 FMR_HEAP_ARENA, 4800 FMR_ZIO_ARENA, 4801 } free_memory_reason_t; 4802 4803 int64_t last_free_memory; 4804 free_memory_reason_t last_free_reason; 4805 4806 /* 4807 * Additional reserve of pages for pp_reserve. 
4808 */ 4809 int64_t arc_pages_pp_reserve = 64; 4810 4811 /* 4812 * Additional reserve of pages for swapfs. 4813 */ 4814 int64_t arc_swapfs_reserve = 64; 4815 4816 /* 4817 * Return the amount of memory that can be consumed before reclaim will be 4818 * needed. Positive if there is sufficient free memory, negative indicates 4819 * the amount of memory that needs to be freed up. 4820 */ 4821 static int64_t 4822 arc_available_memory(void) 4823 { 4824 int64_t lowest = INT64_MAX; 4825 int64_t n; 4826 free_memory_reason_t r = FMR_UNKNOWN; 4827 4828 #ifdef _KERNEL 4829 if (needfree > 0) { 4830 n = PAGESIZE * (-needfree); 4831 if (n < lowest) { 4832 lowest = n; 4833 r = FMR_NEEDFREE; 4834 } 4835 } 4836 4837 /* 4838 * check that we're out of range of the pageout scanner. It starts to 4839 * schedule paging if freemem is less than lotsfree and needfree. 4840 * lotsfree is the high-water mark for pageout, and needfree is the 4841 * number of needed free pages. We add extra pages here to make sure 4842 * the scanner doesn't start up while we're freeing memory. 4843 */ 4844 n = PAGESIZE * (freemem - lotsfree - needfree - desfree); 4845 if (n < lowest) { 4846 lowest = n; 4847 r = FMR_LOTSFREE; 4848 } 4849 4850 /* 4851 * check to make sure that swapfs has enough space so that anon 4852 * reservations can still succeed. anon_resvmem() checks that the 4853 * availrmem is greater than swapfs_minfree, and the number of reserved 4854 * swap pages. We also add a bit of extra here just to prevent 4855 * circumstances from getting really dire. 4856 */ 4857 n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve - 4858 desfree - arc_swapfs_reserve); 4859 if (n < lowest) { 4860 lowest = n; 4861 r = FMR_SWAPFS_MINFREE; 4862 } 4863 4864 4865 /* 4866 * Check that we have enough availrmem that memory locking (e.g., via 4867 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum 4868 * stores the number of pages that cannot be locked; when availrmem 4869 * drops below pages_pp_maximum, page locking mechanisms such as 4870 * page_pp_lock() will fail.) 4871 */ 4872 n = PAGESIZE * (availrmem - pages_pp_maximum - 4873 arc_pages_pp_reserve); 4874 if (n < lowest) { 4875 lowest = n; 4876 r = FMR_PAGES_PP_MAXIMUM; 4877 } 4878 4879 #if defined(__i386) 4880 /* 4881 * If we're on an i386 platform, it's possible that we'll exhaust the 4882 * kernel heap space before we ever run out of available physical 4883 * memory. Most checks of the size of the heap_area compare against 4884 * tune.t_minarmem, which is the minimum available real memory that we 4885 * can have in the system. However, this is generally fixed at 25 pages 4886 * which is so low that it's useless. In this comparison, we seek to 4887 * calculate the total heap-size, and reclaim if more than 3/4ths of the 4888 * heap is allocated. (Or, in the calculation, if less than 1/4th is 4889 * free) 4890 */ 4891 n = (int64_t)vmem_size(heap_arena, VMEM_FREE) - 4892 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2); 4893 if (n < lowest) { 4894 lowest = n; 4895 r = FMR_HEAP_ARENA; 4896 } 4897 #endif 4898 4899 /* 4900 * If zio data pages are being allocated out of a separate heap segment, 4901 * then enforce that the size of available vmem for this arena remains 4902 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free. 4903 * 4904 * Note that reducing the arc_zio_arena_free_shift keeps more virtual 4905 * memory (in the zio_arena) free, which can avoid memory 4906 * fragmentation issues. 
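 *
 * For instance, with an arc_zio_arena_free_shift of 2 (the 1/4th
 * noted above), an arena with 8G allocated wants at least 2G
 * (8G >> 2) of vmem kept free; n below goes negative once free space
 * drops under that.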
4907 */ 4908 if (zio_arena != NULL) { 4909 n = (int64_t)vmem_size(zio_arena, VMEM_FREE) - 4910 (vmem_size(zio_arena, VMEM_ALLOC) >> 4911 arc_zio_arena_free_shift); 4912 if (n < lowest) { 4913 lowest = n; 4914 r = FMR_ZIO_ARENA; 4915 } 4916 } 4917 #else 4918 /* Every 100 calls, free a small amount */ 4919 if (spa_get_random(100) == 0) 4920 lowest = -1024; 4921 #endif 4922 4923 last_free_memory = lowest; 4924 last_free_reason = r; 4925 4926 return (lowest); 4927 } 4928 4929 4930 /* 4931 * Determine if the system is under memory pressure and is asking 4932 * to reclaim memory. A return value of B_TRUE indicates that the system 4933 * is under memory pressure and that the arc should adjust accordingly. 4934 */ 4935 static boolean_t 4936 arc_reclaim_needed(void) 4937 { 4938 return (arc_available_memory() < 0); 4939 } 4940 4941 static void 4942 arc_kmem_reap_soon(void) 4943 { 4944 size_t i; 4945 kmem_cache_t *prev_cache = NULL; 4946 kmem_cache_t *prev_data_cache = NULL; 4947 extern kmem_cache_t *zio_buf_cache[]; 4948 extern kmem_cache_t *zio_data_buf_cache[]; 4949 extern kmem_cache_t *range_seg_cache; 4950 extern kmem_cache_t *abd_chunk_cache; 4951 4952 #ifdef _KERNEL 4953 if (aggsum_compare(&arc_meta_used, arc_meta_limit) >= 0) { 4954 /* 4955 * We are exceeding our meta-data cache limit. 4956 * Purge some DNLC entries to release holds on meta-data. 4957 */ 4958 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 4959 } 4960 #if defined(__i386) 4961 /* 4962 * Reclaim unused memory from all kmem caches. 4963 */ 4964 kmem_reap(); 4965 #endif 4966 #endif 4967 4968 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 4969 if (zio_buf_cache[i] != prev_cache) { 4970 prev_cache = zio_buf_cache[i]; 4971 kmem_cache_reap_soon(zio_buf_cache[i]); 4972 } 4973 if (zio_data_buf_cache[i] != prev_data_cache) { 4974 prev_data_cache = zio_data_buf_cache[i]; 4975 kmem_cache_reap_soon(zio_data_buf_cache[i]); 4976 } 4977 } 4978 kmem_cache_reap_soon(abd_chunk_cache); 4979 kmem_cache_reap_soon(buf_cache); 4980 kmem_cache_reap_soon(hdr_full_cache); 4981 kmem_cache_reap_soon(hdr_l2only_cache); 4982 kmem_cache_reap_soon(range_seg_cache); 4983 4984 if (zio_arena != NULL) { 4985 /* 4986 * Ask the vmem arena to reclaim unused memory from its 4987 * quantum caches. 4988 */ 4989 vmem_qcache_reap(zio_arena); 4990 } 4991 } 4992 4993 /* ARGSUSED */ 4994 static boolean_t 4995 arc_adjust_cb_check(void *arg, zthr_t *zthr) 4996 { 4997 /* 4998 * This is necessary in order for the mdb ::arc dcmd to 4999 * show up to date information. Since the ::arc command 5000 * does not call the kstat's update function, without 5001 * this call, the command may show stale stats for the 5002 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even 5003 * with this change, the data might be up to 1 second 5004 * out of date (the arc_adjust_zthr has a maximum sleep 5005 * time of 1 second); but that should suffice. The 5006 * arc_state_t structures can be queried directly if more 5007 * accurate information is needed. 5008 */ 5009 if (arc_ksp != NULL) 5010 arc_ksp->ks_update(arc_ksp, KSTAT_READ); 5011 5012 /* 5013 * We have to rely on arc_get_data_impl() to tell us when to adjust, 5014 * rather than checking if we are overflowing here, so that we are 5015 * sure to not leave arc_get_data_impl() waiting on 5016 * arc_adjust_waiters_cv. If we have become "not overflowing" since 5017 * arc_get_data_impl() checked, we need to wake it up. We could 5018 * broadcast the CV here, but arc_get_data_impl() may have not yet 5019 * gone to sleep.
We would need to use a mutex to ensure that this 5020 * function doesn't broadcast until arc_get_data_impl() has gone to 5021 * sleep (e.g. the arc_adjust_lock). However, the lock ordering of 5022 * such a lock would necessarily be incorrect with respect to the 5023 * zthr_lock, which is held before this function is called, and is 5024 * held by arc_get_data_impl() when it calls zthr_wakeup(). 5025 */ 5026 return (arc_adjust_needed); 5027 } 5028 5029 /* 5030 * Keep arc_size under arc_c by running arc_adjust, which evicts data 5031 * from the ARC. 5032 */ 5033 /* ARGSUSED */ 5034 static void 5035 arc_adjust_cb(void *arg, zthr_t *zthr) 5036 { 5037 uint64_t evicted = 0; 5038 5039 /* Evict from cache */ 5040 evicted = arc_adjust(); 5041 5042 /* 5043 * If evicted is zero, we couldn't evict anything 5044 * via arc_adjust(). This could be due to hash lock 5045 * collisions, but more likely due to the majority of 5046 * arc buffers being unevictable. Therefore, even if 5047 * arc_size is above arc_c, another pass is unlikely to 5048 * be helpful and could potentially cause us to enter an 5049 * infinite loop. Additionally, zthr_iscancelled() is 5050 * checked here so that if the ARC is shutting down, the 5051 * broadcast will wake any remaining arc adjust waiters. 5052 */ 5053 mutex_enter(&arc_adjust_lock); 5054 arc_adjust_needed = !zthr_iscancelled(arc_adjust_zthr) && 5055 evicted > 0 && aggsum_compare(&arc_size, arc_c) > 0; 5056 if (!arc_adjust_needed) { 5057 /* 5058 * We're either no longer overflowing, or we 5059 * can't evict anything more, so we should wake 5060 * up any waiters. 5061 */ 5062 cv_broadcast(&arc_adjust_waiters_cv); 5063 } 5064 mutex_exit(&arc_adjust_lock); 5065 } 5066 5067 /* ARGSUSED */ 5068 static boolean_t 5069 arc_reap_cb_check(void *arg, zthr_t *zthr) 5070 { 5071 int64_t free_memory = arc_available_memory(); 5072 5073 /* 5074 * If a kmem reap is already active, don't schedule more. We must 5075 * check for this because kmem_cache_reap_soon() won't actually 5076 * block on the cache being reaped (this is to prevent callers from 5077 * becoming implicitly blocked by a system-wide kmem reap -- which, 5078 * on a system with many, many full magazines, can take minutes). 5079 */ 5080 if (!kmem_cache_reap_active() && 5081 free_memory < 0) { 5082 arc_no_grow = B_TRUE; 5083 arc_warm = B_TRUE; 5084 /* 5085 * Wait at least arc_grow_retry (default 60) seconds 5086 * before considering growing. 5087 */ 5088 arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry); 5089 return (B_TRUE); 5090 } else if (free_memory < arc_c >> arc_no_grow_shift) { 5091 arc_no_grow = B_TRUE; 5092 } else if (gethrtime() >= arc_growtime) { 5093 arc_no_grow = B_FALSE; 5094 } 5095 5096 return (B_FALSE); 5097 } 5098 5099 /* 5100 * Keep enough free memory in the system by reaping the ARC's kmem 5101 * caches. To cause more slabs to be reapable, we may reduce the 5102 * target size of the cache (arc_c), causing the arc_adjust_cb() 5103 * to free more buffers. 5104 */ 5105 /* ARGSUSED */ 5106 static void 5107 arc_reap_cb(void *arg, zthr_t *zthr) 5108 { 5109 int64_t free_memory; 5110 5111 /* 5112 * Kick off asynchronous kmem_reap()'s of all our caches. 5113 */ 5114 arc_kmem_reap_soon(); 5115 5116 /* 5117 * Wait at least arc_kmem_cache_reap_retry_ms between 5118 * arc_kmem_reap_soon() calls. Without this check, it is possible to 5119 * end up in a situation where we spend lots of time reaping 5120 * caches, while we're near arc_c_min.
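 * (A worked tick conversion, added for illustration and assuming the
 * default arc_kmem_cache_reap_retry_ms of 1000: with hz = 1000, the
 * delay() below computes (1000 * 1000 + 999) / 1000 = 1000 ticks, i.e.
 * one second, the +999 rounding any fractional tick up.)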
Waiting here also gives the 5121 * subsequent free memory check a chance of finding that the 5122 * asynchronous reap has already freed enough memory, and we don't 5123 * need to call arc_reduce_target_size(). 5124 */ 5125 delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000); 5126 5127 /* 5128 * Reduce the target size as needed to maintain the amount of free 5129 * memory in the system at a fraction of the arc_size (1/128th by 5130 * default). If oversubscribed (free_memory < 0), then reduce the 5131 * target arc_size by the deficit amount plus the fractional 5132 * amount. If free memory is positive but less than the fractional 5133 * amount, reduce by what is needed to hit the fractional amount. 5134 */ 5135 free_memory = arc_available_memory(); 5136 5137 int64_t to_free = 5138 (arc_c >> arc_shrink_shift) - free_memory; 5139 if (to_free > 0) { 5140 #ifdef _KERNEL 5141 to_free = MAX(to_free, ptob(needfree)); 5142 #endif 5143 arc_reduce_target_size(to_free); 5144 } 5145 } 5146 5147 /* 5148 * Adapt arc info given the number of bytes we are trying to add and 5149 * the state that we are coming from. This function is only called 5150 * when we are adding new content to the cache. 5151 */ 5152 static void 5153 arc_adapt(int bytes, arc_state_t *state) 5154 { 5155 int mult; 5156 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 5157 int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size); 5158 int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size); 5159 5160 if (state == arc_l2c_only) 5161 return; 5162 5163 ASSERT(bytes > 0); 5164 /* 5165 * Adapt the target size of the MRU list: 5166 * - if we just hit in the MRU ghost list, then increase 5167 * the target size of the MRU list. 5168 * - if we just hit in the MFU ghost list, then increase 5169 * the target size of the MFU list by decreasing the 5170 * target size of the MRU list. 5171 */ 5172 if (state == arc_mru_ghost) { 5173 mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size); 5174 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 5175 5176 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 5177 } else if (state == arc_mfu_ghost) { 5178 uint64_t delta; 5179 5180 mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size); 5181 mult = MIN(mult, 10); 5182 5183 delta = MIN(bytes * mult, arc_p); 5184 arc_p = MAX(arc_p_min, arc_p - delta); 5185 } 5186 ASSERT((int64_t)arc_p >= 0); 5187 5188 /* 5189 * Wake the reap thread if we do not have any available memory. 5190 */ 5191 if (arc_reclaim_needed()) { 5192 zthr_wakeup(arc_reap_zthr); 5193 return; 5194 } 5195 5196 5197 if (arc_no_grow) 5198 return; 5199 5200 if (arc_c >= arc_c_max) 5201 return; 5202 5203 /* 5204 * If we're within (2 * maxblocksize) bytes of the target 5205 * cache size, increment the target cache size. 5206 */ 5207 if (aggsum_compare(&arc_size, arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) > 5208 0) { 5209 atomic_add_64(&arc_c, (int64_t)bytes); 5210 if (arc_c > arc_c_max) 5211 arc_c = arc_c_max; 5212 else if (state == arc_anon) 5213 atomic_add_64(&arc_p, (int64_t)bytes); 5214 if (arc_p > arc_c) 5215 arc_p = arc_c; 5216 } 5217 ASSERT((int64_t)arc_p >= 0); 5218 } 5219 5220 /* 5221 * Check if arc_size has grown past our upper threshold, determined by 5222 * zfs_arc_overflow_shift.
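 *
 * A numeric sketch, added for illustration (assuming the default
 * zfs_arc_overflow_shift of 8 and a 16 MB SPA_MAXBLOCKSIZE): with
 * arc_c = 8 GB, overflow = MAX(16 MB, 8 GB >> 8) = 32 MB, so the ARC
 * is considered overflowing once arc_size reaches arc_c + 32 MB.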
5223 */ 5224 static boolean_t 5225 arc_is_overflowing(void) 5226 { 5227 /* Always allow at least one block of overflow */ 5228 uint64_t overflow = MAX(SPA_MAXBLOCKSIZE, 5229 arc_c >> zfs_arc_overflow_shift); 5230 5231 /* 5232 * We just compare the lower bound here for performance reasons. Our 5233 * primary goals are to make sure that the arc never grows without 5234 * bound, and that it can reach its maximum size. This check 5235 * accomplishes both goals. The maximum amount we could run over by is 5236 * 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block 5237 * in the ARC. In practice, that's in the tens of MB, which is low 5238 * enough to be safe. 5239 */ 5240 return (aggsum_lower_bound(&arc_size) >= arc_c + overflow); 5241 } 5242 5243 static abd_t * 5244 arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 5245 { 5246 arc_buf_contents_t type = arc_buf_type(hdr); 5247 5248 arc_get_data_impl(hdr, size, tag); 5249 if (type == ARC_BUFC_METADATA) { 5250 return (abd_alloc(size, B_TRUE)); 5251 } else { 5252 ASSERT(type == ARC_BUFC_DATA); 5253 return (abd_alloc(size, B_FALSE)); 5254 } 5255 } 5256 5257 static void * 5258 arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 5259 { 5260 arc_buf_contents_t type = arc_buf_type(hdr); 5261 5262 arc_get_data_impl(hdr, size, tag); 5263 if (type == ARC_BUFC_METADATA) { 5264 return (zio_buf_alloc(size)); 5265 } else { 5266 ASSERT(type == ARC_BUFC_DATA); 5267 return (zio_data_buf_alloc(size)); 5268 } 5269 } 5270 5271 /* 5272 * Allocate a block and return it to the caller. If we are hitting the 5273 * hard limit for the cache size, we must sleep, waiting for the eviction 5274 * thread to catch up. If we're past the target size but below the hard 5275 * limit, we'll only signal the reclaim thread and continue on. 5276 */ 5277 static void 5278 arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 5279 { 5280 arc_state_t *state = hdr->b_l1hdr.b_state; 5281 arc_buf_contents_t type = arc_buf_type(hdr); 5282 5283 arc_adapt(size, state); 5284 5285 /* 5286 * If arc_size is currently overflowing, and has grown past our 5287 * upper limit, we must be adding data faster than the evict 5288 * thread can evict. Thus, to ensure we don't compound the 5289 * problem by adding more data and forcing arc_size to grow even 5290 * further past its target size, we halt and wait for the 5291 * eviction thread to catch up. 5292 * 5293 * It's also possible that the reclaim thread is unable to evict 5294 * enough buffers to get arc_size below the overflow limit (e.g. 5295 * due to buffers being un-evictable, or hash lock collisions). 5296 * In this case, we want to proceed regardless if we're 5297 * overflowing; thus we don't use a while loop here. 5298 */ 5299 if (arc_is_overflowing()) { 5300 mutex_enter(&arc_adjust_lock); 5301 5302 /* 5303 * Now that we've acquired the lock, we may no longer be 5304 * over the overflow limit, so let's check. 5305 * 5306 * We're ignoring the case of spurious wake ups. If that 5307 * were to happen, it'd let this thread consume an ARC 5308 * buffer before it should have (i.e. before we're under 5309 * the overflow limit and were signalled by the reclaim 5310 * thread). As long as that is a rare occurrence, it 5311 * shouldn't cause any harm.
5312 */ 5313 if (arc_is_overflowing()) { 5314 arc_adjust_needed = B_TRUE; 5315 zthr_wakeup(arc_adjust_zthr); 5316 (void) cv_wait(&arc_adjust_waiters_cv, 5317 &arc_adjust_lock); 5318 } 5319 mutex_exit(&arc_adjust_lock); 5320 } 5321 5322 VERIFY3U(hdr->b_type, ==, type); 5323 if (type == ARC_BUFC_METADATA) { 5324 arc_space_consume(size, ARC_SPACE_META); 5325 } else { 5326 arc_space_consume(size, ARC_SPACE_DATA); 5327 } 5328 5329 /* 5330 * Update the state size. Note that ghost states have a 5331 * "ghost size" and so don't need to be updated. 5332 */ 5333 if (!GHOST_STATE(state)) { 5334 5335 (void) zfs_refcount_add_many(&state->arcs_size, size, tag); 5336 5337 /* 5338 * If this is reached via arc_read, the link is 5339 * protected by the hash lock. If reached via 5340 * arc_buf_alloc, the header should not be accessed by 5341 * any other thread. And, if reached via arc_read_done, 5342 * the hash lock will protect it if it's found in the 5343 * hash table; otherwise no other thread should be 5344 * trying to [add|remove]_reference it. 5345 */ 5346 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 5347 ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 5348 (void) zfs_refcount_add_many(&state->arcs_esize[type], 5349 size, tag); 5350 } 5351 5352 /* 5353 * If we are growing the cache, and we are adding anonymous 5354 * data, and we have outgrown arc_p, update arc_p 5355 */ 5356 if (aggsum_compare(&arc_size, arc_c) < 0 && 5357 hdr->b_l1hdr.b_state == arc_anon && 5358 (zfs_refcount_count(&arc_anon->arcs_size) + 5359 zfs_refcount_count(&arc_mru->arcs_size) > arc_p)) 5360 arc_p = MIN(arc_c, arc_p + size); 5361 } 5362 } 5363 5364 static void 5365 arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag) 5366 { 5367 arc_free_data_impl(hdr, size, tag); 5368 abd_free(abd); 5369 } 5370 5371 static void 5372 arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag) 5373 { 5374 arc_buf_contents_t type = arc_buf_type(hdr); 5375 5376 arc_free_data_impl(hdr, size, tag); 5377 if (type == ARC_BUFC_METADATA) { 5378 zio_buf_free(buf, size); 5379 } else { 5380 ASSERT(type == ARC_BUFC_DATA); 5381 zio_data_buf_free(buf, size); 5382 } 5383 } 5384 5385 /* 5386 * Free the arc data buffer. 5387 */ 5388 static void 5389 arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 5390 { 5391 arc_state_t *state = hdr->b_l1hdr.b_state; 5392 arc_buf_contents_t type = arc_buf_type(hdr); 5393 5394 /* protected by hash lock, if in the hash table */ 5395 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 5396 ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 5397 ASSERT(state != arc_anon && state != arc_l2c_only); 5398 5399 (void) zfs_refcount_remove_many(&state->arcs_esize[type], 5400 size, tag); 5401 } 5402 (void) zfs_refcount_remove_many(&state->arcs_size, size, tag); 5403 5404 VERIFY3U(hdr->b_type, ==, type); 5405 if (type == ARC_BUFC_METADATA) { 5406 arc_space_return(size, ARC_SPACE_META); 5407 } else { 5408 ASSERT(type == ARC_BUFC_DATA); 5409 arc_space_return(size, ARC_SPACE_DATA); 5410 } 5411 } 5412 5413 /* 5414 * This routine is called whenever a buffer is accessed. 5415 * NOTE: the hash lock is dropped in this function. 5416 */ 5417 static void 5418 arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 5419 { 5420 clock_t now; 5421 5422 ASSERT(MUTEX_HELD(hash_lock)); 5423 ASSERT(HDR_HAS_L1HDR(hdr)); 5424 5425 if (hdr->b_l1hdr.b_state == arc_anon) { 5426 /* 5427 * This buffer is not in the cache, and does not 5428 * appear in our "ghost" list. 
Add the new buffer 5429 * to the MRU state. 5430 */ 5431 5432 ASSERT0(hdr->b_l1hdr.b_arc_access); 5433 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5434 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 5435 arc_change_state(arc_mru, hdr, hash_lock); 5436 5437 } else if (hdr->b_l1hdr.b_state == arc_mru) { 5438 now = ddi_get_lbolt(); 5439 5440 /* 5441 * If this buffer is here because of a prefetch, then either: 5442 * - clear the flag if this is a "referencing" read 5443 * (any subsequent access will bump this into the MFU state). 5444 * or 5445 * - move the buffer to the head of the list if this is 5446 * another prefetch (to make it less likely to be evicted). 5447 */ 5448 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) { 5449 if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 5450 /* link protected by hash lock */ 5451 ASSERT(multilist_link_active( 5452 &hdr->b_l1hdr.b_arc_node)); 5453 } else { 5454 arc_hdr_clear_flags(hdr, 5455 ARC_FLAG_PREFETCH | 5456 ARC_FLAG_PRESCIENT_PREFETCH); 5457 ARCSTAT_BUMP(arcstat_mru_hits); 5458 } 5459 hdr->b_l1hdr.b_arc_access = now; 5460 return; 5461 } 5462 5463 /* 5464 * This buffer has been "accessed" only once so far, 5465 * but it is still in the cache. If enough time has 5466 * passed, move it to the MFU state. 5467 */ 5468 if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) { 5469 /* 5470 * More than ARC_MINTIME has passed since we 5471 * instantiated this buffer. Move it to the 5472 * most frequently used state. 5473 */ 5474 hdr->b_l1hdr.b_arc_access = now; 5475 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 5476 arc_change_state(arc_mfu, hdr, hash_lock); 5477 } 5478 ARCSTAT_BUMP(arcstat_mru_hits); 5479 } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) { 5480 arc_state_t *new_state; 5481 /* 5482 * This buffer has been "accessed" recently, but 5483 * was evicted from the cache. Move it to the 5484 * MFU state. 5485 */ 5486 5487 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) { 5488 new_state = arc_mru; 5489 if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) { 5490 arc_hdr_clear_flags(hdr, 5491 ARC_FLAG_PREFETCH | 5492 ARC_FLAG_PRESCIENT_PREFETCH); 5493 } 5494 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 5495 } else { 5496 new_state = arc_mfu; 5497 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 5498 } 5499 5500 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5501 arc_change_state(new_state, hdr, hash_lock); 5502 5503 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 5504 } else if (hdr->b_l1hdr.b_state == arc_mfu) { 5505 /* 5506 * This buffer has been accessed more than once and is 5507 * still in the cache. Keep it in the MFU state. 5508 * 5509 * NOTE: an add_reference() that occurred when we did 5510 * the arc_read() will have kicked this off the list. 5511 * If it was a prefetch, we will explicitly move it to 5512 * the head of the list now. 5513 */ 5514 ARCSTAT_BUMP(arcstat_mfu_hits); 5515 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5516 } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) { 5517 arc_state_t *new_state = arc_mfu; 5518 /* 5519 * This buffer has been accessed more than once but has 5520 * been evicted from the cache. Move it back to the 5521 * MFU state. 5522 */ 5523 5524 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) { 5525 /* 5526 * This is a prefetch access... 5527 * move this block back to the MRU state.
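 * (A prefetch hit in the MFU ghost list is thus treated as a
 * fresh reference rather than a frequent one, mirroring the
 * MRU ghost handling above.)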
5528 */ 5529 new_state = arc_mru; 5530 } 5531 5532 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5533 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 5534 arc_change_state(new_state, hdr, hash_lock); 5535 5536 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 5537 } else if (hdr->b_l1hdr.b_state == arc_l2c_only) { 5538 /* 5539 * This buffer is on the 2nd Level ARC. 5540 */ 5541 5542 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5543 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 5544 arc_change_state(arc_mfu, hdr, hash_lock); 5545 } else { 5546 ASSERT(!"invalid arc state"); 5547 } 5548 } 5549 5550 /* 5551 * This routine is called by dbuf_hold() to update the arc_access() state 5552 * which otherwise would be skipped for entries in the dbuf cache. 5553 */ 5554 void 5555 arc_buf_access(arc_buf_t *buf) 5556 { 5557 mutex_enter(&buf->b_evict_lock); 5558 arc_buf_hdr_t *hdr = buf->b_hdr; 5559 5560 /* 5561 * Avoid taking the hash_lock when possible as an optimization. 5562 * The header must be checked again under the hash_lock in order 5563 * to handle the case where it is concurrently being released. 5564 */ 5565 if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) { 5566 mutex_exit(&buf->b_evict_lock); 5567 return; 5568 } 5569 5570 kmutex_t *hash_lock = HDR_LOCK(hdr); 5571 mutex_enter(hash_lock); 5572 5573 if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) { 5574 mutex_exit(hash_lock); 5575 mutex_exit(&buf->b_evict_lock); 5576 ARCSTAT_BUMP(arcstat_access_skip); 5577 return; 5578 } 5579 5580 mutex_exit(&buf->b_evict_lock); 5581 5582 ASSERT(hdr->b_l1hdr.b_state == arc_mru || 5583 hdr->b_l1hdr.b_state == arc_mfu); 5584 5585 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 5586 arc_access(hdr, hash_lock); 5587 mutex_exit(hash_lock); 5588 5589 ARCSTAT_BUMP(arcstat_hits); 5590 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 5591 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits); 5592 } 5593 5594 /* a generic arc_read_done_func_t which you can use */ 5595 /* ARGSUSED */ 5596 void 5597 arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 5598 arc_buf_t *buf, void *arg) 5599 { 5600 if (buf == NULL) 5601 return; 5602 5603 bcopy(buf->b_data, arg, arc_buf_size(buf)); 5604 arc_buf_destroy(buf, arg); 5605 } 5606 5607 /* a generic arc_read_done_func_t */ 5608 void 5609 arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 5610 arc_buf_t *buf, void *arg) 5611 { 5612 arc_buf_t **bufp = arg; 5613 5614 if (buf == NULL) { 5615 ASSERT(zio == NULL || zio->io_error != 0); 5616 *bufp = NULL; 5617 } else { 5618 ASSERT(zio == NULL || zio->io_error == 0); 5619 *bufp = buf; 5620 ASSERT(buf->b_data != NULL); 5621 } 5622 } 5623 5624 static void 5625 arc_hdr_verify(arc_buf_hdr_t *hdr, const blkptr_t *bp) 5626 { 5627 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) { 5628 ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0); 5629 ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF); 5630 } else { 5631 if (HDR_COMPRESSION_ENABLED(hdr)) { 5632 ASSERT3U(arc_hdr_get_compress(hdr), ==, 5633 BP_GET_COMPRESS(bp)); 5634 } 5635 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp)); 5636 ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp)); 5637 ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp)); 5638 } 5639 } 5640 5641 /* 5642 * XXX this should be changed to return an error, and callers 5643 * re-read from disk on failure (on nondebug bits). 
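 *
 * One possible shape for that change (purely a sketch of the XXX
 * above, not an implemented interface): return the result of
 * zio_checksum_error_impl() to the caller and let arc_read() treat a
 * nonzero value as a cache miss, discarding the cached copy and
 * falling through to the normal disk read path.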
5644 */ 5645 static void 5646 arc_hdr_verify_checksum(spa_t *spa, arc_buf_hdr_t *hdr, const blkptr_t *bp) 5647 { 5648 arc_hdr_verify(hdr, bp); 5649 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 5650 return; 5651 int err = 0; 5652 abd_t *abd = NULL; 5653 if (BP_IS_ENCRYPTED(bp)) { 5654 if (HDR_HAS_RABD(hdr)) { 5655 abd = hdr->b_crypt_hdr.b_rabd; 5656 } 5657 } else if (HDR_COMPRESSION_ENABLED(hdr)) { 5658 abd = hdr->b_l1hdr.b_pabd; 5659 } 5660 if (abd != NULL) { 5661 /* 5662 * The offset is only used for labels, which are not 5663 * cached in the ARC, so it doesn't matter what we 5664 * pass for the offset parameter. 5665 */ 5666 int psize = HDR_GET_PSIZE(hdr); 5667 err = zio_checksum_error_impl(spa, bp, 5668 BP_GET_CHECKSUM(bp), abd, psize, 0, NULL); 5669 if (err != 0) { 5670 /* 5671 * Use abd_copy_to_buf() rather than 5672 * abd_borrow_buf_copy() so that we are sure to 5673 * include the buf in crash dumps. 5674 */ 5675 void *buf = kmem_alloc(psize, KM_SLEEP); 5676 abd_copy_to_buf(buf, abd, psize); 5677 panic("checksum of cached data doesn't match BP " 5678 "err=%u hdr=%p bp=%p abd=%p buf=%p", 5679 err, (void *)hdr, (void *)bp, (void *)abd, buf); 5680 } 5681 } 5682 } 5683 5684 static void 5685 arc_read_done(zio_t *zio) 5686 { 5687 blkptr_t *bp = zio->io_bp; 5688 arc_buf_hdr_t *hdr = zio->io_private; 5689 kmutex_t *hash_lock = NULL; 5690 arc_callback_t *callback_list; 5691 arc_callback_t *acb; 5692 boolean_t freeable = B_FALSE; 5693 5694 /* 5695 * The hdr was inserted into hash-table and removed from lists 5696 * prior to starting I/O. We should find this header, since 5697 * it's in the hash table, and it should be legit since it's 5698 * not possible to evict it during the I/O. The only possible 5699 * reason for it not to be found is if we were freed during the 5700 * read. 
5701 */ 5702 if (HDR_IN_HASH_TABLE(hdr)) { 5703 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); 5704 ASSERT3U(hdr->b_dva.dva_word[0], ==, 5705 BP_IDENTITY(zio->io_bp)->dva_word[0]); 5706 ASSERT3U(hdr->b_dva.dva_word[1], ==, 5707 BP_IDENTITY(zio->io_bp)->dva_word[1]); 5708 5709 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp, 5710 &hash_lock); 5711 5712 ASSERT((found == hdr && 5713 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 5714 (found == hdr && HDR_L2_READING(hdr))); 5715 ASSERT3P(hash_lock, !=, NULL); 5716 } 5717 5718 if (BP_IS_PROTECTED(bp)) { 5719 hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp); 5720 hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset; 5721 zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt, 5722 hdr->b_crypt_hdr.b_iv); 5723 5724 if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) { 5725 void *tmpbuf; 5726 5727 tmpbuf = abd_borrow_buf_copy(zio->io_abd, 5728 sizeof (zil_chain_t)); 5729 zio_crypt_decode_mac_zil(tmpbuf, 5730 hdr->b_crypt_hdr.b_mac); 5731 abd_return_buf(zio->io_abd, tmpbuf, 5732 sizeof (zil_chain_t)); 5733 } else { 5734 zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac); 5735 } 5736 } 5737 5738 if (zio->io_error == 0) { 5739 /* byteswap if necessary */ 5740 if (BP_SHOULD_BYTESWAP(zio->io_bp)) { 5741 if (BP_GET_LEVEL(zio->io_bp) > 0) { 5742 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64; 5743 } else { 5744 hdr->b_l1hdr.b_byteswap = 5745 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 5746 } 5747 } else { 5748 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 5749 } 5750 } 5751 5752 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED); 5753 if (l2arc_noprefetch && HDR_PREFETCH(hdr)) 5754 arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE); 5755 5756 callback_list = hdr->b_l1hdr.b_acb; 5757 ASSERT3P(callback_list, !=, NULL); 5758 5759 if (hash_lock && zio->io_error == 0 && 5760 hdr->b_l1hdr.b_state == arc_anon) { 5761 /* 5762 * Only call arc_access on anonymous buffers. This is because 5763 * if we've issued an I/O for an evicted buffer, we've already 5764 * called arc_access (to prevent any simultaneous readers from 5765 * getting confused). 5766 */ 5767 arc_access(hdr, hash_lock); 5768 } 5769 5770 /* 5771 * If a read request has a callback (i.e. acb_done is not NULL), then we 5772 * make a buf containing the data according to the parameters which were 5773 * passed in. The implementation of arc_buf_alloc_impl() ensures that we 5774 * aren't needlessly decompressing the data multiple times. 5775 */ 5776 int callback_cnt = 0; 5777 for (acb = callback_list; acb != NULL; acb = acb->acb_next) { 5778 if (!acb->acb_done) 5779 continue; 5780 5781 callback_cnt++; 5782 5783 if (zio->io_error != 0) 5784 continue; 5785 5786 int error = arc_buf_alloc_impl(hdr, zio->io_spa, 5787 &acb->acb_zb, acb->acb_private, acb->acb_encrypted, 5788 acb->acb_compressed, acb->acb_noauth, B_TRUE, 5789 &acb->acb_buf); 5790 5791 /* 5792 * Assert non-speculative zios didn't fail because an 5793 * encryption key wasn't loaded 5794 */ 5795 ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) || 5796 error != EACCES); 5797 5798 /* 5799 * If we failed to decrypt, report an error now (as the zio 5800 * layer would have done if it had done the transforms). 
5801 */ 5802 if (error == ECKSUM) { 5803 ASSERT(BP_IS_PROTECTED(bp)); 5804 error = SET_ERROR(EIO); 5805 if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) { 5806 spa_log_error(zio->io_spa, &acb->acb_zb); 5807 zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION, 5808 zio->io_spa, NULL, &acb->acb_zb, zio, 0, 0); 5809 } 5810 } 5811 5812 if (error != 0) { 5813 /* 5814 * Decompression failed. Set io_error 5815 * so that when we call acb_done (below), 5816 * we will indicate that the read failed. 5817 * Note that in the unusual case where one 5818 * callback is compressed and another 5819 * uncompressed, we will mark all of them 5820 * as failed, even though the uncompressed 5821 * one can't actually fail. In this case, 5822 * the hdr will not be anonymous, because 5823 * if there are multiple callbacks, it's 5824 * because multiple threads found the same 5825 * arc buf in the hash table. 5826 */ 5827 zio->io_error = error; 5828 } 5829 } 5830 5831 /* 5832 * If there are multiple callbacks, we must have the hash lock, 5833 * because the only way for multiple threads to find this hdr is 5834 * in the hash table. This ensures that if there are multiple 5835 * callbacks, the hdr is not anonymous. If it were anonymous, 5836 * we couldn't use arc_buf_destroy() in the error case below. 5837 */ 5838 ASSERT(callback_cnt < 2 || hash_lock != NULL); 5839 5840 hdr->b_l1hdr.b_acb = NULL; 5841 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5842 if (callback_cnt == 0) 5843 ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr)); 5844 5845 ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) || 5846 callback_list != NULL); 5847 5848 if (zio->io_error == 0) { 5849 arc_hdr_verify(hdr, zio->io_bp); 5850 } else { 5851 arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); 5852 if (hdr->b_l1hdr.b_state != arc_anon) 5853 arc_change_state(arc_anon, hdr, hash_lock); 5854 if (HDR_IN_HASH_TABLE(hdr)) 5855 buf_hash_remove(hdr); 5856 freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 5857 } 5858 5859 /* 5860 * Broadcast before we drop the hash_lock to avoid the possibility 5861 * that the hdr (and hence the cv) might be freed before we get to 5862 * the cv_broadcast(). 5863 */ 5864 cv_broadcast(&hdr->b_l1hdr.b_cv); 5865 5866 if (hash_lock != NULL) { 5867 mutex_exit(hash_lock); 5868 } else { 5869 /* 5870 * This block was freed while we waited for the read to 5871 * complete. It has been removed from the hash table and 5872 * moved to the anonymous state (so that it won't show up 5873 * in the cache). 5874 */ 5875 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 5876 freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 5877 } 5878 5879 /* execute each callback and free its structure */ 5880 while ((acb = callback_list) != NULL) { 5881 5882 if (acb->acb_done != NULL) { 5883 if (zio->io_error != 0 && acb->acb_buf != NULL) { 5884 /* 5885 * If arc_buf_alloc_impl() fails during 5886 * decompression, the buf will still be 5887 * allocated, and needs to be freed here. 5888 */ 5889 arc_buf_destroy(acb->acb_buf, acb->acb_private); 5890 acb->acb_buf = NULL; 5891 } 5892 acb->acb_done(zio, &zio->io_bookmark, zio->io_bp, 5893 acb->acb_buf, acb->acb_private); 5894 } 5895 5896 if (acb->acb_zio_dummy != NULL) { 5897 acb->acb_zio_dummy->io_error = zio->io_error; 5898 zio_nowait(acb->acb_zio_dummy); 5899 } 5900 5901 callback_list = acb->acb_next; 5902 kmem_free(acb, sizeof (arc_callback_t)); 5903 } 5904 5905 if (freeable) 5906 arc_hdr_destroy(hdr); 5907 } 5908 5909 /* 5910 * "Read" the block at the specified DVA (in bp) via the 5911 * cache. 
If the block is found in the cache, invoke the provided 5912 * callback immediately and return. Note that the `zio' parameter 5913 * in the callback will be NULL in this case, since no IO was 5914 * required. If the block is not in the cache, pass the read request 5915 * on to the spa with a substitute callback function, so that the 5916 * requested block will be added to the cache. 5917 * 5918 * If a read request arrives for a block that has a read in-progress, 5919 * either wait for the in-progress read to complete (and return the 5920 * results); or, if this is a read with a "done" func, add a record 5921 * to the read to invoke the "done" func when the read completes, 5922 * and return; or just return. 5923 * 5924 * arc_read_done() will invoke all the requested "done" functions 5925 * for readers of this block. 5926 */ 5927 int 5928 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_read_done_func_t *done, 5929 void *private, zio_priority_t priority, int zio_flags, 5930 arc_flags_t *arc_flags, const zbookmark_phys_t *zb) 5931 { 5932 arc_buf_hdr_t *hdr = NULL; 5933 kmutex_t *hash_lock = NULL; 5934 zio_t *rzio; 5935 uint64_t guid = spa_load_guid(spa); 5936 boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0; 5937 boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) && 5938 (zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0; 5939 boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) && 5940 (zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0; 5941 int rc = 0; 5942 5943 ASSERT(!BP_IS_EMBEDDED(bp) || 5944 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); 5945 5946 top: 5947 if (!BP_IS_EMBEDDED(bp)) { 5948 /* 5949 * Embedded BPs have no DVA and require no I/O to "read"; 5950 * an anonymous arc buf created below will back them instead. 5951 */ 5952 hdr = buf_hash_find(guid, bp, &hash_lock); 5953 } 5954 5955 /* 5956 * Determine if we have an L1 cache hit or a cache miss. For simplicity 5957 * we maintain encrypted data separately from compressed / uncompressed 5958 * data. If the user is requesting raw encrypted data and we don't have 5959 * that in the header we will read from disk to guarantee that we can 5960 * get it even if the encryption keys aren't loaded. 5961 */ 5962 if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) || 5963 (hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) { 5964 arc_buf_t *buf = NULL; 5965 *arc_flags |= ARC_FLAG_CACHED; 5966 5967 if (HDR_IO_IN_PROGRESS(hdr)) { 5968 zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head; 5969 5970 ASSERT3P(head_zio, !=, NULL); 5971 if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) && 5972 priority == ZIO_PRIORITY_SYNC_READ) { 5973 /* 5974 * This is a sync read that needs to wait for 5975 * an in-flight async read. Request that the 5976 * zio have its priority upgraded.
5977 */ 5978 zio_change_priority(head_zio, priority); 5979 DTRACE_PROBE1(arc__async__upgrade__sync, 5980 arc_buf_hdr_t *, hdr); 5981 ARCSTAT_BUMP(arcstat_async_upgrade_sync); 5982 } 5983 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { 5984 arc_hdr_clear_flags(hdr, 5985 ARC_FLAG_PREDICTIVE_PREFETCH); 5986 } 5987 5988 if (*arc_flags & ARC_FLAG_WAIT) { 5989 cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); 5990 mutex_exit(hash_lock); 5991 goto top; 5992 } 5993 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 5994 5995 if (done) { 5996 arc_callback_t *acb = NULL; 5997 5998 acb = kmem_zalloc(sizeof (arc_callback_t), 5999 KM_SLEEP); 6000 acb->acb_done = done; 6001 acb->acb_private = private; 6002 acb->acb_compressed = compressed_read; 6003 acb->acb_encrypted = encrypted_read; 6004 acb->acb_noauth = noauth_read; 6005 acb->acb_zb = *zb; 6006 if (pio != NULL) 6007 acb->acb_zio_dummy = zio_null(pio, 6008 spa, NULL, NULL, NULL, zio_flags); 6009 6010 ASSERT3P(acb->acb_done, !=, NULL); 6011 acb->acb_zio_head = head_zio; 6012 acb->acb_next = hdr->b_l1hdr.b_acb; 6013 hdr->b_l1hdr.b_acb = acb; 6014 mutex_exit(hash_lock); 6015 return (0); 6016 } 6017 mutex_exit(hash_lock); 6018 return (0); 6019 } 6020 6021 ASSERT(hdr->b_l1hdr.b_state == arc_mru || 6022 hdr->b_l1hdr.b_state == arc_mfu); 6023 6024 if (done) { 6025 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { 6026 /* 6027 * This is a demand read which does not have to 6028 * wait for i/o because we did a predictive 6029 * prefetch i/o for it, which has completed. 6030 */ 6031 DTRACE_PROBE1( 6032 arc__demand__hit__predictive__prefetch, 6033 arc_buf_hdr_t *, hdr); 6034 ARCSTAT_BUMP( 6035 arcstat_demand_hit_predictive_prefetch); 6036 arc_hdr_clear_flags(hdr, 6037 ARC_FLAG_PREDICTIVE_PREFETCH); 6038 } 6039 6040 if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) { 6041 ARCSTAT_BUMP( 6042 arcstat_demand_hit_prescient_prefetch); 6043 arc_hdr_clear_flags(hdr, 6044 ARC_FLAG_PRESCIENT_PREFETCH); 6045 } 6046 6047 ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp)); 6048 6049 arc_hdr_verify_checksum(spa, hdr, bp); 6050 6051 /* Get a buf with the desired data in it. */ 6052 rc = arc_buf_alloc_impl(hdr, spa, zb, private, 6053 encrypted_read, compressed_read, noauth_read, 6054 B_TRUE, &buf); 6055 if (rc == ECKSUM) { 6056 /* 6057 * Convert authentication and decryption errors 6058 * to EIO (and generate an ereport if needed) 6059 * before leaving the ARC. 
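 * (EACCES, meaning the encryption key was not loaded, is deliberately
 * not converted here; only speculative reads should ever see it, as
 * the assert below verifies.)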
6060 */ 6061 rc = SET_ERROR(EIO); 6062 if ((zio_flags & ZIO_FLAG_SPECULATIVE) == 0) { 6063 spa_log_error(spa, zb); 6064 zfs_ereport_post( 6065 FM_EREPORT_ZFS_AUTHENTICATION, 6066 spa, NULL, zb, NULL, 0, 0); 6067 } 6068 } 6069 if (rc != 0) { 6070 (void) remove_reference(hdr, hash_lock, 6071 private); 6072 arc_buf_destroy_impl(buf); 6073 buf = NULL; 6074 } 6075 /* assert any errors weren't due to unloaded keys */ 6076 ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) || 6077 rc != EACCES); 6078 } else if (*arc_flags & ARC_FLAG_PREFETCH && 6079 zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 6080 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); 6081 } 6082 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 6083 arc_access(hdr, hash_lock); 6084 if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH) 6085 arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH); 6086 if (*arc_flags & ARC_FLAG_L2CACHE) 6087 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 6088 mutex_exit(hash_lock); 6089 ARCSTAT_BUMP(arcstat_hits); 6090 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 6091 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 6092 data, metadata, hits); 6093 6094 if (done) 6095 done(NULL, zb, bp, buf, private); 6096 } else { 6097 uint64_t lsize = BP_GET_LSIZE(bp); 6098 uint64_t psize = BP_GET_PSIZE(bp); 6099 arc_callback_t *acb; 6100 vdev_t *vd = NULL; 6101 uint64_t addr = 0; 6102 boolean_t devw = B_FALSE; 6103 uint64_t size; 6104 abd_t *hdr_abd; 6105 6106 if (hdr == NULL) { 6107 /* this block is not in the cache */ 6108 arc_buf_hdr_t *exists = NULL; 6109 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 6110 hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, 6111 BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), type, 6112 encrypted_read); 6113 6114 if (!BP_IS_EMBEDDED(bp)) { 6115 hdr->b_dva = *BP_IDENTITY(bp); 6116 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 6117 exists = buf_hash_insert(hdr, &hash_lock); 6118 } 6119 if (exists != NULL) { 6120 /* somebody beat us to the hash insert */ 6121 mutex_exit(hash_lock); 6122 buf_discard_identity(hdr); 6123 arc_hdr_destroy(hdr); 6124 goto top; /* restart the IO request */ 6125 } 6126 } else { 6127 /* 6128 * This block is in the ghost cache, or encrypted data 6129 * was requested and we didn't have it. If it was 6130 * L2-only (and thus didn't have an L1 hdr), 6131 * we realloc the header to add an L1 hdr. 6132 */ 6133 if (!HDR_HAS_L1HDR(hdr)) { 6134 hdr = arc_hdr_realloc(hdr, hdr_l2only_cache, 6135 hdr_full_cache); 6136 } 6137 6138 if (GHOST_STATE(hdr->b_l1hdr.b_state)) { 6139 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 6140 ASSERT(!HDR_HAS_RABD(hdr)); 6141 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 6142 ASSERT0(zfs_refcount_count( 6143 &hdr->b_l1hdr.b_refcnt)); 6144 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 6145 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 6146 } else if (HDR_IO_IN_PROGRESS(hdr)) { 6147 /* 6148 * If this header already had an IO in progress 6149 * and we are performing another IO to fetch 6150 * encrypted data, we must wait until the first 6151 * IO completes so as not to confuse 6152 * arc_read_done(). This should be very rare 6153 * and so the performance impact shouldn't 6154 * matter. 6155 */ 6156 cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); 6157 mutex_exit(hash_lock); 6158 goto top; 6159 } 6160 6161 /* 6162 * This is a delicate dance that we play here. 6163 * This hdr might be in the ghost list, so we access 6164 * it to move it out of the ghost list before we 6165 * initiate the read. If it's a prefetch, then 6166 * it won't have a callback, so we'll remove the 6167 * reference that arc_buf_alloc_impl() created.
We 6168 * do this after we've called arc_access() to 6169 * avoid hitting an assert in remove_reference(). 6170 */ 6171 arc_access(hdr, hash_lock); 6172 arc_hdr_alloc_pabd(hdr, encrypted_read); 6173 } 6174 6175 if (encrypted_read) { 6176 ASSERT(HDR_HAS_RABD(hdr)); 6177 size = HDR_GET_PSIZE(hdr); 6178 hdr_abd = hdr->b_crypt_hdr.b_rabd; 6179 zio_flags |= ZIO_FLAG_RAW; 6180 } else { 6181 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 6182 size = arc_hdr_size(hdr); 6183 hdr_abd = hdr->b_l1hdr.b_pabd; 6184 6185 if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) { 6186 zio_flags |= ZIO_FLAG_RAW_COMPRESS; 6187 } 6188 6189 /* 6190 * For authenticated bp's, we do not ask the ZIO layer 6191 * to authenticate them since this will cause the entire 6192 * IO to fail if the key isn't loaded. Instead, we 6193 * defer authentication until arc_buf_fill(), which will 6194 * verify the data when the key is available. 6195 */ 6196 if (BP_IS_AUTHENTICATED(bp)) 6197 zio_flags |= ZIO_FLAG_RAW_ENCRYPT; 6198 } 6199 6200 if (*arc_flags & ARC_FLAG_PREFETCH && 6201 zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) 6202 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); 6203 if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH) 6204 arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH); 6205 6206 if (*arc_flags & ARC_FLAG_L2CACHE) 6207 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 6208 if (BP_IS_AUTHENTICATED(bp)) 6209 arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH); 6210 if (BP_GET_LEVEL(bp) > 0) 6211 arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT); 6212 if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH) 6213 arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH); 6214 ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state)); 6215 6216 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 6217 acb->acb_done = done; 6218 acb->acb_private = private; 6219 acb->acb_compressed = compressed_read; 6220 acb->acb_encrypted = encrypted_read; 6221 acb->acb_noauth = noauth_read; 6222 acb->acb_zb = *zb; 6223 6224 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 6225 hdr->b_l1hdr.b_acb = acb; 6226 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 6227 6228 if (HDR_HAS_L2HDR(hdr) && 6229 (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) { 6230 devw = hdr->b_l2hdr.b_dev->l2ad_writing; 6231 addr = hdr->b_l2hdr.b_daddr; 6232 /* 6233 * Lock out L2ARC device removal. 6234 */ 6235 if (vdev_is_dead(vd) || 6236 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 6237 vd = NULL; 6238 } 6239 6240 /* 6241 * We count both async reads and scrub IOs as asynchronous so 6242 * that both can be upgraded in the event of a cache hit while 6243 * the read IO is still in-flight. 6244 */ 6245 if (priority == ZIO_PRIORITY_ASYNC_READ || 6246 priority == ZIO_PRIORITY_SCRUB) 6247 arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); 6248 else 6249 arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); 6250 6251 /* 6252 * At this point, we have a level 1 cache miss. Try again in 6253 * L2ARC if possible. 6254 */ 6255 ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize); 6256 6257 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 6258 uint64_t, lsize, zbookmark_phys_t *, zb); 6259 ARCSTAT_BUMP(arcstat_misses); 6260 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 6261 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 6262 data, metadata, misses); 6263 6264 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 6265 /* 6266 * Read from the L2ARC if the following are true: 6267 * 1. The L2ARC vdev was previously cached. 6268 * 2. This buffer still has L2ARC metadata. 6269 * 3. This buffer isn't currently writing to the L2ARC. 6270 * 4. 
The L2ARC entry wasn't evicted, which may 6271 * also have invalidated the vdev. 6272 * 5. This isn't a prefetch while l2arc_noprefetch is set. 6273 */ 6274 if (HDR_HAS_L2HDR(hdr) && 6275 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 6276 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 6277 l2arc_read_callback_t *cb; 6278 abd_t *abd; 6279 uint64_t asize; 6280 6281 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 6282 ARCSTAT_BUMP(arcstat_l2_hits); 6283 6284 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 6285 KM_SLEEP); 6286 cb->l2rcb_hdr = hdr; 6287 cb->l2rcb_bp = *bp; 6288 cb->l2rcb_zb = *zb; 6289 cb->l2rcb_flags = zio_flags; 6290 6291 asize = vdev_psize_to_asize(vd, size); 6292 if (asize != size) { 6293 abd = abd_alloc_for_io(asize, 6294 HDR_ISTYPE_METADATA(hdr)); 6295 cb->l2rcb_abd = abd; 6296 } else { 6297 abd = hdr_abd; 6298 } 6299 6300 ASSERT(addr >= VDEV_LABEL_START_SIZE && 6301 addr + asize <= vd->vdev_psize - 6302 VDEV_LABEL_END_SIZE); 6303 6304 /* 6305 * l2arc read. The SCL_L2ARC lock will be 6306 * released by l2arc_read_done(). 6307 * Issue a null zio if the underlying buffer 6308 * was squashed to zero size by compression. 6309 */ 6310 ASSERT3U(arc_hdr_get_compress(hdr), !=, 6311 ZIO_COMPRESS_EMPTY); 6312 rzio = zio_read_phys(pio, vd, addr, 6313 asize, abd, 6314 ZIO_CHECKSUM_OFF, 6315 l2arc_read_done, cb, priority, 6316 zio_flags | ZIO_FLAG_DONT_CACHE | 6317 ZIO_FLAG_CANFAIL | 6318 ZIO_FLAG_DONT_PROPAGATE | 6319 ZIO_FLAG_DONT_RETRY, B_FALSE); 6320 acb->acb_zio_head = rzio; 6321 6322 if (hash_lock != NULL) 6323 mutex_exit(hash_lock); 6324 6325 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 6326 zio_t *, rzio); 6327 ARCSTAT_INCR(arcstat_l2_read_bytes, 6328 HDR_GET_PSIZE(hdr)); 6329 6330 if (*arc_flags & ARC_FLAG_NOWAIT) { 6331 zio_nowait(rzio); 6332 return (0); 6333 } 6334 6335 ASSERT(*arc_flags & ARC_FLAG_WAIT); 6336 if (zio_wait(rzio) == 0) 6337 return (0); 6338 6339 /* l2arc read error; goto zio_read() */ 6340 if (hash_lock != NULL) 6341 mutex_enter(hash_lock); 6342 } else { 6343 DTRACE_PROBE1(l2arc__miss, 6344 arc_buf_hdr_t *, hdr); 6345 ARCSTAT_BUMP(arcstat_l2_misses); 6346 if (HDR_L2_WRITING(hdr)) 6347 ARCSTAT_BUMP(arcstat_l2_rw_clash); 6348 spa_config_exit(spa, SCL_L2ARC, vd); 6349 } 6350 } else { 6351 if (vd != NULL) 6352 spa_config_exit(spa, SCL_L2ARC, vd); 6353 if (l2arc_ndev != 0) { 6354 DTRACE_PROBE1(l2arc__miss, 6355 arc_buf_hdr_t *, hdr); 6356 ARCSTAT_BUMP(arcstat_l2_misses); 6357 } 6358 } 6359 6360 rzio = zio_read(pio, spa, bp, hdr_abd, size, 6361 arc_read_done, hdr, priority, zio_flags, zb); 6362 acb->acb_zio_head = rzio; 6363 6364 if (hash_lock != NULL) 6365 mutex_exit(hash_lock); 6366 6367 if (*arc_flags & ARC_FLAG_WAIT) 6368 return (zio_wait(rzio)); 6369 6370 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 6371 zio_nowait(rzio); 6372 } 6373 return (rc); 6374 } 6375 6376 /* 6377 * Notify the ARC that a block was freed, and thus will never be used again. 6378 */ 6379 void 6380 arc_freed(spa_t *spa, const blkptr_t *bp) 6381 { 6382 arc_buf_hdr_t *hdr; 6383 kmutex_t *hash_lock; 6384 uint64_t guid = spa_load_guid(spa); 6385 6386 ASSERT(!BP_IS_EMBEDDED(bp)); 6387 6388 hdr = buf_hash_find(guid, bp, &hash_lock); 6389 if (hdr == NULL) 6390 return; 6391 6392 /* 6393 * We might be trying to free a block that is still doing I/O 6394 * (i.e. prefetch) or has a reference (i.e. a dedup-ed, 6395 * dmu_sync-ed block). If this block is being prefetched, then it 6396 * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr 6397 * until the I/O completes.
A block may also have a reference if it is 6398 * part of a dedup-ed, dmu_sync-ed write. The dmu_sync() function would 6399 * have written the new block to its final resting place on disk but 6400 * without the dedup flag set. This would have left the hdr in the MRU 6401 * state and discoverable. When the txg finally syncs, it detects that 6402 * the block was overridden in open context and issues an override I/O. 6403 * Since this is a dedup block, the override I/O will determine if the 6404 * block is already in the DDT. If so, then it will replace the io_bp 6405 * with the bp from the DDT and allow the I/O to finish. When the I/O 6406 * reaches the done callback, dbuf_write_override_done, it will 6407 * check to see if the io_bp and io_bp_override are identical. 6408 * If they are not, then it indicates that the bp was replaced with 6409 * the bp in the DDT and the override bp is freed. This allows 6410 * us to arrive here with a reference on a block that is being 6411 * freed. So if we have an I/O in progress, or a reference to 6412 * this hdr, then we don't destroy the hdr. 6413 */ 6414 if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) && 6415 zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) { 6416 arc_change_state(arc_anon, hdr, hash_lock); 6417 arc_hdr_destroy(hdr); 6418 mutex_exit(hash_lock); 6419 } else { 6420 mutex_exit(hash_lock); 6421 } 6422 6423 } 6424 6425 /* 6426 * Release this buffer from the cache, making it an anonymous buffer. This 6427 * must be done after a read and prior to modifying the buffer contents. 6428 * If the buffer has more than one reference, we must make 6429 * a new hdr for the buffer. 6430 */ 6431 void 6432 arc_release(arc_buf_t *buf, void *tag) 6433 { 6434 arc_buf_hdr_t *hdr = buf->b_hdr; 6435 6436 /* 6437 * It would be nice to assert that if it's DMU metadata (level > 6438 * 0 || it's the dnode file), then it must be syncing context. 6439 * But we don't know that information at this level. 6440 */ 6441 6442 mutex_enter(&buf->b_evict_lock); 6443 6444 ASSERT(HDR_HAS_L1HDR(hdr)); 6445 6446 /* 6447 * We don't grab the hash lock prior to this check, because if 6448 * the buffer's header is in the arc_anon state, it won't be 6449 * linked into the hash table. 6450 */ 6451 if (hdr->b_l1hdr.b_state == arc_anon) { 6452 mutex_exit(&buf->b_evict_lock); 6453 /* 6454 * If we are called from dmu_convert_mdn_block_to_raw(), 6455 * a write might be in progress. This is OK because 6456 * the caller won't change the content of this buffer, 6457 * only the flags (via arc_convert_to_raw()). 6458 */ 6459 /* ASSERT(!HDR_IO_IN_PROGRESS(hdr)); */ 6460 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 6461 ASSERT(!HDR_HAS_L2HDR(hdr)); 6462 ASSERT(HDR_EMPTY(hdr)); 6463 6464 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 6465 ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1); 6466 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node)); 6467 6468 hdr->b_l1hdr.b_arc_access = 0; 6469 6470 /* 6471 * If the buf is being overridden, then it may already 6472 * have a hdr that is not empty. 6473 */ 6474 buf_discard_identity(hdr); 6475 arc_buf_thaw(buf); 6476 6477 return; 6478 } 6479 6480 kmutex_t *hash_lock = HDR_LOCK(hdr); 6481 mutex_enter(hash_lock); 6482 6483 /* 6484 * This assignment is only valid as long as the hash_lock is 6485 * held; we must be careful not to reference state or the 6486 * b_state field after dropping the lock.
6487 */ 6488 arc_state_t *state = hdr->b_l1hdr.b_state; 6489 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 6490 ASSERT3P(state, !=, arc_anon); 6491 6492 /* this buffer is not on any list */ 6493 ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0); 6494 6495 if (HDR_HAS_L2HDR(hdr)) { 6496 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx); 6497 6498 /* 6499 * We have to recheck this conditional again now that 6500 * we're holding the l2ad_mtx to prevent a race with 6501 * another thread which might be concurrently calling 6502 * l2arc_evict(). In that case, l2arc_evict() might have 6503 * destroyed the header's L2 portion as we were waiting 6504 * to acquire the l2ad_mtx. 6505 */ 6506 if (HDR_HAS_L2HDR(hdr)) 6507 arc_hdr_l2hdr_destroy(hdr); 6508 6509 mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx); 6510 } 6511 6512 /* 6513 * Do we have more than one buf? 6514 */ 6515 if (hdr->b_l1hdr.b_bufcnt > 1) { 6516 arc_buf_hdr_t *nhdr; 6517 uint64_t spa = hdr->b_spa; 6518 uint64_t psize = HDR_GET_PSIZE(hdr); 6519 uint64_t lsize = HDR_GET_LSIZE(hdr); 6520 boolean_t protected = HDR_PROTECTED(hdr); 6521 enum zio_compress compress = arc_hdr_get_compress(hdr); 6522 arc_buf_contents_t type = arc_buf_type(hdr); 6523 VERIFY3U(hdr->b_type, ==, type); 6524 6525 ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL); 6526 (void) remove_reference(hdr, hash_lock, tag); 6527 6528 if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) { 6529 ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); 6530 ASSERT(ARC_BUF_LAST(buf)); 6531 } 6532 6533 /* 6534 * Pull the data off of this hdr and attach it to 6535 * a new anonymous hdr. Also find the last buffer 6536 * in the hdr's buffer list. 6537 */ 6538 arc_buf_t *lastbuf = arc_buf_remove(hdr, buf); 6539 ASSERT3P(lastbuf, !=, NULL); 6540 6541 /* 6542 * If the current arc_buf_t and the hdr are sharing their data 6543 * buffer, then we must stop sharing that block. 6544 */ 6545 if (arc_buf_is_shared(buf)) { 6546 ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); 6547 VERIFY(!arc_buf_is_shared(lastbuf)); 6548 6549 /* 6550 * First, sever the block sharing relationship between 6551 * buf and the arc_buf_hdr_t. 6552 */ 6553 arc_unshare_buf(hdr, buf); 6554 6555 /* 6556 * Now we need to recreate the hdr's b_pabd. Since we 6557 * have lastbuf handy, we try to share with it, but if 6558 * we can't then we allocate a new b_pabd and copy the 6559 * data from buf into it. 6560 */ 6561 if (arc_can_share(hdr, lastbuf)) { 6562 arc_share_buf(hdr, lastbuf); 6563 } else { 6564 arc_hdr_alloc_pabd(hdr, B_FALSE); 6565 abd_copy_from_buf(hdr->b_l1hdr.b_pabd, 6566 buf->b_data, psize); 6567 } 6568 VERIFY3P(lastbuf->b_data, !=, NULL); 6569 } else if (HDR_SHARED_DATA(hdr)) { 6570 /* 6571 * Uncompressed shared buffers are always at the end 6572 * of the list. Compressed buffers don't have the 6573 * same requirements. This makes it hard to 6574 * simply assert that the lastbuf is shared so 6575 * we rely on the hdr's compression flags to determine 6576 * if we have a compressed, shared buffer. 
6577 */ 6578 ASSERT(arc_buf_is_shared(lastbuf) || 6579 arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF); 6580 ASSERT(!ARC_BUF_SHARED(buf)); 6581 } 6582 ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr)); 6583 ASSERT3P(state, !=, arc_l2c_only); 6584 6585 (void) zfs_refcount_remove_many(&state->arcs_size, 6586 arc_buf_size(buf), buf); 6587 6588 if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { 6589 ASSERT3P(state, !=, arc_l2c_only); 6590 (void) zfs_refcount_remove_many( 6591 &state->arcs_esize[type], 6592 arc_buf_size(buf), buf); 6593 } 6594 6595 hdr->b_l1hdr.b_bufcnt -= 1; 6596 if (ARC_BUF_ENCRYPTED(buf)) 6597 hdr->b_crypt_hdr.b_ebufcnt -= 1; 6598 6599 arc_cksum_verify(buf); 6600 arc_buf_unwatch(buf); 6601 6602 /* if this is the last uncompressed buf, free the checksum */ 6603 if (!arc_hdr_has_uncompressed_buf(hdr)) 6604 arc_cksum_free(hdr); 6605 6606 mutex_exit(hash_lock); 6607 6608 /* 6609 * Allocate a new hdr. The new hdr will contain a b_pabd 6610 * buffer which will be freed in arc_write(). 6611 */ 6612 nhdr = arc_hdr_alloc(spa, psize, lsize, protected, 6613 compress, type, HDR_HAS_RABD(hdr)); 6614 ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL); 6615 ASSERT0(nhdr->b_l1hdr.b_bufcnt); 6616 ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt)); 6617 VERIFY3U(nhdr->b_type, ==, type); 6618 ASSERT(!HDR_SHARED_DATA(nhdr)); 6619 6620 nhdr->b_l1hdr.b_buf = buf; 6621 nhdr->b_l1hdr.b_bufcnt = 1; 6622 if (ARC_BUF_ENCRYPTED(buf)) 6623 nhdr->b_crypt_hdr.b_ebufcnt = 1; 6624 (void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag); 6625 buf->b_hdr = nhdr; 6626 6627 mutex_exit(&buf->b_evict_lock); 6628 (void) zfs_refcount_add_many(&arc_anon->arcs_size, 6629 arc_buf_size(buf), buf); 6630 } else { 6631 mutex_exit(&buf->b_evict_lock); 6632 ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1); 6633 /* protected by hash lock, or hdr is on arc_anon */ 6634 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 6635 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 6636 arc_change_state(arc_anon, hdr, hash_lock); 6637 hdr->b_l1hdr.b_arc_access = 0; 6638 6639 mutex_exit(hash_lock); 6640 buf_discard_identity(hdr); 6641 arc_buf_thaw(buf); 6642 } 6643 } 6644 6645 int 6646 arc_released(arc_buf_t *buf) 6647 { 6648 int released; 6649 6650 mutex_enter(&buf->b_evict_lock); 6651 released = (buf->b_data != NULL && 6652 buf->b_hdr->b_l1hdr.b_state == arc_anon); 6653 mutex_exit(&buf->b_evict_lock); 6654 return (released); 6655 } 6656 6657 #ifdef ZFS_DEBUG 6658 int 6659 arc_referenced(arc_buf_t *buf) 6660 { 6661 int referenced; 6662 6663 mutex_enter(&buf->b_evict_lock); 6664 referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt)); 6665 mutex_exit(&buf->b_evict_lock); 6666 return (referenced); 6667 } 6668 #endif 6669 6670 static void 6671 arc_write_ready(zio_t *zio) 6672 { 6673 arc_write_callback_t *callback = zio->io_private; 6674 arc_buf_t *buf = callback->awcb_buf; 6675 arc_buf_hdr_t *hdr = buf->b_hdr; 6676 blkptr_t *bp = zio->io_bp; 6677 uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp); 6678 6679 ASSERT(HDR_HAS_L1HDR(hdr)); 6680 ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt)); 6681 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 6682 6683 /* 6684 * If we're reexecuting this zio because the pool suspended, then 6685 * clean up any state that was previously set the first time the 6686 * callback was invoked.
6687 */ 6688 if (zio->io_flags & ZIO_FLAG_REEXECUTED) { 6689 arc_cksum_free(hdr); 6690 arc_buf_unwatch(buf); 6691 if (hdr->b_l1hdr.b_pabd != NULL) { 6692 if (arc_buf_is_shared(buf)) { 6693 arc_unshare_buf(hdr, buf); 6694 } else { 6695 arc_hdr_free_pabd(hdr, B_FALSE); 6696 } 6697 } 6698 6699 if (HDR_HAS_RABD(hdr)) 6700 arc_hdr_free_pabd(hdr, B_TRUE); 6701 } 6702 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 6703 ASSERT(!HDR_HAS_RABD(hdr)); 6704 ASSERT(!HDR_SHARED_DATA(hdr)); 6705 ASSERT(!arc_buf_is_shared(buf)); 6706 6707 callback->awcb_ready(zio, buf, callback->awcb_private); 6708 6709 if (HDR_IO_IN_PROGRESS(hdr)) 6710 ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED); 6711 6712 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 6713 6714 if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr)) 6715 hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp)); 6716 6717 if (BP_IS_PROTECTED(bp)) { 6718 /* ZIL blocks are written through zio_rewrite */ 6719 ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG); 6720 ASSERT(HDR_PROTECTED(hdr)); 6721 6722 if (BP_SHOULD_BYTESWAP(bp)) { 6723 if (BP_GET_LEVEL(bp) > 0) { 6724 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64; 6725 } else { 6726 hdr->b_l1hdr.b_byteswap = 6727 DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 6728 } 6729 } else { 6730 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 6731 } 6732 6733 hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp); 6734 hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset; 6735 zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt, 6736 hdr->b_crypt_hdr.b_iv); 6737 zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac); 6738 } 6739 6740 /* 6741 * If this block was written for raw encryption but the zio layer 6742 * ended up only authenticating it, adjust the buffer flags now. 6743 */ 6744 if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) { 6745 arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH); 6746 buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED; 6747 if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF) 6748 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; 6749 } else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) { 6750 buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED; 6751 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; 6752 } 6753 6754 /* this must be done after the buffer flags are adjusted */ 6755 arc_cksum_compute(buf); 6756 6757 enum zio_compress compress; 6758 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) { 6759 compress = ZIO_COMPRESS_OFF; 6760 } else { 6761 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp)); 6762 compress = BP_GET_COMPRESS(bp); 6763 } 6764 HDR_SET_PSIZE(hdr, psize); 6765 arc_hdr_set_compress(hdr, compress); 6766 6767 if (zio->io_error != 0 || psize == 0) 6768 goto out; 6769 6770 /* 6771 * Fill the hdr with data. If the buffer is encrypted we have no choice 6772 * but to copy the data into b_rabd. If the hdr is compressed, the data 6773 * we want is available from the zio, otherwise we can take it from 6774 * the buf. 6775 * 6776 * We might be able to share the buf's data with the hdr here. However, 6777 * doing so would cause the ARC to be full of linear ABDs if we write a 6778 * lot of shareable data. As a compromise, we check whether scattered 6779 * ABDs are allowed, and assume that if they are then the user wants 6780 * the ARC to be primarily filled with them regardless of the data being 6781 * written. Therefore, if they're allowed then we allocate one and copy 6782 * the data into it; otherwise, we share the data directly if we can. 
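 *
 * To recap the cases handled below (a summary added for clarity, not
 * original text): an encrypted buf always copies io_abd into b_rabd;
 * otherwise, if scattered ABDs are enabled or the hdr cannot share
 * with the buf, we copy (encrypted BPs into b_rabd, compressed data
 * from io_abd into b_pabd, uncompressed data from b_data into
 * b_pabd); only as a last resort do we share the buf's data with the
 * hdr.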
6783 */ 6784 if (ARC_BUF_ENCRYPTED(buf)) { 6785 ASSERT3U(psize, >, 0); 6786 ASSERT(ARC_BUF_COMPRESSED(buf)); 6787 arc_hdr_alloc_pabd(hdr, B_TRUE); 6788 abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize); 6789 } else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) { 6790 /* 6791 * Ideally, we would always copy the io_abd into b_pabd, but the 6792 * user may have disabled compressed ARC, thus we must check the 6793 * hdr's compression setting rather than the io_bp's. 6794 */ 6795 if (BP_IS_ENCRYPTED(bp)) { 6796 ASSERT3U(psize, >, 0); 6797 arc_hdr_alloc_pabd(hdr, B_TRUE); 6798 abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize); 6799 } else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF && 6800 !ARC_BUF_COMPRESSED(buf)) { 6801 ASSERT3U(psize, >, 0); 6802 arc_hdr_alloc_pabd(hdr, B_FALSE); 6803 abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize); 6804 } else { 6805 ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr)); 6806 arc_hdr_alloc_pabd(hdr, B_FALSE); 6807 abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data, 6808 arc_buf_size(buf)); 6809 } 6810 } else { 6811 ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd)); 6812 ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf)); 6813 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 6814 arc_share_buf(hdr, buf); 6815 } 6816 6817 out: 6818 arc_hdr_verify(hdr, bp); 6819 } 6820 6821 static void 6822 arc_write_children_ready(zio_t *zio) 6823 { 6824 arc_write_callback_t *callback = zio->io_private; 6825 arc_buf_t *buf = callback->awcb_buf; 6826 6827 callback->awcb_children_ready(zio, buf, callback->awcb_private); 6828 } 6829 6830 /* 6831 * The SPA calls this callback for each physical write that happens on behalf 6832 * of a logical write. See the comment in dbuf_write_physdone() for details. 6833 */ 6834 static void 6835 arc_write_physdone(zio_t *zio) 6836 { 6837 arc_write_callback_t *cb = zio->io_private; 6838 if (cb->awcb_physdone != NULL) 6839 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); 6840 } 6841 6842 static void 6843 arc_write_done(zio_t *zio) 6844 { 6845 arc_write_callback_t *callback = zio->io_private; 6846 arc_buf_t *buf = callback->awcb_buf; 6847 arc_buf_hdr_t *hdr = buf->b_hdr; 6848 6849 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 6850 6851 if (zio->io_error == 0) { 6852 arc_hdr_verify(hdr, zio->io_bp); 6853 6854 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 6855 buf_discard_identity(hdr); 6856 } else { 6857 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 6858 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 6859 } 6860 } else { 6861 ASSERT(HDR_EMPTY(hdr)); 6862 } 6863 6864 /* 6865 * If the block to be written was all-zero or compressed enough to be 6866 * embedded in the BP, no write was performed so there will be no 6867 * dva/birth/checksum. The buffer must therefore remain anonymous 6868 * (and uncached). 6869 */ 6870 if (!HDR_EMPTY(hdr)) { 6871 arc_buf_hdr_t *exists; 6872 kmutex_t *hash_lock; 6873 6874 ASSERT3U(zio->io_error, ==, 0); 6875 6876 arc_cksum_verify(buf); 6877 6878 exists = buf_hash_insert(hdr, &hash_lock); 6879 if (exists != NULL) { 6880 /* 6881 * This can only happen if we overwrite for 6882 * sync-to-convergence, because we remove 6883 * buffers from the hash table when we arc_free(). 
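 *
 * A condensed sketch of the three cases handled below:
 * - ZIO_FLAG_IO_REWRITE: sync-to-convergence; the pre-existing header
 *   is destroyed and ours is inserted in its place;
 * - ZIO_FLAG_NOPWRITE: the data was identical, so the original block
 *   pointer was left intact;
 * - otherwise: dedup, i.e. identical anonymous buffers written in the
 *   same txg.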
6884 */ 6885 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 6886 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 6887 panic("bad overwrite, hdr=%p exists=%p", 6888 (void *)hdr, (void *)exists); 6889 ASSERT(zfs_refcount_is_zero( 6890 &exists->b_l1hdr.b_refcnt)); 6891 arc_change_state(arc_anon, exists, hash_lock); 6892 mutex_exit(hash_lock); 6893 arc_hdr_destroy(exists); 6894 exists = buf_hash_insert(hdr, &hash_lock); 6895 ASSERT3P(exists, ==, NULL); 6896 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { 6897 /* nopwrite */ 6898 ASSERT(zio->io_prop.zp_nopwrite); 6899 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 6900 panic("bad nopwrite, hdr=%p exists=%p", 6901 (void *)hdr, (void *)exists); 6902 } else { 6903 /* Dedup */ 6904 ASSERT(hdr->b_l1hdr.b_bufcnt == 1); 6905 ASSERT(hdr->b_l1hdr.b_state == arc_anon); 6906 ASSERT(BP_GET_DEDUP(zio->io_bp)); 6907 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 6908 } 6909 } 6910 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 6911 /* if it's not anon, we are doing a scrub */ 6912 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon) 6913 arc_access(hdr, hash_lock); 6914 mutex_exit(hash_lock); 6915 } else { 6916 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 6917 } 6918 6919 ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 6920 callback->awcb_done(zio, buf, callback->awcb_private); 6921 6922 abd_put(zio->io_abd); 6923 kmem_free(callback, sizeof (arc_write_callback_t)); 6924 } 6925 6926 zio_t * 6927 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 6928 boolean_t l2arc, const zio_prop_t *zp, arc_write_done_func_t *ready, 6929 arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone, 6930 arc_write_done_func_t *done, void *private, zio_priority_t priority, 6931 int zio_flags, const zbookmark_phys_t *zb) 6932 { 6933 arc_buf_hdr_t *hdr = buf->b_hdr; 6934 arc_write_callback_t *callback; 6935 zio_t *zio; 6936 zio_prop_t localprop = *zp; 6937 6938 ASSERT3P(ready, !=, NULL); 6939 ASSERT3P(done, !=, NULL); 6940 ASSERT(!HDR_IO_ERROR(hdr)); 6941 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 6942 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 6943 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); 6944 if (l2arc) 6945 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 6946 6947 if (ARC_BUF_ENCRYPTED(buf)) { 6948 ASSERT(ARC_BUF_COMPRESSED(buf)); 6949 localprop.zp_encrypt = B_TRUE; 6950 localprop.zp_compress = HDR_GET_COMPRESS(hdr); 6951 /* CONSTCOND */ 6952 localprop.zp_byteorder = 6953 (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ? 
6954 ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER; 6955 bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt, 6956 ZIO_DATA_SALT_LEN); 6957 bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv, 6958 ZIO_DATA_IV_LEN); 6959 bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac, 6960 ZIO_DATA_MAC_LEN); 6961 if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) { 6962 localprop.zp_nopwrite = B_FALSE; 6963 localprop.zp_copies = 6964 MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1); 6965 } 6966 zio_flags |= ZIO_FLAG_RAW; 6967 } else if (ARC_BUF_COMPRESSED(buf)) { 6968 ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf)); 6969 localprop.zp_compress = HDR_GET_COMPRESS(hdr); 6970 zio_flags |= ZIO_FLAG_RAW_COMPRESS; 6971 } 6972 6973 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 6974 callback->awcb_ready = ready; 6975 callback->awcb_children_ready = children_ready; 6976 callback->awcb_physdone = physdone; 6977 callback->awcb_done = done; 6978 callback->awcb_private = private; 6979 callback->awcb_buf = buf; 6980 6981 /* 6982 * The hdr's b_pabd is now stale, free it now. A new data block 6983 * will be allocated when the zio pipeline calls arc_write_ready(). 6984 */ 6985 if (hdr->b_l1hdr.b_pabd != NULL) { 6986 /* 6987 * If the buf is currently sharing the data block with 6988 * the hdr then we need to break that relationship here. 6989 * The hdr will remain with a NULL data pointer and the 6990 * buf will take sole ownership of the block. 6991 */ 6992 if (arc_buf_is_shared(buf)) { 6993 arc_unshare_buf(hdr, buf); 6994 } else { 6995 arc_hdr_free_pabd(hdr, B_FALSE); 6996 } 6997 VERIFY3P(buf->b_data, !=, NULL); 6998 } 6999 7000 if (HDR_HAS_RABD(hdr)) 7001 arc_hdr_free_pabd(hdr, B_TRUE); 7002 7003 if (!(zio_flags & ZIO_FLAG_RAW)) 7004 arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF); 7005 7006 ASSERT(!arc_buf_is_shared(buf)); 7007 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 7008 7009 zio = zio_write(pio, spa, txg, bp, 7010 abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)), 7011 HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready, 7012 (children_ready != NULL) ? arc_write_children_ready : NULL, 7013 arc_write_physdone, arc_write_done, callback, 7014 priority, zio_flags, zb); 7015 7016 return (zio); 7017 } 7018 7019 static int 7020 arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg) 7021 { 7022 #ifdef _KERNEL 7023 uint64_t available_memory = ptob(freemem); 7024 7025 #if defined(__i386) 7026 available_memory = 7027 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 7028 #endif 7029 7030 if (freemem > physmem * arc_lotsfree_percent / 100) 7031 return (0); 7032 7033 if (txg > spa->spa_lowmem_last_txg) { 7034 spa->spa_lowmem_last_txg = txg; 7035 spa->spa_lowmem_page_load = 0; 7036 } 7037 /* 7038 * If we are in pageout, we know that memory is already tight, 7039 * the arc is already going to be evicting, so we just want to 7040 * continue to let page writes occur as quickly as possible. 
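 *
 * As a hedged worked example (the figures are illustrative, not from
 * this file): with ptob(minfree) = 64 MB and available_memory = 1 GB,
 * pageout may accumulate MAX(64 MB, 1 GB) / 4 = 256 MB of
 * spa_lowmem_page_load before we return ERESTART to back it off.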
7041 */ 7042 if (curproc == proc_pageout) { 7043 if (spa->spa_lowmem_page_load > 7044 MAX(ptob(minfree), available_memory) / 4) 7045 return (SET_ERROR(ERESTART)); 7046 /* Note: reserve is inflated, so we deflate */ 7047 atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8); 7048 return (0); 7049 } else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) { 7050 /* memory is low, delay before restarting */ 7051 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 7052 return (SET_ERROR(EAGAIN)); 7053 } 7054 spa->spa_lowmem_page_load = 0; 7055 #endif /* _KERNEL */ 7056 return (0); 7057 } 7058 7059 void 7060 arc_tempreserve_clear(uint64_t reserve) 7061 { 7062 atomic_add_64(&arc_tempreserve, -reserve); 7063 ASSERT((int64_t)arc_tempreserve >= 0); 7064 } 7065 7066 int 7067 arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg) 7068 { 7069 int error; 7070 uint64_t anon_size; 7071 7072 if (reserve > arc_c/4 && !arc_no_grow) 7073 arc_c = MIN(arc_c_max, reserve * 4); 7074 if (reserve > arc_c) 7075 return (SET_ERROR(ENOMEM)); 7076 7077 /* 7078 * Don't count loaned bufs as in flight dirty data to prevent long 7079 * network delays from blocking transactions that are ready to be 7080 * assigned to a txg. 7081 */ 7082 7083 /* assert that it has not wrapped around */ 7084 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0); 7085 7086 anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) - 7087 arc_loaned_bytes), 0); 7088 7089 /* 7090 * Writes will almost always require additional memory allocations 7091 * in order to compress/encrypt/etc the data. We therefore need to 7092 * make sure that there is sufficient available memory for this. 7093 */ 7094 error = arc_memory_throttle(spa, reserve, txg); 7095 if (error != 0) 7096 return (error); 7097 7098 /* 7099 * Throttle writes when the amount of dirty data in the cache 7100 * gets too large. We try to keep the cache less than half full 7101 * of dirty blocks so that our sync times don't grow too large. 7102 * 7103 * In the case of one pool being built on another pool, we want 7104 * to make sure we don't end up throttling the lower (backing) 7105 * pool when the upper pool is the majority contributor to dirty 7106 * data. To ensure we make forward progress during throttling, we 7107 * also check the current pool's net dirty data and only throttle 7108 * if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty 7109 * data in the cache. 7110 * 7111 * Note: if two requests come in concurrently, we might let them 7112 * both succeed, when one of them should fail. Not a huge deal.
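 *
 * As a hedged worked example (assuming the defaults at the time of
 * writing: zfs_arc_dirty_limit_percent = 50, zfs_arc_anon_limit_percent
 * = 25, zfs_arc_pool_dirty_percent = 20): with arc_c = 4 GB, a reserve
 * is refused with ERESTART only if total_dirty > 2 GB, anon_size > 1 GB,
 * and this pool's own dirty data exceeds anon_size / 5.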
7113 */ 7114 uint64_t total_dirty = reserve + arc_tempreserve + anon_size; 7115 uint64_t spa_dirty_anon = spa_dirty_data(spa); 7116 7117 if (total_dirty > arc_c * zfs_arc_dirty_limit_percent / 100 && 7118 anon_size > arc_c * zfs_arc_anon_limit_percent / 100 && 7119 spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) { 7120 uint64_t meta_esize = 7121 zfs_refcount_count( 7122 &arc_anon->arcs_esize[ARC_BUFC_METADATA]); 7123 uint64_t data_esize = 7124 zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 7125 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 7126 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 7127 arc_tempreserve >> 10, meta_esize >> 10, 7128 data_esize >> 10, reserve >> 10, arc_c >> 10); 7129 return (SET_ERROR(ERESTART)); 7130 } 7131 atomic_add_64(&arc_tempreserve, reserve); 7132 return (0); 7133 } 7134 7135 static void 7136 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size, 7137 kstat_named_t *evict_data, kstat_named_t *evict_metadata) 7138 { 7139 size->value.ui64 = zfs_refcount_count(&state->arcs_size); 7140 evict_data->value.ui64 = 7141 zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]); 7142 evict_metadata->value.ui64 = 7143 zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]); 7144 } 7145 7146 static int 7147 arc_kstat_update(kstat_t *ksp, int rw) 7148 { 7149 arc_stats_t *as = ksp->ks_data; 7150 7151 if (rw == KSTAT_WRITE) { 7152 return (EACCES); 7153 } else { 7154 arc_kstat_update_state(arc_anon, 7155 &as->arcstat_anon_size, 7156 &as->arcstat_anon_evictable_data, 7157 &as->arcstat_anon_evictable_metadata); 7158 arc_kstat_update_state(arc_mru, 7159 &as->arcstat_mru_size, 7160 &as->arcstat_mru_evictable_data, 7161 &as->arcstat_mru_evictable_metadata); 7162 arc_kstat_update_state(arc_mru_ghost, 7163 &as->arcstat_mru_ghost_size, 7164 &as->arcstat_mru_ghost_evictable_data, 7165 &as->arcstat_mru_ghost_evictable_metadata); 7166 arc_kstat_update_state(arc_mfu, 7167 &as->arcstat_mfu_size, 7168 &as->arcstat_mfu_evictable_data, 7169 &as->arcstat_mfu_evictable_metadata); 7170 arc_kstat_update_state(arc_mfu_ghost, 7171 &as->arcstat_mfu_ghost_size, 7172 &as->arcstat_mfu_ghost_evictable_data, 7173 &as->arcstat_mfu_ghost_evictable_metadata); 7174 7175 ARCSTAT(arcstat_size) = aggsum_value(&arc_size); 7176 ARCSTAT(arcstat_meta_used) = aggsum_value(&arc_meta_used); 7177 ARCSTAT(arcstat_data_size) = aggsum_value(&astat_data_size); 7178 ARCSTAT(arcstat_metadata_size) = 7179 aggsum_value(&astat_metadata_size); 7180 ARCSTAT(arcstat_hdr_size) = aggsum_value(&astat_hdr_size); 7181 ARCSTAT(arcstat_other_size) = aggsum_value(&astat_other_size); 7182 ARCSTAT(arcstat_l2_hdr_size) = aggsum_value(&astat_l2_hdr_size); 7183 } 7184 7185 return (0); 7186 } 7187 7188 /* 7189 * This function *must* return indices evenly distributed between all 7190 * sublists of the multilist. This is needed due to how the ARC eviction 7191 * code is laid out; arc_evict_state() assumes ARC buffers are evenly 7192 * distributed between all sublists and uses this assumption when 7193 * deciding which sublist to evict from and how much to evict from it. 7194 */ 7195 unsigned int 7196 arc_state_multilist_index_func(multilist_t *ml, void *obj) 7197 { 7198 arc_buf_hdr_t *hdr = obj; 7199 7200 /* 7201 * We rely on b_dva to generate evenly distributed index 7202 * numbers using buf_hash below. So, as an added precaution, 7203 * let's make sure we never add empty buffers to the arc lists. 
7204 */ 7205 ASSERT(!HDR_EMPTY(hdr)); 7206 7207 /* 7208 * The assumption here is that the hash value for a given 7209 * arc_buf_hdr_t will remain constant throughout its lifetime 7210 * (i.e. its b_spa, b_dva, and b_birth fields don't change). 7211 * Thus, we don't need to store the header's sublist index 7212 * on insertion, as this index can be recalculated on removal. 7213 * 7214 * Also, the low order bits of the hash value are thought to be 7215 * distributed evenly. Otherwise, in the case that the multilist 7216 * has a power of two number of sublists, each sublist's usage 7217 * would not be evenly distributed. 7218 */ 7219 return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) % 7220 multilist_get_num_sublists(ml)); 7221 } 7222 7223 static void 7224 arc_state_init(void) 7225 { 7226 arc_anon = &ARC_anon; 7227 arc_mru = &ARC_mru; 7228 arc_mru_ghost = &ARC_mru_ghost; 7229 arc_mfu = &ARC_mfu; 7230 arc_mfu_ghost = &ARC_mfu_ghost; 7231 arc_l2c_only = &ARC_l2c_only; 7232 7233 arc_mru->arcs_list[ARC_BUFC_METADATA] = 7234 multilist_create(sizeof (arc_buf_hdr_t), 7235 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7236 arc_state_multilist_index_func); 7237 arc_mru->arcs_list[ARC_BUFC_DATA] = 7238 multilist_create(sizeof (arc_buf_hdr_t), 7239 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7240 arc_state_multilist_index_func); 7241 arc_mru_ghost->arcs_list[ARC_BUFC_METADATA] = 7242 multilist_create(sizeof (arc_buf_hdr_t), 7243 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7244 arc_state_multilist_index_func); 7245 arc_mru_ghost->arcs_list[ARC_BUFC_DATA] = 7246 multilist_create(sizeof (arc_buf_hdr_t), 7247 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7248 arc_state_multilist_index_func); 7249 arc_mfu->arcs_list[ARC_BUFC_METADATA] = 7250 multilist_create(sizeof (arc_buf_hdr_t), 7251 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7252 arc_state_multilist_index_func); 7253 arc_mfu->arcs_list[ARC_BUFC_DATA] = 7254 multilist_create(sizeof (arc_buf_hdr_t), 7255 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7256 arc_state_multilist_index_func); 7257 arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA] = 7258 multilist_create(sizeof (arc_buf_hdr_t), 7259 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7260 arc_state_multilist_index_func); 7261 arc_mfu_ghost->arcs_list[ARC_BUFC_DATA] = 7262 multilist_create(sizeof (arc_buf_hdr_t), 7263 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7264 arc_state_multilist_index_func); 7265 arc_l2c_only->arcs_list[ARC_BUFC_METADATA] = 7266 multilist_create(sizeof (arc_buf_hdr_t), 7267 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7268 arc_state_multilist_index_func); 7269 arc_l2c_only->arcs_list[ARC_BUFC_DATA] = 7270 multilist_create(sizeof (arc_buf_hdr_t), 7271 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 7272 arc_state_multilist_index_func); 7273 7274 zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 7275 zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 7276 zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); 7277 zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]); 7278 zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); 7279 zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); 7280 zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); 7281 zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); 7282 zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); 7283 zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); 7284 zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); 7285
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); 7286 7287 zfs_refcount_create(&arc_anon->arcs_size); 7288 zfs_refcount_create(&arc_mru->arcs_size); 7289 zfs_refcount_create(&arc_mru_ghost->arcs_size); 7290 zfs_refcount_create(&arc_mfu->arcs_size); 7291 zfs_refcount_create(&arc_mfu_ghost->arcs_size); 7292 zfs_refcount_create(&arc_l2c_only->arcs_size); 7293 7294 aggsum_init(&arc_meta_used, 0); 7295 aggsum_init(&arc_size, 0); 7296 aggsum_init(&astat_data_size, 0); 7297 aggsum_init(&astat_metadata_size, 0); 7298 aggsum_init(&astat_hdr_size, 0); 7299 aggsum_init(&astat_other_size, 0); 7300 aggsum_init(&astat_l2_hdr_size, 0); 7301 } 7302 7303 static void 7304 arc_state_fini(void) 7305 { 7306 zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 7307 zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 7308 zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); 7309 zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]); 7310 zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); 7311 zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); 7312 zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); 7313 zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); 7314 zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); 7315 zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); 7316 zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); 7317 zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); 7318 7319 zfs_refcount_destroy(&arc_anon->arcs_size); 7320 zfs_refcount_destroy(&arc_mru->arcs_size); 7321 zfs_refcount_destroy(&arc_mru_ghost->arcs_size); 7322 zfs_refcount_destroy(&arc_mfu->arcs_size); 7323 zfs_refcount_destroy(&arc_mfu_ghost->arcs_size); 7324 zfs_refcount_destroy(&arc_l2c_only->arcs_size); 7325 7326 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]); 7327 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 7328 multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_METADATA]); 7329 multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 7330 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_DATA]); 7331 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 7332 multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_DATA]); 7333 multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 7334 multilist_destroy(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]); 7335 multilist_destroy(arc_l2c_only->arcs_list[ARC_BUFC_DATA]); 7336 7337 aggsum_fini(&arc_meta_used); 7338 aggsum_fini(&arc_size); 7339 aggsum_fini(&astat_data_size); 7340 aggsum_fini(&astat_metadata_size); 7341 aggsum_fini(&astat_hdr_size); 7342 aggsum_fini(&astat_other_size); 7343 aggsum_fini(&astat_l2_hdr_size); 7344 7345 } 7346 7347 uint64_t 7348 arc_max_bytes(void) 7349 { 7350 return (arc_c_max); 7351 } 7352 7353 void 7354 arc_init(void) 7355 { 7356 /* 7357 * allmem is "all memory that we could possibly use". 
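 *
 * A hedged worked example of the sizing below (hypothetical machine):
 * with allmem = 16 GB, arc_c_min = MAX(16 GB / 32, 64 MB) = 512 MB and
 * arc_c_max = MAX(16 GB * 3/4, 16 GB - 1 GB) = 15 GB, before the
 * zfs_arc_min/zfs_arc_max tunables are given a chance to override.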
7358 */ 7359 #ifdef _KERNEL 7360 uint64_t allmem = ptob(physmem - swapfs_minfree); 7361 #else 7362 uint64_t allmem = (physmem * PAGESIZE) / 2; 7363 #endif 7364 mutex_init(&arc_adjust_lock, NULL, MUTEX_DEFAULT, NULL); 7365 cv_init(&arc_adjust_waiters_cv, NULL, CV_DEFAULT, NULL); 7366 7367 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 7368 arc_c_min = MAX(allmem / 32, 64 << 20); 7369 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 7370 if (allmem >= 1 << 30) 7371 arc_c_max = allmem - (1 << 30); 7372 else 7373 arc_c_max = arc_c_min; 7374 arc_c_max = MAX(allmem * 3 / 4, arc_c_max); 7375 7376 /* 7377 * In userland, there's only the memory pressure that we artificially 7378 * create (see arc_available_memory()). Don't let arc_c get too 7379 * small, because it can cause transactions to be larger than 7380 * arc_c, causing arc_tempreserve_space() to fail. 7381 */ 7382 #ifndef _KERNEL 7383 arc_c_min = arc_c_max / 2; 7384 #endif 7385 7386 /* 7387 * Allow the tunables to override our calculations if they are 7388 * reasonable (i.e. over 64MB) 7389 */ 7390 if (zfs_arc_max > 64 << 20 && zfs_arc_max < allmem) { 7391 arc_c_max = zfs_arc_max; 7392 arc_c_min = MIN(arc_c_min, arc_c_max); 7393 } 7394 if (zfs_arc_min > 64 << 20 && zfs_arc_min <= arc_c_max) 7395 arc_c_min = zfs_arc_min; 7396 7397 arc_c = arc_c_max; 7398 arc_p = (arc_c >> 1); 7399 7400 /* limit meta-data to 1/4 of the arc capacity */ 7401 arc_meta_limit = arc_c_max / 4; 7402 7403 #ifdef _KERNEL 7404 /* 7405 * Metadata is stored in the kernel's heap. Don't let us 7406 * use more than half the heap for the ARC. 7407 */ 7408 arc_meta_limit = MIN(arc_meta_limit, 7409 vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 2); 7410 #endif 7411 7412 /* Allow the tunable to override if it is reasonable */ 7413 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 7414 arc_meta_limit = zfs_arc_meta_limit; 7415 7416 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 7417 arc_c_min = arc_meta_limit / 2; 7418 7419 if (zfs_arc_meta_min > 0) { 7420 arc_meta_min = zfs_arc_meta_min; 7421 } else { 7422 arc_meta_min = arc_c_min / 2; 7423 } 7424 7425 if (zfs_arc_grow_retry > 0) 7426 arc_grow_retry = zfs_arc_grow_retry; 7427 7428 if (zfs_arc_shrink_shift > 0) 7429 arc_shrink_shift = zfs_arc_shrink_shift; 7430 7431 /* 7432 * Ensure that arc_no_grow_shift is less than arc_shrink_shift. 7433 */ 7434 if (arc_no_grow_shift >= arc_shrink_shift) 7435 arc_no_grow_shift = arc_shrink_shift - 1; 7436 7437 if (zfs_arc_p_min_shift > 0) 7438 arc_p_min_shift = zfs_arc_p_min_shift; 7439 7440 /* if kmem_flags are set, let's try to use less memory */ 7441 if (kmem_debugging()) 7442 arc_c = arc_c / 2; 7443 if (arc_c < arc_c_min) 7444 arc_c = arc_c_min; 7445 7446 arc_state_init(); 7447 7448 /* 7449 * The arc must be "uninitialized", so that hdr_recl() (which is 7450 * registered by buf_init()) will not access arc_reap_zthr before 7451 * it is created.
7452 */ 7453 ASSERT(!arc_initialized); 7454 buf_init(); 7455 7456 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 7457 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 7458 7459 if (arc_ksp != NULL) { 7460 arc_ksp->ks_data = &arc_stats; 7461 arc_ksp->ks_update = arc_kstat_update; 7462 kstat_install(arc_ksp); 7463 } 7464 7465 arc_adjust_zthr = zthr_create(arc_adjust_cb_check, 7466 arc_adjust_cb, NULL); 7467 arc_reap_zthr = zthr_create_timer(arc_reap_cb_check, 7468 arc_reap_cb, NULL, SEC2NSEC(1)); 7469 7470 arc_initialized = B_TRUE; 7471 arc_warm = B_FALSE; 7472 7473 /* 7474 * Calculate maximum amount of dirty data per pool. 7475 * 7476 * If it has been set by /etc/system, take that. 7477 * Otherwise, use a percentage of physical memory defined by 7478 * zfs_dirty_data_max_percent (default 10%) with a cap at 7479 * zfs_dirty_data_max_max (default 4GB). 7480 */ 7481 if (zfs_dirty_data_max == 0) { 7482 zfs_dirty_data_max = physmem * PAGESIZE * 7483 zfs_dirty_data_max_percent / 100; 7484 zfs_dirty_data_max = MIN(zfs_dirty_data_max, 7485 zfs_dirty_data_max_max); 7486 } 7487 } 7488 7489 void 7490 arc_fini(void) 7491 { 7492 /* Use B_TRUE to ensure *all* buffers are evicted */ 7493 arc_flush(NULL, B_TRUE); 7494 7495 arc_initialized = B_FALSE; 7496 7497 if (arc_ksp != NULL) { 7498 kstat_delete(arc_ksp); 7499 arc_ksp = NULL; 7500 } 7501 7502 (void) zthr_cancel(arc_adjust_zthr); 7503 zthr_destroy(arc_adjust_zthr); 7504 7505 (void) zthr_cancel(arc_reap_zthr); 7506 zthr_destroy(arc_reap_zthr); 7507 7508 mutex_destroy(&arc_adjust_lock); 7509 cv_destroy(&arc_adjust_waiters_cv); 7510 7511 /* 7512 * buf_fini() must precede arc_state_fini() because buf_fini() may 7513 * trigger the release of kmem magazines, which can call back into 7514 * arc_space_return(), which accesses aggsums freed in arc_state_fini(). 7515 */ 7516 buf_fini(); 7517 arc_state_fini(); 7518 7519 ASSERT0(arc_loaned_bytes); 7520 } 7521 7522 /* 7523 * Level 2 ARC 7524 * 7525 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 7526 * It uses dedicated storage devices to hold cached data, which are populated 7527 * using large infrequent writes. The main role of this cache is to boost 7528 * the performance of random read workloads. The intended L2ARC devices 7529 * include short-stroked disks, solid state disks, and other media with 7530 * substantially faster read latency than disk. 7531 * 7532 * +-----------------------+ 7533 * | ARC | 7534 * +-----------------------+ 7535 * | ^ ^ 7536 * | | | 7537 * l2arc_feed_thread() arc_read() 7538 * | | | 7539 * | l2arc read | 7540 * V | | 7541 * +---------------+ | 7542 * | L2ARC | | 7543 * +---------------+ | 7544 * | ^ | 7545 * l2arc_write() | | 7546 * | | | 7547 * V | | 7548 * +-------+ +-------+ 7549 * | vdev | | vdev | 7550 * | cache | | cache | 7551 * +-------+ +-------+ 7552 * +=========+ .-----. 7553 * : L2ARC : |-_____-| 7554 * : devices : | Disks | 7555 * +=========+ `-_____-' 7556 * 7557 * Read requests are satisfied from the following sources, in order: 7558 * 7559 * 1) ARC 7560 * 2) vdev cache of L2ARC devices 7561 * 3) L2ARC devices 7562 * 4) vdev cache of disks 7563 * 5) disks 7564 * 7565 * Some L2ARC device types exhibit extremely slow write performance. 7566 * To accommodate this, there are some significant differences between 7567 * the L2ARC and traditional cache design: 7568 * 7569 * 1. There is no eviction path from the ARC to the L2ARC.
Evictions from 7570 * the ARC behave as usual, freeing buffers and placing headers on ghost 7571 * lists. The ARC does not send buffers to the L2ARC during eviction as 7572 * this would add inflated write latencies for all ARC memory pressure. 7573 * 7574 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 7575 * It does this by periodically scanning buffers from the eviction-end of 7576 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 7577 * not already there. It scans until a headroom of buffers is satisfied, 7578 * which itself is a buffer for ARC eviction. If a compressible buffer is 7579 * found during scanning and selected for writing to an L2ARC device, we 7580 * temporarily boost scanning headroom during the next scan cycle to make 7581 * sure we adapt to compression effects (which might significantly reduce 7582 * the data volume we write to L2ARC). The thread that does this is 7583 * l2arc_feed_thread(), illustrated below; example sizes are included to 7584 * provide a better sense of ratio than this diagram: 7585 * 7586 * head --> tail 7587 * +---------------------+----------+ 7588 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 7589 * +---------------------+----------+ | o L2ARC eligible 7590 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 7591 * +---------------------+----------+ | 7592 * 15.9 Gbytes ^ 32 Mbytes | 7593 * headroom | 7594 * l2arc_feed_thread() 7595 * | 7596 * l2arc write hand <--[oooo]--' 7597 * | 8 Mbyte 7598 * | write max 7599 * V 7600 * +==============================+ 7601 * L2ARC dev |####|#|###|###| |####| ... | 7602 * +==============================+ 7603 * 32 Gbytes 7604 * 7605 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 7606 * evicted, then the L2ARC has cached a buffer much sooner than it probably 7607 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 7608 * safe to say that this is an uncommon case, since buffers at the end of 7609 * the ARC lists have moved there due to inactivity. 7610 * 7611 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 7612 * then the L2ARC simply misses copying some buffers. This serves as a 7613 * pressure valve to prevent heavy read workloads from both stalling the ARC 7614 * with waits and clogging the L2ARC with writes. This also helps prevent 7615 * the potential for the L2ARC to churn if it attempts to cache content too 7616 * quickly, such as during backups of the entire pool. 7617 * 7618 * 5. After system boot and before the ARC has filled main memory, there are 7619 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 7620 * lists can remain mostly static. Instead of searching from tail of these 7621 * lists as pictured, the l2arc_feed_thread() will search from the list heads 7622 * for eligible buffers, greatly increasing its chance of finding them. 7623 * 7624 * The L2ARC device write speed is also boosted during this time so that 7625 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 7626 * there are no L2ARC reads, and no fear of degrading read performance 7627 * through increased writes. 7628 * 7629 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 7630 * the vdev queue can aggregate them into larger and fewer writes. Each 7631 * device is written to in a rotor fashion, sweeping writes through 7632 * available space then repeating. 7633 * 7634 * 7. The L2ARC does not store dirty content. 
It never needs to flush 7635 * write buffers back to disk based storage. 7636 * 7637 * 8. If an ARC buffer is written (and dirtied) which also exists in the 7638 * L2ARC, the now stale L2ARC buffer is immediately dropped. 7639 * 7640 * The performance of the L2ARC can be tweaked by a number of tunables, which 7641 * may be necessary for different workloads: 7642 * 7643 * l2arc_write_max max write bytes per interval 7644 * l2arc_write_boost extra write bytes during device warmup 7645 * l2arc_noprefetch skip caching prefetched buffers 7646 * l2arc_headroom number of max device writes to precache 7647 * l2arc_headroom_boost when we find compressed buffers during ARC 7648 * scanning, we multiply headroom by this 7649 * percentage factor for the next scan cycle, 7650 * since more compressed buffers are likely to 7651 * be present 7652 * l2arc_feed_secs seconds between L2ARC writing 7653 * 7654 * Tunables may be removed or added as future performance improvements are 7655 * integrated, and also may become zpool properties. 7656 * 7657 * There are three key functions that control how the L2ARC warms up: 7658 * 7659 * l2arc_write_eligible() check if a buffer is eligible to cache 7660 * l2arc_write_size() calculate how much to write 7661 * l2arc_write_interval() calculate sleep delay between writes 7662 * 7663 * These three functions determine what to write, how much, and how quickly 7664 * to send writes. 7665 */ 7666 7667 static boolean_t 7668 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr) 7669 { 7670 /* 7671 * A buffer is *not* eligible for the L2ARC if it: 7672 * 1. belongs to a different spa. 7673 * 2. is already cached on the L2ARC. 7674 * 3. has an I/O in progress (it may be an incomplete read). 7675 * 4. is flagged not eligible (zfs property). 7676 */ 7677 if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) || 7678 HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr)) 7679 return (B_FALSE); 7680 7681 return (B_TRUE); 7682 } 7683 7684 static uint64_t 7685 l2arc_write_size(void) 7686 { 7687 uint64_t size; 7688 7689 /* 7690 * Make sure our globals have meaningful values in case the user 7691 * altered them. 7692 */ 7693 size = l2arc_write_max; 7694 if (size == 0) { 7695 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must " 7696 "be greater than zero, resetting it to the default (%d)", 7697 L2ARC_WRITE_SIZE); 7698 size = l2arc_write_max = L2ARC_WRITE_SIZE; 7699 } 7700 7701 if (arc_warm == B_FALSE) 7702 size += l2arc_write_boost; 7703 7704 return (size); 7705 7706 } 7707 7708 static clock_t 7709 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 7710 { 7711 clock_t interval, next, now; 7712 7713 /* 7714 * If the ARC lists are busy, increase our write rate; if the 7715 * lists are stale, idle back. This is achieved by checking 7716 * how much we previously wrote - if it was more than half of 7717 * what we wanted, schedule the next write much sooner. 7718 */ 7719 if (l2arc_feed_again && wrote > (wanted / 2)) 7720 interval = (hz * l2arc_feed_min_ms) / 1000; 7721 else 7722 interval = hz * l2arc_feed_secs; 7723 7724 now = ddi_get_lbolt(); 7725 next = MAX(now, MIN(now + interval, began + interval)); 7726 7727 return (next); 7728 } 7729 7730 /* 7731 * Cycle through L2ARC devices. This is how L2ARC load balances. 7732 * If a device is returned, this also returns holding the spa config lock. 
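 *
 * A sketch of the expected caller pattern (cf. l2arc_feed_thread(); the
 * spa_config_exit() here mirrors the spa_config_enter() taken below):
 *
 *	l2arc_dev_t *dev = l2arc_dev_get_next();
 *	if (dev != NULL) {
 *		... write buffers to dev->l2ad_vdev ...
 *		spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
 *	}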
7733 */ 7734 static l2arc_dev_t * 7735 l2arc_dev_get_next(void) 7736 { 7737 l2arc_dev_t *first, *next = NULL; 7738 7739 /* 7740 * Lock out the removal of spas (spa_namespace_lock), then removal 7741 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 7742 * both locks will be dropped and a spa config lock held instead. 7743 */ 7744 mutex_enter(&spa_namespace_lock); 7745 mutex_enter(&l2arc_dev_mtx); 7746 7747 /* if there are no vdevs, there is nothing to do */ 7748 if (l2arc_ndev == 0) 7749 goto out; 7750 7751 first = NULL; 7752 next = l2arc_dev_last; 7753 do { 7754 /* loop around the list looking for a non-faulted vdev */ 7755 if (next == NULL) { 7756 next = list_head(l2arc_dev_list); 7757 } else { 7758 next = list_next(l2arc_dev_list, next); 7759 if (next == NULL) 7760 next = list_head(l2arc_dev_list); 7761 } 7762 7763 /* if we have come back to the start, bail out */ 7764 if (first == NULL) 7765 first = next; 7766 else if (next == first) 7767 break; 7768 7769 } while (vdev_is_dead(next->l2ad_vdev)); 7770 7771 /* if we were unable to find any usable vdevs, return NULL */ 7772 if (vdev_is_dead(next->l2ad_vdev)) 7773 next = NULL; 7774 7775 l2arc_dev_last = next; 7776 7777 out: 7778 mutex_exit(&l2arc_dev_mtx); 7779 7780 /* 7781 * Grab the config lock to prevent the 'next' device from being 7782 * removed while we are writing to it. 7783 */ 7784 if (next != NULL) 7785 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 7786 mutex_exit(&spa_namespace_lock); 7787 7788 return (next); 7789 } 7790 7791 /* 7792 * Free buffers that were tagged for destruction. 7793 */ 7794 static void 7795 l2arc_do_free_on_write() 7796 { 7797 list_t *buflist; 7798 l2arc_data_free_t *df, *df_prev; 7799 7800 mutex_enter(&l2arc_free_on_write_mtx); 7801 buflist = l2arc_free_on_write; 7802 7803 for (df = list_tail(buflist); df; df = df_prev) { 7804 df_prev = list_prev(buflist, df); 7805 ASSERT3P(df->l2df_abd, !=, NULL); 7806 abd_free(df->l2df_abd); 7807 list_remove(buflist, df); 7808 kmem_free(df, sizeof (l2arc_data_free_t)); 7809 } 7810 7811 mutex_exit(&l2arc_free_on_write_mtx); 7812 } 7813 7814 /* 7815 * A write to a cache device has completed. Update all headers to allow 7816 * reads from these buffers to begin. 7817 */ 7818 static void 7819 l2arc_write_done(zio_t *zio) 7820 { 7821 l2arc_write_callback_t *cb; 7822 l2arc_dev_t *dev; 7823 list_t *buflist; 7824 arc_buf_hdr_t *head, *hdr, *hdr_prev; 7825 kmutex_t *hash_lock; 7826 int64_t bytes_dropped = 0; 7827 7828 cb = zio->io_private; 7829 ASSERT3P(cb, !=, NULL); 7830 dev = cb->l2wcb_dev; 7831 ASSERT3P(dev, !=, NULL); 7832 head = cb->l2wcb_head; 7833 ASSERT3P(head, !=, NULL); 7834 buflist = &dev->l2ad_buflist; 7835 ASSERT3P(buflist, !=, NULL); 7836 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 7837 l2arc_write_callback_t *, cb); 7838 7839 if (zio->io_error != 0) 7840 ARCSTAT_BUMP(arcstat_l2_writes_error); 7841 7842 /* 7843 * All writes completed, or an error was hit. 7844 */ 7845 top: 7846 mutex_enter(&dev->l2ad_mtx); 7847 for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) { 7848 hdr_prev = list_prev(buflist, hdr); 7849 7850 hash_lock = HDR_LOCK(hdr); 7851 7852 /* 7853 * We cannot use mutex_enter or else we can deadlock 7854 * with l2arc_write_buffers (due to swapping the order 7855 * the hash lock and l2ad_mtx are taken). 7856 */ 7857 if (!mutex_tryenter(hash_lock)) { 7858 /* 7859 * Missed the hash lock. We must retry so we 7860 * don't leave the ARC_FLAG_L2_WRITING bit set. 
7861 */ 7862 ARCSTAT_BUMP(arcstat_l2_writes_lock_retry); 7863 7864 /* 7865 * We don't want to rescan the headers we've 7866 * already marked as having been written out, so 7867 * we reinsert the head node so we can pick up 7868 * where we left off. 7869 */ 7870 list_remove(buflist, head); 7871 list_insert_after(buflist, hdr, head); 7872 7873 mutex_exit(&dev->l2ad_mtx); 7874 7875 /* 7876 * We wait for the hash lock to become available 7877 * to try and prevent busy waiting, and increase 7878 * the chance we'll be able to acquire the lock 7879 * the next time around. 7880 */ 7881 mutex_enter(hash_lock); 7882 mutex_exit(hash_lock); 7883 goto top; 7884 } 7885 7886 /* 7887 * We could not have been moved into the arc_l2c_only 7888 * state while in-flight due to our ARC_FLAG_L2_WRITING 7889 * bit being set. Let's just ensure that's being enforced. 7890 */ 7891 ASSERT(HDR_HAS_L1HDR(hdr)); 7892 7893 if (zio->io_error != 0) { 7894 /* 7895 * Error - drop L2ARC entry. 7896 */ 7897 list_remove(buflist, hdr); 7898 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR); 7899 7900 uint64_t psize = HDR_GET_PSIZE(hdr); 7901 ARCSTAT_INCR(arcstat_l2_psize, -psize); 7902 ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr)); 7903 7904 bytes_dropped += 7905 vdev_psize_to_asize(dev->l2ad_vdev, psize); 7906 (void) zfs_refcount_remove_many(&dev->l2ad_alloc, 7907 arc_hdr_size(hdr), hdr); 7908 } 7909 7910 /* 7911 * Allow ARC to begin reads and ghost list evictions to 7912 * this L2ARC entry. 7913 */ 7914 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING); 7915 7916 mutex_exit(hash_lock); 7917 } 7918 7919 atomic_inc_64(&l2arc_writes_done); 7920 list_remove(buflist, head); 7921 ASSERT(!HDR_HAS_L1HDR(head)); 7922 kmem_cache_free(hdr_l2only_cache, head); 7923 mutex_exit(&dev->l2ad_mtx); 7924 7925 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0); 7926 7927 l2arc_do_free_on_write(); 7928 7929 kmem_free(cb, sizeof (l2arc_write_callback_t)); 7930 } 7931 7932 static int 7933 l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb) 7934 { 7935 int ret; 7936 spa_t *spa = zio->io_spa; 7937 arc_buf_hdr_t *hdr = cb->l2rcb_hdr; 7938 blkptr_t *bp = zio->io_bp; 7939 uint8_t salt[ZIO_DATA_SALT_LEN]; 7940 uint8_t iv[ZIO_DATA_IV_LEN]; 7941 uint8_t mac[ZIO_DATA_MAC_LEN]; 7942 boolean_t no_crypt = B_FALSE; 7943 7944 /* 7945 * ZIL data is never written to the L2ARC, so we don't need 7946 * special handling for its unique MAC storage. 7947 */ 7948 ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG); 7949 ASSERT(MUTEX_HELD(HDR_LOCK(hdr))); 7950 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 7951 7952 /* 7953 * If the data was encrypted, decrypt it now. Note that 7954 * we must check the bp here and not the hdr, since the 7955 * hdr does not have its encryption parameters updated 7956 * until arc_read_done(). 7957 */ 7958 if (BP_IS_ENCRYPTED(bp)) { 7959 abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr); 7960 7961 zio_crypt_decode_params_bp(bp, salt, iv); 7962 zio_crypt_decode_mac_bp(bp, mac); 7963 7964 ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb, 7965 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), 7966 salt, iv, mac, HDR_GET_PSIZE(hdr), eabd, 7967 hdr->b_l1hdr.b_pabd, &no_crypt); 7968 if (ret != 0) { 7969 arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr); 7970 goto error; 7971 } 7972 7973 /* 7974 * If we actually performed decryption, replace b_pabd 7975 * with the decrypted data. Otherwise we can just throw 7976 * our decryption buffer away.
7977 */ 7978 if (!no_crypt) { 7979 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, 7980 arc_hdr_size(hdr), hdr); 7981 hdr->b_l1hdr.b_pabd = eabd; 7982 zio->io_abd = eabd; 7983 } else { 7984 arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr); 7985 } 7986 } 7987 7988 /* 7989 * If the L2ARC block was compressed, but ARC compression 7990 * is disabled we decompress the data into a new buffer and 7991 * replace the existing data. 7992 */ 7993 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && 7994 !HDR_COMPRESSION_ENABLED(hdr)) { 7995 abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr); 7996 void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr)); 7997 7998 ret = zio_decompress_data(HDR_GET_COMPRESS(hdr), 7999 hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr), 8000 HDR_GET_LSIZE(hdr)); 8001 if (ret != 0) { 8002 abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr)); 8003 arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr); 8004 goto error; 8005 } 8006 8007 abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr)); 8008 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, 8009 arc_hdr_size(hdr), hdr); 8010 hdr->b_l1hdr.b_pabd = cabd; 8011 zio->io_abd = cabd; 8012 zio->io_size = HDR_GET_LSIZE(hdr); 8013 } 8014 8015 return (0); 8016 8017 error: 8018 return (ret); 8019 } 8020 8021 8022 /* 8023 * A read to a cache device completed. Validate buffer contents before 8024 * handing over to the regular ARC routines. 8025 */ 8026 static void 8027 l2arc_read_done(zio_t *zio) 8028 { 8029 int tfm_error = 0; 8030 l2arc_read_callback_t *cb = zio->io_private; 8031 arc_buf_hdr_t *hdr; 8032 kmutex_t *hash_lock; 8033 boolean_t valid_cksum; 8034 boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) && 8035 (cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT)); 8036 8037 ASSERT3P(zio->io_vd, !=, NULL); 8038 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 8039 8040 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 8041 8042 ASSERT3P(cb, !=, NULL); 8043 hdr = cb->l2rcb_hdr; 8044 ASSERT3P(hdr, !=, NULL); 8045 8046 hash_lock = HDR_LOCK(hdr); 8047 mutex_enter(hash_lock); 8048 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 8049 8050 /* 8051 * If the data was read into a temporary buffer, 8052 * move it and free the buffer. 8053 */ 8054 if (cb->l2rcb_abd != NULL) { 8055 ASSERT3U(arc_hdr_size(hdr), <, zio->io_size); 8056 if (zio->io_error == 0) { 8057 if (using_rdata) { 8058 abd_copy(hdr->b_crypt_hdr.b_rabd, 8059 cb->l2rcb_abd, arc_hdr_size(hdr)); 8060 } else { 8061 abd_copy(hdr->b_l1hdr.b_pabd, 8062 cb->l2rcb_abd, arc_hdr_size(hdr)); 8063 } 8064 } 8065 8066 /* 8067 * The following must be done regardless of whether 8068 * there was an error: 8069 * - free the temporary buffer 8070 * - point zio to the real ARC buffer 8071 * - set zio size accordingly 8072 * These are required because zio is either re-used for 8073 * an I/O of the block in the case of the error 8074 * or the zio is passed to arc_read_done() and it 8075 * needs real data. 8076 */ 8077 abd_free(cb->l2rcb_abd); 8078 zio->io_size = zio->io_orig_size = arc_hdr_size(hdr); 8079 8080 if (using_rdata) { 8081 ASSERT(HDR_HAS_RABD(hdr)); 8082 zio->io_abd = zio->io_orig_abd = 8083 hdr->b_crypt_hdr.b_rabd; 8084 } else { 8085 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 8086 zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd; 8087 } 8088 } 8089 8090 ASSERT3P(zio->io_abd, !=, NULL); 8091 8092 /* 8093 * Check this survived the L2ARC journey. 
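 *
 * A condensed sketch of the checks that follow:
 * 1. validate the checksum against the header (arc_cksum_is_equal());
 * 2. unless we read into b_rabd, undo any L2ARC transforms
 *    (l2arc_untransform(): decrypt and/or decompress);
 * 3. on success, hand the zio to arc_read_done(); otherwise mark the
 *    error and reissue the read to the primary pool storage.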
8094 */ 8095 ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd || 8096 (HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd)); 8097 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 8098 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 8099 8100 valid_cksum = arc_cksum_is_equal(hdr, zio); 8101 8102 /* 8103 * b_rabd will always match the data as it exists on disk if it is 8104 * being used. Therefore if we are reading into b_rabd we do not 8105 * attempt to untransform the data. 8106 */ 8107 if (valid_cksum && !using_rdata) 8108 tfm_error = l2arc_untransform(zio, cb); 8109 8110 if (valid_cksum && tfm_error == 0 && zio->io_error == 0 && 8111 !HDR_L2_EVICTED(hdr)) { 8112 mutex_exit(hash_lock); 8113 zio->io_private = hdr; 8114 arc_read_done(zio); 8115 } else { 8116 mutex_exit(hash_lock); 8117 /* 8118 * Buffer didn't survive caching. Increment stats and 8119 * reissue to the original storage device. 8120 */ 8121 if (zio->io_error != 0) { 8122 ARCSTAT_BUMP(arcstat_l2_io_error); 8123 } else { 8124 zio->io_error = SET_ERROR(EIO); 8125 } 8126 if (!valid_cksum || tfm_error != 0) 8127 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 8128 8129 /* 8130 * If there's no waiter, issue an async i/o to the primary 8131 * storage now. If there *is* a waiter, the caller must 8132 * issue the i/o in a context where it's OK to block. 8133 */ 8134 if (zio->io_waiter == NULL) { 8135 zio_t *pio = zio_unique_parent(zio); 8136 void *abd = (using_rdata) ? 8137 hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd; 8138 8139 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 8140 8141 zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp, 8142 abd, zio->io_size, arc_read_done, 8143 hdr, zio->io_priority, cb->l2rcb_flags, 8144 &cb->l2rcb_zb)); 8145 } 8146 } 8147 8148 kmem_free(cb, sizeof (l2arc_read_callback_t)); 8149 } 8150 8151 /* 8152 * This is the list priority from which the L2ARC will search for pages to 8153 * cache. This is used within loops (0..3) to cycle through lists in the 8154 * desired order. This order can have a significant effect on cache 8155 * performance. 8156 * 8157 * Currently the metadata lists are hit first, MFU then MRU, followed by 8158 * the data lists. This function returns a locked list, and also returns 8159 * the lock pointer. 8160 */ 8161 static multilist_sublist_t * 8162 l2arc_sublist_lock(int list_num) 8163 { 8164 multilist_t *ml = NULL; 8165 unsigned int idx; 8166 8167 ASSERT(list_num >= 0 && list_num <= 3); 8168 8169 switch (list_num) { 8170 case 0: 8171 ml = arc_mfu->arcs_list[ARC_BUFC_METADATA]; 8172 break; 8173 case 1: 8174 ml = arc_mru->arcs_list[ARC_BUFC_METADATA]; 8175 break; 8176 case 2: 8177 ml = arc_mfu->arcs_list[ARC_BUFC_DATA]; 8178 break; 8179 case 3: 8180 ml = arc_mru->arcs_list[ARC_BUFC_DATA]; 8181 break; 8182 } 8183 8184 /* 8185 * Return a randomly-selected sublist. This is acceptable 8186 * because the caller feeds only a little bit of data for each 8187 * call (8MB). Subsequent calls will result in different 8188 * sublists being selected. 8189 */ 8190 idx = multilist_get_random_index(ml); 8191 return (multilist_sublist_lock(ml, idx)); 8192 } 8193 8194 /* 8195 * Evict buffers from the device write hand to the distance specified in 8196 * bytes. This distance may span populated buffers, it may span nothing. 8197 * This is clearing a region on the L2ARC device ready for writing. 8198 * If the 'all' boolean is set, every buffer is evicted. 
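 *
 * A sketch of the region being cleared (not to scale; the normal,
 * non-wrapping case):
 *
 *	l2ad_start         l2ad_hand            taddr          l2ad_end
 *	|------------------|====================|--------------|
 *	                    headers with b_daddr in
 *	                    [l2ad_hand, taddr) are evicted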
8199 */ 8200 static void 8201 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 8202 { 8203 list_t *buflist; 8204 arc_buf_hdr_t *hdr, *hdr_prev; 8205 kmutex_t *hash_lock; 8206 uint64_t taddr; 8207 8208 buflist = &dev->l2ad_buflist; 8209 8210 if (!all && dev->l2ad_first) { 8211 /* 8212 * This is the first sweep through the device. There is 8213 * nothing to evict. 8214 */ 8215 return; 8216 } 8217 8218 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 8219 /* 8220 * When nearing the end of the device, evict to the end 8221 * before the device write hand jumps to the start. 8222 */ 8223 taddr = dev->l2ad_end; 8224 } else { 8225 taddr = dev->l2ad_hand + distance; 8226 } 8227 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 8228 uint64_t, taddr, boolean_t, all); 8229 8230 top: 8231 mutex_enter(&dev->l2ad_mtx); 8232 for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) { 8233 hdr_prev = list_prev(buflist, hdr); 8234 8235 hash_lock = HDR_LOCK(hdr); 8236 8237 /* 8238 * We cannot use mutex_enter or else we can deadlock 8239 * with l2arc_write_buffers (due to swapping the order 8240 * the hash lock and l2ad_mtx are taken). 8241 */ 8242 if (!mutex_tryenter(hash_lock)) { 8243 /* 8244 * Missed the hash lock. Retry. 8245 */ 8246 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 8247 mutex_exit(&dev->l2ad_mtx); 8248 mutex_enter(hash_lock); 8249 mutex_exit(hash_lock); 8250 goto top; 8251 } 8252 8253 /* 8254 * A header can't be on this list if it doesn't have L2 header. 8255 */ 8256 ASSERT(HDR_HAS_L2HDR(hdr)); 8257 8258 /* Ensure this header has finished being written. */ 8259 ASSERT(!HDR_L2_WRITING(hdr)); 8260 ASSERT(!HDR_L2_WRITE_HEAD(hdr)); 8261 8262 if (!all && (hdr->b_l2hdr.b_daddr >= taddr || 8263 hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) { 8264 /* 8265 * We've evicted to the target address, 8266 * or the end of the device. 8267 */ 8268 mutex_exit(hash_lock); 8269 break; 8270 } 8271 8272 if (!HDR_HAS_L1HDR(hdr)) { 8273 ASSERT(!HDR_L2_READING(hdr)); 8274 /* 8275 * This doesn't exist in the ARC. Destroy. 8276 * arc_hdr_destroy() will call list_remove() 8277 * and decrement arcstat_l2_lsize. 8278 */ 8279 arc_change_state(arc_anon, hdr, hash_lock); 8280 arc_hdr_destroy(hdr); 8281 } else { 8282 ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only); 8283 ARCSTAT_BUMP(arcstat_l2_evict_l1cached); 8284 /* 8285 * Invalidate issued or about to be issued 8286 * reads, since we may be about to write 8287 * over this location. 8288 */ 8289 if (HDR_L2_READING(hdr)) { 8290 ARCSTAT_BUMP(arcstat_l2_evict_reading); 8291 arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED); 8292 } 8293 8294 arc_hdr_l2hdr_destroy(hdr); 8295 } 8296 mutex_exit(hash_lock); 8297 } 8298 mutex_exit(&dev->l2ad_mtx); 8299 } 8300 8301 /* 8302 * Handle any abd transforms that might be required for writing to the L2ARC. 8303 * If successful, this function will always return an abd with the data 8304 * transformed as it is on disk in a new abd of asize bytes. 
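 *
 * A condensed sketch of the pipeline below, each step applied only when
 * needed: copy (raw b_rabd, or plain data that merely needs its own
 * buffer) -> compress -> encrypt -> zero-pad from psize up to asize.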
8305 */ 8306 static int 8307 l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize, 8308 abd_t **abd_out) 8309 { 8310 int ret; 8311 void *tmp = NULL; 8312 abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd; 8313 enum zio_compress compress = HDR_GET_COMPRESS(hdr); 8314 uint64_t psize = HDR_GET_PSIZE(hdr); 8315 uint64_t size = arc_hdr_size(hdr); 8316 boolean_t ismd = HDR_ISTYPE_METADATA(hdr); 8317 boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); 8318 dsl_crypto_key_t *dck = NULL; 8319 uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 }; 8320 boolean_t no_crypt = B_FALSE; 8321 8322 ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && 8323 !HDR_COMPRESSION_ENABLED(hdr)) || 8324 HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize); 8325 ASSERT3U(psize, <=, asize); 8326 8327 /* 8328 * If this data simply needs its own buffer, we simply allocate it 8329 * and copy the data. This may be done to eliminate a dependency on a 8330 * shared buffer or to reallocate the buffer to match asize. 8331 */ 8332 if (HDR_HAS_RABD(hdr) && asize != psize) { 8333 ASSERT3U(asize, >=, psize); 8334 to_write = abd_alloc_for_io(asize, ismd); 8335 abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize); 8336 if (psize != asize) 8337 abd_zero_off(to_write, psize, asize - psize); 8338 goto out; 8339 } 8340 8341 if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) && 8342 !HDR_ENCRYPTED(hdr)) { 8343 ASSERT3U(size, ==, psize); 8344 to_write = abd_alloc_for_io(asize, ismd); 8345 abd_copy(to_write, hdr->b_l1hdr.b_pabd, size); 8346 if (size != asize) 8347 abd_zero_off(to_write, size, asize - size); 8348 goto out; 8349 } 8350 8351 if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) { 8352 cabd = abd_alloc_for_io(asize, ismd); 8353 tmp = abd_borrow_buf(cabd, asize); 8354 8355 psize = zio_compress_data(compress, to_write, tmp, size); 8356 ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr)); 8357 if (psize < asize) 8358 bzero((char *)tmp + psize, asize - psize); 8359 psize = HDR_GET_PSIZE(hdr); 8360 abd_return_buf_copy(cabd, tmp, asize); 8361 to_write = cabd; 8362 } 8363 8364 if (HDR_ENCRYPTED(hdr)) { 8365 eabd = abd_alloc_for_io(asize, ismd); 8366 8367 /* 8368 * If the dataset was disowned before the buffer 8369 * made it to this point, the key to re-encrypt 8370 * it won't be available. In this case we simply 8371 * won't write the buffer to the L2ARC. 
8372 */ 8373 ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj, 8374 FTAG, &dck); 8375 if (ret != 0) 8376 goto error; 8377 8378 ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key, 8379 hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt, 8380 hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd, 8381 &no_crypt); 8382 if (ret != 0) 8383 goto error; 8384 8385 if (no_crypt) 8386 abd_copy(eabd, to_write, psize); 8387 8388 if (psize != asize) 8389 abd_zero_off(eabd, psize, asize - psize); 8390 8391 /* assert that the MAC we got here matches the one we saved */ 8392 ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN)); 8393 spa_keystore_dsl_key_rele(spa, dck, FTAG); 8394 8395 if (to_write == cabd) 8396 abd_free(cabd); 8397 8398 to_write = eabd; 8399 } 8400 8401 out: 8402 ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd); 8403 *abd_out = to_write; 8404 return (0); 8405 8406 error: 8407 if (dck != NULL) 8408 spa_keystore_dsl_key_rele(spa, dck, FTAG); 8409 if (cabd != NULL) 8410 abd_free(cabd); 8411 if (eabd != NULL) 8412 abd_free(eabd); 8413 8414 *abd_out = NULL; 8415 return (ret); 8416 } 8417 8418 /* 8419 * Find and write ARC buffers to the L2ARC device. 8420 * 8421 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid 8422 * for reading until they have completed writing. 8423 * The headroom_boost is an in-out parameter used to maintain headroom boost 8424 * state between calls to this function. 8425 * 8426 * Returns the number of bytes actually written (which may be smaller than 8427 * the delta by which the device hand has changed due to alignment). 8428 */ 8429 static uint64_t 8430 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 8431 { 8432 arc_buf_hdr_t *hdr, *hdr_prev, *head; 8433 uint64_t write_asize, write_psize, write_lsize, headroom; 8434 boolean_t full; 8435 l2arc_write_callback_t *cb; 8436 zio_t *pio, *wzio; 8437 uint64_t guid = spa_load_guid(spa); 8438 8439 ASSERT3P(dev->l2ad_vdev, !=, NULL); 8440 8441 pio = NULL; 8442 write_lsize = write_asize = write_psize = 0; 8443 full = B_FALSE; 8444 head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE); 8445 arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR); 8446 8447 /* 8448 * Copy buffers for L2ARC writing. 8449 */ 8450 for (int try = 0; try <= 3; try++) { 8451 multilist_sublist_t *mls = l2arc_sublist_lock(try); 8452 uint64_t passed_sz = 0; 8453 8454 VERIFY3P(mls, !=, NULL); 8455 8456 /* 8457 * L2ARC fast warmup. 8458 * 8459 * Until the ARC is warm and starts to evict, read from the 8460 * head of the ARC lists rather than the tail. 8461 */ 8462 if (arc_warm == B_FALSE) 8463 hdr = multilist_sublist_head(mls); 8464 else 8465 hdr = multilist_sublist_tail(mls); 8466 8467 headroom = target_sz * l2arc_headroom; 8468 if (zfs_compressed_arc_enabled) 8469 headroom = (headroom * l2arc_headroom_boost) / 100; 8470 8471 for (; hdr; hdr = hdr_prev) { 8472 kmutex_t *hash_lock; 8473 abd_t *to_write = NULL; 8474 8475 if (arc_warm == B_FALSE) 8476 hdr_prev = multilist_sublist_next(mls, hdr); 8477 else 8478 hdr_prev = multilist_sublist_prev(mls, hdr); 8479 8480 hash_lock = HDR_LOCK(hdr); 8481 if (!mutex_tryenter(hash_lock)) { 8482 /* 8483 * Skip this buffer rather than waiting. 8484 */ 8485 continue; 8486 } 8487 8488 passed_sz += HDR_GET_LSIZE(hdr); 8489 if (passed_sz > headroom) { 8490 /* 8491 * Searched too far. 

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 *
 * Returns the number of bytes actually written, in terms of space allocated
 * on the device (asize), which also matches the amount by which the device
 * hand has advanced.
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
        arc_buf_hdr_t *hdr, *hdr_prev, *head;
        uint64_t write_asize, write_psize, write_lsize, headroom;
        boolean_t full;
        l2arc_write_callback_t *cb;
        zio_t *pio, *wzio;
        uint64_t guid = spa_load_guid(spa);

        ASSERT3P(dev->l2ad_vdev, !=, NULL);

        pio = NULL;
        write_lsize = write_asize = write_psize = 0;
        full = B_FALSE;
        head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
        arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);

        /*
         * Copy buffers for L2ARC writing.
         */
        for (int try = 0; try <= 3; try++) {
                multilist_sublist_t *mls = l2arc_sublist_lock(try);
                uint64_t passed_sz = 0;

                VERIFY3P(mls, !=, NULL);

                /*
                 * L2ARC fast warmup.
                 *
                 * Until the ARC is warm and starts to evict, read from the
                 * head of the ARC lists rather than the tail.
                 */
                if (arc_warm == B_FALSE)
                        hdr = multilist_sublist_head(mls);
                else
                        hdr = multilist_sublist_tail(mls);

                headroom = target_sz * l2arc_headroom;
                if (zfs_compressed_arc_enabled)
                        headroom = (headroom * l2arc_headroom_boost) / 100;

                for (; hdr; hdr = hdr_prev) {
                        kmutex_t *hash_lock;
                        abd_t *to_write = NULL;

                        if (arc_warm == B_FALSE)
                                hdr_prev = multilist_sublist_next(mls, hdr);
                        else
                                hdr_prev = multilist_sublist_prev(mls, hdr);

                        hash_lock = HDR_LOCK(hdr);
                        if (!mutex_tryenter(hash_lock)) {
                                /*
                                 * Skip this buffer rather than waiting.
                                 */
                                continue;
                        }

                        passed_sz += HDR_GET_LSIZE(hdr);
                        if (passed_sz > headroom) {
                                /*
                                 * Searched too far.
                                 */
                                mutex_exit(hash_lock);
                                break;
                        }

                        if (!l2arc_write_eligible(guid, hdr)) {
                                mutex_exit(hash_lock);
                                continue;
                        }

                        /*
                         * We rely on the L1 portion of the header below, so
                         * it's invalid for this header to have been evicted
                         * out of the ghost cache prior to being written out.
                         * The ARC_FLAG_L2_WRITING bit set below ensures this
                         * won't happen.
                         */
                        ASSERT(HDR_HAS_L1HDR(hdr));

                        ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
                        ASSERT3U(arc_hdr_size(hdr), >, 0);
                        ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
                            HDR_HAS_RABD(hdr));
                        uint64_t psize = HDR_GET_PSIZE(hdr);
                        uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
                            psize);

                        if ((write_asize + asize) > target_sz) {
                                full = B_TRUE;
                                mutex_exit(hash_lock);
                                break;
                        }

                        arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);

                        /*
                         * If this header has b_rabd, we can use it since it
                         * must always match the data exactly as it exists on
                         * disk. Otherwise, the L2ARC can normally use the
                         * hdr's data, but if we're sharing data between the
                         * hdr and one of its bufs, L2ARC needs its own copy of
                         * the data so that the ZIO below can't race with the
                         * buf consumer. To ensure that this copy will be
                         * available for the lifetime of the ZIO and be cleaned
                         * up afterwards, we add it to the l2arc_free_on_write
                         * queue. If we need to apply any transforms to the
                         * data (compression, encryption) we will also need the
                         * extra buffer.
                         */
                        if (HDR_HAS_RABD(hdr) && psize == asize) {
                                to_write = hdr->b_crypt_hdr.b_rabd;
                        } else if ((HDR_COMPRESSION_ENABLED(hdr) ||
                            HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
                            !HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
                            psize == asize) {
                                to_write = hdr->b_l1hdr.b_pabd;
                        } else {
                                int ret;
                                arc_buf_contents_t type = arc_buf_type(hdr);

                                ret = l2arc_apply_transforms(spa, hdr, asize,
                                    &to_write);
                                if (ret != 0) {
                                        arc_hdr_clear_flags(hdr,
                                            ARC_FLAG_L2_WRITING);
                                        mutex_exit(hash_lock);
                                        continue;
                                }

                                l2arc_free_abd_on_write(to_write, asize, type);
                        }
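
                        /*
                         * In short: a raw (b_rabd) buffer that already
                         * matches asize is written as-is; a buffer that is
                         * unencrypted, unshared, and already stored in its
                         * on-disk form is written directly from b_pabd;
                         * anything else gets a private, transformed copy
                         * that is freed once the write completes.
                         */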

                        if (pio == NULL) {
                                /*
                                 * Insert a dummy header on the buflist so
                                 * l2arc_write_done() can find where the
                                 * write buffers begin without searching.
                                 */
                                mutex_enter(&dev->l2ad_mtx);
                                list_insert_head(&dev->l2ad_buflist, head);
                                mutex_exit(&dev->l2ad_mtx);

                                cb = kmem_alloc(
                                    sizeof (l2arc_write_callback_t), KM_SLEEP);
                                cb->l2wcb_dev = dev;
                                cb->l2wcb_head = head;
                                pio = zio_root(spa, l2arc_write_done, cb,
                                    ZIO_FLAG_CANFAIL);
                        }

                        hdr->b_l2hdr.b_dev = dev;
                        hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
                        arc_hdr_set_flags(hdr,
                            ARC_FLAG_L2_WRITING | ARC_FLAG_HAS_L2HDR);

                        mutex_enter(&dev->l2ad_mtx);
                        list_insert_head(&dev->l2ad_buflist, hdr);
                        mutex_exit(&dev->l2ad_mtx);

                        (void) zfs_refcount_add_many(&dev->l2ad_alloc,
                            arc_hdr_size(hdr), hdr);

                        wzio = zio_write_phys(pio, dev->l2ad_vdev,
                            hdr->b_l2hdr.b_daddr, asize, to_write,
                            ZIO_CHECKSUM_OFF, NULL, hdr,
                            ZIO_PRIORITY_ASYNC_WRITE,
                            ZIO_FLAG_CANFAIL, B_FALSE);

                        write_lsize += HDR_GET_LSIZE(hdr);
                        DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
                            zio_t *, wzio);

                        write_psize += psize;
                        write_asize += asize;
                        dev->l2ad_hand += asize;
                        vdev_space_update(dev->l2ad_vdev, asize, 0, 0);

                        mutex_exit(hash_lock);

                        (void) zio_nowait(wzio);
                }

                multilist_sublist_unlock(mls);

                if (full == B_TRUE)
                        break;
        }

        /* No buffers selected for writing? */
        if (pio == NULL) {
                ASSERT0(write_lsize);
                ASSERT(!HDR_HAS_L1HDR(head));
                kmem_cache_free(hdr_l2only_cache, head);
                return (0);
        }

        ASSERT3U(write_asize, <=, target_sz);
        ARCSTAT_BUMP(arcstat_l2_writes_sent);
        ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
        ARCSTAT_INCR(arcstat_l2_lsize, write_lsize);
        ARCSTAT_INCR(arcstat_l2_psize, write_psize);

        /*
         * Bump device hand to the device start if it is approaching the end.
         * l2arc_evict() will already have evicted ahead for this case.
         */
        if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
                dev->l2ad_hand = dev->l2ad_start;
                dev->l2ad_first = B_FALSE;
        }

        dev->l2ad_writing = B_TRUE;
        (void) zio_wait(pio);
        dev->l2ad_writing = B_FALSE;

        return (write_asize);
}
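
/*
 * For illustration (hypothetical numbers): with a target_sz of 64 MB, once
 * l2ad_hand comes within 64 MB of l2ad_end it is reset to l2ad_start, so
 * the L2ARC treats the device as a ring buffer. Clearing l2ad_first records
 * that at least one full pass over the device has been made, which
 * l2arc_evict() consults when deciding whether there is anything ahead of
 * the hand that needs evicting.
 */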

/*
 * This thread feeds the L2ARC at regular intervals. This is the beating
 * heart of the L2ARC.
 */
/* ARGSUSED */
static void
l2arc_feed_thread(void *unused)
{
        callb_cpr_t cpr;
        l2arc_dev_t *dev;
        spa_t *spa;
        uint64_t size, wrote;
        clock_t begin, next = ddi_get_lbolt();

        CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

        mutex_enter(&l2arc_feed_thr_lock);

        while (l2arc_thread_exit == 0) {
                CALLB_CPR_SAFE_BEGIN(&cpr);
                (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
                    next);
                CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
                next = ddi_get_lbolt() + hz;

                /*
                 * Quick check for L2ARC devices.
                 */
                mutex_enter(&l2arc_dev_mtx);
                if (l2arc_ndev == 0) {
                        mutex_exit(&l2arc_dev_mtx);
                        continue;
                }
                mutex_exit(&l2arc_dev_mtx);
                begin = ddi_get_lbolt();

                /*
                 * This selects the next l2arc device to write to, and in
                 * doing so the next spa to feed from: dev->l2ad_spa. This
                 * will return NULL if there are now no l2arc devices or if
                 * they are all faulted.
                 *
                 * If a device is returned, its spa's config lock is also
                 * held to prevent device removal. l2arc_dev_get_next()
                 * will grab and release l2arc_dev_mtx.
                 */
                if ((dev = l2arc_dev_get_next()) == NULL)
                        continue;

                spa = dev->l2ad_spa;
                ASSERT3P(spa, !=, NULL);

                /*
                 * If the pool is read-only then force the feed thread to
                 * sleep a little longer.
                 */
                if (!spa_writeable(spa)) {
                        next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
                        spa_config_exit(spa, SCL_L2ARC, dev);
                        continue;
                }

                /*
                 * Avoid contributing to memory pressure.
                 */
                if (arc_reclaim_needed()) {
                        ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
                        spa_config_exit(spa, SCL_L2ARC, dev);
                        continue;
                }

                ARCSTAT_BUMP(arcstat_l2_feeds);

                size = l2arc_write_size();

                /*
                 * Evict L2ARC buffers that will be overwritten.
                 */
                l2arc_evict(dev, size, B_FALSE);

                /*
                 * Write ARC buffers.
                 */
                wrote = l2arc_write_buffers(spa, dev, size);

                /*
                 * Calculate interval between writes.
                 */
                next = l2arc_write_interval(begin, size, wrote);
                spa_config_exit(spa, SCL_L2ARC, dev);
        }

        l2arc_thread_exit = 0;
        cv_broadcast(&l2arc_feed_thr_cv);
        CALLB_CPR_EXIT(&cpr);           /* drops l2arc_feed_thr_lock */
        thread_exit();
}

boolean_t
l2arc_vdev_present(vdev_t *vd)
{
        l2arc_dev_t *dev;

        mutex_enter(&l2arc_dev_mtx);
        for (dev = list_head(l2arc_dev_list); dev != NULL;
            dev = list_next(l2arc_dev_list, dev)) {
                if (dev->l2ad_vdev == vd)
                        break;
        }
        mutex_exit(&l2arc_dev_mtx);

        return (dev != NULL);
}

/*
 * Add a vdev for use by the L2ARC. By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
        l2arc_dev_t *adddev;

        ASSERT(!l2arc_vdev_present(vd));

        /*
         * Create a new l2arc device entry.
         */
        adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
        adddev->l2ad_spa = spa;
        adddev->l2ad_vdev = vd;
        adddev->l2ad_start = VDEV_LABEL_START_SIZE;
        adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
        adddev->l2ad_hand = adddev->l2ad_start;
        adddev->l2ad_first = B_TRUE;
        adddev->l2ad_writing = B_FALSE;

        mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
        /*
         * This is a list of all ARC buffers that are still valid on the
         * device.
         */
        list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
            offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));

        vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
        zfs_refcount_create(&adddev->l2ad_alloc);

        /*
         * Add device to the global list.
         */
        mutex_enter(&l2arc_dev_mtx);
        list_insert_head(l2arc_dev_list, adddev);
        atomic_inc_64(&l2arc_ndev);
        mutex_exit(&l2arc_dev_mtx);
}
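
/*
 * Note on the address range chosen above: the usable region starts at
 * VDEV_LABEL_START_SIZE, skipping the leading vdev labels and boot block,
 * and runs for the vdev's minimum asize. The whole range is charged to the
 * vdev up front via vdev_space_update(), since the L2ARC manages the space
 * itself, recycling it as a ring buffer rather than through the normal
 * allocator.
 */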

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
        l2arc_dev_t *dev, *nextdev, *remdev = NULL;

        /*
         * Find the device by vdev.
         */
        mutex_enter(&l2arc_dev_mtx);
        for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
                nextdev = list_next(l2arc_dev_list, dev);
                if (vd == dev->l2ad_vdev) {
                        remdev = dev;
                        break;
                }
        }
        ASSERT3P(remdev, !=, NULL);

        /*
         * Remove device from the global list.
         */
        list_remove(l2arc_dev_list, remdev);
        l2arc_dev_last = NULL;          /* may have been invalidated */
        atomic_dec_64(&l2arc_ndev);
        mutex_exit(&l2arc_dev_mtx);

        /*
         * Clear all buflists and ARC references. L2ARC device flush.
         */
        l2arc_evict(remdev, 0, B_TRUE);
        list_destroy(&remdev->l2ad_buflist);
        mutex_destroy(&remdev->l2ad_mtx);
        zfs_refcount_destroy(&remdev->l2ad_alloc);
        kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
        l2arc_thread_exit = 0;
        l2arc_ndev = 0;
        l2arc_writes_sent = 0;
        l2arc_writes_done = 0;

        mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
        mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

        l2arc_dev_list = &L2ARC_dev_list;
        l2arc_free_on_write = &L2ARC_free_on_write;
        list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
            offsetof(l2arc_dev_t, l2ad_node));
        list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
            offsetof(l2arc_data_free_t, l2df_list_node));
}

void
l2arc_fini(void)
{
        /*
         * This is called from dmu_fini(), which is called from spa_fini().
         * Because of this, we can assume that all l2arc devices have
         * already been removed when the pools themselves were removed.
         */

        l2arc_do_free_on_write();

        mutex_destroy(&l2arc_feed_thr_lock);
        cv_destroy(&l2arc_feed_thr_cv);
        mutex_destroy(&l2arc_dev_mtx);
        mutex_destroy(&l2arc_free_on_write_mtx);

        list_destroy(l2arc_dev_list);
        list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
        if (!(spa_mode_global & FWRITE))
                return;

        (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
            TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
        if (!(spa_mode_global & FWRITE))
                return;

        mutex_enter(&l2arc_feed_thr_lock);
        cv_signal(&l2arc_feed_thr_cv);  /* kick thread out of startup */
        l2arc_thread_exit = 1;
        while (l2arc_thread_exit != 0)
                cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
        mutex_exit(&l2arc_feed_thr_lock);
}
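
/*
 * Note on the shutdown handshake above: l2arc_stop() sets l2arc_thread_exit
 * and waits; the feed thread observes the flag, clears it, broadcasts on
 * l2arc_feed_thr_cv, and exits. The cv_wait() loop therefore returns only
 * after the feed thread has actually finished, so the caller can safely
 * proceed with teardown (e.g. l2arc_fini()).
 */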