1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
24  * Copyright (c) 2014 Integros [integros.com]
25  */
26
27 /* Portions Copyright 2010 Robert Milkowski */
28
29 #include <sys/zfs_context.h>
30 #include <sys/spa.h>
31 #include <sys/spa_impl.h>
32 #include <sys/dmu.h>
33 #include <sys/zap.h>
34 #include <sys/arc.h>
35 #include <sys/stat.h>
36 #include <sys/resource.h>
37 #include <sys/zil.h>
38 #include <sys/zil_impl.h>
39 #include <sys/dsl_dataset.h>
40 #include <sys/vdev_impl.h>
41 #include <sys/dmu_tx.h>
42 #include <sys/dsl_pool.h>
43 #include <sys/abd.h>
44
45 /*
46  * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
47  * calls that change the file system. Each itx has enough information to
48  * be able to replay it after a system crash, power loss, or
49  * equivalent failure mode. These are stored in memory until either:
50  *
51  *   1. they are committed to the pool by the DMU transaction group
52  *      (txg), at which point they can be discarded; or
53  *   2. they are committed to the on-disk ZIL for the dataset being
54  *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
55  *      requirement).
56  *
57  * In the event of a crash or power loss, the itxs contained by each
58  * dataset's on-disk ZIL will be replayed when that dataset is first
59  * instantiated (e.g. if the dataset is a normal filesystem, when it is
60  * first mounted).
61  *
62  * As hinted at above, there is one ZIL per dataset (both the in-memory
63  * representation, and the on-disk representation). The on-disk format
64  * consists of 3 parts:
65  *
66  *	- a single, per-dataset, ZIL header; which points to a chain of
67  *	- zero or more ZIL blocks; each of which contains
68  *	- zero or more ZIL records
69  *
70  * A ZIL record holds the information necessary to replay a single
71  * system call transaction. A ZIL block can hold many ZIL records, and
72  * the blocks are chained together, similar to a singly linked list.
73  *
74  * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
75  * block in the chain, and the ZIL header points to the first block in
76  * the chain.
77  *
78  * Note, there is not a fixed place in the pool to hold these ZIL
79  * blocks; they are dynamically allocated and freed as needed from the
80  * blocks available on the pool, though they can be preferentially
81  * allocated from a dedicated "log" vdev.
82  */
83
84 /*
85  * This controls the amount of time that a ZIL block (lwb) will remain
86  * "open" when it isn't "full", and it has a thread waiting for it to be
87  * committed to stable storage.
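 * The timeout is scaled off the recently observed lwb write latency
 * (see zl_last_lwb_latency below): with the default value of 5, an open
 * lwb is held for roughly 5% of that latency before it is issued.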
Please refer to the zil_commit_waiter() 88 * function (and the comments within it) for more details. 89 */ 90 int zfs_commit_timeout_pct = 5; 91 92 /* 93 * Disable intent logging replay. This global ZIL switch affects all pools. 94 */ 95 int zil_replay_disable = 0; 96 97 /* 98 * Tunable parameter for debugging or performance analysis. Setting 99 * zfs_nocacheflush will cause corruption on power loss if a volatile 100 * out-of-order write cache is enabled. 101 */ 102 boolean_t zfs_nocacheflush = B_FALSE; 103 104 /* 105 * Limit SLOG write size per commit executed with synchronous priority. 106 * Any writes above that will be executed with lower (asynchronous) priority 107 * to limit potential SLOG device abuse by single active ZIL writer. 108 */ 109 uint64_t zil_slog_bulk = 768 * 1024; 110 111 static kmem_cache_t *zil_lwb_cache; 112 static kmem_cache_t *zil_zcw_cache; 113 114 static void zil_async_to_sync(zilog_t *zilog, uint64_t foid); 115 116 #define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \ 117 sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused)) 118 119 static int 120 zil_bp_compare(const void *x1, const void *x2) 121 { 122 const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva; 123 const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva; 124 125 if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2)) 126 return (-1); 127 if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2)) 128 return (1); 129 130 if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2)) 131 return (-1); 132 if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2)) 133 return (1); 134 135 return (0); 136 } 137 138 static void 139 zil_bp_tree_init(zilog_t *zilog) 140 { 141 avl_create(&zilog->zl_bp_tree, zil_bp_compare, 142 sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node)); 143 } 144 145 static void 146 zil_bp_tree_fini(zilog_t *zilog) 147 { 148 avl_tree_t *t = &zilog->zl_bp_tree; 149 zil_bp_node_t *zn; 150 void *cookie = NULL; 151 152 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL) 153 kmem_free(zn, sizeof (zil_bp_node_t)); 154 155 avl_destroy(t); 156 } 157 158 int 159 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp) 160 { 161 avl_tree_t *t = &zilog->zl_bp_tree; 162 const dva_t *dva; 163 zil_bp_node_t *zn; 164 avl_index_t where; 165 166 if (BP_IS_EMBEDDED(bp)) 167 return (0); 168 169 dva = BP_IDENTITY(bp); 170 171 if (avl_find(t, dva, &where) != NULL) 172 return (SET_ERROR(EEXIST)); 173 174 zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP); 175 zn->zn_dva = *dva; 176 avl_insert(t, zn, where); 177 178 return (0); 179 } 180 181 static zil_header_t * 182 zil_header_in_syncing_context(zilog_t *zilog) 183 { 184 return ((zil_header_t *)zilog->zl_header); 185 } 186 187 static void 188 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp) 189 { 190 zio_cksum_t *zc = &bp->blk_cksum; 191 192 zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL); 193 zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL); 194 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os); 195 zc->zc_word[ZIL_ZC_SEQ] = 1ULL; 196 } 197 198 /* 199 * Read a log block and make sure it's valid. 
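 *
 * On success the block's records are copied into "dst", *end is set to
 * just past the last byte copied, and *nbp is set to the blkptr of the
 * next block in the chain. A checksum or chain-linkage mismatch returns
 * ECKSUM, which callers treat as the natural end of the log.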
200 */ 201 static int 202 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst, 203 char **end) 204 { 205 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 206 arc_flags_t aflags = ARC_FLAG_WAIT; 207 arc_buf_t *abuf = NULL; 208 zbookmark_phys_t zb; 209 int error; 210 211 if (zilog->zl_header->zh_claim_txg == 0) 212 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 213 214 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 215 zio_flags |= ZIO_FLAG_SPECULATIVE; 216 217 SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET], 218 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 219 220 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 221 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 222 223 if (error == 0) { 224 zio_cksum_t cksum = bp->blk_cksum; 225 226 /* 227 * Validate the checksummed log block. 228 * 229 * Sequence numbers should be... sequential. The checksum 230 * verifier for the next block should be bp's checksum plus 1. 231 * 232 * Also check the log chain linkage and size used. 233 */ 234 cksum.zc_word[ZIL_ZC_SEQ]++; 235 236 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 237 zil_chain_t *zilc = abuf->b_data; 238 char *lr = (char *)(zilc + 1); 239 uint64_t len = zilc->zc_nused - sizeof (zil_chain_t); 240 241 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 242 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) { 243 error = SET_ERROR(ECKSUM); 244 } else { 245 ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE); 246 bcopy(lr, dst, len); 247 *end = (char *)dst + len; 248 *nbp = zilc->zc_next_blk; 249 } 250 } else { 251 char *lr = abuf->b_data; 252 uint64_t size = BP_GET_LSIZE(bp); 253 zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1; 254 255 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 256 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) || 257 (zilc->zc_nused > (size - sizeof (*zilc)))) { 258 error = SET_ERROR(ECKSUM); 259 } else { 260 ASSERT3U(zilc->zc_nused, <=, 261 SPA_OLD_MAXBLOCKSIZE); 262 bcopy(lr, dst, zilc->zc_nused); 263 *end = (char *)dst + zilc->zc_nused; 264 *nbp = zilc->zc_next_blk; 265 } 266 } 267 268 arc_buf_destroy(abuf, &abuf); 269 } 270 271 return (error); 272 } 273 274 /* 275 * Read a TX_WRITE log data block. 276 */ 277 static int 278 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf) 279 { 280 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 281 const blkptr_t *bp = &lr->lr_blkptr; 282 arc_flags_t aflags = ARC_FLAG_WAIT; 283 arc_buf_t *abuf = NULL; 284 zbookmark_phys_t zb; 285 int error; 286 287 if (BP_IS_HOLE(bp)) { 288 if (wbuf != NULL) 289 bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length)); 290 return (0); 291 } 292 293 if (zilog->zl_header->zh_claim_txg == 0) 294 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 295 296 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid, 297 ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp)); 298 299 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 300 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 301 302 if (error == 0) { 303 if (wbuf != NULL) 304 bcopy(abuf->b_data, wbuf, arc_buf_size(abuf)); 305 arc_buf_destroy(abuf, &abuf); 306 } 307 308 return (error); 309 } 310 311 /* 312 * Parse the intent log, and call parse_func for each valid record within. 
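 *
 * parse_blk_func is invoked once for each block pointer in the chain,
 * and parse_lr_func once for each record in each valid block; the walk
 * stops at the first invalid block or once the claimed block/record
 * sequence numbers have been reached. The highest sequence numbers and
 * the block/record counts seen are saved in the zilog's zl_parse_*
 * fields, and the first error encountered (if any) is returned.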
313 */ 314 int 315 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, 316 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg) 317 { 318 const zil_header_t *zh = zilog->zl_header; 319 boolean_t claimed = !!zh->zh_claim_txg; 320 uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX; 321 uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX; 322 uint64_t max_blk_seq = 0; 323 uint64_t max_lr_seq = 0; 324 uint64_t blk_count = 0; 325 uint64_t lr_count = 0; 326 blkptr_t blk, next_blk; 327 char *lrbuf, *lrp; 328 int error = 0; 329 330 /* 331 * Old logs didn't record the maximum zh_claim_lr_seq. 332 */ 333 if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 334 claim_lr_seq = UINT64_MAX; 335 336 /* 337 * Starting at the block pointed to by zh_log we read the log chain. 338 * For each block in the chain we strongly check that block to 339 * ensure its validity. We stop when an invalid block is found. 340 * For each block pointer in the chain we call parse_blk_func(). 341 * For each record in each valid block we call parse_lr_func(). 342 * If the log has been claimed, stop if we encounter a sequence 343 * number greater than the highest claimed sequence number. 344 */ 345 lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE); 346 zil_bp_tree_init(zilog); 347 348 for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) { 349 uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 350 int reclen; 351 char *end; 352 353 if (blk_seq > claim_blk_seq) 354 break; 355 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0) 356 break; 357 ASSERT3U(max_blk_seq, <, blk_seq); 358 max_blk_seq = blk_seq; 359 blk_count++; 360 361 if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq) 362 break; 363 364 error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end); 365 if (error != 0) 366 break; 367 368 for (lrp = lrbuf; lrp < end; lrp += reclen) { 369 lr_t *lr = (lr_t *)lrp; 370 reclen = lr->lrc_reclen; 371 ASSERT3U(reclen, >=, sizeof (lr_t)); 372 if (lr->lrc_seq > claim_lr_seq) 373 goto done; 374 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0) 375 goto done; 376 ASSERT3U(max_lr_seq, <, lr->lrc_seq); 377 max_lr_seq = lr->lrc_seq; 378 lr_count++; 379 } 380 } 381 done: 382 zilog->zl_parse_error = error; 383 zilog->zl_parse_blk_seq = max_blk_seq; 384 zilog->zl_parse_lr_seq = max_lr_seq; 385 zilog->zl_parse_blk_count = blk_count; 386 zilog->zl_parse_lr_count = lr_count; 387 388 ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) || 389 (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq)); 390 391 zil_bp_tree_fini(zilog); 392 zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE); 393 394 return (error); 395 } 396 397 /* ARGSUSED */ 398 static int 399 zil_clear_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg) 400 { 401 ASSERT(!BP_IS_HOLE(bp)); 402 403 /* 404 * As we call this function from the context of a rewind to a 405 * checkpoint, each ZIL block whose txg is later than the txg 406 * that we rewind to is invalid. Thus, we return -1 so 407 * zil_parse() doesn't attempt to read it. 
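 * (Any non-zero return value from a parse callback terminates the walk
 * in zil_parse(), so the rest of the chain is not examined.)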
408 */ 409 if (bp->blk_birth >= first_txg) 410 return (-1); 411 412 if (zil_bp_tree_add(zilog, bp) != 0) 413 return (0); 414 415 zio_free(zilog->zl_spa, first_txg, bp); 416 return (0); 417 } 418 419 /* ARGSUSED */ 420 static int 421 zil_noop_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg) 422 { 423 return (0); 424 } 425 426 static int 427 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg) 428 { 429 /* 430 * Claim log block if not already committed and not already claimed. 431 * If tx == NULL, just verify that the block is claimable. 432 */ 433 if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg || 434 zil_bp_tree_add(zilog, bp) != 0) 435 return (0); 436 437 return (zio_wait(zio_claim(NULL, zilog->zl_spa, 438 tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL, 439 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB))); 440 } 441 442 static int 443 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg) 444 { 445 lr_write_t *lr = (lr_write_t *)lrc; 446 int error; 447 448 if (lrc->lrc_txtype != TX_WRITE) 449 return (0); 450 451 /* 452 * If the block is not readable, don't claim it. This can happen 453 * in normal operation when a log block is written to disk before 454 * some of the dmu_sync() blocks it points to. In this case, the 455 * transaction cannot have been committed to anyone (we would have 456 * waited for all writes to be stable first), so it is semantically 457 * correct to declare this the end of the log. 458 */ 459 if (lr->lr_blkptr.blk_birth >= first_txg && 460 (error = zil_read_log_data(zilog, lr, NULL)) != 0) 461 return (error); 462 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg)); 463 } 464 465 /* ARGSUSED */ 466 static int 467 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg) 468 { 469 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 470 471 return (0); 472 } 473 474 static int 475 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg) 476 { 477 lr_write_t *lr = (lr_write_t *)lrc; 478 blkptr_t *bp = &lr->lr_blkptr; 479 480 /* 481 * If we previously claimed it, we need to free it. 
482 */ 483 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE && 484 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 && 485 !BP_IS_HOLE(bp)) 486 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 487 488 return (0); 489 } 490 491 static int 492 zil_lwb_vdev_compare(const void *x1, const void *x2) 493 { 494 const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev; 495 const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev; 496 497 if (v1 < v2) 498 return (-1); 499 if (v1 > v2) 500 return (1); 501 502 return (0); 503 } 504 505 static lwb_t * 506 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg) 507 { 508 lwb_t *lwb; 509 510 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP); 511 lwb->lwb_zilog = zilog; 512 lwb->lwb_blk = *bp; 513 lwb->lwb_slog = slog; 514 lwb->lwb_state = LWB_STATE_CLOSED; 515 lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp)); 516 lwb->lwb_max_txg = txg; 517 lwb->lwb_write_zio = NULL; 518 lwb->lwb_root_zio = NULL; 519 lwb->lwb_tx = NULL; 520 lwb->lwb_issued_timestamp = 0; 521 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 522 lwb->lwb_nused = sizeof (zil_chain_t); 523 lwb->lwb_sz = BP_GET_LSIZE(bp); 524 } else { 525 lwb->lwb_nused = 0; 526 lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t); 527 } 528 529 mutex_enter(&zilog->zl_lock); 530 list_insert_tail(&zilog->zl_lwb_list, lwb); 531 mutex_exit(&zilog->zl_lock); 532 533 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock)); 534 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 535 VERIFY(list_is_empty(&lwb->lwb_waiters)); 536 537 return (lwb); 538 } 539 540 static void 541 zil_free_lwb(zilog_t *zilog, lwb_t *lwb) 542 { 543 ASSERT(MUTEX_HELD(&zilog->zl_lock)); 544 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock)); 545 VERIFY(list_is_empty(&lwb->lwb_waiters)); 546 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 547 ASSERT3P(lwb->lwb_write_zio, ==, NULL); 548 ASSERT3P(lwb->lwb_root_zio, ==, NULL); 549 ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa)); 550 ASSERT(lwb->lwb_state == LWB_STATE_CLOSED || 551 lwb->lwb_state == LWB_STATE_DONE); 552 553 /* 554 * Clear the zilog's field to indicate this lwb is no longer 555 * valid, and prevent use-after-free errors. 556 */ 557 if (zilog->zl_last_lwb_opened == lwb) 558 zilog->zl_last_lwb_opened = NULL; 559 560 kmem_cache_free(zil_lwb_cache, lwb); 561 } 562 563 /* 564 * Called when we create in-memory log transactions so that we know 565 * to cleanup the itxs at the end of spa_sync(). 566 */ 567 void 568 zilog_dirty(zilog_t *zilog, uint64_t txg) 569 { 570 dsl_pool_t *dp = zilog->zl_dmu_pool; 571 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 572 573 ASSERT(spa_writeable(zilog->zl_spa)); 574 575 if (ds->ds_is_snapshot) 576 panic("dirtying snapshot!"); 577 578 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) { 579 /* up the hold count until we can be written out */ 580 dmu_buf_add_ref(ds->ds_dbuf, zilog); 581 582 zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg); 583 } 584 } 585 586 /* 587 * Determine if the zil is dirty in the specified txg. Callers wanting to 588 * ensure that the dirty state does not change must hold the itxg_lock for 589 * the specified txg. Holding the lock will ensure that the zil cannot be 590 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current 591 * state. 
592  */
593 boolean_t
594 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
595 {
596 	dsl_pool_t *dp = zilog->zl_dmu_pool;
597
598 	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
599 		return (B_TRUE);
600 	return (B_FALSE);
601 }
602
603 /*
604  * Determine if the zil is dirty. The zil is considered dirty if it has
605  * any pending itx records that have not been cleaned by zil_clean().
606  */
607 boolean_t
608 zilog_is_dirty(zilog_t *zilog)
609 {
610 	dsl_pool_t *dp = zilog->zl_dmu_pool;
611
612 	for (int t = 0; t < TXG_SIZE; t++) {
613 		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
614 			return (B_TRUE);
615 	}
616 	return (B_FALSE);
617 }
618
619 /*
620  * Create an on-disk intent log.
621  */
622 static lwb_t *
623 zil_create(zilog_t *zilog)
624 {
625 	const zil_header_t *zh = zilog->zl_header;
626 	lwb_t *lwb = NULL;
627 	uint64_t txg = 0;
628 	dmu_tx_t *tx = NULL;
629 	blkptr_t blk;
630 	int error = 0;
631 	boolean_t slog = FALSE;
632
633 	/*
634 	 * Wait for any previous destroy to complete.
635 	 */
636 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
637
638 	ASSERT(zh->zh_claim_txg == 0);
639 	ASSERT(zh->zh_replay_seq == 0);
640
641 	blk = zh->zh_log;
642
643 	/*
644 	 * Allocate an initial log block if:
645 	 *    - there isn't one already
646 	 *    - the existing block is the wrong endianness
647 	 */
648 	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
649 		tx = dmu_tx_create(zilog->zl_os);
650 		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
651 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
652 		txg = dmu_tx_get_txg(tx);
653
654 		if (!BP_IS_HOLE(&blk)) {
655 			zio_free(zilog->zl_spa, txg, &blk);
656 			BP_ZERO(&blk);
657 		}
658
659 		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
660 		    ZIL_MIN_BLKSZ, &slog);
661
662 		if (error == 0)
663 			zil_init_log_chain(zilog, &blk);
664 	}
665
666 	/*
667 	 * Allocate a log write block (lwb) for the first log block.
668 	 */
669 	if (error == 0)
670 		lwb = zil_alloc_lwb(zilog, &blk, slog, txg);
671
672 	/*
673 	 * If we just allocated the first log block, commit our transaction
674 	 * and wait for zil_sync() to stuff the block pointer into zh_log.
675 	 * (zh is part of the MOS, so we cannot modify it in open context.)
676 	 */
677 	if (tx != NULL) {
678 		dmu_tx_commit(tx);
679 		txg_wait_synced(zilog->zl_dmu_pool, txg);
680 	}
681
682 	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
683
684 	return (lwb);
685 }
686
687 /*
688  * In one tx, free all log blocks and clear the log header. If keep_first
689  * is set, then we're replaying a log with no content. We want to keep the
690  * first block, however, so that the first synchronous transaction doesn't
691  * require a txg_wait_synced() in zil_create(). We don't need to
692  * txg_wait_synced() here either when keep_first is set, because both
693  * zil_create() and zil_destroy() will wait for any in-progress destroys
694  * to complete.
695  */
696 void
697 zil_destroy(zilog_t *zilog, boolean_t keep_first)
698 {
699 	const zil_header_t *zh = zilog->zl_header;
700 	lwb_t *lwb;
701 	dmu_tx_t *tx;
702 	uint64_t txg;
703
704 	/*
705 	 * Wait for any previous destroy to complete.
706 */ 707 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 708 709 zilog->zl_old_header = *zh; /* debugging aid */ 710 711 if (BP_IS_HOLE(&zh->zh_log)) 712 return; 713 714 tx = dmu_tx_create(zilog->zl_os); 715 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 716 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 717 txg = dmu_tx_get_txg(tx); 718 719 mutex_enter(&zilog->zl_lock); 720 721 ASSERT3U(zilog->zl_destroy_txg, <, txg); 722 zilog->zl_destroy_txg = txg; 723 zilog->zl_keep_first = keep_first; 724 725 if (!list_is_empty(&zilog->zl_lwb_list)) { 726 ASSERT(zh->zh_claim_txg == 0); 727 VERIFY(!keep_first); 728 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 729 list_remove(&zilog->zl_lwb_list, lwb); 730 if (lwb->lwb_buf != NULL) 731 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 732 zio_free(zilog->zl_spa, txg, &lwb->lwb_blk); 733 zil_free_lwb(zilog, lwb); 734 } 735 } else if (!keep_first) { 736 zil_destroy_sync(zilog, tx); 737 } 738 mutex_exit(&zilog->zl_lock); 739 740 dmu_tx_commit(tx); 741 } 742 743 void 744 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx) 745 { 746 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 747 (void) zil_parse(zilog, zil_free_log_block, 748 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg); 749 } 750 751 int 752 zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg) 753 { 754 dmu_tx_t *tx = txarg; 755 zilog_t *zilog; 756 uint64_t first_txg; 757 zil_header_t *zh; 758 objset_t *os; 759 int error; 760 761 error = dmu_objset_own_obj(dp, ds->ds_object, 762 DMU_OST_ANY, B_FALSE, FTAG, &os); 763 if (error != 0) { 764 /* 765 * EBUSY indicates that the objset is inconsistent, in which 766 * case it can not have a ZIL. 767 */ 768 if (error != EBUSY) { 769 cmn_err(CE_WARN, "can't open objset for %llu, error %u", 770 (unsigned long long)ds->ds_object, error); 771 } 772 return (0); 773 } 774 775 zilog = dmu_objset_zil(os); 776 zh = zil_header_in_syncing_context(zilog); 777 ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa)); 778 first_txg = spa_min_claim_txg(zilog->zl_spa); 779 780 /* 781 * If the spa_log_state is not set to be cleared, check whether 782 * the current uberblock is a checkpoint one and if the current 783 * header has been claimed before moving on. 784 * 785 * If the current uberblock is a checkpointed uberblock then 786 * one of the following scenarios took place: 787 * 788 * 1] We are currently rewinding to the checkpoint of the pool. 789 * 2] We crashed in the middle of a checkpoint rewind but we 790 * did manage to write the checkpointed uberblock to the 791 * vdev labels, so when we tried to import the pool again 792 * the checkpointed uberblock was selected from the import 793 * procedure. 794 * 795 * In both cases we want to zero out all the ZIL blocks, except 796 * the ones that have been claimed at the time of the checkpoint 797 * (their zh_claim_txg != 0). The reason is that these blocks 798 * may be corrupted since we may have reused their locations on 799 * disk after we took the checkpoint. 800 * 801 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier 802 * when we first figure out whether the current uberblock is 803 * checkpointed or not. Unfortunately, that would discard all 804 * the logs, including the ones that are claimed, and we would 805 * leak space. 
806 */ 807 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR || 808 (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 && 809 zh->zh_claim_txg == 0)) { 810 if (!BP_IS_HOLE(&zh->zh_log)) { 811 (void) zil_parse(zilog, zil_clear_log_block, 812 zil_noop_log_record, tx, first_txg); 813 } 814 BP_ZERO(&zh->zh_log); 815 dsl_dataset_dirty(dmu_objset_ds(os), tx); 816 dmu_objset_disown(os, FTAG); 817 return (0); 818 } 819 820 /* 821 * If we are not rewinding and opening the pool normally, then 822 * the min_claim_txg should be equal to the first txg of the pool. 823 */ 824 ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa)); 825 826 /* 827 * Claim all log blocks if we haven't already done so, and remember 828 * the highest claimed sequence number. This ensures that if we can 829 * read only part of the log now (e.g. due to a missing device), 830 * but we can read the entire log later, we will not try to replay 831 * or destroy beyond the last block we successfully claimed. 832 */ 833 ASSERT3U(zh->zh_claim_txg, <=, first_txg); 834 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) { 835 (void) zil_parse(zilog, zil_claim_log_block, 836 zil_claim_log_record, tx, first_txg); 837 zh->zh_claim_txg = first_txg; 838 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq; 839 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq; 840 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1) 841 zh->zh_flags |= ZIL_REPLAY_NEEDED; 842 zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID; 843 dsl_dataset_dirty(dmu_objset_ds(os), tx); 844 } 845 846 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1)); 847 dmu_objset_disown(os, FTAG); 848 return (0); 849 } 850 851 /* 852 * Check the log by walking the log chain. 853 * Checksum errors are ok as they indicate the end of the chain. 854 * Any other error (no device or read failure) returns an error. 855 */ 856 /* ARGSUSED */ 857 int 858 zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx) 859 { 860 zilog_t *zilog; 861 objset_t *os; 862 blkptr_t *bp; 863 int error; 864 865 ASSERT(tx == NULL); 866 867 error = dmu_objset_from_ds(ds, &os); 868 if (error != 0) { 869 cmn_err(CE_WARN, "can't open objset %llu, error %d", 870 (unsigned long long)ds->ds_object, error); 871 return (0); 872 } 873 874 zilog = dmu_objset_zil(os); 875 bp = (blkptr_t *)&zilog->zl_header->zh_log; 876 877 if (!BP_IS_HOLE(bp)) { 878 vdev_t *vd; 879 boolean_t valid = B_TRUE; 880 881 /* 882 * Check the first block and determine if it's on a log device 883 * which may have been removed or faulted prior to loading this 884 * pool. If so, there's no point in checking the rest of the 885 * log as its content should have already been synced to the 886 * pool. 887 */ 888 spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER); 889 vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0])); 890 if (vd->vdev_islog && vdev_is_dead(vd)) 891 valid = vdev_log_state_valid(vd); 892 spa_config_exit(os->os_spa, SCL_STATE, FTAG); 893 894 if (!valid) 895 return (0); 896 897 /* 898 * Check whether the current uberblock is checkpointed (e.g. 899 * we are rewinding) and whether the current header has been 900 * claimed or not. If it hasn't then skip verifying it. We 901 * do this because its ZIL blocks may be part of the pool's 902 * state before the rewind, which is no longer valid. 
903 		 */
904 		zil_header_t *zh = zil_header_in_syncing_context(zilog);
905 		if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
906 		    zh->zh_claim_txg == 0)
907 			return (0);
908 	}
909
910 	/*
911 	 * Because tx == NULL, zil_claim_log_block() will not actually claim
912 	 * any blocks, but just determine whether it is possible to do so.
913 	 * In addition to checking the log chain, zil_claim_log_block()
914 	 * will invoke zio_claim() with a done func of spa_claim_notify(),
915 	 * which will update spa_max_claim_txg. See spa_load() for details.
916 	 */
917 	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
918 	    zilog->zl_header->zh_claim_txg ? -1ULL :
919 	    spa_min_claim_txg(os->os_spa));
920
921 	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
922 }
923
924 /*
925  * When an itx is "skipped", this function is used to properly mark the
926  * waiter as "done", and signal any thread(s) waiting on it. An itx can
927  * be skipped (and not committed to an lwb) for a variety of reasons,
928  * one of them being that the itx was committed via spa_sync(), prior to
929  * it being committed to an lwb; this can happen if a thread calling
930  * zil_commit() is racing with spa_sync().
931  */
932 static void
933 zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
934 {
935 	mutex_enter(&zcw->zcw_lock);
936 	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
937 	zcw->zcw_done = B_TRUE;
938 	cv_broadcast(&zcw->zcw_cv);
939 	mutex_exit(&zcw->zcw_lock);
940 }
941
942 /*
943  * This function is used when the given waiter is to be linked into an
944  * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
945  * At this point, the waiter will no longer be referenced by the itx,
946  * and instead, will be referenced by the lwb.
947  */
948 static void
949 zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
950 {
951 	/*
952 	 * The lwb_waiters field of the lwb is protected by the zilog's
953 	 * zl_lock, thus it must be held when calling this function.
954 	 */
955 	ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
956
957 	mutex_enter(&zcw->zcw_lock);
958 	ASSERT(!list_link_active(&zcw->zcw_node));
959 	ASSERT3P(zcw->zcw_lwb, ==, NULL);
960 	ASSERT3P(lwb, !=, NULL);
961 	ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
962 	    lwb->lwb_state == LWB_STATE_ISSUED);
963
964 	list_insert_tail(&lwb->lwb_waiters, zcw);
965 	zcw->zcw_lwb = lwb;
966 	mutex_exit(&zcw->zcw_lock);
967 }
968
969 /*
970  * This function is used when zio_alloc_zil() fails to allocate a ZIL
971  * block, and the given waiter must be linked to the "nolwb waiters"
972  * list inside of zil_process_commit_list().
973  */
974 static void
975 zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
976 {
977 	mutex_enter(&zcw->zcw_lock);
978 	ASSERT(!list_link_active(&zcw->zcw_node));
979 	ASSERT3P(zcw->zcw_lwb, ==, NULL);
980 	list_insert_tail(nolwb, zcw);
981 	mutex_exit(&zcw->zcw_lock);
982 }
983
984 void
985 zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
986 {
987 	avl_tree_t *t = &lwb->lwb_vdev_tree;
988 	avl_index_t where;
989 	zil_vdev_node_t *zv, zvsearch;
990 	int ndvas = BP_GET_NDVAS(bp);
991 	int i;
992
993 	if (zfs_nocacheflush)
994 		return;
995
996 	mutex_enter(&lwb->lwb_vdev_lock);
997 	for (i = 0; i < ndvas; i++) {
998 		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
999 		if (avl_find(t, &zvsearch, &where) == NULL) {
1000 			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
1001 			zv->zv_vdev = zvsearch.zv_vdev;
1002 			avl_insert(t, zv, where);
1003 		}
1004 	}
1005 	mutex_exit(&lwb->lwb_vdev_lock);
1006 }
1007
1008 void
1009 zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
1010 {
1011 	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
1012 }
1013
1014 /*
1015  * This function is called after all VDEVs associated with a given lwb
1016  * write have completed their DKIOCFLUSHWRITECACHE command; or as soon
1017  * as the lwb write completes, if "zfs_nocacheflush" is set.
1018  *
1019  * The intention is for this function to be called as soon as the
1020  * contents of an lwb are considered "stable" on disk, and will survive
1021  * any sudden loss of power. At this point, any threads waiting for the
1022  * lwb to reach this state are signalled, and the "waiter" structures
1023  * are marked "done".
1024  */
1025 static void
1026 zil_lwb_flush_vdevs_done(zio_t *zio)
1027 {
1028 	lwb_t *lwb = zio->io_private;
1029 	zilog_t *zilog = lwb->lwb_zilog;
1030 	dmu_tx_t *tx = lwb->lwb_tx;
1031 	zil_commit_waiter_t *zcw;
1032
1033 	spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
1034
1035 	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1036
1037 	mutex_enter(&zilog->zl_lock);
1038
1039 	/*
1040 	 * Ensure the lwb buffer pointer is cleared before releasing the
1041 	 * txg. If we have had an allocation failure and the txg is
1042 	 * waiting to sync then we want zil_sync() to remove the lwb so
1043 	 * that it's not picked up as the next new one in
1044 	 * zil_process_commit_list(). zil_sync() will only remove the
1045 	 * lwb if lwb_buf is null.
1046 	 */
1047 	lwb->lwb_buf = NULL;
1048 	lwb->lwb_tx = NULL;
1049
1050 	ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
1051 	zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;
1052
1053 	lwb->lwb_root_zio = NULL;
1054 	lwb->lwb_state = LWB_STATE_DONE;
1055
1056 	if (zilog->zl_last_lwb_opened == lwb) {
1057 		/*
1058 		 * Remember the highest committed log sequence number
1059 		 * for ztest. We only update this value when all the log
1060 		 * writes succeeded, because ztest wants to ASSERT that
1061 		 * it got the whole log chain.
1062 		 */
1063 		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1064 	}
1065
1066 	while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
1067 		mutex_enter(&zcw->zcw_lock);
1068
1069 		ASSERT(list_link_active(&zcw->zcw_node));
1070 		list_remove(&lwb->lwb_waiters, zcw);
1071
1072 		ASSERT3P(zcw->zcw_lwb, ==, lwb);
1073 		zcw->zcw_lwb = NULL;
1074
1075 		zcw->zcw_zio_error = zio->io_error;
1076
1077 		ASSERT3B(zcw->zcw_done, ==, B_FALSE);
1078 		zcw->zcw_done = B_TRUE;
1079 		cv_broadcast(&zcw->zcw_cv);
1080
1081 		mutex_exit(&zcw->zcw_lock);
1082 	}
1083
1084 	mutex_exit(&zilog->zl_lock);
1085
1086 	/*
1087 	 * Now that we've written this log block, we have a stable pointer
1088 	 * to the next block in the chain, so it's OK to let the txg in
1089 	 * which we allocated the next block sync.
1090 	 */
1091 	dmu_tx_commit(tx);
1092 }
1093
1094 /*
1095  * This is called when an lwb write completes. This means this specific
1096  * lwb was written to disk, and all dependent lwbs have also been
1097  * written to disk.
1098  *
1099  * At this point, a DKIOCFLUSHWRITECACHE command hasn't been issued to
1100  * the VDEVs involved in writing out this specific lwb. The lwb will be
1101  * "done" once zil_lwb_flush_vdevs_done() is called, which occurs in the
1102  * zio completion callback for the lwb's root zio.
1103  */
1104 static void
1105 zil_lwb_write_done(zio_t *zio)
1106 {
1107 	lwb_t *lwb = zio->io_private;
1108 	spa_t *spa = zio->io_spa;
1109 	zilog_t *zilog = lwb->lwb_zilog;
1110 	avl_tree_t *t = &lwb->lwb_vdev_tree;
1111 	void *cookie = NULL;
1112 	zil_vdev_node_t *zv;
1113
1114 	ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
1115
1116 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1117 	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
1118 	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
1119 	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
1120 	ASSERT(!BP_IS_GANG(zio->io_bp));
1121 	ASSERT(!BP_IS_HOLE(zio->io_bp));
1122 	ASSERT(BP_GET_FILL(zio->io_bp) == 0);
1123
1124 	abd_put(zio->io_abd);
1125
1126 	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
1127
1128 	mutex_enter(&zilog->zl_lock);
1129 	lwb->lwb_write_zio = NULL;
1130 	mutex_exit(&zilog->zl_lock);
1131
1132 	if (avl_numnodes(t) == 0)
1133 		return;
1134
1135 	/*
1136 	 * If there was an IO error, we're not going to call zio_flush()
1137 	 * on these vdevs, so we simply empty the tree and free the
1138 	 * nodes. We avoid calling zio_flush() since there isn't any
1139 	 * good reason for doing so, after the lwb block failed to be
1140 	 * written out.
1141 	 */
1142 	if (zio->io_error != 0) {
1143 		while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
1144 			kmem_free(zv, sizeof (*zv));
1145 		return;
1146 	}
1147
1148 	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
1149 		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
1150 		if (vd != NULL)
1151 			zio_flush(lwb->lwb_root_zio, vd);
1152 		kmem_free(zv, sizeof (*zv));
1153 	}
1154 }
1155
1156 /*
1157  * This function's purpose is to "open" an lwb such that it is ready to
1158  * accept new itxs being committed to it. To do this, the lwb's zio
1159  * structures are created, and linked to the lwb. This function is
1160  * idempotent; if the passed in lwb has already been opened, this
1161  * function is essentially a no-op.
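 *
 * The caller must hold the zilog's zl_issuer_lock. The lwb's write
 * zio is created here, but it is not issued until zil_lwb_write_issue().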
1162 */ 1163 static void 1164 zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb) 1165 { 1166 zbookmark_phys_t zb; 1167 zio_priority_t prio; 1168 1169 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1170 ASSERT3P(lwb, !=, NULL); 1171 EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED); 1172 EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED); 1173 1174 SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1175 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, 1176 lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); 1177 1178 if (lwb->lwb_root_zio == NULL) { 1179 abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, 1180 BP_GET_LSIZE(&lwb->lwb_blk)); 1181 1182 if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk) 1183 prio = ZIO_PRIORITY_SYNC_WRITE; 1184 else 1185 prio = ZIO_PRIORITY_ASYNC_WRITE; 1186 1187 lwb->lwb_root_zio = zio_root(zilog->zl_spa, 1188 zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL); 1189 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1190 1191 lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, 1192 zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd, 1193 BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb, 1194 prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb); 1195 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1196 1197 lwb->lwb_state = LWB_STATE_OPENED; 1198 1199 mutex_enter(&zilog->zl_lock); 1200 1201 /* 1202 * The zilog's "zl_last_lwb_opened" field is used to 1203 * build the lwb/zio dependency chain, which is used to 1204 * preserve the ordering of lwb completions that is 1205 * required by the semantics of the ZIL. Each new lwb 1206 * zio becomes a parent of the "previous" lwb zio, such 1207 * that the new lwb's zio cannot complete until the 1208 * "previous" lwb's zio completes. 1209 * 1210 * This is required by the semantics of zil_commit(); 1211 * the commit waiters attached to the lwbs will be woken 1212 * in the lwb zio's completion callback, so this zio 1213 * dependency graph ensures the waiters are woken in the 1214 * correct order (the same order the lwbs were created). 1215 */ 1216 lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened; 1217 if (last_lwb_opened != NULL && 1218 last_lwb_opened->lwb_state != LWB_STATE_DONE) { 1219 ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED || 1220 last_lwb_opened->lwb_state == LWB_STATE_ISSUED); 1221 ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL); 1222 zio_add_child(lwb->lwb_root_zio, 1223 last_lwb_opened->lwb_root_zio); 1224 } 1225 zilog->zl_last_lwb_opened = lwb; 1226 1227 mutex_exit(&zilog->zl_lock); 1228 } 1229 1230 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1231 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1232 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 1233 } 1234 1235 /* 1236 * Define a limited set of intent log block sizes. 1237 * 1238 * These must be a multiple of 4KB. Note only the amount used (again 1239 * aligned to 4KB) actually gets written. However, we can't always just 1240 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. 1241 */ 1242 uint64_t zil_block_buckets[] = { 1243 4096, /* non TX_WRITE */ 1244 8192+4096, /* data base */ 1245 32*1024 + 4096, /* NFS writes */ 1246 UINT64_MAX 1247 }; 1248 1249 /* 1250 * Start a log block write and advance to the next log block. 1251 * Calls are serialized. 
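 *
 * Returns the newly allocated "next" lwb on success, or NULL if the
 * next log block could not be allocated; a NULL return forces the
 * caller to stall until the txg syncs (see zil_commit_writer_stall()).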
1252  */
1253 static lwb_t *
1254 zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
1255 {
1256 	lwb_t *nlwb = NULL;
1257 	zil_chain_t *zilc;
1258 	spa_t *spa = zilog->zl_spa;
1259 	blkptr_t *bp;
1260 	dmu_tx_t *tx;
1261 	uint64_t txg;
1262 	uint64_t zil_blksz, wsz;
1263 	int i, error;
1264 	boolean_t slog;
1265
1266 	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
1267 	ASSERT3P(lwb->lwb_root_zio, !=, NULL);
1268 	ASSERT3P(lwb->lwb_write_zio, !=, NULL);
1269 	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
1270
1271 	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
1272 		zilc = (zil_chain_t *)lwb->lwb_buf;
1273 		bp = &zilc->zc_next_blk;
1274 	} else {
1275 		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
1276 		bp = &zilc->zc_next_blk;
1277 	}
1278
1279 	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
1280
1281 	/*
1282 	 * Allocate the next block and save its address in this block
1283 	 * before writing it in order to establish the log chain.
1284 	 * Note that if the allocation of nlwb synced before we wrote
1285 	 * the block that points at it (lwb), we'd leak it if we crashed.
1286 	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
1287 	 * We dirty the dataset to ensure that zil_sync() will be called
1288 	 * to clean up in the event of allocation failure or I/O failure.
1289 	 */
1290
1291 	tx = dmu_tx_create(zilog->zl_os);
1292
1293 	/*
1294 	 * Since we are not going to create any new dirty data, and we
1295 	 * can even help with clearing the existing dirty data, we
1296 	 * should not be subject to the dirty data based delays. We
1297 	 * use TXG_NOTHROTTLE to bypass the delay mechanism.
1298 	 */
1299 	VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
1300
1301 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1302 	txg = dmu_tx_get_txg(tx);
1303
1304 	lwb->lwb_tx = tx;
1305
1306 	/*
1307 	 * Log blocks are pre-allocated. Here we select the size of the next
1308 	 * block, based on size used in the last block.
1309 	 * - first find the smallest bucket that will fit the block from a
1310 	 *   limited set of block sizes. This is because it's faster to write
1311 	 *   blocks allocated from the same metaslab as they are adjacent or
1312 	 *   close.
1313 	 * - next find the maximum from the new suggested size and an array of
1314 	 *   previous sizes. This lessens a picket fence effect of wrongly
1315 	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
1316 	 *   requests.
1317 	 *
1318 	 * Note we only write what is used, but we can't just allocate
1319 	 * the maximum block size because we can exhaust the available
1320 	 * pool log space.
1321 	 */
1322 	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
1323 	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
1324 		continue;
1325 	zil_blksz = zil_block_buckets[i];
1326 	if (zil_blksz == UINT64_MAX)
1327 		zil_blksz = SPA_OLD_MAXBLOCKSIZE;
1328 	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
1329 	for (i = 0; i < ZIL_PREV_BLKS; i++)
1330 		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
1331 	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
1332
1333 	BP_ZERO(bp);
1334
1335 	/* pass the old blkptr in order to spread log blocks across devs */
1336 	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, &slog);
1337 	if (error == 0) {
1338 		ASSERT3U(bp->blk_birth, ==, txg);
1339 		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
1340 		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
1341
1342 		/*
1343 		 * Allocate a new log write block (lwb).
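		 * Its block pointer was initialized above to chain from
		 * the current lwb: it inherits lwb_blk's checksum lineage
		 * with the ZIL_ZC_SEQ word incremented, matching what
		 * zil_read_log_block() expects for the next block.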
1344 */ 1345 nlwb = zil_alloc_lwb(zilog, bp, slog, txg); 1346 } 1347 1348 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1349 /* For Slim ZIL only write what is used. */ 1350 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t); 1351 ASSERT3U(wsz, <=, lwb->lwb_sz); 1352 zio_shrink(lwb->lwb_write_zio, wsz); 1353 1354 } else { 1355 wsz = lwb->lwb_sz; 1356 } 1357 1358 zilc->zc_pad = 0; 1359 zilc->zc_nused = lwb->lwb_nused; 1360 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; 1361 1362 /* 1363 * clear unused data for security 1364 */ 1365 bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused); 1366 1367 spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER); 1368 1369 zil_lwb_add_block(lwb, &lwb->lwb_blk); 1370 lwb->lwb_issued_timestamp = gethrtime(); 1371 lwb->lwb_state = LWB_STATE_ISSUED; 1372 1373 zio_nowait(lwb->lwb_root_zio); 1374 zio_nowait(lwb->lwb_write_zio); 1375 1376 /* 1377 * If there was an allocation failure then nlwb will be null which 1378 * forces a txg_wait_synced(). 1379 */ 1380 return (nlwb); 1381 } 1382 1383 static lwb_t * 1384 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) 1385 { 1386 lr_t *lrcb, *lrc; 1387 lr_write_t *lrwb, *lrw; 1388 char *lr_buf; 1389 uint64_t dlen, dnow, lwb_sp, reclen, txg; 1390 1391 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1392 ASSERT3P(lwb, !=, NULL); 1393 ASSERT3P(lwb->lwb_buf, !=, NULL); 1394 1395 zil_lwb_write_open(zilog, lwb); 1396 1397 lrc = &itx->itx_lr; 1398 lrw = (lr_write_t *)lrc; 1399 1400 /* 1401 * A commit itx doesn't represent any on-disk state; instead 1402 * it's simply used as a place holder on the commit list, and 1403 * provides a mechanism for attaching a "commit waiter" onto the 1404 * correct lwb (such that the waiter can be signalled upon 1405 * completion of that lwb). Thus, we don't process this itx's 1406 * log record if it's a commit itx (these itx's don't have log 1407 * records), and instead link the itx's waiter onto the lwb's 1408 * list of waiters. 1409 * 1410 * For more details, see the comment above zil_commit(). 1411 */ 1412 if (lrc->lrc_txtype == TX_COMMIT) { 1413 mutex_enter(&zilog->zl_lock); 1414 zil_commit_waiter_link_lwb(itx->itx_private, lwb); 1415 itx->itx_private = NULL; 1416 mutex_exit(&zilog->zl_lock); 1417 return (lwb); 1418 } 1419 1420 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { 1421 dlen = P2ROUNDUP_TYPED( 1422 lrw->lr_length, sizeof (uint64_t), uint64_t); 1423 } else { 1424 dlen = 0; 1425 } 1426 reclen = lrc->lrc_reclen; 1427 zilog->zl_cur_used += (reclen + dlen); 1428 txg = lrc->lrc_txg; 1429 1430 ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen)); 1431 1432 cont: 1433 /* 1434 * If this record won't fit in the current log block, start a new one. 1435 * For WR_NEED_COPY optimize layout for minimal number of chunks. 1436 */ 1437 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 1438 if (reclen > lwb_sp || (reclen + dlen > lwb_sp && 1439 lwb_sp < ZIL_MAX_WASTE_SPACE && (dlen % ZIL_MAX_LOG_DATA == 0 || 1440 lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) { 1441 lwb = zil_lwb_write_issue(zilog, lwb); 1442 if (lwb == NULL) 1443 return (NULL); 1444 zil_lwb_write_open(zilog, lwb); 1445 ASSERT(LWB_EMPTY(lwb)); 1446 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 1447 ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); 1448 } 1449 1450 dnow = MIN(dlen, lwb_sp - reclen); 1451 lr_buf = lwb->lwb_buf + lwb->lwb_nused; 1452 bcopy(lrc, lr_buf, reclen); 1453 lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. 
*/ 1454 lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */ 1455 1456 /* 1457 * If it's a write, fetch the data or get its blkptr as appropriate. 1458 */ 1459 if (lrc->lrc_txtype == TX_WRITE) { 1460 if (txg > spa_freeze_txg(zilog->zl_spa)) 1461 txg_wait_synced(zilog->zl_dmu_pool, txg); 1462 if (itx->itx_wr_state != WR_COPIED) { 1463 char *dbuf; 1464 int error; 1465 1466 if (itx->itx_wr_state == WR_NEED_COPY) { 1467 dbuf = lr_buf + reclen; 1468 lrcb->lrc_reclen += dnow; 1469 if (lrwb->lr_length > dnow) 1470 lrwb->lr_length = dnow; 1471 lrw->lr_offset += dnow; 1472 lrw->lr_length -= dnow; 1473 } else { 1474 ASSERT(itx->itx_wr_state == WR_INDIRECT); 1475 dbuf = NULL; 1476 } 1477 1478 /* 1479 * We pass in the "lwb_write_zio" rather than 1480 * "lwb_root_zio" so that the "lwb_write_zio" 1481 * becomes the parent of any zio's created by 1482 * the "zl_get_data" callback. The vdevs are 1483 * flushed after the "lwb_write_zio" completes, 1484 * so we want to make sure that completion 1485 * callback waits for these additional zio's, 1486 * such that the vdevs used by those zio's will 1487 * be included in the lwb's vdev tree, and those 1488 * vdevs will be properly flushed. If we passed 1489 * in "lwb_root_zio" here, then these additional 1490 * vdevs may not be flushed; e.g. if these zio's 1491 * completed after "lwb_write_zio" completed. 1492 */ 1493 error = zilog->zl_get_data(itx->itx_private, 1494 lrwb, dbuf, lwb, lwb->lwb_write_zio); 1495 1496 if (error == EIO) { 1497 txg_wait_synced(zilog->zl_dmu_pool, txg); 1498 return (lwb); 1499 } 1500 if (error != 0) { 1501 ASSERT(error == ENOENT || error == EEXIST || 1502 error == EALREADY); 1503 return (lwb); 1504 } 1505 } 1506 } 1507 1508 /* 1509 * We're actually making an entry, so update lrc_seq to be the 1510 * log record sequence number. Note that this is generally not 1511 * equal to the itx sequence number because not all transactions 1512 * are synchronous, and sometimes spa_sync() gets there first. 1513 */ 1514 lrcb->lrc_seq = ++zilog->zl_lr_seq; 1515 lwb->lwb_nused += reclen + dnow; 1516 1517 zil_lwb_add_txg(lwb, txg); 1518 1519 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); 1520 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 1521 1522 dlen -= dnow; 1523 if (dlen > 0) { 1524 zilog->zl_cur_used += reclen; 1525 goto cont; 1526 } 1527 1528 return (lwb); 1529 } 1530 1531 itx_t * 1532 zil_itx_create(uint64_t txtype, size_t lrsize) 1533 { 1534 itx_t *itx; 1535 1536 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t); 1537 1538 itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP); 1539 itx->itx_lr.lrc_txtype = txtype; 1540 itx->itx_lr.lrc_reclen = lrsize; 1541 itx->itx_lr.lrc_seq = 0; /* defensive */ 1542 itx->itx_sync = B_TRUE; /* default is synchronous */ 1543 1544 return (itx); 1545 } 1546 1547 void 1548 zil_itx_destroy(itx_t *itx) 1549 { 1550 kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen); 1551 } 1552 1553 /* 1554 * Free up the sync and async itxs. The itxs_t has already been detached 1555 * so no locks are needed. 1556 */ 1557 static void 1558 zil_itxg_clean(itxs_t *itxs) 1559 { 1560 itx_t *itx; 1561 list_t *list; 1562 avl_tree_t *t; 1563 void *cookie; 1564 itx_async_node_t *ian; 1565 1566 list = &itxs->i_sync_list; 1567 while ((itx = list_head(list)) != NULL) { 1568 /* 1569 * In the general case, commit itxs will not be found 1570 * here, as they'll be committed to an lwb via 1571 * zil_lwb_commit(), and free'd in that function. 
Having 1572 * said that, it is still possible for commit itxs to be 1573 * found here, due to the following race: 1574 * 1575 * - a thread calls zil_commit() which assigns the 1576 * commit itx to a per-txg i_sync_list 1577 * - zil_itxg_clean() is called (e.g. via spa_sync()) 1578 * while the waiter is still on the i_sync_list 1579 * 1580 * There's nothing to prevent syncing the txg while the 1581 * waiter is on the i_sync_list. This normally doesn't 1582 * happen because spa_sync() is slower than zil_commit(), 1583 * but if zil_commit() calls txg_wait_synced() (e.g. 1584 * because zil_create() or zil_commit_writer_stall() is 1585 * called) we will hit this case. 1586 */ 1587 if (itx->itx_lr.lrc_txtype == TX_COMMIT) 1588 zil_commit_waiter_skip(itx->itx_private); 1589 1590 list_remove(list, itx); 1591 zil_itx_destroy(itx); 1592 } 1593 1594 cookie = NULL; 1595 t = &itxs->i_async_tree; 1596 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1597 list = &ian->ia_list; 1598 while ((itx = list_head(list)) != NULL) { 1599 list_remove(list, itx); 1600 /* commit itxs should never be on the async lists. */ 1601 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 1602 zil_itx_destroy(itx); 1603 } 1604 list_destroy(list); 1605 kmem_free(ian, sizeof (itx_async_node_t)); 1606 } 1607 avl_destroy(t); 1608 1609 kmem_free(itxs, sizeof (itxs_t)); 1610 } 1611 1612 static int 1613 zil_aitx_compare(const void *x1, const void *x2) 1614 { 1615 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 1616 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 1617 1618 if (o1 < o2) 1619 return (-1); 1620 if (o1 > o2) 1621 return (1); 1622 1623 return (0); 1624 } 1625 1626 /* 1627 * Remove all async itx with the given oid. 1628 */ 1629 static void 1630 zil_remove_async(zilog_t *zilog, uint64_t oid) 1631 { 1632 uint64_t otxg, txg; 1633 itx_async_node_t *ian; 1634 avl_tree_t *t; 1635 avl_index_t where; 1636 list_t clean_list; 1637 itx_t *itx; 1638 1639 ASSERT(oid != 0); 1640 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 1641 1642 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1643 otxg = ZILTEST_TXG; 1644 else 1645 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1646 1647 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1648 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1649 1650 mutex_enter(&itxg->itxg_lock); 1651 if (itxg->itxg_txg != txg) { 1652 mutex_exit(&itxg->itxg_lock); 1653 continue; 1654 } 1655 1656 /* 1657 * Locate the object node and append its list. 1658 */ 1659 t = &itxg->itxg_itxs->i_async_tree; 1660 ian = avl_find(t, &oid, &where); 1661 if (ian != NULL) 1662 list_move_tail(&clean_list, &ian->ia_list); 1663 mutex_exit(&itxg->itxg_lock); 1664 } 1665 while ((itx = list_head(&clean_list)) != NULL) { 1666 list_remove(&clean_list, itx); 1667 /* commit itxs should never be on the async lists. */ 1668 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 1669 zil_itx_destroy(itx); 1670 } 1671 list_destroy(&clean_list); 1672 } 1673 1674 void 1675 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 1676 { 1677 uint64_t txg; 1678 itxg_t *itxg; 1679 itxs_t *itxs, *clean = NULL; 1680 1681 /* 1682 * Object ids can be re-instantiated in the next txg so 1683 * remove any async transactions to avoid future leaks. 1684 * This can happen if a fsync occurs on the re-instantiated 1685 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets 1686 * the new file data and flushes a write record for the old object. 
1687 	 */
1688 	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
1689 		zil_remove_async(zilog, itx->itx_oid);
1690
1691 	/*
1692 	 * Ensure the data of a renamed file is committed before the rename.
1693 	 */
1694 	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
1695 		zil_async_to_sync(zilog, itx->itx_oid);
1696
1697 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
1698 		txg = ZILTEST_TXG;
1699 	else
1700 		txg = dmu_tx_get_txg(tx);
1701
1702 	itxg = &zilog->zl_itxg[txg & TXG_MASK];
1703 	mutex_enter(&itxg->itxg_lock);
1704 	itxs = itxg->itxg_itxs;
1705 	if (itxg->itxg_txg != txg) {
1706 		if (itxs != NULL) {
1707 			/*
1708 			 * The zil_clean callback hasn't got around to cleaning
1709 			 * this itxg. Save the itxs for release below.
1710 			 * This should be rare.
1711 			 */
1712 			zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
1713 			    "txg %llu", itxg->itxg_txg);
1714 			clean = itxg->itxg_itxs;
1715 		}
1716 		itxg->itxg_txg = txg;
1717 		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);
1718
1719 		list_create(&itxs->i_sync_list, sizeof (itx_t),
1720 		    offsetof(itx_t, itx_node));
1721 		avl_create(&itxs->i_async_tree, zil_aitx_compare,
1722 		    sizeof (itx_async_node_t),
1723 		    offsetof(itx_async_node_t, ia_node));
1724 	}
1725 	if (itx->itx_sync) {
1726 		list_insert_tail(&itxs->i_sync_list, itx);
1727 	} else {
1728 		avl_tree_t *t = &itxs->i_async_tree;
1729 		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
1730 		itx_async_node_t *ian;
1731 		avl_index_t where;
1732
1733 		ian = avl_find(t, &foid, &where);
1734 		if (ian == NULL) {
1735 			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
1736 			list_create(&ian->ia_list, sizeof (itx_t),
1737 			    offsetof(itx_t, itx_node));
1738 			ian->ia_foid = foid;
1739 			avl_insert(t, ian, where);
1740 		}
1741 		list_insert_tail(&ian->ia_list, itx);
1742 	}
1743
1744 	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
1745
1746 	/*
1747 	 * We don't want to dirty the ZIL using ZILTEST_TXG, because
1748 	 * zil_clean() will never be called using ZILTEST_TXG. Thus, we
1749 	 * need to be careful to always dirty the ZIL using the "real"
1750 	 * TXG (not itxg_txg) even when the SPA is frozen.
1751 	 */
1752 	zilog_dirty(zilog, dmu_tx_get_txg(tx));
1753 	mutex_exit(&itxg->itxg_lock);
1754
1755 	/* Release the old itxs now we've dropped the lock */
1756 	if (clean != NULL)
1757 		zil_itxg_clean(clean);
1758 }
1759
1760 /*
1761  * If there are any in-memory intent log transactions which have now been
1762  * synced then start up a taskq to free them. We should only do this after we
1763  * have written out the uberblocks (i.e. the txg has been committed) so that
1764  * we don't inadvertently clean out in-memory log records that would be
1765  * required by zil_commit().
1766  */
1767 void
1768 zil_clean(zilog_t *zilog, uint64_t synced_txg)
1769 {
1770 	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1771 	itxs_t *clean_me;
1772
1773 	ASSERT3U(synced_txg, <, ZILTEST_TXG);
1774
1775 	mutex_enter(&itxg->itxg_lock);
1776 	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
1777 		mutex_exit(&itxg->itxg_lock);
1778 		return;
1779 	}
1780 	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
1781 	ASSERT3U(itxg->itxg_txg, !=, 0);
1782 	clean_me = itxg->itxg_itxs;
1783 	itxg->itxg_itxs = NULL;
1784 	itxg->itxg_txg = 0;
1785 	mutex_exit(&itxg->itxg_lock);
1786 	/*
1787 	 * Preferably start a task queue to free up the old itxs but
1788 	 * if taskq_dispatch can't allocate resources to do that then
1789 	 * free them in-line. This should be rare. Note, using TQ_SLEEP
1790 	 * created a bad performance problem.
1791 */ 1792 ASSERT3P(zilog->zl_dmu_pool, !=, NULL); 1793 ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); 1794 if (taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, 1795 (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL) 1796 zil_itxg_clean(clean_me); 1797 } 1798 1799 /* 1800 * This function will traverse the queue of itxs that need to be 1801 * committed, and move them onto the ZIL's zl_itx_commit_list. 1802 */ 1803 static void 1804 zil_get_commit_list(zilog_t *zilog) 1805 { 1806 uint64_t otxg, txg; 1807 list_t *commit_list = &zilog->zl_itx_commit_list; 1808 1809 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1810 1811 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1812 otxg = ZILTEST_TXG; 1813 else 1814 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1815 1816 /* 1817 * This is inherently racy, since there is nothing to prevent 1818 * the last synced txg from changing. That's okay since we'll 1819 * only commit things in the future. 1820 */ 1821 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1822 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1823 1824 mutex_enter(&itxg->itxg_lock); 1825 if (itxg->itxg_txg != txg) { 1826 mutex_exit(&itxg->itxg_lock); 1827 continue; 1828 } 1829 1830 /* 1831 * If we're adding itx records to the zl_itx_commit_list, 1832 * then the zil better be dirty in this "txg". We can assert 1833 * that here since we're holding the itxg_lock which will 1834 * prevent spa_sync from cleaning it. Once we add the itxs 1835 * to the zl_itx_commit_list we must commit it to disk even 1836 * if it's unnecessary (i.e. the txg was synced). 1837 */ 1838 ASSERT(zilog_is_dirty_in_txg(zilog, txg) || 1839 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 1840 list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list); 1841 1842 mutex_exit(&itxg->itxg_lock); 1843 } 1844 } 1845 1846 /* 1847 * Move the async itxs for a specified object to commit into sync lists. 1848 */ 1849 static void 1850 zil_async_to_sync(zilog_t *zilog, uint64_t foid) 1851 { 1852 uint64_t otxg, txg; 1853 itx_async_node_t *ian; 1854 avl_tree_t *t; 1855 avl_index_t where; 1856 1857 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1858 otxg = ZILTEST_TXG; 1859 else 1860 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1861 1862 /* 1863 * This is inherently racy, since there is nothing to prevent 1864 * the last synced txg from changing. 1865 */ 1866 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1867 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1868 1869 mutex_enter(&itxg->itxg_lock); 1870 if (itxg->itxg_txg != txg) { 1871 mutex_exit(&itxg->itxg_lock); 1872 continue; 1873 } 1874 1875 /* 1876 * If a foid is specified then find that node and append its 1877 * list. Otherwise walk the tree appending all the lists 1878 * to the sync list. We add to the end rather than the 1879 * beginning to ensure the create has happened. 
1880 */ 1881 t = &itxg->itxg_itxs->i_async_tree; 1882 if (foid != 0) { 1883 ian = avl_find(t, &foid, &where); 1884 if (ian != NULL) { 1885 list_move_tail(&itxg->itxg_itxs->i_sync_list, 1886 &ian->ia_list); 1887 } 1888 } else { 1889 void *cookie = NULL; 1890 1891 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1892 list_move_tail(&itxg->itxg_itxs->i_sync_list, 1893 &ian->ia_list); 1894 list_destroy(&ian->ia_list); 1895 kmem_free(ian, sizeof (itx_async_node_t)); 1896 } 1897 } 1898 mutex_exit(&itxg->itxg_lock); 1899 } 1900 } 1901 1902 /* 1903 * This function will prune commit itxs that are at the head of the 1904 * commit list (it won't prune past the first non-commit itx), and 1905 * either: a) attach them to the last lwb that's still pending 1906 * completion, or b) skip them altogether. 1907 * 1908 * This is used as a performance optimization to prevent commit itxs 1909 * from generating new lwbs when it's unnecessary to do so. 1910 */ 1911 static void 1912 zil_prune_commit_list(zilog_t *zilog) 1913 { 1914 itx_t *itx; 1915 1916 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1917 1918 while (itx = list_head(&zilog->zl_itx_commit_list)) { 1919 lr_t *lrc = &itx->itx_lr; 1920 if (lrc->lrc_txtype != TX_COMMIT) 1921 break; 1922 1923 mutex_enter(&zilog->zl_lock); 1924 1925 lwb_t *last_lwb = zilog->zl_last_lwb_opened; 1926 if (last_lwb == NULL || last_lwb->lwb_state == LWB_STATE_DONE) { 1927 /* 1928 * All of the itxs this waiter was waiting on 1929 * must have already completed (or there were 1930 * never any itx's for it to wait on), so it's 1931 * safe to skip this waiter and mark it done. 1932 */ 1933 zil_commit_waiter_skip(itx->itx_private); 1934 } else { 1935 zil_commit_waiter_link_lwb(itx->itx_private, last_lwb); 1936 itx->itx_private = NULL; 1937 } 1938 1939 mutex_exit(&zilog->zl_lock); 1940 1941 list_remove(&zilog->zl_itx_commit_list, itx); 1942 zil_itx_destroy(itx); 1943 } 1944 1945 IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); 1946 } 1947 1948 static void 1949 zil_commit_writer_stall(zilog_t *zilog) 1950 { 1951 /* 1952 * When zio_alloc_zil() fails to allocate the next lwb block on 1953 * disk, we must call txg_wait_synced() to ensure all of the 1954 * lwbs in the zilog's zl_lwb_list are synced and then freed (in 1955 * zil_sync()), such that any subsequent ZIL writer (i.e. a call 1956 * to zil_process_commit_list()) will have to call zil_create(), 1957 * and start a new ZIL chain. 1958 * 1959 * Since zil_alloc_zil() failed, the lwb that was previously 1960 * issued does not have a pointer to the "next" lwb on disk. 1961 * Thus, if another ZIL writer thread was to allocate the "next" 1962 * on-disk lwb, that block could be leaked in the event of a 1963 * crash (because the previous lwb on-disk would not point to 1964 * it). 1965 * 1966 * We must hold the zilog's zl_issuer_lock while we do this, to 1967 * ensure no new threads enter zil_process_commit_list() until 1968 * all lwb's in the zl_lwb_list have been synced and freed 1969 * (which is achieved via the txg_wait_synced() call). 1970 */ 1971 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1972 txg_wait_synced(zilog->zl_dmu_pool, 0); 1973 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); 1974 } 1975 1976 /* 1977 * This function will traverse the commit list, creating new lwbs as 1978 * needed, and committing the itxs from the commit list to these newly 1979 * created lwbs. Additionally, as a new lwb is created, the previous 1980 * lwb will be issued to the zio layer to be written to disk. 
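 *
 * As a rough sketch (glossing over the TX_COMMIT and already-synced
 * cases handled below), the body of this function amounts to:
 *
 *	lwb = list_tail(&zilog->zl_lwb_list);	(or zil_create())
 *	for each itx on zl_itx_commit_list:
 *		lwb = zil_lwb_commit(zilog, itx, lwb);
 *	if (lwb == NULL)
 *		zil_commit_writer_stall(zilog);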
1981 */ 1982 static void 1983 zil_process_commit_list(zilog_t *zilog) 1984 { 1985 spa_t *spa = zilog->zl_spa; 1986 list_t nolwb_waiters; 1987 lwb_t *lwb; 1988 itx_t *itx; 1989 1990 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1991 1992 /* 1993 * Return if there's nothing to commit before we dirty the fs by 1994 * calling zil_create(). 1995 */ 1996 if (list_head(&zilog->zl_itx_commit_list) == NULL) 1997 return; 1998 1999 list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t), 2000 offsetof(zil_commit_waiter_t, zcw_node)); 2001 2002 lwb = list_tail(&zilog->zl_lwb_list); 2003 if (lwb == NULL) { 2004 lwb = zil_create(zilog); 2005 } else { 2006 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2007 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE); 2008 } 2009 2010 while (itx = list_head(&zilog->zl_itx_commit_list)) { 2011 lr_t *lrc = &itx->itx_lr; 2012 uint64_t txg = lrc->lrc_txg; 2013 2014 ASSERT3U(txg, !=, 0); 2015 2016 if (lrc->lrc_txtype == TX_COMMIT) { 2017 DTRACE_PROBE2(zil__process__commit__itx, 2018 zilog_t *, zilog, itx_t *, itx); 2019 } else { 2020 DTRACE_PROBE2(zil__process__normal__itx, 2021 zilog_t *, zilog, itx_t *, itx); 2022 } 2023 2024 boolean_t synced = txg <= spa_last_synced_txg(spa); 2025 boolean_t frozen = txg > spa_freeze_txg(spa); 2026 2027 /* 2028 * If the txg of this itx has already been synced out, then 2029 * we don't need to commit this itx to an lwb. This is 2030 * because the data of this itx will have already been 2031 * written to the main pool. This is inherently racy, and 2032 * it's still ok to commit an itx whose txg has already 2033 * been synced; this will result in a write that's 2034 * unnecessary, but will do no harm. 2035 * 2036 * With that said, we always want to commit TX_COMMIT itxs 2037 * to an lwb, regardless of whether or not that itx's txg 2038 * has been synced out. We do this to ensure any OPENED lwb 2039 * will always have at least one zil_commit_waiter_t linked 2040 * to the lwb. 2041 * 2042 * As a counter-example, if we skipped TX_COMMIT itx's 2043 * whose txg had already been synced, the following 2044 * situation could occur if we happened to be racing with 2045 * spa_sync: 2046 * 2047 * 1. we commit a non-TX_COMMIT itx to an lwb, where the 2048 * itx's txg is 10 and the last synced txg is 9. 2049 * 2. spa_sync finishes syncing out txg 10. 2050 * 3. we move to the next itx in the list, it's a TX_COMMIT 2051 * whose txg is 10, so we skip it rather than committing 2052 * it to the lwb used in (1). 2053 * 2054 * If the itx that is skipped in (3) is the last TX_COMMIT 2055 * itx in the commit list, than it's possible for the lwb 2056 * used in (1) to remain in the OPENED state indefinitely. 2057 * 2058 * To prevent the above scenario from occuring, ensuring 2059 * that once an lwb is OPENED it will transition to ISSUED 2060 * and eventually DONE, we always commit TX_COMMIT itx's to 2061 * an lwb here, even if that itx's txg has already been 2062 * synced. 2063 * 2064 * Finally, if the pool is frozen, we _always_ commit the 2065 * itx. The point of freezing the pool is to prevent data 2066 * from being written to the main pool via spa_sync, and 2067 * instead rely solely on the ZIL to persistently store the 2068 * data; i.e. when the pool is frozen, the last synced txg 2069 * value can't be trusted. 
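 *
 * In short, the test below reduces to:
 *
 *	commit this itx to an lwb  <=>  frozen || !synced || TX_COMMIT
 *
 * i.e. the only itxs we may skip are non-commit itxs whose txg has
 * already been synced out while the pool is not frozen.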
2070 */ 2071 if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) { 2072 if (lwb != NULL) { 2073 lwb = zil_lwb_commit(zilog, itx, lwb); 2074 } else if (lrc->lrc_txtype == TX_COMMIT) { 2075 ASSERT3P(lwb, ==, NULL); 2076 zil_commit_waiter_link_nolwb( 2077 itx->itx_private, &nolwb_waiters); 2078 } 2079 } 2080 2081 list_remove(&zilog->zl_itx_commit_list, itx); 2082 zil_itx_destroy(itx); 2083 } 2084 2085 if (lwb == NULL) { 2086 /* 2087 * This indicates zio_alloc_zil() failed to allocate the 2088 * "next" lwb on-disk. When this happens, we must stall 2089 * the ZIL write pipeline; see the comment within 2090 * zil_commit_writer_stall() for more details. 2091 */ 2092 zil_commit_writer_stall(zilog); 2093 2094 /* 2095 * Additionally, we have to signal and mark the "nolwb" 2096 * waiters as "done" here, since without an lwb, we 2097 * can't do this via zil_lwb_flush_vdevs_done() like 2098 * normal. 2099 */ 2100 zil_commit_waiter_t *zcw; 2101 while (zcw = list_head(&nolwb_waiters)) { 2102 zil_commit_waiter_skip(zcw); 2103 list_remove(&nolwb_waiters, zcw); 2104 } 2105 } else { 2106 ASSERT(list_is_empty(&nolwb_waiters)); 2107 ASSERT3P(lwb, !=, NULL); 2108 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2109 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE); 2110 2111 /* 2112 * At this point, the ZIL block pointed at by the "lwb" 2113 * variable is in one of the following states: "closed" 2114 * or "open". 2115 * 2116 * If it's "closed", then no itxs have been committed to 2117 * it, so there's no point in issuing its zio (i.e. 2118 * it's "empty"). 2119 * 2120 * If it's in the "open" state, then it contains one or more 2121 * itxs that eventually need to be committed to stable 2122 * storage. In this case we intentionally do not issue 2123 * the lwb's zio to disk yet, and instead rely on one of 2124 * the following two mechanisms for issuing the zio: 2125 * 2126 * 1. Ideally, there will be more ZIL activity occurring 2127 * on the system, such that this function will be 2128 * immediately called again (not necessarily by the same 2129 * thread) and this lwb's zio will be issued via 2130 * zil_lwb_commit(). This way, the lwb is guaranteed to 2131 * be "full" when it is issued to disk, and we'll make 2132 * use of the lwb's size the best we can. 2133 * 2134 * 2. If there isn't sufficient ZIL activity occurring on 2135 * the system, such that this lwb's zio isn't issued via 2136 * zil_lwb_commit(), zil_commit_waiter() will issue the 2137 * lwb's zio. If this occurs, the lwb is not guaranteed 2138 * to be "full" by the time its zio is issued, which means 2139 * the size of the lwb was "too large" given the amount 2140 * of ZIL activity occurring on the system at that time. 2141 * 2142 * We do this for a couple of reasons: 2143 * 2144 * 1. To try and reduce the number of IOPs needed to 2145 * write the same number of itxs. If an lwb has space 2146 * available in its buffer for more itxs, and more itxs 2147 * will be committed relatively soon (relative to the 2148 * latency of performing a write), then it's beneficial 2149 * to wait for these "next" itxs. This way, more itxs 2150 * can be committed to stable storage with fewer writes. 2151 * 2152 * 2. To try and use the largest lwb block size that the 2153 * incoming rate of itxs can support. Again, this is to 2154 * try and pack as many itxs into as few lwbs as 2155 * possible, without significantly impacting the latency 2156 * of each individual itx.
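 *
 * The time we're willing to wait for those "next" itxs is bounded
 * by zil_commit_waiter(); as a sketch, it sleeps for roughly
 *
 *	(zl_last_lwb_latency * zfs_commit_timeout_pct) / 100
 *
 * before issuing an lwb that is still in the OPENED state itself.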
2157 */ 2158 } 2159 } 2160 2161 /* 2162 * This function is responsible for ensuring the passed in commit waiter 2163 * (and associated commit itx) is committed to an lwb. If the waiter is 2164 * not already committed to an lwb, all itxs in the zilog's queue of 2165 * itxs will be processed. The assumption is the passed in waiter's 2166 * commit itx will be found in the queue just like the other non-commit 2167 * itxs, such that when the entire queue is processed, the waiter will 2168 * have been committed to an lwb. 2169 * 2170 * The lwb associated with the passed in waiter is not guaranteed to 2171 * have been issued by the time this function completes. If the lwb is 2172 * not issued, we rely on future calls to zil_commit_writer() to issue 2173 * the lwb, or the timeout mechanism found in zil_commit_waiter(). 2174 */ 2175 static void 2176 zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw) 2177 { 2178 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 2179 ASSERT(spa_writeable(zilog->zl_spa)); 2180 2181 mutex_enter(&zilog->zl_issuer_lock); 2182 2183 if (zcw->zcw_lwb != NULL || zcw->zcw_done) { 2184 /* 2185 * It's possible that, while we were waiting to acquire 2186 * the "zl_issuer_lock", another thread committed this 2187 * waiter to an lwb. If that occurs, we bail out early, 2188 * without processing any of the zilog's queue of itxs. 2189 * 2190 * On certain workloads and system configurations, the 2191 * "zl_issuer_lock" can become highly contended. In an 2192 * attempt to reduce this contention, we immediately drop 2193 * the lock if the waiter has already been processed. 2194 * 2195 * We've measured this optimization to reduce CPU spent 2196 * contending on this lock by up to 5%, using a system 2197 * with 32 CPUs, low latency storage (~50 usec writes), 2198 * and 1024 threads performing sync writes. 2199 */ 2200 goto out; 2201 } 2202 2203 zil_get_commit_list(zilog); 2204 zil_prune_commit_list(zilog); 2205 zil_process_commit_list(zilog); 2206 2207 out: 2208 mutex_exit(&zilog->zl_issuer_lock); 2209 } 2210 2211 static void 2212 zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw) 2213 { 2214 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 2215 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2216 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 2217 2218 lwb_t *lwb = zcw->zcw_lwb; 2219 ASSERT3P(lwb, !=, NULL); 2220 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED); 2221 2222 /* 2223 * If the lwb has already been issued by another thread, we can 2224 * immediately return since there's no work to be done (the 2225 * point of this function is to issue the lwb). Additionally, we 2226 * do this prior to acquiring the zl_issuer_lock, to avoid 2227 * acquiring it when it's not necessary to do so. 2228 */ 2229 if (lwb->lwb_state == LWB_STATE_ISSUED || 2230 lwb->lwb_state == LWB_STATE_DONE) 2231 return; 2232 2233 /* 2234 * In order to call zil_lwb_write_issue() we must hold the 2235 * zilog's "zl_issuer_lock". We can't simply acquire that lock, 2236 * since we're already holding the commit waiter's "zcw_lock", 2237 * and those two locks are acquired in the opposite order 2238 * elsewhere. 2239 */ 2240 mutex_exit(&zcw->zcw_lock); 2241 mutex_enter(&zilog->zl_issuer_lock); 2242 mutex_enter(&zcw->zcw_lock); 2243 2244 /* 2245 * Since we just dropped and re-acquired the commit waiter's 2246 * lock, we have to re-check to see if the waiter was marked 2247 * "done" during that process.
If the waiter was marked "done", 2248 * the "lwb" pointer is no longer valid (it can be free'd after 2249 * the waiter is marked "done"), so without this check we could 2250 * wind up with a use-after-free error below. 2251 */ 2252 if (zcw->zcw_done) 2253 goto out; 2254 2255 ASSERT3P(lwb, ==, zcw->zcw_lwb); 2256 2257 /* 2258 * We've already checked this above, but since we hadn't acquired 2259 * the zilog's zl_issuer_lock, we have to perform this check a 2260 * second time while holding the lock. 2261 * 2262 * We don't need to hold the zl_lock since the lwb cannot transition 2263 * from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb 2264 * _can_ transition from ISSUED to DONE, but it's OK to race with 2265 * that transition since we treat the lwb the same, whether it's in 2266 * the ISSUED or DONE states. 2267 * 2268 * The important thing is we treat the lwb differently depending on 2269 * if it's ISSUED or OPENED, and block any other threads that might 2270 * attempt to issue this lwb. For that reason we hold the 2271 * zl_issuer_lock when checking the lwb_state; we must not call 2272 * zil_lwb_write_issue() if the lwb had already been issued. 2273 * 2274 * See the comment above the lwb_state_t structure definition for 2275 * more details on the lwb states, and locking requirements. 2276 */ 2277 if (lwb->lwb_state == LWB_STATE_ISSUED || 2278 lwb->lwb_state == LWB_STATE_DONE) 2279 goto out; 2280 2281 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 2282 2283 /* 2284 * As described in the comments above zil_commit_waiter() and 2285 * zil_process_commit_list(), we need to issue this lwb's zio 2286 * since we've reached the commit waiter's timeout and it still 2287 * hasn't been issued. 2288 */ 2289 lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb); 2290 2291 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); 2292 2293 /* 2294 * Since the lwb's zio hadn't been issued by the time this thread 2295 * reached its timeout, we reset the zilog's "zl_cur_used" field 2296 * to influence the zil block size selection algorithm. 2297 * 2298 * By having to issue the lwb's zio here, it means the size of the 2299 * lwb was too large, given the incoming throughput of itxs. By 2300 * setting "zl_cur_used" to zero, we communicate this fact to the 2301 * block size selection algorithm, so it can take this information 2302 * into account, and potentially select a smaller size for the 2303 * next lwb block that is allocated. 2304 */ 2305 zilog->zl_cur_used = 0; 2306 2307 if (nlwb == NULL) { 2308 /* 2309 * When zil_lwb_write_issue() returns NULL, this 2310 * indicates zio_alloc_zil() failed to allocate the 2311 * "next" lwb on-disk. When this occurs, the ZIL write 2312 * pipeline must be stalled; see the comment within the 2313 * zil_commit_writer_stall() function for more details. 2314 * 2315 * We must drop the commit waiter's lock prior to 2316 * calling zil_commit_writer_stall() or else we can wind 2317 * up with the following deadlock: 2318 * 2319 * - This thread is waiting for the txg to sync while 2320 * holding the waiter's lock; txg_wait_synced() is 2321 * used within zil_commit_writer_stall(). 2322 * 2323 * - The txg can't sync because it is waiting for this 2324 * lwb's zio callback to call dmu_tx_commit().
2325 * 2326 * - The lwb's zio callback can't call dmu_tx_commit() 2327 * because it's blocked trying to acquire the waiter's 2328 * lock, which occurs prior to calling dmu_tx_commit() 2329 */ 2330 mutex_exit(&zcw->zcw_lock); 2331 zil_commit_writer_stall(zilog); 2332 mutex_enter(&zcw->zcw_lock); 2333 } 2334 2335 out: 2336 mutex_exit(&zilog->zl_issuer_lock); 2337 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2338 } 2339 2340 /* 2341 * This function is responsible for performing the following two tasks: 2342 * 2343 * 1. its primary responsibility is to block until the given "commit 2344 * waiter" is considered "done". 2345 * 2346 * 2. its secondary responsibility is to issue the zio for the lwb that 2347 * the given "commit waiter" is waiting on, if this function has 2348 * waited "long enough" and the lwb is still in the "open" state. 2349 * 2350 * Given a sufficient amount of itxs being generated and written using 2351 * the ZIL, the lwb's zio will be issued via the zil_lwb_commit() 2352 * function. If this does not occur, this secondary responsibility will 2353 * ensure the lwb is issued even if there is not other synchronous 2354 * activity on the system. 2355 * 2356 * For more details, see zil_process_commit_list(); more specifically, 2357 * the comment at the bottom of that function. 2358 */ 2359 static void 2360 zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw) 2361 { 2362 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 2363 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 2364 ASSERT(spa_writeable(zilog->zl_spa)); 2365 2366 mutex_enter(&zcw->zcw_lock); 2367 2368 /* 2369 * The timeout is scaled based on the lwb latency to avoid 2370 * significantly impacting the latency of each individual itx. 2371 * For more details, see the comment at the bottom of the 2372 * zil_process_commit_list() function. 2373 */ 2374 int pct = MAX(zfs_commit_timeout_pct, 1); 2375 hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100; 2376 hrtime_t wakeup = gethrtime() + sleep; 2377 boolean_t timedout = B_FALSE; 2378 2379 while (!zcw->zcw_done) { 2380 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2381 2382 lwb_t *lwb = zcw->zcw_lwb; 2383 2384 /* 2385 * Usually, the waiter will have a non-NULL lwb field here, 2386 * but it's possible for it to be NULL as a result of 2387 * zil_commit() racing with spa_sync(). 2388 * 2389 * When zil_clean() is called, it's possible for the itxg 2390 * list (which may be cleaned via a taskq) to contain 2391 * commit itxs. When this occurs, the commit waiters linked 2392 * off of these commit itxs will not be committed to an 2393 * lwb. Additionally, these commit waiters will not be 2394 * marked done until zil_commit_waiter_skip() is called via 2395 * zil_itxg_clean(). 2396 * 2397 * Thus, it's possible for this commit waiter (i.e. the 2398 * "zcw" variable) to be found in this "in between" state; 2399 * where it's "zcw_lwb" field is NULL, and it hasn't yet 2400 * been skipped, so it's "zcw_done" field is still B_FALSE. 2401 */ 2402 IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED); 2403 2404 if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) { 2405 ASSERT3B(timedout, ==, B_FALSE); 2406 2407 /* 2408 * If the lwb hasn't been issued yet, then we 2409 * need to wait with a timeout, in case this 2410 * function needs to issue the lwb after the 2411 * timeout is reached; responsibility (2) from 2412 * the comment above this function. 
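 *
 * (cv_timedwait_hires() returns -1 only when the absolute "wakeup"
 * time is reached without the CV being signalled; any other return
 * value means we woke up early, so we simply re-check zcw_done and
 * loop rather than treating it as a timeout.)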
2413 */ 2414 clock_t timeleft = cv_timedwait_hires(&zcw->zcw_cv, 2415 &zcw->zcw_lock, wakeup, USEC2NSEC(1), 2416 CALLOUT_FLAG_ABSOLUTE); 2417 2418 if (timeleft >= 0 || zcw->zcw_done) 2419 continue; 2420 2421 timedout = B_TRUE; 2422 zil_commit_waiter_timeout(zilog, zcw); 2423 2424 if (!zcw->zcw_done) { 2425 /* 2426 * If the commit waiter has already been 2427 * marked "done", it's possible for the 2428 * waiter's lwb structure to have already 2429 * been freed. Thus, we can only reliably 2430 * make these assertions if the waiter 2431 * isn't done. 2432 */ 2433 ASSERT3P(lwb, ==, zcw->zcw_lwb); 2434 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); 2435 } 2436 } else { 2437 /* 2438 * If the lwb isn't open, then it must have already 2439 * been issued. In that case, there's no need to 2440 * use a timeout when waiting for the lwb to 2441 * complete. 2442 * 2443 * Additionally, if the lwb is NULL, the waiter 2444 * will soon be signalled and marked done via 2445 * zil_clean() and zil_itxg_clean(), so no timeout 2446 * is required. 2447 */ 2448 2449 IMPLY(lwb != NULL, 2450 lwb->lwb_state == LWB_STATE_ISSUED || 2451 lwb->lwb_state == LWB_STATE_DONE); 2452 cv_wait(&zcw->zcw_cv, &zcw->zcw_lock); 2453 } 2454 } 2455 2456 mutex_exit(&zcw->zcw_lock); 2457 } 2458 2459 static zil_commit_waiter_t * 2460 zil_alloc_commit_waiter() 2461 { 2462 zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP); 2463 2464 cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL); 2465 mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL); 2466 list_link_init(&zcw->zcw_node); 2467 zcw->zcw_lwb = NULL; 2468 zcw->zcw_done = B_FALSE; 2469 zcw->zcw_zio_error = 0; 2470 2471 return (zcw); 2472 } 2473 2474 static void 2475 zil_free_commit_waiter(zil_commit_waiter_t *zcw) 2476 { 2477 ASSERT(!list_link_active(&zcw->zcw_node)); 2478 ASSERT3P(zcw->zcw_lwb, ==, NULL); 2479 ASSERT3B(zcw->zcw_done, ==, B_TRUE); 2480 mutex_destroy(&zcw->zcw_lock); 2481 cv_destroy(&zcw->zcw_cv); 2482 kmem_cache_free(zil_zcw_cache, zcw); 2483 } 2484 2485 /* 2486 * This function is used to create a TX_COMMIT itx and assign it. This 2487 * way, it will be linked into the ZIL's list of synchronous itxs, and 2488 * then later committed to an lwb (or skipped) when 2489 * zil_process_commit_list() is called. 2490 */ 2491 static void 2492 zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw) 2493 { 2494 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 2495 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 2496 2497 itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t)); 2498 itx->itx_sync = B_TRUE; 2499 itx->itx_private = zcw; 2500 2501 zil_itx_assign(zilog, itx, tx); 2502 2503 dmu_tx_commit(tx); 2504 } 2505 2506 /* 2507 * Commit ZFS Intent Log transactions (itxs) to stable storage. 2508 * 2509 * When writing ZIL transactions to the on-disk representation of the 2510 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple 2511 * itxs can be committed to a single lwb. Once a lwb is written and 2512 * committed to stable storage (i.e. the lwb is written, and vdevs have 2513 * been flushed), each itx that was committed to that lwb is also 2514 * considered to be committed to stable storage. 2515 * 2516 * When an itx is committed to an lwb, the log record (lr_t) contained 2517 * by the itx is copied into the lwb's zio buffer, and once this buffer 2518 * is written to disk, it becomes an on-disk ZIL block. 2519 * 2520 * As itxs are generated, they're inserted into the ZIL's queue of 2521 * uncommitted itxs. 
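 *
 * (For example, a synchronous write ends up creating a TX_WRITE itx
 * with zil_itx_create() and handing it to zil_itx_assign(); the
 * commit itx built by zil_commit_itx_assign() above follows the same
 * pattern.)
 *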
The semantics of zil_commit() are such that it will 2522 * block until all itxs that were in the queue when it was called, are 2523 * committed to stable storage. 2524 * 2525 * If "foid" is zero, this means all "synchronous" and "asynchronous" 2526 * itxs, for all objects in the dataset, will be committed to stable 2527 * storage prior to zil_commit() returning. If "foid" is non-zero, all 2528 * "synchronous" itxs for all objects, but only "asynchronous" itxs 2529 * that correspond to the foid passed in, will be committed to stable 2530 * storage prior to zil_commit() returning. 2531 * 2532 * Generally speaking, when zil_commit() is called, the consumer doesn't 2533 * actually care about _all_ of the uncommitted itxs. Instead, they're 2534 * simply trying to wait for a specific itx to be committed to disk, 2535 * but the interface(s) for interacting with the ZIL don't allow such 2536 * fine-grained communication. A better interface would allow a consumer 2537 * to create and assign an itx, and then pass a reference to this itx to 2538 * zil_commit(); such that zil_commit() would return as soon as that 2539 * specific itx was committed to disk (instead of waiting for _all_ 2540 * itxs to be committed). 2541 * 2542 * When a thread calls zil_commit() a special "commit itx" will be 2543 * generated, along with a corresponding "waiter" for this commit itx. 2544 * zil_commit() will wait on this waiter's CV, such that when the waiter 2545 * is marked done, and signalled, zil_commit() will return. 2546 * 2547 * This commit itx is inserted into the queue of uncommitted itxs. This 2548 * provides an easy mechanism for determining which itxs were in the 2549 * queue prior to zil_commit() having been called, and which itxs were 2550 * added after zil_commit() was called. 2551 * 2552 * The commit itx is special; it doesn't have any on-disk representation. 2553 * When a commit itx is "committed" to an lwb, the waiter associated 2554 * with it is linked onto the lwb's list of waiters. Then, when that lwb 2555 * completes, each waiter on the lwb's list is marked done and signalled 2556 * -- allowing the thread waiting on the waiter to return from zil_commit(). 2557 * 2558 * It's important to point out a few critical factors that allow us 2559 * to make use of the commit itxs, commit waiters, per-lwb lists of 2560 * commit waiters, and zio completion callbacks like we're doing: 2561 * 2562 * 1. The list of waiters for each lwb is traversed, and each commit 2563 * waiter is marked "done" and signalled, in the zio completion 2564 * callback of the lwb's zio[*]. 2565 * 2566 * * Actually, the waiters are signalled in the zio completion 2567 * callback of the root zio for the DKIOCFLUSHWRITECACHE commands 2568 * that are sent to the vdevs upon completion of the lwb zio. 2569 * 2570 * 2. When the itxs are inserted into the ZIL's queue of uncommitted 2571 * itxs, the order in which they are inserted is preserved[*]; as 2572 * itxs are added to the queue, they are added to the tail of 2573 * in-memory linked lists. 2574 * 2575 * When committing the itxs to lwbs (to be written to disk), they 2576 * are committed in the same order in which the itxs were added to 2577 * the uncommitted queue's linked list(s); i.e. the linked list of 2578 * itxs to commit is traversed from head to tail, and each itx is 2579 * committed to an lwb in that order. 2580 * 2581 * * To clarify: 2582 * 2583 * - the order of "sync" itxs is preserved w.r.t. other 2584 * "sync" itxs, regardless of the corresponding objects.
2585 * - the order of "async" itxs is preserved w.r.t. other 2586 * "async" itxs corresponding to the same object. 2587 * - the order of "async" itxs is *not* preserved w.r.t. other 2588 * "async" itxs corresponding to different objects. 2589 * - the order of "sync" itxs w.r.t. "async" itxs (or vice 2590 * versa) is *not* preserved, even for itxs that correspond 2591 * to the same object. 2592 * 2593 * For more details, see: zil_itx_assign(), zil_async_to_sync(), 2594 * zil_get_commit_list(), and zil_process_commit_list(). 2595 * 2596 * 3. The lwbs represent a linked list of blocks on disk. Thus, any 2597 * lwb cannot be considered committed to stable storage, until its 2598 * "previous" lwb is also committed to stable storage. This fact, 2599 * coupled with the fact described above, means that itxs are 2600 * committed in (roughly) the order in which they were generated. 2601 * This is essential because itxs are dependent on prior itxs. 2602 * Thus, we *must not* deem an itx as being committed to stable 2603 * storage, until *all* prior itxs have also been committed to 2604 * stable storage. 2605 * 2606 * To enforce this ordering of lwb zio's, while still leveraging as 2607 * much of the underlying storage performance as possible, we rely 2608 * on two fundamental concepts: 2609 * 2610 * 1. The creation and issuance of lwb zio's is protected by 2611 * the zilog's "zl_issuer_lock", which ensures only a single 2612 * thread is creating and/or issuing lwb's at a time 2613 * 2. The "previous" lwb is a child of the "current" lwb 2614 * (leveraging the zio parent-child depenency graph) 2615 * 2616 * By relying on this parent-child zio relationship, we can have 2617 * many lwb zio's concurrently issued to the underlying storage, 2618 * but the order in which they complete will be the same order in 2619 * which they were created. 2620 */ 2621 void 2622 zil_commit(zilog_t *zilog, uint64_t foid) 2623 { 2624 /* 2625 * We should never attempt to call zil_commit on a snapshot for 2626 * a couple of reasons: 2627 * 2628 * 1. A snapshot may never be modified, thus it cannot have any 2629 * in-flight itxs that would have modified the dataset. 2630 * 2631 * 2. By design, when zil_commit() is called, a commit itx will 2632 * be assigned to this zilog; as a result, the zilog will be 2633 * dirtied. We must not dirty the zilog of a snapshot; there's 2634 * checks in the code that enforce this invariant, and will 2635 * cause a panic if it's not upheld. 2636 */ 2637 ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE); 2638 2639 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 2640 return; 2641 2642 if (!spa_writeable(zilog->zl_spa)) { 2643 /* 2644 * If the SPA is not writable, there should never be any 2645 * pending itxs waiting to be committed to disk. If that 2646 * weren't true, we'd skip writing those itxs out, and 2647 * would break the sematics of zil_commit(); thus, we're 2648 * verifying that truth before we return to the caller. 2649 */ 2650 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2651 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 2652 for (int i = 0; i < TXG_SIZE; i++) 2653 ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL); 2654 return; 2655 } 2656 2657 /* 2658 * If the ZIL is suspended, we don't want to dirty it by calling 2659 * zil_commit_itx_assign() below, nor can we write out 2660 * lwbs like would be done in zil_commit_write(). Thus, we 2661 * simply rely on txg_wait_synced() to maintain the necessary 2662 * semantics, and avoid calling those functions altogether. 
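 *
 * Either way, the guarantee seen by the caller is the same: by the
 * time zil_commit() returns, the relevant itxs are on stable storage,
 * whether that happened via lwbs written below or via the
 * txg_wait_synced() fallback used while suspended.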
2663 */ 2664 if (zilog->zl_suspend > 0) { 2665 txg_wait_synced(zilog->zl_dmu_pool, 0); 2666 return; 2667 } 2668 2669 zil_commit_impl(zilog, foid); 2670 } 2671 2672 void 2673 zil_commit_impl(zilog_t *zilog, uint64_t foid) 2674 { 2675 /* 2676 * Move the "async" itxs for the specified foid to the "sync" 2677 * queues, such that they will be later committed (or skipped) 2678 * to an lwb when zil_process_commit_list() is called. 2679 * 2680 * Since these "async" itxs must be committed prior to this 2681 * call to zil_commit returning, we must perform this operation 2682 * before we call zil_commit_itx_assign(). 2683 */ 2684 zil_async_to_sync(zilog, foid); 2685 2686 /* 2687 * We allocate a new "waiter" structure which will initially be 2688 * linked to the commit itx using the itx's "itx_private" field. 2689 * Since the commit itx doesn't represent any on-disk state, 2690 * when it's committed to an lwb, rather than copying the its 2691 * lr_t into the lwb's buffer, the commit itx's "waiter" will be 2692 * added to the lwb's list of waiters. Then, when the lwb is 2693 * committed to stable storage, each waiter in the lwb's list of 2694 * waiters will be marked "done", and signalled. 2695 * 2696 * We must create the waiter and assign the commit itx prior to 2697 * calling zil_commit_writer(), or else our specific commit itx 2698 * is not guaranteed to be committed to an lwb prior to calling 2699 * zil_commit_waiter(). 2700 */ 2701 zil_commit_waiter_t *zcw = zil_alloc_commit_waiter(); 2702 zil_commit_itx_assign(zilog, zcw); 2703 2704 zil_commit_writer(zilog, zcw); 2705 zil_commit_waiter(zilog, zcw); 2706 2707 if (zcw->zcw_zio_error != 0) { 2708 /* 2709 * If there was an error writing out the ZIL blocks that 2710 * this thread is waiting on, then we fallback to 2711 * relying on spa_sync() to write out the data this 2712 * thread is waiting on. Obviously this has performance 2713 * implications, but the expectation is for this to be 2714 * an exceptional case, and shouldn't occur often. 2715 */ 2716 DTRACE_PROBE2(zil__commit__io__error, 2717 zilog_t *, zilog, zil_commit_waiter_t *, zcw); 2718 txg_wait_synced(zilog->zl_dmu_pool, 0); 2719 } 2720 2721 zil_free_commit_waiter(zcw); 2722 } 2723 2724 /* 2725 * Called in syncing context to free committed log blocks and update log header. 2726 */ 2727 void 2728 zil_sync(zilog_t *zilog, dmu_tx_t *tx) 2729 { 2730 zil_header_t *zh = zil_header_in_syncing_context(zilog); 2731 uint64_t txg = dmu_tx_get_txg(tx); 2732 spa_t *spa = zilog->zl_spa; 2733 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; 2734 lwb_t *lwb; 2735 2736 /* 2737 * We don't zero out zl_destroy_txg, so make sure we don't try 2738 * to destroy it twice. 2739 */ 2740 if (spa_sync_pass(spa) != 1) 2741 return; 2742 2743 mutex_enter(&zilog->zl_lock); 2744 2745 ASSERT(zilog->zl_stop_sync == 0); 2746 2747 if (*replayed_seq != 0) { 2748 ASSERT(zh->zh_replay_seq < *replayed_seq); 2749 zh->zh_replay_seq = *replayed_seq; 2750 *replayed_seq = 0; 2751 } 2752 2753 if (zilog->zl_destroy_txg == txg) { 2754 blkptr_t blk = zh->zh_log; 2755 2756 ASSERT(list_head(&zilog->zl_lwb_list) == NULL); 2757 2758 bzero(zh, sizeof (zil_header_t)); 2759 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); 2760 2761 if (zilog->zl_keep_first) { 2762 /* 2763 * If this block was part of log chain that couldn't 2764 * be claimed because a device was missing during 2765 * zil_claim(), but that device later returns, 2766 * then this block could erroneously appear valid. 
2767 * To guard against this, assign a new GUID to the new 2768 * log chain so it doesn't matter what blk points to. 2769 */ 2770 zil_init_log_chain(zilog, &blk); 2771 zh->zh_log = blk; 2772 } 2773 } 2774 2775 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 2776 zh->zh_log = lwb->lwb_blk; 2777 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 2778 break; 2779 list_remove(&zilog->zl_lwb_list, lwb); 2780 zio_free(spa, txg, &lwb->lwb_blk); 2781 zil_free_lwb(zilog, lwb); 2782 2783 /* 2784 * If we don't have anything left in the lwb list then 2785 * we've had an allocation failure and we need to zero 2786 * out the zil_header blkptr so that we don't end 2787 * up freeing the same block twice. 2788 */ 2789 if (list_head(&zilog->zl_lwb_list) == NULL) 2790 BP_ZERO(&zh->zh_log); 2791 } 2792 mutex_exit(&zilog->zl_lock); 2793 } 2794 2795 /* ARGSUSED */ 2796 static int 2797 zil_lwb_cons(void *vbuf, void *unused, int kmflag) 2798 { 2799 lwb_t *lwb = vbuf; 2800 list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), 2801 offsetof(zil_commit_waiter_t, zcw_node)); 2802 avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare, 2803 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 2804 mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 2805 return (0); 2806 } 2807 2808 /* ARGSUSED */ 2809 static void 2810 zil_lwb_dest(void *vbuf, void *unused) 2811 { 2812 lwb_t *lwb = vbuf; 2813 mutex_destroy(&lwb->lwb_vdev_lock); 2814 avl_destroy(&lwb->lwb_vdev_tree); 2815 list_destroy(&lwb->lwb_waiters); 2816 } 2817 2818 void 2819 zil_init(void) 2820 { 2821 zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 2822 sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0); 2823 2824 zil_zcw_cache = kmem_cache_create("zil_zcw_cache", 2825 sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 2826 } 2827 2828 void 2829 zil_fini(void) 2830 { 2831 kmem_cache_destroy(zil_zcw_cache); 2832 kmem_cache_destroy(zil_lwb_cache); 2833 } 2834 2835 void 2836 zil_set_sync(zilog_t *zilog, uint64_t sync) 2837 { 2838 zilog->zl_sync = sync; 2839 } 2840 2841 void 2842 zil_set_logbias(zilog_t *zilog, uint64_t logbias) 2843 { 2844 zilog->zl_logbias = logbias; 2845 } 2846 2847 zilog_t * 2848 zil_alloc(objset_t *os, zil_header_t *zh_phys) 2849 { 2850 zilog_t *zilog; 2851 2852 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 2853 2854 zilog->zl_header = zh_phys; 2855 zilog->zl_os = os; 2856 zilog->zl_spa = dmu_objset_spa(os); 2857 zilog->zl_dmu_pool = dmu_objset_pool(os); 2858 zilog->zl_destroy_txg = TXG_INITIAL - 1; 2859 zilog->zl_logbias = dmu_objset_logbias(os); 2860 zilog->zl_sync = dmu_objset_syncprop(os); 2861 zilog->zl_dirty_max_txg = 0; 2862 zilog->zl_last_lwb_opened = NULL; 2863 zilog->zl_last_lwb_latency = 0; 2864 2865 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 2866 mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL); 2867 2868 for (int i = 0; i < TXG_SIZE; i++) { 2869 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, 2870 MUTEX_DEFAULT, NULL); 2871 } 2872 2873 list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 2874 offsetof(lwb_t, lwb_node)); 2875 2876 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), 2877 offsetof(itx_t, itx_node)); 2878 2879 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 2880 2881 return (zilog); 2882 } 2883 2884 void 2885 zil_free(zilog_t *zilog) 2886 { 2887 zilog->zl_stop_sync = 1; 2888 2889 ASSERT0(zilog->zl_suspend); 2890 ASSERT0(zilog->zl_suspending); 2891 2892 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2893 
list_destroy(&zilog->zl_lwb_list); 2894 2895 ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); 2896 list_destroy(&zilog->zl_itx_commit_list); 2897 2898 for (int i = 0; i < TXG_SIZE; i++) { 2899 /* 2900 * It's possible for an itx to be generated that doesn't dirty 2901 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() 2902 * callback to remove the entry. We remove those here. 2903 * 2904 * Also free up the ziltest itxs. 2905 */ 2906 if (zilog->zl_itxg[i].itxg_itxs) 2907 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); 2908 mutex_destroy(&zilog->zl_itxg[i].itxg_lock); 2909 } 2910 2911 mutex_destroy(&zilog->zl_issuer_lock); 2912 mutex_destroy(&zilog->zl_lock); 2913 2914 cv_destroy(&zilog->zl_cv_suspend); 2915 2916 kmem_free(zilog, sizeof (zilog_t)); 2917 } 2918 2919 /* 2920 * Open an intent log. 2921 */ 2922 zilog_t * 2923 zil_open(objset_t *os, zil_get_data_t *get_data) 2924 { 2925 zilog_t *zilog = dmu_objset_zil(os); 2926 2927 ASSERT3P(zilog->zl_get_data, ==, NULL); 2928 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 2929 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2930 2931 zilog->zl_get_data = get_data; 2932 2933 return (zilog); 2934 } 2935 2936 /* 2937 * Close an intent log. 2938 */ 2939 void 2940 zil_close(zilog_t *zilog) 2941 { 2942 lwb_t *lwb; 2943 uint64_t txg; 2944 2945 if (!dmu_objset_is_snapshot(zilog->zl_os)) { 2946 zil_commit(zilog, 0); 2947 } else { 2948 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); 2949 ASSERT0(zilog->zl_dirty_max_txg); 2950 ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE); 2951 } 2952 2953 mutex_enter(&zilog->zl_lock); 2954 lwb = list_tail(&zilog->zl_lwb_list); 2955 if (lwb == NULL) 2956 txg = zilog->zl_dirty_max_txg; 2957 else 2958 txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg); 2959 mutex_exit(&zilog->zl_lock); 2960 2961 /* 2962 * We need to use txg_wait_synced() to wait long enough for the 2963 * ZIL to be clean, and to wait for all pending lwbs to be 2964 * written out. 2965 */ 2966 if (txg != 0) 2967 txg_wait_synced(zilog->zl_dmu_pool, txg); 2968 2969 if (zilog_is_dirty(zilog)) 2970 zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg); 2971 VERIFY(!zilog_is_dirty(zilog)); 2972 2973 zilog->zl_get_data = NULL; 2974 2975 /* 2976 * We should have only one lwb left on the list; remove it now. 2977 */ 2978 mutex_enter(&zilog->zl_lock); 2979 lwb = list_head(&zilog->zl_lwb_list); 2980 if (lwb != NULL) { 2981 ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list)); 2982 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2983 list_remove(&zilog->zl_lwb_list, lwb); 2984 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 2985 zil_free_lwb(zilog, lwb); 2986 } 2987 mutex_exit(&zilog->zl_lock); 2988 } 2989 2990 static char *suspend_tag = "zil suspending"; 2991 2992 /* 2993 * Suspend an intent log. While in suspended mode, we still honor 2994 * synchronous semantics, but we rely on txg_wait_synced() to do it. 2995 * On old version pools, we suspend the log briefly when taking a 2996 * snapshot so that it will have an empty intent log. 2997 * 2998 * Long holds are not really intended to be used the way we do here -- 2999 * held for such a short time. A concurrent caller of dsl_dataset_long_held() 3000 * could fail. Therefore we take pains to only put a long hold if it is 3001 * actually necessary. Fortunately, it will only be necessary if the 3002 * objset is currently mounted (or the ZVOL equivalent). In that case it 3003 * will already have a long hold, so we are not really making things any worse. 3004 * 3005 * Ideally, we would locate the existing long-holder (i.e. 
the zfsvfs_t or 3006 * zvol_state_t), and use their mechanism to prevent their hold from being 3007 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for 3008 * very little gain. 3009 * 3010 * if cookiep == NULL, this does both the suspend & resume. 3011 * Otherwise, it returns with the dataset "long held", and the cookie 3012 * should be passed into zil_resume(). 3013 */ 3014 int 3015 zil_suspend(const char *osname, void **cookiep) 3016 { 3017 objset_t *os; 3018 zilog_t *zilog; 3019 const zil_header_t *zh; 3020 int error; 3021 3022 error = dmu_objset_hold(osname, suspend_tag, &os); 3023 if (error != 0) 3024 return (error); 3025 zilog = dmu_objset_zil(os); 3026 3027 mutex_enter(&zilog->zl_lock); 3028 zh = zilog->zl_header; 3029 3030 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 3031 mutex_exit(&zilog->zl_lock); 3032 dmu_objset_rele(os, suspend_tag); 3033 return (SET_ERROR(EBUSY)); 3034 } 3035 3036 /* 3037 * Don't put a long hold in the cases where we can avoid it. This 3038 * is when there is no cookie so we are doing a suspend & resume 3039 * (i.e. called from zil_vdev_offline()), and there's nothing to do 3040 * for the suspend because it's already suspended, or there's no ZIL. 3041 */ 3042 if (cookiep == NULL && !zilog->zl_suspending && 3043 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { 3044 mutex_exit(&zilog->zl_lock); 3045 dmu_objset_rele(os, suspend_tag); 3046 return (0); 3047 } 3048 3049 dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); 3050 dsl_pool_rele(dmu_objset_pool(os), suspend_tag); 3051 3052 zilog->zl_suspend++; 3053 3054 if (zilog->zl_suspend > 1) { 3055 /* 3056 * Someone else is already suspending it. 3057 * Just wait for them to finish. 3058 */ 3059 3060 while (zilog->zl_suspending) 3061 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 3062 mutex_exit(&zilog->zl_lock); 3063 3064 if (cookiep == NULL) 3065 zil_resume(os); 3066 else 3067 *cookiep = os; 3068 return (0); 3069 } 3070 3071 /* 3072 * If there is no pointer to an on-disk block, this ZIL must not 3073 * be active (e.g. filesystem not mounted), so there's nothing 3074 * to clean up. 3075 */ 3076 if (BP_IS_HOLE(&zh->zh_log)) { 3077 ASSERT(cookiep != NULL); /* fast path already handled */ 3078 3079 *cookiep = os; 3080 mutex_exit(&zilog->zl_lock); 3081 return (0); 3082 } 3083 3084 zilog->zl_suspending = B_TRUE; 3085 mutex_exit(&zilog->zl_lock); 3086 3087 /* 3088 * We need to use zil_commit_impl to ensure we wait for all 3089 * LWB_STATE_OPENED and LWB_STATE_ISSUED lwb's to be committed 3090 * to disk before proceeding. If we used zil_commit instead, it 3091 * would just call txg_wait_synced(), because zl_suspend is set. 3092 * txg_wait_synced() doesn't wait for these lwb's to be 3093 * LWB_STATE_DONE before returning. 3094 */ 3095 zil_commit_impl(zilog, 0); 3096 3097 /* 3098 * Now that we've ensured all lwb's are LWB_STATE_DONE, we use 3099 * txg_wait_synced() to ensure the data from the zilog has 3100 * migrated to the main pool before calling zil_destroy(). 
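 *
 * (zil_destroy() below is then free to discard the old log chain
 * entirely; passing B_FALSE for "keep_first" means none of the
 * existing log blocks need to be preserved.)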
3101 */ 3102 txg_wait_synced(zilog->zl_dmu_pool, 0); 3103 3104 zil_destroy(zilog, B_FALSE); 3105 3106 mutex_enter(&zilog->zl_lock); 3107 zilog->zl_suspending = B_FALSE; 3108 cv_broadcast(&zilog->zl_cv_suspend); 3109 mutex_exit(&zilog->zl_lock); 3110 3111 if (cookiep == NULL) 3112 zil_resume(os); 3113 else 3114 *cookiep = os; 3115 return (0); 3116 } 3117 3118 void 3119 zil_resume(void *cookie) 3120 { 3121 objset_t *os = cookie; 3122 zilog_t *zilog = dmu_objset_zil(os); 3123 3124 mutex_enter(&zilog->zl_lock); 3125 ASSERT(zilog->zl_suspend != 0); 3126 zilog->zl_suspend--; 3127 mutex_exit(&zilog->zl_lock); 3128 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 3129 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 3130 } 3131 3132 typedef struct zil_replay_arg { 3133 zil_replay_func_t **zr_replay; 3134 void *zr_arg; 3135 boolean_t zr_byteswap; 3136 char *zr_lr; 3137 } zil_replay_arg_t; 3138 3139 static int 3140 zil_replay_error(zilog_t *zilog, lr_t *lr, int error) 3141 { 3142 char name[ZFS_MAX_DATASET_NAME_LEN]; 3143 3144 zilog->zl_replaying_seq--; /* didn't actually replay this one */ 3145 3146 dmu_objset_name(zilog->zl_os, name); 3147 3148 cmn_err(CE_WARN, "ZFS replay transaction error %d, " 3149 "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, 3150 (u_longlong_t)lr->lrc_seq, 3151 (u_longlong_t)(lr->lrc_txtype & ~TX_CI), 3152 (lr->lrc_txtype & TX_CI) ? "CI" : ""); 3153 3154 return (error); 3155 } 3156 3157 static int 3158 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg) 3159 { 3160 zil_replay_arg_t *zr = zra; 3161 const zil_header_t *zh = zilog->zl_header; 3162 uint64_t reclen = lr->lrc_reclen; 3163 uint64_t txtype = lr->lrc_txtype; 3164 int error = 0; 3165 3166 zilog->zl_replaying_seq = lr->lrc_seq; 3167 3168 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 3169 return (0); 3170 3171 if (lr->lrc_txg < claim_txg) /* already committed */ 3172 return (0); 3173 3174 /* Strip case-insensitive bit, still present in log record */ 3175 txtype &= ~TX_CI; 3176 3177 if (txtype == 0 || txtype >= TX_MAX_TYPE) 3178 return (zil_replay_error(zilog, lr, EINVAL)); 3179 3180 /* 3181 * If this record type can be logged out of order, the object 3182 * (lr_foid) may no longer exist. That's legitimate, not an error. 3183 */ 3184 if (TX_OOO(txtype)) { 3185 error = dmu_object_info(zilog->zl_os, 3186 ((lr_ooo_t *)lr)->lr_foid, NULL); 3187 if (error == ENOENT || error == EEXIST) 3188 return (0); 3189 } 3190 3191 /* 3192 * Make a copy of the data so we can revise and extend it. 3193 */ 3194 bcopy(lr, zr->zr_lr, reclen); 3195 3196 /* 3197 * If this is a TX_WRITE with a blkptr, suck in the data. 3198 */ 3199 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 3200 error = zil_read_log_data(zilog, (lr_write_t *)lr, 3201 zr->zr_lr + reclen); 3202 if (error != 0) 3203 return (zil_replay_error(zilog, lr, error)); 3204 } 3205 3206 /* 3207 * The log block containing this lr may have been byteswapped 3208 * so that we can easily examine common fields like lrc_txtype. 3209 * However, the log is a mix of different record types, and only the 3210 * replay vectors know how to byteswap their records. Therefore, if 3211 * the lr was byteswapped, undo it before invoking the replay vector. 3212 */ 3213 if (zr->zr_byteswap) 3214 byteswap_uint64_array(zr->zr_lr, reclen); 3215 3216 /* 3217 * We must now do two things atomically: replay this log record, 3218 * and update the log header sequence number to reflect the fact that 3219 * we did so. 
At the end of each replay function the sequence number 3220 * is updated if we are in replay mode. 3221 */ 3222 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 3223 if (error != 0) { 3224 /* 3225 * The DMU's dnode layer doesn't see removes until the txg 3226 * commits, so a subsequent claim can spuriously fail with 3227 * EEXIST. So if we receive any error we try syncing out 3228 * any removes then retry the transaction. Note that we 3229 * specify B_FALSE for byteswap now, so we don't do it twice. 3230 */ 3231 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 3232 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 3233 if (error != 0) 3234 return (zil_replay_error(zilog, lr, error)); 3235 } 3236 return (0); 3237 } 3238 3239 /* ARGSUSED */ 3240 static int 3241 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) 3242 { 3243 zilog->zl_replay_blks++; 3244 3245 return (0); 3246 } 3247 3248 /* 3249 * If this dataset has a non-empty intent log, replay it and destroy it. 3250 */ 3251 void 3252 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE]) 3253 { 3254 zilog_t *zilog = dmu_objset_zil(os); 3255 const zil_header_t *zh = zilog->zl_header; 3256 zil_replay_arg_t zr; 3257 3258 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { 3259 zil_destroy(zilog, B_TRUE); 3260 return; 3261 } 3262 3263 zr.zr_replay = replay_func; 3264 zr.zr_arg = arg; 3265 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); 3266 zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); 3267 3268 /* 3269 * Wait for in-progress removes to sync before starting replay. 3270 */ 3271 txg_wait_synced(zilog->zl_dmu_pool, 0); 3272 3273 zilog->zl_replay = B_TRUE; 3274 zilog->zl_replay_time = ddi_get_lbolt(); 3275 ASSERT(zilog->zl_replay_blks == 0); 3276 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, 3277 zh->zh_claim_txg); 3278 kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); 3279 3280 zil_destroy(zilog, B_FALSE); 3281 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 3282 zilog->zl_replay = B_FALSE; 3283 } 3284 3285 boolean_t 3286 zil_replaying(zilog_t *zilog, dmu_tx_t *tx) 3287 { 3288 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 3289 return (B_TRUE); 3290 3291 if (zilog->zl_replay) { 3292 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 3293 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = 3294 zilog->zl_replaying_seq; 3295 return (B_TRUE); 3296 } 3297 3298 return (B_FALSE); 3299 } 3300 3301 /* ARGSUSED */ 3302 int 3303 zil_reset(const char *osname, void *arg) 3304 { 3305 int error; 3306 3307 error = zil_suspend(osname, NULL); 3308 if (error != 0) 3309 return (SET_ERROR(EEXIST)); 3310 return (0); 3311 } 3312
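/*
 * Illustrative summary (not part of the interfaces above; "os",
 * "my_get_data", "my_arg" and "my_replay_vector" are placeholders): a
 * dataset consumer typically drives this file roughly as follows:
 *
 *	zilog = zil_open(os, my_get_data);		at mount time
 *	zil_replay(os, my_arg, my_replay_vector);	if ZIL_REPLAY_NEEDED
 *	...
 *	zil_commit(zilog, foid);			on fsync()/O_DSYNC
 *	...
 *	zil_close(zilog);				at unmount time
 *
 * zil_alloc() and zil_free() bracket the above, and are invoked when
 * the in-memory objset itself is created and destroyed.
 */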