/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>

/*
 * The zfs intent log (ZIL) saves, in memory, transaction records of
 * system calls that change the file system, with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC, or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records, and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available.
 */
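
/*
 * As a rough sketch (illustrative only; field names from zil_impl.h),
 * the chain looks like this:
 *
 *	zil_header_t         ZIL block              ZIL block
 *	+----------+     +---------------+      +---------------+
 *	| zh_log --+---->| log records   |  +-->| log records   |
 *	+----------+     |    ...        |  |   |    ...        |
 *	                 | zit_next_blk -+--+   | zit_next_blk -+--> ...
 *	                 +---------------+      +---------------+
 *
 * zit_next_blk lives in the zil_trailer_t at the end of each block.
 */
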
/*
 * This global ZIL switch affects all pools
 */
int zil_disable = 0;	/* disable intent logging */

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;

static kmem_cache_t *zil_lwb_cache;

static boolean_t zil_empty(zilog_t *zilog);

static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
	blkptr_t blk = *bp;
	zbookmark_t zb;
	uint32_t aflags = ARC_WAIT;
	int error;

	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

	*abufpp = NULL;

	/*
	 * We shouldn't be doing any scrubbing while we're doing log
	 * replay, so it's OK not to lock.
	 */
	error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);

	if (error == 0) {
		char *data = (*abufpp)->b_data;
		uint64_t blksz = BP_GET_LSIZE(bp);
		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
		    sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
		    (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
			error = ECKSUM;
		}

		if (error) {
			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
			*abufpp = NULL;
		}
	}

	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

	return (error);
}
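
/*
 * Note on the chain check above: the checksum words embedded in each
 * ZIL block pointer make the chain self-describing. If this block's
 * blk_cksum carries ZIL_ZC_SEQ == N, then the trailer's zit_next_blk
 * must carry ZIL_ZC_SEQ == N + 1; anything else (e.g. a stale or
 * partially written block) fails the bcmp() and is reported as ECKSUM,
 * which callers treat as the end of the chain.
 */
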
/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t claim_seq = zh->zh_claim_seq;
	uint64_t seq = 0;
	uint64_t max_seq = 0;
	blkptr_t blk = zh->zh_log;
	arc_buf_t *abuf;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	if (BP_IS_HOLE(&blk))
		return (max_seq);

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	for (;;) {
		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

		if (claim_seq != 0 && seq > claim_seq)
			break;

		ASSERT(max_seq < seq);
		max_seq = seq;

		error = zil_read_log_block(zilog, &blk, &abuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL) {
			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			continue;
		}

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	zil_dva_tree_fini(&zilog->zl_dva_tree);

	return (max_seq);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED));
		ASSERT(err == 0);
	}
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}
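
/*
 * Usage sketch: zil_parse() is always driven with one of the callback
 * pairs above. zil_claim() walks the chain with the claim callbacks,
 * and zil_destroy() frees a claimed chain with the free callbacks:
 *
 *	(void) zil_parse(zilog, zil_claim_log_block,
 *	    zil_claim_log_record, tx, first_txg);
 *
 *	(void) zil_parse(zilog, zil_free_log_block,
 *	    zil_free_log_record, tx, zh->zh_claim_txg);
 */
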
/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * If we don't already have an initial log block, or we have one
	 * but it's the wrong endianness, then allocate one.
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_blk(zilog->zl_spa, &blk, txg);
			BP_ZERO(&blk);
		}

		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
		    NULL, txg, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0) {
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_zio = NULL;

		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}
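
/*
 * Note: zil_create() is called lazily from zil_commit_writer() when the
 * first commit arrives and no lwb exists yet. If the initial block
 * allocation fails (error != 0 above), no lwb is created; the committer
 * then sees a NULL lwb and falls back to txg_wait_synced() to provide
 * the synchronous semantics.
 */
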
/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		zilog->zl_keep_first = B_FALSE;
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else {
		zilog->zl_keep_first = keep_first;
		if (zh->zh_flags & ZIL_REPLAY_NEEDED) {
			ASSERT(!keep_first);
			(void) zil_parse(zilog, zil_free_log_block,
			    zil_free_log_record, tx, zh->zh_claim_txg);
		} else {
			/*
			 * Would like to assert zil_empty() but that
			 * would force us to read the log chain which
			 * requires us to do I/O to the log. This is
			 * overkill since we really just want to destroy
			 * the chain anyway.
			 */
			if (!keep_first) {
				blkptr_t bp = zh->zh_log;
				zio_free_blk(zilog->zl_spa, &bp, txg);
			}
		}
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

/*
 * return true if the initial log block is not valid
 */
static boolean_t
zil_empty(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	arc_buf_t *abuf = NULL;

	if (BP_IS_HOLE(&zh->zh_log))
		return (B_TRUE);

	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
		return (B_TRUE);

	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	return (B_FALSE);
}
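
/*
 * Note: zil_claim() and zil_check_log_chain() below are shaped as
 * dmu_objset_find() callbacks, taking an (osname, txarg) pair rather
 * than a zilog; they are intended to be invoked once per dataset
 * (e.g. while a pool is being imported).
 */
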
int
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (zilog->zl_spa->spa_log_state == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_blk(zilog->zl_spa, &zh->zh_log, first_txg);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_rele(os, FTAG);
		return (0);
	}

	/*
	 * Record here whether the zil has any records to replay.
	 * If the header block pointer is null or the block points
	 * to the stubby then we know there are no valid log records.
	 * We use the header to store this state as the zilog gets
	 * freed later in dmu_objset_close().
	 * The flags (and the rest of the header fields) are cleared in
	 * zil_sync() as a result of a zil_destroy(), after replaying the log.
	 *
	 * Note, the intent log can be empty but still need the
	 * stubby to be claimed.
	 */
	if (!zil_empty(zilog)) {
		zh->zh_flags |= ZIL_REPLAY_NEEDED;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_rele(os, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
/* ARGSUSED */
int
zil_check_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	blkptr_t blk;
	arc_buf_t *abuf;
	objset_t *os;
	char *lrbuf;
	zil_trailer_t *ztp;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	blk = zh->zh_log;
	if (BP_IS_HOLE(&blk)) {
		dmu_objset_rele(os, FTAG);
		return (0); /* no chain */
	}

	for (;;) {
		error = zil_read_log_block(zilog, &blk, &abuf);
		if (error)
			break;
		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	dmu_objset_rele(os, FTAG);
	if (error == ECKSUM)
		return (0); /* normal end of chain */
	return (error);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}
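
/*
 * Note: zil_add_block() and zil_flush_vdevs() form a pair. As log
 * blocks are written, zil_add_block() records each vdev touched in
 * zl_vdev_tree; once the root zio completes, zil_flush_vdevs() issues
 * a write-cache flush to just those vdevs. Setting zfs_nocacheflush
 * short-circuits the first half, which is why no flushes are sent in
 * that mode.
 */
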
void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg. If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer(). zil_sync() will only remove
	 * the lwb if lwb_buf is null.
	 */
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error)
		zilog->zl_log_error = B_TRUE;

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync. We still have the
	 * zl_lock to ensure zil_sync doesn't kmem free the lwb.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz,
		    zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
	}
}

/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
	(((zilog)->zl_cur_used < zil_slog_limit) || \
	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
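
/*
 * Worked example (with the default zil_slog_limit of 1MB): a commit
 * that has so far written 512K (zl_cur_used) uses the slog; one that
 * has already written 3MB with another 4MB queued (zl_itx_list_sz)
 * fails both tests and falls back to the main pool, since a large
 * streaming commit gains little from the low-latency device.
 */
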
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp = &ztp->zit_next_blk;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_prev_used,
	    zilog->zl_cur_used + sizeof (*ztp));
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;
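
	/*
	 * Worked example of the sizing above (illustrative numbers): with
	 * zl_prev_used == 40K, zl_cur_used == 52K and zl_itx_list_sz ==
	 * 70K, we request 70K plus the trailer, round up to the next
	 * ZIL_MIN_BLKSZ multiple, and clamp the result at ZIL_MAX_BLKSZ.
	 */
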
	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg,
	    USE_SLOG(zilog));
	if (error) {
		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

		/*
		 * We dirty the dataset to ensure that zil_sync() will
		 * be called to remove this lwb from our zl_lwb_list.
		 * Failing to do so may leave an lwb with a NULL lwb_buf
		 * hanging around on the zl_lwb_list.
		 */
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		dmu_tx_commit(tx);

		/*
		 * We've just experienced an allocation failure, so we
		 * terminate the current lwb and send it on its way.
		 */
		ztp->zit_pad = 0;
		ztp->zit_nused = lwb->lwb_nused;
		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
		zio_nowait(lwb->lwb_zio);

		/*
		 * By returning NULL the caller will call txg_wait_synced()
		 */
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = *bp;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_zio = NULL;

	/*
	 * Put new lwb at the end of the log chain
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	mutex_exit(&zilog->zl_lock);

	/* Record the block for later vdev flushing */
	zil_add_block(zilog, &lwb->lwb_blk);

	/*
	 * kick off the write for the old log block
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	ASSERT(lwb->lwb_zio);
	zio_nowait(lwb->lwb_zio);

	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lr = (lr_write_t *)lrc;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lr->lr_length, sizeof (uint64_t), uint64_t);
	else
		dlen = 0;

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(lwb->lwb_nused == 0);
		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	/*
	 * Update lrc_seq to be the log record sequence number (see zil.h),
	 * then copy the record to the log buffer.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
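
	/*
	 * Note: a TX_WRITE itx is in one of three states (itx_wr_state):
	 * WR_COPIED (the data was copied into the itx when it was built,
	 * so the bcopy above already moved it), WR_NEED_COPY (the data
	 * is fetched now, via zl_get_data(), directly into the log
	 * buffer), or WR_INDIRECT (only a block pointer is logged, and
	 * dmu_sync() writes the data block itself).
	 */
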
	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			/* alignment is guaranteed */
			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
				lr->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0; /* defensive */

	return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_sod;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}
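
/*
 * Usage sketch (simplified; the real callers are the zfs_log_*()
 * routines in zfs_log.c): a system call logs its intent in open
 * context and later forces it to stable storage with zil_commit():
 *
 *	itx = zil_itx_create(TX_SETATTR, sizeof (lr_setattr_t));
 *	... fill in the lr_setattr_t body of itx->itx_lr ...
 *	seq = zil_itx_assign(zilog, itx, tx);
 *	...
 *	zil_commit(zilog, seq, foid);	 (e.g. from the fsync path,
 *	where foid is the file's object id)
 */
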
/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		list_insert_tail(&clean_list, itx);
	}
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (task_func_t *)zil_itx_clean, zilog, TQ_NOSLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}
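
/*
 * Note on locking: zl_writer is the single-writer flag for the code
 * below. A committer sets it under zl_lock, may then drop and retake
 * zl_lock while filling log blocks, and clears it when done; everyone
 * else (zil_commit(), zil_itx_clean(), zil_is_committed()) waits on
 * zl_cv_writer until it clears. This keeps itx list manipulation
 * single-threaded even across the lock drops.
 */
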
static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next = (itx_t *)-1;
	lwb_t *lwb;
	spa_t *spa;

	zilog->zl_writer = B_TRUE;
	ASSERT(zilog->zl_root_zio == NULL);
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create()
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
	for (;;) {
		/*
		 * Find the next itx to push:
		 * Push all transactions related to the specified foid, and
		 * all other transactions except TX_WRITE, TX_TRUNCATE,
		 * TX_SETATTR and TX_ACL for all other files.
		 */
		if (itx_next != (itx_t *)-1)
			itx = itx_next;
		else
			itx = list_head(&zilog->zl_itx_list);
		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
			if (foid == 0) /* push all foids? */
				break;
			if (itx->itx_sync) /* push all O_[D]SYNC */
				break;
			switch (itx->itx_lr.lrc_txtype) {
			case TX_SETATTR:
			case TX_WRITE:
			case TX_TRUNCATE:
			case TX_ACL:
				/* lr_foid is same offset for these records */
				if (((lr_write_t *)&itx->itx_lr)->lr_foid
				    != foid) {
					continue; /* skip this record */
				}
			}
			break;
		}
		if (itx == NULL)
			break;

		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
		    (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
			break;
		}

		/*
		 * Save the next pointer. Even though we soon drop
		 * zl_lock, all threads that may change the list
		 * (another writer or zil_itx_clean) can't do so until
		 * they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		mutex_exit(&zilog->zl_lock);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx)
		commit_seq = itx->itx_lr.lrc_seq;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		(void) zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		zil_flush_vdevs(zilog);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	}

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that file or that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	if (zilog == NULL || seq == 0)
		return;

	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq); /* cap seq at largest itx seq */

	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
		if (seq < zilog->zl_commit_seq) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}
	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
	/* wake up others waiting on the commit */
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
}
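
/*
 * Usage note: zil_commit(zilog, UINT64_MAX, 0) pushes everything
 * outstanding (zil_suspend() below does exactly that), while a typical
 * fsync path passes the sequence number returned by zil_itx_assign()
 * plus the file's object id as foid, so unrelated async writes need
 * not be pushed.
 */
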
/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	zh->zh_replay_seq = zilog->zl_replayed_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}
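
/*
 * Note: zil_init()/zil_fini() only set up the module-global lwb cache;
 * they run once per module (from the spa initialization path), not per
 * dataset. The per-dataset state lives in the zilog_t managed below.
 */
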
void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
	zilog->zl_logbias = logbias;
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;
	zilog->zl_logbias = dmu_objset_logbias(os);

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);

	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	/*
	 * If the log isn't already committed, mark the objset dirty
	 * (so zil_sync() will be called) and wait for that txg to sync.
	 */
	if (!zil_is_committed(zilog)) {
		uint64_t txg;
		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, 0);

	/*
	 * Wait for any in-flight log writes to complete.
	 */
	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}
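
/*
 * Usage sketch: zil_suspend()/zil_resume() bracket operations that need
 * the log quiesced and empty:
 *
 *	if (zil_suspend(zilog) == 0) {
 *		... take the snapshot, offline the vdev, etc. ...
 *		zil_resume(zilog);
 *	}
 *
 * zil_vdev_offline() at the bottom of this file follows this pattern.
 */
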
/*
 * Read in the data for the dmu_sync()ed block, and change the log
 * record to write this whole block.
 */
void
zil_get_replay_data(zilog_t *zilog, lr_write_t *lr)
{
	blkptr_t *wbp = &lr->lr_blkptr;
	char *wbuf = (char *)(lr + 1); /* data follows lr_write_t */
	uint64_t blksz;

	if (BP_IS_HOLE(wbp)) { /* compressed to a hole */
		blksz = BP_GET_LSIZE(&lr->lr_blkptr);
		/*
		 * If the blksz is zero then we must be replaying a log
		 * from a version prior to setting the blksize of null blocks.
		 * So we just zero the actual write size requested.
		 */
		if (blksz == 0) {
			bzero(wbuf, lr->lr_length);
			return;
		}
		bzero(wbuf, blksz);
	} else {
		/*
		 * A subsequent write may have overwritten this block, in which
		 * case wbp may have been freed and reallocated, and our
		 * read of wbp may fail with a checksum error. We can safely
		 * ignore this because the later write will provide the
		 * correct data.
		 */
		zbookmark_t zb;

		zb.zb_objset = dmu_objset_id(zilog->zl_os);
		zb.zb_object = lr->lr_foid;
		zb.zb_level = 0;
		zb.zb_blkid = -1; /* unknown */

		blksz = BP_GET_LSIZE(&lr->lr_blkptr);
		(void) zio_wait(zio_read(NULL, zilog->zl_spa, wbp, wbuf, blksz,
		    NULL, NULL, ZIO_PRIORITY_SYNC_READ,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
	}
	lr->lr_offset -= lr->lr_offset % blksz;
	lr->lr_length = blksz;
}

typedef struct zil_replay_arg {
	objset_t *zr_os;
	zil_replay_func_t **zr_replay;
	void *zr_arg;
	boolean_t zr_byteswap;
	char *zr_lrbuf;
} zil_replay_arg_t;
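
/*
 * Note: zr_replay is a table of TX_MAX_TYPE replay vectors indexed by
 * lrc_txtype (the ZPL supplies zfs_replay_vector[] from zfs_replay.c);
 * TX_CI is a flag bit in the type, and is stripped before indexing.
 */
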
static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	char *name;
	int pass, error;

	if (!zilog->zl_replay) /* giving up */
		return;

	if (lr->lrc_txg < claim_txg) /* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
		return;

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	if (txtype == 0 || txtype >= TX_MAX_TYPE) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records. Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header sequence number to reflect the fact that
	 * we did so. At the end of each replay function the sequence number
	 * is updated if we are in replay mode.
	 */
	for (pass = 1; pass <= 2; pass++) {
		zilog->zl_replaying_seq = lr->lrc_seq;
		/* Only byteswap (if needed) on the 1st pass.  */
		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
		    zr->zr_byteswap && pass == 1);

		if (!error)
			return;

		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error, we try syncing out
		 * any removes and then retry the transaction.
		 */
		if (pass == 1)
			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
	}

bad:
	ASSERT(error);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dmu_objset_name(zr->zr_os, name);
	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n",
	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
	zilog->zl_replay = B_FALSE;
	kmem_free(name, MAXNAMELEN);
}

/* ARGSUSED */
static void
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = lbolt;
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;
}

/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;
	int ret;

	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

	/* recent unpushed intent log transactions? */
	if (!list_is_empty(&zilog->zl_itx_list)) {
		ret = B_FALSE;
		goto out;
	}

	/* intent log never used? */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		ret = B_TRUE;
		goto out;
	}

	/*
	 * more than 1 log buffer means zil_sync() hasn't yet freed
	 * entries after a txg has committed
	 */
	if (list_next(&zilog->zl_lwb_list, lwb)) {
		ret = B_FALSE;
		goto out;
	}

	ASSERT(zil_empty(zilog));
	ret = B_TRUE;
out:
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
	return (ret);
}

/* ARGSUSED */
int
zil_vdev_offline(char *osname, void *arg)
{
	objset_t *os;
	zilog_t *zilog;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error)
		return (error);

	zilog = dmu_objset_zil(os);
	if (zil_suspend(zilog) != 0)
		error = EEXIST;
	else
		zil_resume(zilog);
	dmu_objset_rele(os, FTAG);
	return (error);
}