1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
24 * Copyright (c) 2014 Integros [integros.com]
25 */
26
27/* Portions Copyright 2010 Robert Milkowski */
28
29#include <sys/zfs_context.h>
30#include <sys/spa.h>
31#include <sys/dmu.h>
32#include <sys/zap.h>
33#include <sys/arc.h>
34#include <sys/stat.h>
35#include <sys/resource.h>
36#include <sys/zil.h>
37#include <sys/zil_impl.h>
38#include <sys/dsl_dataset.h>
39#include <sys/vdev_impl.h>
40#include <sys/dmu_tx.h>
41#include <sys/dsl_pool.h>
42#include <sys/abd.h>
43
/*
 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
 * calls that change the file system. Each itx has enough information to
 * be replayed after a system crash, power loss, or equivalent failure
 * mode. These are stored in memory until either:
 *
 *   1. they are committed to the pool by the DMU transaction group
 *      (txg), at which point they can be discarded; or
 *   2. they are committed to the on-disk ZIL for the dataset being
 *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
 *      requirement).
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
 * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
 * representation, and the on-disk representation). The on-disk format
 * consists of 3 parts:
 *
 * 	- a single, per-dataset, ZIL header; which points to a chain of
 * 	- zero or more ZIL blocks; each of which contains
 * 	- zero or more ZIL records
 *
 * A ZIL record holds the information necessary to replay a single
 * system call transaction. A ZIL block can hold many ZIL records, and
 * the blocks are chained together, similar to a singly linked list.
 *
 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
 * block in the chain, and the ZIL header points to the first block in
 * the chain.
 *
 * Note, there is not a fixed place in the pool to hold these ZIL
 * blocks; they are dynamically allocated and freed as needed from the
 * blocks available in the pool, though they can be preferentially
 * allocated from a dedicated "log" vdev.
 */
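
/*
 * A minimal sketch of how that chain is walked; the real implementation
 * is zil_parse() below, and "read_and_validate" here is a hypothetical
 * stand-in for zil_read_log_block():
 *
 *	blkptr_t blk = zh->zh_log;		(from the ZIL header)
 *	while (!BP_IS_HOLE(&blk)) {
 *		read_and_validate(&blk, &next);	(verifies blk_cksum sequence)
 *		process each lr_t record in the block;
 *		blk = next;			(taken from the zil_chain_t)
 *	}
 */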
82
83/*
84 * This controls the amount of time that a ZIL block (lwb) will remain
85 * "open" when it isn't "full", and it has a thread waiting for it to be
86 * committed to stable storage. Please refer to the zil_commit_waiter()
87 * function (and the comments within it) for more details.
88 */
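/*
 * For example, assuming zil_commit_waiter() scales the most recently
 * observed lwb write latency by this percentage (see that function for
 * the details), a 1ms lwb latency with the default of 5 gives a waiter
 * timeout of roughly 50us before the open lwb is issued anyway.
 */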
89int zfs_commit_timeout_pct = 5;
90
91/*
92 * Disable intent logging replay.  This global ZIL switch affects all pools.
93 */
94int zil_replay_disable = 0;
95
96/*
97 * Tunable parameter for debugging or performance analysis.  Setting
98 * zfs_nocacheflush will cause corruption on power loss if a volatile
99 * out-of-order write cache is enabled.
100 */
101boolean_t zfs_nocacheflush = B_FALSE;
102
103/*
104 * Limit SLOG write size per commit executed with synchronous priority.
105 * Any writes above that will be executed with lower (asynchronous) priority
106 * to limit potential SLOG device abuse by single active ZIL writer.
107 */
108uint64_t zil_slog_bulk = 768 * 1024;
109
110static kmem_cache_t *zil_lwb_cache;
111static kmem_cache_t *zil_zcw_cache;
112
113static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
114
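/*
 * True when no log records have been written into the lwb yet; i.e. the
 * remaining space (lwb_sz - lwb_nused) still equals the block's record
 * capacity (its allocated size less the zil_chain_t overhead). This
 * holds for both the slim-ZIL and legacy block layouts set up in
 * zil_alloc_lwb().
 */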
115#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
116    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
117
118static int
119zil_bp_compare(const void *x1, const void *x2)
120{
121	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
122	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
123
124	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
125		return (-1);
126	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
127		return (1);
128
129	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
130		return (-1);
131	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
132		return (1);
133
134	return (0);
135}
136
137static void
138zil_bp_tree_init(zilog_t *zilog)
139{
140	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
141	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
142}
143
144static void
145zil_bp_tree_fini(zilog_t *zilog)
146{
147	avl_tree_t *t = &zilog->zl_bp_tree;
148	zil_bp_node_t *zn;
149	void *cookie = NULL;
150
151	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
152		kmem_free(zn, sizeof (zil_bp_node_t));
153
154	avl_destroy(t);
155}
156
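/*
 * Record the DVA of a log block in the per-zilog AVL tree, returning
 * EEXIST if it is already present. zil_claim_log_block() and
 * zil_free_log_record() use this to avoid claiming or freeing the same
 * block twice while walking the chain.
 */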
157int
158zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
159{
160	avl_tree_t *t = &zilog->zl_bp_tree;
161	const dva_t *dva;
162	zil_bp_node_t *zn;
163	avl_index_t where;
164
165	if (BP_IS_EMBEDDED(bp))
166		return (0);
167
168	dva = BP_IDENTITY(bp);
169
170	if (avl_find(t, dva, &where) != NULL)
171		return (SET_ERROR(EEXIST));
172
173	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
174	zn->zn_dva = *dva;
175	avl_insert(t, zn, where);
176
177	return (0);
178}
179
180static zil_header_t *
181zil_header_in_syncing_context(zilog_t *zilog)
182{
183	return ((zil_header_t *)zilog->zl_header);
184}
185
186static void
187zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
188{
189	zio_cksum_t *zc = &bp->blk_cksum;
190
191	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
192	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
193	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
194	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
195}
196
197/*
198 * Read a log block and make sure it's valid.
199 */
200static int
201zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
202    char **end)
203{
204	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
205	arc_flags_t aflags = ARC_FLAG_WAIT;
206	arc_buf_t *abuf = NULL;
207	zbookmark_phys_t zb;
208	int error;
209
210	if (zilog->zl_header->zh_claim_txg == 0)
211		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
212
213	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
214		zio_flags |= ZIO_FLAG_SPECULATIVE;
215
216	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
217	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
218
219	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
220	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
221
222	if (error == 0) {
223		zio_cksum_t cksum = bp->blk_cksum;
224
225		/*
226		 * Validate the checksummed log block.
227		 *
228		 * Sequence numbers should be... sequential.  The checksum
229		 * verifier for the next block should be bp's checksum plus 1.
230		 *
231		 * Also check the log chain linkage and size used.
232		 */
233		cksum.zc_word[ZIL_ZC_SEQ]++;
234
235		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
236			zil_chain_t *zilc = abuf->b_data;
237			char *lr = (char *)(zilc + 1);
238			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
239
240			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
241			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
242				error = SET_ERROR(ECKSUM);
243			} else {
244				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
245				bcopy(lr, dst, len);
246				*end = (char *)dst + len;
247				*nbp = zilc->zc_next_blk;
248			}
249		} else {
250			char *lr = abuf->b_data;
251			uint64_t size = BP_GET_LSIZE(bp);
252			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
253
254			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
255			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
256			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
257				error = SET_ERROR(ECKSUM);
258			} else {
259				ASSERT3U(zilc->zc_nused, <=,
260				    SPA_OLD_MAXBLOCKSIZE);
261				bcopy(lr, dst, zilc->zc_nused);
262				*end = (char *)dst + zilc->zc_nused;
263				*nbp = zilc->zc_next_blk;
264			}
265		}
266
267		arc_buf_destroy(abuf, &abuf);
268	}
269
270	return (error);
271}
272
273/*
274 * Read a TX_WRITE log data block.
275 */
276static int
277zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
278{
279	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
280	const blkptr_t *bp = &lr->lr_blkptr;
281	arc_flags_t aflags = ARC_FLAG_WAIT;
282	arc_buf_t *abuf = NULL;
283	zbookmark_phys_t zb;
284	int error;
285
286	if (BP_IS_HOLE(bp)) {
287		if (wbuf != NULL)
288			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
289		return (0);
290	}
291
292	if (zilog->zl_header->zh_claim_txg == 0)
293		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
294
295	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
296	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
297
298	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
299	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
300
301	if (error == 0) {
302		if (wbuf != NULL)
303			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
304		arc_buf_destroy(abuf, &abuf);
305	}
306
307	return (error);
308}
309
310/*
311 * Parse the intent log, and call parse_func for each valid record within.
312 */
313int
314zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
315    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
316{
317	const zil_header_t *zh = zilog->zl_header;
318	boolean_t claimed = !!zh->zh_claim_txg;
319	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
320	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
321	uint64_t max_blk_seq = 0;
322	uint64_t max_lr_seq = 0;
323	uint64_t blk_count = 0;
324	uint64_t lr_count = 0;
325	blkptr_t blk, next_blk;
326	char *lrbuf, *lrp;
327	int error = 0;
328
329	/*
330	 * Old logs didn't record the maximum zh_claim_lr_seq.
331	 */
332	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
333		claim_lr_seq = UINT64_MAX;
334
335	/*
336	 * Starting at the block pointed to by zh_log we read the log chain.
337	 * For each block in the chain we strongly check that block to
338	 * ensure its validity.  We stop when an invalid block is found.
339	 * For each block pointer in the chain we call parse_blk_func().
340	 * For each record in each valid block we call parse_lr_func().
341	 * If the log has been claimed, stop if we encounter a sequence
342	 * number greater than the highest claimed sequence number.
343	 */
344	lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
345	zil_bp_tree_init(zilog);
346
347	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
348		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
349		int reclen;
350		char *end;
351
352		if (blk_seq > claim_blk_seq)
353			break;
354		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
355			break;
356		ASSERT3U(max_blk_seq, <, blk_seq);
357		max_blk_seq = blk_seq;
358		blk_count++;
359
360		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
361			break;
362
363		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
364		if (error != 0)
365			break;
366
367		for (lrp = lrbuf; lrp < end; lrp += reclen) {
368			lr_t *lr = (lr_t *)lrp;
369			reclen = lr->lrc_reclen;
370			ASSERT3U(reclen, >=, sizeof (lr_t));
371			if (lr->lrc_seq > claim_lr_seq)
372				goto done;
373			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
374				goto done;
375			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
376			max_lr_seq = lr->lrc_seq;
377			lr_count++;
378		}
379	}
380done:
381	zilog->zl_parse_error = error;
382	zilog->zl_parse_blk_seq = max_blk_seq;
383	zilog->zl_parse_lr_seq = max_lr_seq;
384	zilog->zl_parse_blk_count = blk_count;
385	zilog->zl_parse_lr_count = lr_count;
386
387	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
388	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));
389
390	zil_bp_tree_fini(zilog);
391	zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
392
393	return (error);
394}
395
396static int
397zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
398{
399	/*
400	 * Claim log block if not already committed and not already claimed.
401	 * If tx == NULL, just verify that the block is claimable.
402	 */
403	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
404	    zil_bp_tree_add(zilog, bp) != 0)
405		return (0);
406
407	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
408	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
409	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
410}
411
412static int
413zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
414{
415	lr_write_t *lr = (lr_write_t *)lrc;
416	int error;
417
418	if (lrc->lrc_txtype != TX_WRITE)
419		return (0);
420
421	/*
422	 * If the block is not readable, don't claim it.  This can happen
423	 * in normal operation when a log block is written to disk before
424	 * some of the dmu_sync() blocks it points to.  In this case, the
425	 * transaction cannot have been committed to anyone (we would have
426	 * waited for all writes to be stable first), so it is semantically
427	 * correct to declare this the end of the log.
428	 */
429	if (lr->lr_blkptr.blk_birth >= first_txg &&
430	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
431		return (error);
432	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
433}
434
435/* ARGSUSED */
436static int
437zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
438{
439	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
440
441	return (0);
442}
443
444static int
445zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
446{
447	lr_write_t *lr = (lr_write_t *)lrc;
448	blkptr_t *bp = &lr->lr_blkptr;
449
450	/*
451	 * If we previously claimed it, we need to free it.
452	 */
453	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
454	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
455	    !BP_IS_HOLE(bp))
456		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
457
458	return (0);
459}
460
461static int
462zil_lwb_vdev_compare(const void *x1, const void *x2)
463{
464	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
465	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
466
467	if (v1 < v2)
468		return (-1);
469	if (v1 > v2)
470		return (1);
471
472	return (0);
473}
474
475static lwb_t *
476zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg)
477{
478	lwb_t *lwb;
479
480	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
481	lwb->lwb_zilog = zilog;
482	lwb->lwb_blk = *bp;
483	lwb->lwb_slog = slog;
484	lwb->lwb_state = LWB_STATE_CLOSED;
485	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
486	lwb->lwb_max_txg = txg;
487	lwb->lwb_write_zio = NULL;
488	lwb->lwb_root_zio = NULL;
489	lwb->lwb_tx = NULL;
490	lwb->lwb_issued_timestamp = 0;
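	/*
	 * Slim ZIL blocks (ZIO_CHECKSUM_ZILOG2) keep the zil_chain_t
	 * header at the start of the block, so its size is counted as
	 * already-used space; legacy blocks store it as a trailer, so it
	 * is carved out of lwb_sz instead.
	 */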
491	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
492		lwb->lwb_nused = sizeof (zil_chain_t);
493		lwb->lwb_sz = BP_GET_LSIZE(bp);
494	} else {
495		lwb->lwb_nused = 0;
496		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
497	}
498
499	mutex_enter(&zilog->zl_lock);
500	list_insert_tail(&zilog->zl_lwb_list, lwb);
501	mutex_exit(&zilog->zl_lock);
502
503	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
504	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
505	ASSERT(list_is_empty(&lwb->lwb_waiters));
506
507	return (lwb);
508}
509
510static void
511zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
512{
513	ASSERT(MUTEX_HELD(&zilog->zl_lock));
514	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
515	ASSERT(list_is_empty(&lwb->lwb_waiters));
516
517	if (lwb->lwb_state == LWB_STATE_OPENED) {
518		avl_tree_t *t = &lwb->lwb_vdev_tree;
519		void *cookie = NULL;
520		zil_vdev_node_t *zv;
521
522		while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
523			kmem_free(zv, sizeof (*zv));
524
525		ASSERT3P(lwb->lwb_root_zio, !=, NULL);
526		ASSERT3P(lwb->lwb_write_zio, !=, NULL);
527
528		zio_cancel(lwb->lwb_root_zio);
529		zio_cancel(lwb->lwb_write_zio);
530
531		lwb->lwb_root_zio = NULL;
532		lwb->lwb_write_zio = NULL;
533	} else {
534		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
535	}
536
537	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
538	ASSERT3P(lwb->lwb_write_zio, ==, NULL);
539	ASSERT3P(lwb->lwb_root_zio, ==, NULL);
540
541	/*
542	 * Clear the zilog's field to indicate this lwb is no longer
543	 * valid, and prevent use-after-free errors.
544	 */
545	if (zilog->zl_last_lwb_opened == lwb)
546		zilog->zl_last_lwb_opened = NULL;
547
548	kmem_cache_free(zil_lwb_cache, lwb);
549}
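
/*
 * For reference, an lwb moves through the following states (see the
 * LWB_STATE_* assertions above and the zio callbacks below):
 *
 *	CLOSED	- created by zil_alloc_lwb(); no zio's exist yet
 *	OPENED	- zil_lwb_write_open() has created lwb_root_zio and
 *		  lwb_write_zio, and itxs may be copied into lwb_buf
 *	ISSUED	- zil_lwb_write_issue() has issued the write zio
 *	DONE	- zil_lwb_flush_vdevs_done() has run; lwb_buf is freed
 *		  and any commit waiters are signalled
 */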
550
551/*
552 * Called when we create in-memory log transactions so that we know
553 * to cleanup the itxs at the end of spa_sync().
554 */
555void
556zilog_dirty(zilog_t *zilog, uint64_t txg)
557{
558	dsl_pool_t *dp = zilog->zl_dmu_pool;
559	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
560
561	ASSERT(spa_writeable(zilog->zl_spa));
562
563	if (ds->ds_is_snapshot)
564		panic("dirtying snapshot!");
565
566	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
567		/* up the hold count until we can be written out */
568		dmu_buf_add_ref(ds->ds_dbuf, zilog);
569
570		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
571	}
572}
573
574/*
575 * Determine if the zil is dirty in the specified txg. Callers wanting to
576 * ensure that the dirty state does not change must hold the itxg_lock for
577 * the specified txg. Holding the lock will ensure that the zil cannot be
578 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
579 * state.
580 */
581boolean_t
582zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
583{
584	dsl_pool_t *dp = zilog->zl_dmu_pool;
585
586	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
587		return (B_TRUE);
588	return (B_FALSE);
589}
590
591/*
592 * Determine if the zil is dirty. The zil is considered dirty if it has
593 * any pending itx records that have not been cleaned by zil_clean().
594 */
595boolean_t
596zilog_is_dirty(zilog_t *zilog)
597{
598	dsl_pool_t *dp = zilog->zl_dmu_pool;
599
600	for (int t = 0; t < TXG_SIZE; t++) {
601		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
602			return (B_TRUE);
603	}
604	return (B_FALSE);
605}
606
607/*
608 * Create an on-disk intent log.
609 */
610static lwb_t *
611zil_create(zilog_t *zilog)
612{
613	const zil_header_t *zh = zilog->zl_header;
614	lwb_t *lwb = NULL;
615	uint64_t txg = 0;
616	dmu_tx_t *tx = NULL;
617	blkptr_t blk;
618	int error = 0;
619	boolean_t slog = FALSE;
620
621	/*
622	 * Wait for any previous destroy to complete.
623	 */
624	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
625
626	ASSERT(zh->zh_claim_txg == 0);
627	ASSERT(zh->zh_replay_seq == 0);
628
629	blk = zh->zh_log;
630
631	/*
632	 * Allocate an initial log block if:
633	 *    - there isn't one already
 *    - the existing block is the wrong endianness
635	 */
636	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
637		tx = dmu_tx_create(zilog->zl_os);
638		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
639		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
640		txg = dmu_tx_get_txg(tx);
641
642		if (!BP_IS_HOLE(&blk)) {
643			zio_free_zil(zilog->zl_spa, txg, &blk);
644			BP_ZERO(&blk);
645		}
646
647		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
648		    ZIL_MIN_BLKSZ, &slog);
649
650		if (error == 0)
651			zil_init_log_chain(zilog, &blk);
652	}
653
654	/*
655	 * Allocate a log write block (lwb) for the first log block.
656	 */
657	if (error == 0)
658		lwb = zil_alloc_lwb(zilog, &blk, slog, txg);
659
660	/*
661	 * If we just allocated the first log block, commit our transaction
 * and wait for zil_sync() to stuff the block pointer into zh_log.
663	 * (zh is part of the MOS, so we cannot modify it in open context.)
664	 */
665	if (tx != NULL) {
666		dmu_tx_commit(tx);
667		txg_wait_synced(zilog->zl_dmu_pool, txg);
668	}
669
670	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
671
672	return (lwb);
673}
674
675/*
676 * In one tx, free all log blocks and clear the log header. If keep_first
677 * is set, then we're replaying a log with no content. We want to keep the
678 * first block, however, so that the first synchronous transaction doesn't
679 * require a txg_wait_synced() in zil_create(). We don't need to
680 * txg_wait_synced() here either when keep_first is set, because both
681 * zil_create() and zil_destroy() will wait for any in-progress destroys
682 * to complete.
683 */
684void
685zil_destroy(zilog_t *zilog, boolean_t keep_first)
686{
687	const zil_header_t *zh = zilog->zl_header;
688	lwb_t *lwb;
689	dmu_tx_t *tx;
690	uint64_t txg;
691
692	/*
693	 * Wait for any previous destroy to complete.
694	 */
695	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
696
697	zilog->zl_old_header = *zh;		/* debugging aid */
698
699	if (BP_IS_HOLE(&zh->zh_log))
700		return;
701
702	tx = dmu_tx_create(zilog->zl_os);
703	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
704	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
705	txg = dmu_tx_get_txg(tx);
706
707	mutex_enter(&zilog->zl_lock);
708
709	ASSERT3U(zilog->zl_destroy_txg, <, txg);
710	zilog->zl_destroy_txg = txg;
711	zilog->zl_keep_first = keep_first;
712
713	if (!list_is_empty(&zilog->zl_lwb_list)) {
714		ASSERT(zh->zh_claim_txg == 0);
715		VERIFY(!keep_first);
716		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
717			list_remove(&zilog->zl_lwb_list, lwb);
718			if (lwb->lwb_buf != NULL)
719				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
720			zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
721			zil_free_lwb(zilog, lwb);
722		}
723	} else if (!keep_first) {
724		zil_destroy_sync(zilog, tx);
725	}
726	mutex_exit(&zilog->zl_lock);
727
728	dmu_tx_commit(tx);
729}
730
731void
732zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
733{
734	ASSERT(list_is_empty(&zilog->zl_lwb_list));
735	(void) zil_parse(zilog, zil_free_log_block,
736	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
737}
738
739int
740zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
741{
742	dmu_tx_t *tx = txarg;
743	uint64_t first_txg = dmu_tx_get_txg(tx);
744	zilog_t *zilog;
745	zil_header_t *zh;
746	objset_t *os;
747	int error;
748
749	error = dmu_objset_own_obj(dp, ds->ds_object,
750	    DMU_OST_ANY, B_FALSE, FTAG, &os);
751	if (error != 0) {
752		/*
753		 * EBUSY indicates that the objset is inconsistent, in which
754		 * case it can not have a ZIL.
755		 */
756		if (error != EBUSY) {
757			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
758			    (unsigned long long)ds->ds_object, error);
759		}
760		return (0);
761	}
762
763	zilog = dmu_objset_zil(os);
764	zh = zil_header_in_syncing_context(zilog);
765
766	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
767		if (!BP_IS_HOLE(&zh->zh_log))
768			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
769		BP_ZERO(&zh->zh_log);
770		dsl_dataset_dirty(dmu_objset_ds(os), tx);
771		dmu_objset_disown(os, FTAG);
772		return (0);
773	}
774
775	/*
776	 * Claim all log blocks if we haven't already done so, and remember
777	 * the highest claimed sequence number.  This ensures that if we can
778	 * read only part of the log now (e.g. due to a missing device),
779	 * but we can read the entire log later, we will not try to replay
780	 * or destroy beyond the last block we successfully claimed.
781	 */
782	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
783	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
784		(void) zil_parse(zilog, zil_claim_log_block,
785		    zil_claim_log_record, tx, first_txg);
786		zh->zh_claim_txg = first_txg;
787		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
788		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
789		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
790			zh->zh_flags |= ZIL_REPLAY_NEEDED;
791		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
792		dsl_dataset_dirty(dmu_objset_ds(os), tx);
793	}
794
795	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
796	dmu_objset_disown(os, FTAG);
797	return (0);
798}
799
800/*
801 * Check the log by walking the log chain.
802 * Checksum errors are ok as they indicate the end of the chain.
803 * Any other error (no device or read failure) returns an error.
804 */
805/* ARGSUSED */
806int
807zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
808{
809	zilog_t *zilog;
810	objset_t *os;
811	blkptr_t *bp;
812	int error;
813
814	ASSERT(tx == NULL);
815
816	error = dmu_objset_from_ds(ds, &os);
817	if (error != 0) {
818		cmn_err(CE_WARN, "can't open objset %llu, error %d",
819		    (unsigned long long)ds->ds_object, error);
820		return (0);
821	}
822
823	zilog = dmu_objset_zil(os);
824	bp = (blkptr_t *)&zilog->zl_header->zh_log;
825
826	/*
827	 * Check the first block and determine if it's on a log device
828	 * which may have been removed or faulted prior to loading this
829	 * pool.  If so, there's no point in checking the rest of the log
830	 * as its content should have already been synced to the pool.
831	 */
832	if (!BP_IS_HOLE(bp)) {
833		vdev_t *vd;
834		boolean_t valid = B_TRUE;
835
836		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
837		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
838		if (vd->vdev_islog && vdev_is_dead(vd))
839			valid = vdev_log_state_valid(vd);
840		spa_config_exit(os->os_spa, SCL_STATE, FTAG);
841
842		if (!valid)
843			return (0);
844	}
845
846	/*
847	 * Because tx == NULL, zil_claim_log_block() will not actually claim
848	 * any blocks, but just determine whether it is possible to do so.
849	 * In addition to checking the log chain, zil_claim_log_block()
850	 * will invoke zio_claim() with a done func of spa_claim_notify(),
851	 * which will update spa_max_claim_txg.  See spa_load() for details.
852	 */
853	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
854	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
855
856	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
857}
858
859/*
860 * When an itx is "skipped", this function is used to properly mark the
 * waiter as "done", and signal any thread(s) waiting on it. An itx can
862 * be skipped (and not committed to an lwb) for a variety of reasons,
863 * one of them being that the itx was committed via spa_sync(), prior to
864 * it being committed to an lwb; this can happen if a thread calling
865 * zil_commit() is racing with spa_sync().
866 */
867static void
868zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
869{
870	mutex_enter(&zcw->zcw_lock);
871	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
872	zcw->zcw_done = B_TRUE;
873	cv_broadcast(&zcw->zcw_cv);
874	mutex_exit(&zcw->zcw_lock);
875}
876
877/*
878 * This function is used when the given waiter is to be linked into an
 * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
880 * At this point, the waiter will no longer be referenced by the itx,
881 * and instead, will be referenced by the lwb.
882 */
883static void
884zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
885{
886	mutex_enter(&zcw->zcw_lock);
887	ASSERT(!list_link_active(&zcw->zcw_node));
888	ASSERT3P(zcw->zcw_lwb, ==, NULL);
889	ASSERT3P(lwb, !=, NULL);
890	ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
891	    lwb->lwb_state == LWB_STATE_ISSUED);
892
893	list_insert_tail(&lwb->lwb_waiters, zcw);
894	zcw->zcw_lwb = lwb;
895	mutex_exit(&zcw->zcw_lock);
896}
897
898/*
899 * This function is used when zio_alloc_zil() fails to allocate a ZIL
900 * block, and the given waiter must be linked to the "nolwb waiters"
901 * list inside of zil_process_commit_list().
902 */
903static void
904zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
905{
906	mutex_enter(&zcw->zcw_lock);
907	ASSERT(!list_link_active(&zcw->zcw_node));
908	ASSERT3P(zcw->zcw_lwb, ==, NULL);
909	list_insert_tail(nolwb, zcw);
910	mutex_exit(&zcw->zcw_lock);
911}
912
913void
914zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
915{
916	avl_tree_t *t = &lwb->lwb_vdev_tree;
917	avl_index_t where;
918	zil_vdev_node_t *zv, zvsearch;
919	int ndvas = BP_GET_NDVAS(bp);
920	int i;
921
922	if (zfs_nocacheflush)
923		return;
924
925	mutex_enter(&lwb->lwb_vdev_lock);
926	for (i = 0; i < ndvas; i++) {
927		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
928		if (avl_find(t, &zvsearch, &where) == NULL) {
929			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
930			zv->zv_vdev = zvsearch.zv_vdev;
931			avl_insert(t, zv, where);
932		}
933	}
934	mutex_exit(&lwb->lwb_vdev_lock);
935}
936
937void
938zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
939{
940	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
941}
942
943/*
 * This function is called after all VDEVs associated with a given lwb
945 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon
946 * as the lwb write completes, if "zfs_nocacheflush" is set.
947 *
948 * The intention is for this function to be called as soon as the
949 * contents of an lwb are considered "stable" on disk, and will survive
950 * any sudden loss of power. At this point, any threads waiting for the
951 * lwb to reach this state are signalled, and the "waiter" structures
952 * are marked "done".
953 */
954static void
955zil_lwb_flush_vdevs_done(zio_t *zio)
956{
957	lwb_t *lwb = zio->io_private;
958	zilog_t *zilog = lwb->lwb_zilog;
959	dmu_tx_t *tx = lwb->lwb_tx;
960	zil_commit_waiter_t *zcw;
961
962	spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
963
964	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
965
966	mutex_enter(&zilog->zl_lock);
967
968	/*
969	 * Ensure the lwb buffer pointer is cleared before releasing the
970	 * txg. If we have had an allocation failure and the txg is
971	 * waiting to sync then we want zil_sync() to remove the lwb so
972	 * that it's not picked up as the next new one in
973	 * zil_process_commit_list(). zil_sync() will only remove the
974	 * lwb if lwb_buf is null.
975	 */
976	lwb->lwb_buf = NULL;
977	lwb->lwb_tx = NULL;
978
979	ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
980	zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;
981
982	lwb->lwb_root_zio = NULL;
983	lwb->lwb_state = LWB_STATE_DONE;
984
985	if (zilog->zl_last_lwb_opened == lwb) {
986		/*
987		 * Remember the highest committed log sequence number
988		 * for ztest. We only update this value when all the log
989		 * writes succeeded, because ztest wants to ASSERT that
990		 * it got the whole log chain.
991		 */
992		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
993	}
994
995	while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
996		mutex_enter(&zcw->zcw_lock);
997
998		ASSERT(list_link_active(&zcw->zcw_node));
999		list_remove(&lwb->lwb_waiters, zcw);
1000
1001		ASSERT3P(zcw->zcw_lwb, ==, lwb);
1002		zcw->zcw_lwb = NULL;
1003
1004		zcw->zcw_zio_error = zio->io_error;
1005
1006		ASSERT3B(zcw->zcw_done, ==, B_FALSE);
1007		zcw->zcw_done = B_TRUE;
1008		cv_broadcast(&zcw->zcw_cv);
1009
1010		mutex_exit(&zcw->zcw_lock);
1011	}
1012
1013	mutex_exit(&zilog->zl_lock);
1014
1015	/*
1016	 * Now that we've written this log block, we have a stable pointer
1017	 * to the next block in the chain, so it's OK to let the txg in
1018	 * which we allocated the next block sync.
1019	 */
1020	dmu_tx_commit(tx);
1021}
1022
1023/*
1024 * This is called when an lwb write completes. This means, this specific
 * lwb was written to disk, and all dependent lwbs have also been
1026 * written to disk.
1027 *
1028 * At this point, a DKIOCFLUSHWRITECACHE command hasn't been issued to
1029 * the VDEVs involved in writing out this specific lwb. The lwb will be
1030 * "done" once zil_lwb_flush_vdevs_done() is called, which occurs in the
1031 * zio completion callback for the lwb's root zio.
1032 */
1033static void
1034zil_lwb_write_done(zio_t *zio)
1035{
1036	lwb_t *lwb = zio->io_private;
1037	spa_t *spa = zio->io_spa;
1038	zilog_t *zilog = lwb->lwb_zilog;
1039	avl_tree_t *t = &lwb->lwb_vdev_tree;
1040	void *cookie = NULL;
1041	zil_vdev_node_t *zv;
1042
1043	ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
1044
1045	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1046	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
1047	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
1048	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
1049	ASSERT(!BP_IS_GANG(zio->io_bp));
1050	ASSERT(!BP_IS_HOLE(zio->io_bp));
1051	ASSERT(BP_GET_FILL(zio->io_bp) == 0);
1052
1053	abd_put(zio->io_abd);
1054
1055	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
1056
1057	mutex_enter(&zilog->zl_lock);
1058	lwb->lwb_write_zio = NULL;
1059	mutex_exit(&zilog->zl_lock);
1060
1061	if (avl_numnodes(t) == 0)
1062		return;
1063
1064	/*
1065	 * If there was an IO error, we're not going to call zio_flush()
1066	 * on these vdevs, so we simply empty the tree and free the
1067	 * nodes. We avoid calling zio_flush() since there isn't any
1068	 * good reason for doing so, after the lwb block failed to be
1069	 * written out.
1070	 */
1071	if (zio->io_error != 0) {
1072		while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
1073			kmem_free(zv, sizeof (*zv));
1074		return;
1075	}
1076
1077	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
1078		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
1079		if (vd != NULL)
1080			zio_flush(lwb->lwb_root_zio, vd);
1081		kmem_free(zv, sizeof (*zv));
1082	}
1083}
1084
1085/*
1086 * This function's purpose is to "open" an lwb such that it is ready to
1087 * accept new itxs being committed to it. To do this, the lwb's zio
1088 * structures are created, and linked to the lwb. This function is
1089 * idempotent; if the passed in lwb has already been opened, this
1090 * function is essentially a no-op.
1091 */
1092static void
1093zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
1094{
1095	zbookmark_phys_t zb;
1096	zio_priority_t prio;
1097
1098	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1099	ASSERT3P(lwb, !=, NULL);
1100	EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
1101	EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);
1102
1103	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1104	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
1105	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
1106
1107	if (lwb->lwb_root_zio == NULL) {
1108		abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
1109		    BP_GET_LSIZE(&lwb->lwb_blk));
1110
1111		if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
1112			prio = ZIO_PRIORITY_SYNC_WRITE;
1113		else
1114			prio = ZIO_PRIORITY_ASYNC_WRITE;
1115
1116		lwb->lwb_root_zio = zio_root(zilog->zl_spa,
1117		    zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
1118		ASSERT3P(lwb->lwb_root_zio, !=, NULL);
1119
1120		lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
1121		    zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
1122		    BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
1123		    prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
1124		ASSERT3P(lwb->lwb_write_zio, !=, NULL);
1125
1126		lwb->lwb_state = LWB_STATE_OPENED;
1127
1128		mutex_enter(&zilog->zl_lock);
1129
1130		/*
1131		 * The zilog's "zl_last_lwb_opened" field is used to
1132		 * build the lwb/zio dependency chain, which is used to
1133		 * preserve the ordering of lwb completions that is
1134		 * required by the semantics of the ZIL. Each new lwb
1135		 * zio becomes a parent of the "previous" lwb zio, such
1136		 * that the new lwb's zio cannot complete until the
1137		 * "previous" lwb's zio completes.
1138		 *
1139		 * This is required by the semantics of zil_commit();
1140		 * the commit waiters attached to the lwbs will be woken
1141		 * in the lwb zio's completion callback, so this zio
1142		 * dependency graph ensures the waiters are woken in the
1143		 * correct order (the same order the lwbs were created).
1144		 */
1145		lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
1146		if (last_lwb_opened != NULL &&
1147		    last_lwb_opened->lwb_state != LWB_STATE_DONE) {
1148			ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
1149			    last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
1150			ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
1151			zio_add_child(lwb->lwb_root_zio,
1152			    last_lwb_opened->lwb_root_zio);
1153		}
1154		zilog->zl_last_lwb_opened = lwb;
1155
1156		mutex_exit(&zilog->zl_lock);
1157	}
1158
1159	ASSERT3P(lwb->lwb_root_zio, !=, NULL);
1160	ASSERT3P(lwb->lwb_write_zio, !=, NULL);
1161	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
1162}
1163
1164/*
1165 * Define a limited set of intent log block sizes.
1166 *
1167 * These must be a multiple of 4KB. Note only the amount used (again
1168 * aligned to 4KB) actually gets written. However, we can't always just
1169 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
1170 */
1171uint64_t zil_block_buckets[] = {
1172    4096,		/* non TX_WRITE */
1173    8192+4096,		/* data base */
1174    32*1024 + 4096, 	/* NFS writes */
1175    UINT64_MAX
1176};
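
/*
 * For example, with roughly 20KB of log records accumulated
 * (zl_cur_used), the smallest bucket that fits 20KB plus the
 * zil_chain_t overhead is 36KB (32*1024 + 4096); zil_lwb_write_issue()
 * then takes the maximum of that and the last ZIL_PREV_BLKS sizes to
 * smooth out alternating small/large commit patterns.
 */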
1177
1178/*
1179 * Start a log block write and advance to the next log block.
1180 * Calls are serialized.
1181 */
1182static lwb_t *
1183zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
1184{
1185	lwb_t *nlwb = NULL;
1186	zil_chain_t *zilc;
1187	spa_t *spa = zilog->zl_spa;
1188	blkptr_t *bp;
1189	dmu_tx_t *tx;
1190	uint64_t txg;
1191	uint64_t zil_blksz, wsz;
1192	int i, error;
1193	boolean_t slog;
1194
1195	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1196	ASSERT3P(lwb->lwb_root_zio, !=, NULL);
1197	ASSERT3P(lwb->lwb_write_zio, !=, NULL);
1198	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
1199
1200	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
1201		zilc = (zil_chain_t *)lwb->lwb_buf;
1202		bp = &zilc->zc_next_blk;
1203	} else {
1204		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
1205		bp = &zilc->zc_next_blk;
1206	}
1207
1208	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
1209
1210	/*
1211	 * Allocate the next block and save its address in this block
1212	 * before writing it in order to establish the log chain.
1213	 * Note that if the allocation of nlwb synced before we wrote
1214	 * the block that points at it (lwb), we'd leak it if we crashed.
1215	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
1216	 * We dirty the dataset to ensure that zil_sync() will be called
1217	 * to clean up in the event of allocation failure or I/O failure.
1218	 */
1219
1220	tx = dmu_tx_create(zilog->zl_os);
1221
1222	/*
1223	 * Since we are not going to create any new dirty data and we can even
1224	 * help with clearing the existing dirty data, we should not be subject
1225	 * to the dirty data based delays.
1226	 * We (ab)use TXG_WAITED to bypass the delay mechanism.
1227	 * One side effect from using TXG_WAITED is that dmu_tx_assign() can
1228	 * fail if the pool is suspended.  Those are dramatic circumstances,
1229	 * so we return NULL to signal that the normal ZIL processing is not
1230	 * possible and txg_wait_synced() should be used to ensure that the data
1231	 * is on disk.
1232	 */
1233	error = dmu_tx_assign(tx, TXG_WAITED);
1234	if (error != 0) {
1235		ASSERT3S(error, ==, EIO);
1236		dmu_tx_abort(tx);
1237		return (NULL);
1238	}
1239	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1240	txg = dmu_tx_get_txg(tx);
1241
1242	lwb->lwb_tx = tx;
1243
1244	/*
1245	 * Log blocks are pre-allocated. Here we select the size of the next
1246	 * block, based on size used in the last block.
1247	 * - first find the smallest bucket that will fit the block from a
1248	 *   limited set of block sizes. This is because it's faster to write
1249	 *   blocks allocated from the same metaslab as they are adjacent or
1250	 *   close.
1251	 * - next find the maximum from the new suggested size and an array of
1252	 *   previous sizes. This lessens a picket fence effect of wrongly
 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
1254	 *   requests.
1255	 *
1256	 * Note we only write what is used, but we can't just allocate
1257	 * the maximum block size because we can exhaust the available
1258	 * pool log space.
1259	 */
1260	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
1261	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
1262		continue;
1263	zil_blksz = zil_block_buckets[i];
1264	if (zil_blksz == UINT64_MAX)
1265		zil_blksz = SPA_OLD_MAXBLOCKSIZE;
1266	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
1267	for (i = 0; i < ZIL_PREV_BLKS; i++)
1268		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
1269	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
1270
1271	BP_ZERO(bp);
1272
1273	/* pass the old blkptr in order to spread log blocks across devs */
1274	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, &slog);
1275	if (error == 0) {
1276		ASSERT3U(bp->blk_birth, ==, txg);
1277		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
1278		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
1279
1280		/*
1281		 * Allocate a new log write block (lwb).
1282		 */
1283		nlwb = zil_alloc_lwb(zilog, bp, slog, txg);
1284	}
1285
1286	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
1287		/* For Slim ZIL only write what is used. */
1288		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
1289		ASSERT3U(wsz, <=, lwb->lwb_sz);
1290		zio_shrink(lwb->lwb_write_zio, wsz);
1291
1292	} else {
1293		wsz = lwb->lwb_sz;
1294	}
1295
1296	zilc->zc_pad = 0;
1297	zilc->zc_nused = lwb->lwb_nused;
1298	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
1299
1300	/*
1301	 * clear unused data for security
1302	 */
1303	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
1304
1305	spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);
1306
1307	zil_lwb_add_block(lwb, &lwb->lwb_blk);
1308	lwb->lwb_issued_timestamp = gethrtime();
1309	lwb->lwb_state = LWB_STATE_ISSUED;
1310
1311	zio_nowait(lwb->lwb_root_zio);
1312	zio_nowait(lwb->lwb_write_zio);
1313
1314	/*
1315	 * If there was an allocation failure then nlwb will be null which
1316	 * forces a txg_wait_synced().
1317	 */
1318	return (nlwb);
1319}
1320
1321static lwb_t *
1322zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
1323{
1324	lr_t *lrcb, *lrc;
1325	lr_write_t *lrwb, *lrw;
1326	char *lr_buf;
1327	uint64_t dlen, dnow, lwb_sp, reclen, txg;
1328
1329	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1330	ASSERT3P(lwb, !=, NULL);
1331	ASSERT3P(lwb->lwb_buf, !=, NULL);
1332
1333	zil_lwb_write_open(zilog, lwb);
1334
1335	lrc = &itx->itx_lr;
1336	lrw = (lr_write_t *)lrc;
1337
1338	/*
1339	 * A commit itx doesn't represent any on-disk state; instead
1340	 * it's simply used as a place holder on the commit list, and
1341	 * provides a mechanism for attaching a "commit waiter" onto the
1342	 * correct lwb (such that the waiter can be signalled upon
1343	 * completion of that lwb). Thus, we don't process this itx's
1344	 * log record if it's a commit itx (these itx's don't have log
1345	 * records), and instead link the itx's waiter onto the lwb's
1346	 * list of waiters.
1347	 *
1348	 * For more details, see the comment above zil_commit().
1349	 */
1350	if (lrc->lrc_txtype == TX_COMMIT) {
1351		zil_commit_waiter_link_lwb(itx->itx_private, lwb);
1352		itx->itx_private = NULL;
1353		return (lwb);
1354	}
1355
1356	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
1357		dlen = P2ROUNDUP_TYPED(
1358		    lrw->lr_length, sizeof (uint64_t), uint64_t);
1359	} else {
1360		dlen = 0;
1361	}
1362	reclen = lrc->lrc_reclen;
1363	zilog->zl_cur_used += (reclen + dlen);
1364	txg = lrc->lrc_txg;
1365
1366	ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen));
1367
1368cont:
1369	/*
1370	 * If this record won't fit in the current log block, start a new one.
1371	 * For WR_NEED_COPY optimize layout for minimal number of chunks.
1372	 */
1373	lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
1374	if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
1375	    lwb_sp < ZIL_MAX_WASTE_SPACE && (dlen % ZIL_MAX_LOG_DATA == 0 ||
1376	    lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) {
1377		lwb = zil_lwb_write_issue(zilog, lwb);
1378		if (lwb == NULL)
1379			return (NULL);
1380		zil_lwb_write_open(zilog, lwb);
1381		ASSERT(LWB_EMPTY(lwb));
1382		lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
1383		ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
1384	}
1385
1386	dnow = MIN(dlen, lwb_sp - reclen);
1387	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
1388	bcopy(lrc, lr_buf, reclen);
1389	lrcb = (lr_t *)lr_buf;		/* Like lrc, but inside lwb. */
1390	lrwb = (lr_write_t *)lrcb;	/* Like lrw, but inside lwb. */
1391
1392	/*
1393	 * If it's a write, fetch the data or get its blkptr as appropriate.
1394	 */
1395	if (lrc->lrc_txtype == TX_WRITE) {
1396		if (txg > spa_freeze_txg(zilog->zl_spa))
1397			txg_wait_synced(zilog->zl_dmu_pool, txg);
1398		if (itx->itx_wr_state != WR_COPIED) {
1399			char *dbuf;
1400			int error;
1401
1402			if (itx->itx_wr_state == WR_NEED_COPY) {
1403				dbuf = lr_buf + reclen;
1404				lrcb->lrc_reclen += dnow;
1405				if (lrwb->lr_length > dnow)
1406					lrwb->lr_length = dnow;
1407				lrw->lr_offset += dnow;
1408				lrw->lr_length -= dnow;
1409			} else {
1410				ASSERT(itx->itx_wr_state == WR_INDIRECT);
1411				dbuf = NULL;
1412			}
1413
1414			/*
1415			 * We pass in the "lwb_write_zio" rather than
1416			 * "lwb_root_zio" so that the "lwb_write_zio"
1417			 * becomes the parent of any zio's created by
1418			 * the "zl_get_data" callback. The vdevs are
1419			 * flushed after the "lwb_write_zio" completes,
1420			 * so we want to make sure that completion
1421			 * callback waits for these additional zio's,
1422			 * such that the vdevs used by those zio's will
1423			 * be included in the lwb's vdev tree, and those
1424			 * vdevs will be properly flushed. If we passed
1425			 * in "lwb_root_zio" here, then these additional
1426			 * vdevs may not be flushed; e.g. if these zio's
1427			 * completed after "lwb_write_zio" completed.
1428			 */
1429			error = zilog->zl_get_data(itx->itx_private,
1430			    lrwb, dbuf, lwb, lwb->lwb_write_zio);
1431
1432			if (error == EIO) {
1433				txg_wait_synced(zilog->zl_dmu_pool, txg);
1434				return (lwb);
1435			}
1436			if (error != 0) {
1437				ASSERT(error == ENOENT || error == EEXIST ||
1438				    error == EALREADY);
1439				return (lwb);
1440			}
1441		}
1442	}
1443
1444	/*
1445	 * We're actually making an entry, so update lrc_seq to be the
1446	 * log record sequence number.  Note that this is generally not
1447	 * equal to the itx sequence number because not all transactions
1448	 * are synchronous, and sometimes spa_sync() gets there first.
1449	 */
1450	lrcb->lrc_seq = ++zilog->zl_lr_seq;
1451	lwb->lwb_nused += reclen + dnow;
1452
1453	zil_lwb_add_txg(lwb, txg);
1454
1455	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
1456	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
1457
1458	dlen -= dnow;
1459	if (dlen > 0) {
1460		zilog->zl_cur_used += reclen;
1461		goto cont;
1462	}
1463
1464	return (lwb);
1465}
1466
1467itx_t *
1468zil_itx_create(uint64_t txtype, size_t lrsize)
1469{
1470	itx_t *itx;
1471
1472	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
1473
1474	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
1475	itx->itx_lr.lrc_txtype = txtype;
1476	itx->itx_lr.lrc_reclen = lrsize;
1477	itx->itx_lr.lrc_seq = 0;	/* defensive */
1478	itx->itx_sync = B_TRUE;		/* default is synchronous */
1479
1480	return (itx);
1481}
1482
1483void
1484zil_itx_destroy(itx_t *itx)
1485{
1486	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
1487}
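
/*
 * A minimal sketch of how callers typically use the itx interface; the
 * TX_WRITE fields filled in below are illustrative only, and the exact
 * record layout depends on the txtype:
 *
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	lr_write_t *lr = (lr_write_t *)&itx->itx_lr;
 *	lr->lr_foid = object;
 *	lr->lr_offset = offset;
 *	lr->lr_length = length;
 *	itx->itx_sync = B_TRUE;		(e.g. the file is open O_DSYNC)
 *	zil_itx_assign(zilog, itx, tx);
 *
 * The itx is then either copied into an lwb by zil_lwb_commit() or
 * freed by zil_itxg_clean() once the txg containing it has synced.
 */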
1488
1489/*
1490 * Free up the sync and async itxs. The itxs_t has already been detached
1491 * so no locks are needed.
1492 */
1493static void
1494zil_itxg_clean(itxs_t *itxs)
1495{
1496	itx_t *itx;
1497	list_t *list;
1498	avl_tree_t *t;
1499	void *cookie;
1500	itx_async_node_t *ian;
1501
1502	list = &itxs->i_sync_list;
1503	while ((itx = list_head(list)) != NULL) {
1504		/*
1505		 * In the general case, commit itxs will not be found
1506		 * here, as they'll be committed to an lwb via
1507		 * zil_lwb_commit(), and free'd in that function. Having
1508		 * said that, it is still possible for commit itxs to be
1509		 * found here, due to the following race:
1510		 *
1511		 *	- a thread calls zil_commit() which assigns the
1512		 *	  commit itx to a per-txg i_sync_list
1513		 *	- zil_itxg_clean() is called (e.g. via spa_sync())
1514		 *	  while the waiter is still on the i_sync_list
1515		 *
1516		 * There's nothing to prevent syncing the txg while the
1517		 * waiter is on the i_sync_list. This normally doesn't
1518		 * happen because spa_sync() is slower than zil_commit(),
1519		 * but if zil_commit() calls txg_wait_synced() (e.g.
1520		 * because zil_create() or zil_commit_writer_stall() is
1521		 * called) we will hit this case.
1522		 */
1523		if (itx->itx_lr.lrc_txtype == TX_COMMIT)
1524			zil_commit_waiter_skip(itx->itx_private);
1525
1526		list_remove(list, itx);
1527		zil_itx_destroy(itx);
1528	}
1529
1530	cookie = NULL;
1531	t = &itxs->i_async_tree;
1532	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1533		list = &ian->ia_list;
1534		while ((itx = list_head(list)) != NULL) {
1535			list_remove(list, itx);
1536			/* commit itxs should never be on the async lists. */
1537			ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
1538			zil_itx_destroy(itx);
1539		}
1540		list_destroy(list);
1541		kmem_free(ian, sizeof (itx_async_node_t));
1542	}
1543	avl_destroy(t);
1544
1545	kmem_free(itxs, sizeof (itxs_t));
1546}
1547
1548static int
1549zil_aitx_compare(const void *x1, const void *x2)
1550{
1551	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
1552	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
1553
1554	if (o1 < o2)
1555		return (-1);
1556	if (o1 > o2)
1557		return (1);
1558
1559	return (0);
1560}
1561
1562/*
1563 * Remove all async itx with the given oid.
1564 */
1565static void
1566zil_remove_async(zilog_t *zilog, uint64_t oid)
1567{
1568	uint64_t otxg, txg;
1569	itx_async_node_t *ian;
1570	avl_tree_t *t;
1571	avl_index_t where;
1572	list_t clean_list;
1573	itx_t *itx;
1574
1575	ASSERT(oid != 0);
1576	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
1577
1578	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1579		otxg = ZILTEST_TXG;
1580	else
1581		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1582
1583	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1584		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1585
1586		mutex_enter(&itxg->itxg_lock);
1587		if (itxg->itxg_txg != txg) {
1588			mutex_exit(&itxg->itxg_lock);
1589			continue;
1590		}
1591
1592		/*
1593		 * Locate the object node and append its list.
1594		 */
1595		t = &itxg->itxg_itxs->i_async_tree;
1596		ian = avl_find(t, &oid, &where);
1597		if (ian != NULL)
1598			list_move_tail(&clean_list, &ian->ia_list);
1599		mutex_exit(&itxg->itxg_lock);
1600	}
1601	while ((itx = list_head(&clean_list)) != NULL) {
1602		list_remove(&clean_list, itx);
1603		/* commit itxs should never be on the async lists. */
1604		ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
1605		zil_itx_destroy(itx);
1606	}
1607	list_destroy(&clean_list);
1608}
1609
1610void
1611zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
1612{
1613	uint64_t txg;
1614	itxg_t *itxg;
1615	itxs_t *itxs, *clean = NULL;
1616
1617	/*
1618	 * Object ids can be re-instantiated in the next txg so
1619	 * remove any async transactions to avoid future leaks.
1620	 * This can happen if a fsync occurs on the re-instantiated
1621	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
1622	 * the new file data and flushes a write record for the old object.
1623	 */
1624	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
1625		zil_remove_async(zilog, itx->itx_oid);
1626
1627	/*
1628	 * Ensure the data of a renamed file is committed before the rename.
1629	 */
1630	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
1631		zil_async_to_sync(zilog, itx->itx_oid);
1632
1633	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
1634		txg = ZILTEST_TXG;
1635	else
1636		txg = dmu_tx_get_txg(tx);
1637
1638	itxg = &zilog->zl_itxg[txg & TXG_MASK];
1639	mutex_enter(&itxg->itxg_lock);
1640	itxs = itxg->itxg_itxs;
1641	if (itxg->itxg_txg != txg) {
1642		if (itxs != NULL) {
1643			/*
1644			 * The zil_clean callback hasn't got around to cleaning
1645			 * this itxg. Save the itxs for release below.
1646			 * This should be rare.
1647			 */
1648			zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
1649			    "txg %llu", itxg->itxg_txg);
1650			clean = itxg->itxg_itxs;
1651		}
1652		itxg->itxg_txg = txg;
1653		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);
1654
1655		list_create(&itxs->i_sync_list, sizeof (itx_t),
1656		    offsetof(itx_t, itx_node));
1657		avl_create(&itxs->i_async_tree, zil_aitx_compare,
1658		    sizeof (itx_async_node_t),
1659		    offsetof(itx_async_node_t, ia_node));
1660	}
1661	if (itx->itx_sync) {
1662		list_insert_tail(&itxs->i_sync_list, itx);
1663	} else {
1664		avl_tree_t *t = &itxs->i_async_tree;
1665		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
1666		itx_async_node_t *ian;
1667		avl_index_t where;
1668
1669		ian = avl_find(t, &foid, &where);
1670		if (ian == NULL) {
1671			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
1672			list_create(&ian->ia_list, sizeof (itx_t),
1673			    offsetof(itx_t, itx_node));
1674			ian->ia_foid = foid;
1675			avl_insert(t, ian, where);
1676		}
1677		list_insert_tail(&ian->ia_list, itx);
1678	}
1679
1680	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
1681
1682	/*
1683	 * We don't want to dirty the ZIL using ZILTEST_TXG, because
1684	 * zil_clean() will never be called using ZILTEST_TXG. Thus, we
1685	 * need to be careful to always dirty the ZIL using the "real"
1686	 * TXG (not itxg_txg) even when the SPA is frozen.
1687	 */
1688	zilog_dirty(zilog, dmu_tx_get_txg(tx));
1689	mutex_exit(&itxg->itxg_lock);
1690
1691	/* Release the old itxs now we've dropped the lock */
1692	if (clean != NULL)
1693		zil_itxg_clean(clean);
1694}
1695
1696/*
1697 * If there are any in-memory intent log transactions which have now been
1698 * synced then start up a taskq to free them. We should only do this after we
 * have written out the uberblocks (i.e. txg has been committed) so we
 * don't inadvertently clean out in-memory log records that would be required
1701 * by zil_commit().
1702 */
1703void
1704zil_clean(zilog_t *zilog, uint64_t synced_txg)
1705{
1706	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1707	itxs_t *clean_me;
1708
1709	ASSERT3U(synced_txg, <, ZILTEST_TXG);
1710
1711	mutex_enter(&itxg->itxg_lock);
1712	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
1713		mutex_exit(&itxg->itxg_lock);
1714		return;
1715	}
1716	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
1717	ASSERT3U(itxg->itxg_txg, !=, 0);
1718	clean_me = itxg->itxg_itxs;
1719	itxg->itxg_itxs = NULL;
1720	itxg->itxg_txg = 0;
1721	mutex_exit(&itxg->itxg_lock);
1722	/*
1723	 * Preferably start a task queue to free up the old itxs but
1724	 * if taskq_dispatch can't allocate resources to do that then
1725	 * free it in-line. This should be rare. Note, using TQ_SLEEP
1726	 * created a bad performance problem.
1727	 */
1728	ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
1729	ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
1730	if (taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
1731	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL)
1732		zil_itxg_clean(clean_me);
1733}
1734
1735/*
1736 * This function will traverse the queue of itxs that need to be
1737 * committed, and move them onto the ZIL's zl_itx_commit_list.
1738 */
1739static void
1740zil_get_commit_list(zilog_t *zilog)
1741{
1742	uint64_t otxg, txg;
1743	list_t *commit_list = &zilog->zl_itx_commit_list;
1744
1745	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1746
1747	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1748		otxg = ZILTEST_TXG;
1749	else
1750		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1751
1752	/*
1753	 * This is inherently racy, since there is nothing to prevent
1754	 * the last synced txg from changing. That's okay since we'll
1755	 * only commit things in the future.
1756	 */
1757	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1758		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1759
1760		mutex_enter(&itxg->itxg_lock);
1761		if (itxg->itxg_txg != txg) {
1762			mutex_exit(&itxg->itxg_lock);
1763			continue;
1764		}
1765
1766		/*
1767		 * If we're adding itx records to the zl_itx_commit_list,
1768		 * then the zil better be dirty in this "txg". We can assert
1769		 * that here since we're holding the itxg_lock which will
1770		 * prevent spa_sync from cleaning it. Once we add the itxs
1771		 * to the zl_itx_commit_list we must commit it to disk even
1772		 * if it's unnecessary (i.e. the txg was synced).
1773		 */
1774		ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
1775		    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
1776		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
1777
1778		mutex_exit(&itxg->itxg_lock);
1779	}
1780}
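
/*
 * To illustrate the loop bounds used above: TXG_CONCURRENT_STATES txgs
 * can be in flight at once (open, quiescing, and syncing), so if the
 * last synced txg were, say, 40, the loop would visit txgs 41, 42, and
 * 43, i.e. every bucket that could still hold unsynced itxs. Buckets
 * whose itxg_txg doesn't match the txg being probed are stale or empty
 * and are skipped.
 */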
1781
1782/*
1783 * Move the async itxs for a specified object to commit into sync lists.
1784 */
1785static void
1786zil_async_to_sync(zilog_t *zilog, uint64_t foid)
1787{
1788	uint64_t otxg, txg;
1789	itx_async_node_t *ian;
1790	avl_tree_t *t;
1791	avl_index_t where;
1792
1793	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1794		otxg = ZILTEST_TXG;
1795	else
1796		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1797
1798	/*
1799	 * This is inherently racy, since there is nothing to prevent
1800	 * the last synced txg from changing.
1801	 */
1802	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1803		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1804
1805		mutex_enter(&itxg->itxg_lock);
1806		if (itxg->itxg_txg != txg) {
1807			mutex_exit(&itxg->itxg_lock);
1808			continue;
1809		}
1810
1811		/*
1812		 * If a foid is specified then find that node and append its
1813		 * list. Otherwise walk the tree appending all the lists
1814		 * to the sync list. We add to the end rather than the
1815		 * beginning to ensure the create has happened.
1816		 */
1817		t = &itxg->itxg_itxs->i_async_tree;
1818		if (foid != 0) {
1819			ian = avl_find(t, &foid, &where);
1820			if (ian != NULL) {
1821				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1822				    &ian->ia_list);
1823			}
1824		} else {
1825			void *cookie = NULL;
1826
1827			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1828				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1829				    &ian->ia_list);
1830				list_destroy(&ian->ia_list);
1831				kmem_free(ian, sizeof (itx_async_node_t));
1832			}
1833		}
1834		mutex_exit(&itxg->itxg_lock);
1835	}
1836}
1837
1838/*
1839 * This function will prune commit itxs that are at the head of the
1840 * commit list (it won't prune past the first non-commit itx), and
1841 * either: a) attach them to the last lwb that's still pending
1842 * completion, or b) skip them altogether.
1843 *
1844 * This is used as a performance optimization to prevent commit itxs
1845 * from generating new lwbs when it's unnecessary to do so.
1846 */
1847static void
1848zil_prune_commit_list(zilog_t *zilog)
1849{
1850	itx_t *itx;
1851
1852	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1853
1854	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
1855		lr_t *lrc = &itx->itx_lr;
1856		if (lrc->lrc_txtype != TX_COMMIT)
1857			break;
1858
1859		mutex_enter(&zilog->zl_lock);
1860
1861		lwb_t *last_lwb = zilog->zl_last_lwb_opened;
1862		if (last_lwb == NULL || last_lwb->lwb_state == LWB_STATE_DONE) {
1863			/*
1864			 * All of the itxs this waiter was waiting on
1865			 * must have already completed (or there were
1866			 * never any itx's for it to wait on), so it's
1867			 * safe to skip this waiter and mark it done.
1868			 */
1869			zil_commit_waiter_skip(itx->itx_private);
1870		} else {
1871			zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
1872			itx->itx_private = NULL;
1873		}
1874
1875		mutex_exit(&zilog->zl_lock);
1876
1877		list_remove(&zilog->zl_itx_commit_list, itx);
1878		zil_itx_destroy(itx);
1879	}
1880
1881	IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
1882}
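
/*
 * An illustrative scenario for the pruning above: a thread calls
 * zil_commit() for a file whose relevant itxs were all written out by
 * an earlier caller, so by the time its TX_COMMIT itx reaches the head
 * of the commit list there is nothing else left to write. Rather than
 * generating a new lwb just to carry that waiter, the waiter is hung
 * off the last lwb still pending completion (and signalled when that
 * lwb completes), or skipped outright if no such lwb exists.
 */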
1883
1884static void
1885zil_commit_writer_stall(zilog_t *zilog)
1886{
1887	/*
1888	 * When zio_alloc_zil() fails to allocate the next lwb block on
1889	 * disk, we must call txg_wait_synced() to ensure all of the
1890	 * lwbs in the zilog's zl_lwb_list are synced and then freed (in
1891	 * zil_sync()), such that any subsequent ZIL writer (i.e. a call
1892	 * to zil_process_commit_list()) will have to call zil_create(),
1893	 * and start a new ZIL chain.
1894	 *
1895	 * Since zio_alloc_zil() failed, the lwb that was previously
1896	 * issued does not have a pointer to the "next" lwb on disk.
1897	 * Thus, if another ZIL writer thread was to allocate the "next"
1898	 * on-disk lwb, that block could be leaked in the event of a
1899	 * crash (because the previous lwb on-disk would not point to
1900	 * it).
1901	 *
1902	 * We must hold the zilog's zl_writer_lock while we do this, to
1903	 * ensure no new threads enter zil_process_commit_list() until
1904	 * all lwb's in the zl_lwb_list have been synced and freed
1905	 * (which is achieved via the txg_wait_synced() call).
1906	 */
1907	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1908	txg_wait_synced(zilog->zl_dmu_pool, 0);
1909	ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
1910}
1911
1912/*
1913 * This function will traverse the commit list, creating new lwbs as
1914 * needed, and committing the itxs from the commit list to these newly
1915 * created lwbs. Additionally, as a new lwb is created, the previous
1916 * lwb will be issued to the zio layer to be written to disk.
1917 */
1918static void
1919zil_process_commit_list(zilog_t *zilog)
1920{
1921	spa_t *spa = zilog->zl_spa;
1922	list_t nolwb_waiters;
1923	lwb_t *lwb;
1924	itx_t *itx;
1925
1926	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1927
1928	/*
1929	 * Return if there's nothing to commit before we dirty the fs by
1930	 * calling zil_create().
1931	 */
1932	if (list_head(&zilog->zl_itx_commit_list) == NULL)
1933		return;
1934
1935	list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
1936	    offsetof(zil_commit_waiter_t, zcw_node));
1937
1938	lwb = list_tail(&zilog->zl_lwb_list);
1939	if (lwb == NULL) {
1940		lwb = zil_create(zilog);
1941	} else {
1942		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
1943		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE);
1944	}
1945
1946	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
1947		lr_t *lrc = &itx->itx_lr;
1948		uint64_t txg = lrc->lrc_txg;
1949
1950		ASSERT3U(txg, !=, 0);
1951
1952		if (lrc->lrc_txtype == TX_COMMIT) {
1953			DTRACE_PROBE2(zil__process__commit__itx,
1954			    zilog_t *, zilog, itx_t *, itx);
1955		} else {
1956			DTRACE_PROBE2(zil__process__normal__itx,
1957			    zilog_t *, zilog, itx_t *, itx);
1958		}
1959
1960		/*
1961		 * This is inherently racy and may result in us writing
1962		 * out a log block for a txg that was just synced. This
1963		 * is ok since we'll end up cleaning up that log block the
1964		 * next time we call zil_sync().
1965		 */
1966		boolean_t synced = txg <= spa_last_synced_txg(spa);
1967		boolean_t frozen = txg > spa_freeze_txg(spa);
1968
1969		if (!synced || frozen) {
1970			if (lwb != NULL) {
1971				lwb = zil_lwb_commit(zilog, itx, lwb);
1972			} else if (lrc->lrc_txtype == TX_COMMIT) {
1973				ASSERT3P(lwb, ==, NULL);
1974				zil_commit_waiter_link_nolwb(
1975				    itx->itx_private, &nolwb_waiters);
1976			}
1977		} else if (lrc->lrc_txtype == TX_COMMIT) {
1978			ASSERT3B(synced, ==, B_TRUE);
1979			ASSERT3B(frozen, ==, B_FALSE);
1980
1981			/*
1982			 * If this is a commit itx, then there will be a
1983			 * thread that is either: already waiting for
1984			 * it, or soon will be waiting.
1985			 *
1986			 * This itx has already been committed to disk
1987			 * via spa_sync() so we don't bother committing
1988			 * it to an lwb. As a result, we cannot use the
1989			 * lwb zio callback to signal the waiter and
1990			 * mark it as done, so we must do that here.
1991			 */
1992			zil_commit_waiter_skip(itx->itx_private);
1993		}
1994
1995		list_remove(&zilog->zl_itx_commit_list, itx);
1996		zil_itx_destroy(itx);
1997	}
1998	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1999
2000	if (lwb == NULL) {
2001		/*
2002		 * This indicates zio_alloc_zil() failed to allocate the
2003		 * "next" lwb on-disk. When this happens, we must stall
2004		 * the ZIL write pipeline; see the comment within
2005		 * zil_commit_writer_stall() for more details.
2006		 */
2007		zil_commit_writer_stall(zilog);
2008
2009		/*
2010		 * Additionally, we have to signal and mark the "nolwb"
2011		 * waiters as "done" here, since without an lwb, we
2012		 * can't do this via zil_lwb_flush_vdevs_done() like
2013		 * normal.
2014		 */
2015		zil_commit_waiter_t *zcw;
2016		while ((zcw = list_head(&nolwb_waiters)) != NULL) {
2017			zil_commit_waiter_skip(zcw);
2018			list_remove(&nolwb_waiters, zcw);
2019		}
2020	} else {
2021		ASSERT(list_is_empty(&nolwb_waiters));
2022		ASSERT3P(lwb, !=, NULL);
2023		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
2024		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE);
2025
2026		/*
2027		 * At this point, the ZIL block pointed at by the "lwb"
2028		 * variable is in one of the following states: "closed"
2029		 * or "open".
2030		 *
2031		 * If it's "closed", then no itxs have been committed to
2032		 * it, so there's no point in issuing its zio (i.e.
2033		 * it's "empty").
2034		 *
2035		 * If it's in the "open" state, then it contains one or more
2036		 * itxs that eventually need to be committed to stable
2037		 * storage. In this case we intentionally do not issue
2038		 * the lwb's zio to disk yet, and instead rely on one of
2039		 * the following two mechanisms for issuing the zio:
2040		 *
2041		 * 1. Ideally, there will be more ZIL activity occurring
2042		 * on the system, such that this function will be
2043		 * immediately called again (not necessarily by the same
2044		 * thread) and this lwb's zio will be issued via
2045		 * zil_lwb_commit(). This way, the lwb is guaranteed to
2046		 * be "full" when it is issued to disk, and we'll make
2047		 * use of the lwb's size the best we can.
2048		 *
2049		 * 2. If there isn't sufficient ZIL activity occurring on
2050		 * the system, such that this lwb's zio isn't issued via
2051		 * zil_lwb_commit(), zil_commit_waiter() will issue the
2052		 * lwb's zio. If this occurs, the lwb is not guaranteed
2053		 * to be "full" by the time its zio is issued, and means
2054		 * the size of the lwb was "too large" given the amount
2055		 * of ZIL activity occurring on the system at that time.
2056		 *
2057		 * We do this for a couple of reasons:
2058		 *
2059		 * 1. To try and reduce the number of IOPs needed to
2060		 * write the same number of itxs. If an lwb has space
2061		 * available in its buffer for more itxs, and more itxs
2062		 * will be committed relatively soon (relative to the
2063		 * latency of performing a write), then it's beneficial
2064		 * to wait for these "next" itxs. This way, more itxs
2065		 * can be committed to stable storage with fewer writes.
2066		 *
2067		 * 2. To try and use the largest lwb block size that the
2068		 * incoming rate of itxs can support. Again, this is to
2069		 * try and pack as many itxs into as few lwbs as
2070		 * possible, without significantly impacting the latency
2071		 * of each individual itx.
2072		 */
2073	}
2074}
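
/*
 * A purely illustrative timeline of the two issuance mechanisms
 * described at the end of zil_process_commit_list() above:
 *
 *	t0: itx A is committed to lwb L; L is left "opened" and its
 *	    zio is not yet issued.
 *	t1: more itxs arrive and L fills up; zil_lwb_commit() opens a
 *	    new lwb and issues L's zio (mechanism 1).
 *
 * or, if no further itxs arrive:
 *
 *	t1: the thread waiting on itx A reaches its timeout in
 *	    zil_commit_waiter(), which issues L's zio itself
 *	    (mechanism 2).
 */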
2075
2076/*
2077 * This function is responsible for ensuring the passed in commit waiter
2078 * (and associated commit itx) is committed to an lwb. If the waiter is
2079 * not already committed to an lwb, all itxs in the zilog's queue of
2080 * itxs will be processed. The assumption is the passed in waiter's
2081 * commit itx will be found in the queue just like the other non-commit
2082 * itxs, such that when the entire queue is processed, the waiter will
2083 * have been committed to an lwb.
2084 *
2085 * The lwb associated with the passed in waiter is not guaranteed to
2086 * have been issued by the time this function completes. If the lwb is
2087 * not issued, we rely on future calls to zil_commit_writer() to issue
2088 * the lwb, or the timeout mechanism found in zil_commit_waiter().
2089 */
2090static void
2091zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
2092{
2093	ASSERT(!MUTEX_HELD(&zilog->zl_lock));
2094	ASSERT(spa_writeable(zilog->zl_spa));
2095	ASSERT0(zilog->zl_suspend);
2096
2097	mutex_enter(&zilog->zl_writer_lock);
2098
2099	if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
2100		/*
2101		 * It's possible that, while we were waiting to acquire
2102		 * the "zl_writer_lock", another thread committed this
2103		 * waiter to an lwb. If that occurs, we bail out early,
2104		 * without processing any of the zilog's queue of itxs.
2105		 *
2106		 * On certain workloads and system configurations, the
2107		 * "zl_writer_lock" can become highly contended. In an
2108		 * attempt to reduce this contention, we immediately drop
2109		 * the lock if the waiter has already been processed.
2110		 *
2111		 * We've measured this optimization to reduce CPU spent
2112		 * contending on this lock by up to 5%, using a system
2113		 * with 32 CPUs, low latency storage (~50 usec writes),
2114		 * and 1024 threads performing sync writes.
2115		 */
2116		goto out;
2117	}
2118
2119	zil_get_commit_list(zilog);
2120	zil_prune_commit_list(zilog);
2121	zil_process_commit_list(zilog);
2122
2123out:
2124	mutex_exit(&zilog->zl_writer_lock);
2125}
2126
2127static void
2128zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
2129{
2130	ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock));
2131	ASSERT(MUTEX_HELD(&zcw->zcw_lock));
2132	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
2133
2134	lwb_t *lwb = zcw->zcw_lwb;
2135	ASSERT3P(lwb, !=, NULL);
2136	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);
2137
2138	/*
2139	 * If the lwb has already been issued by another thread, we can
2140	 * immediately return since there's no work to be done (the
2141	 * point of this function is to issue the lwb). Additionally, we
2142	 * do this prior to acquiring the zl_writer_lock, to avoid
2143	 * acquiring it when it's not necessary to do so.
2144	 */
2145	if (lwb->lwb_state == LWB_STATE_ISSUED ||
2146	    lwb->lwb_state == LWB_STATE_DONE)
2147		return;
2148
2149	/*
2150	 * In order to call zil_lwb_write_issue() we must hold the
2151	 * zilog's "zl_writer_lock". We can't simply acquire that lock,
2152	 * since we're already holding the commit waiter's "zcw_lock",
2153	 * and those two locks are acquired in the opposite order
2154	 * elsewhere.
2155	 */
2156	mutex_exit(&zcw->zcw_lock);
2157	mutex_enter(&zilog->zl_writer_lock);
2158	mutex_enter(&zcw->zcw_lock);
2159
2160	/*
2161	 * Since we just dropped and re-acquired the commit waiter's
2162	 * lock, we have to re-check to see if the waiter was marked
2163	 * "done" during that process. If the waiter was marked "done",
2164	 * the "lwb" pointer is no longer valid (it can be free'd after
2165	 * the waiter is marked "done"), so without this check we could
2166	 * wind up with a use-after-free error below.
2167	 */
2168	if (zcw->zcw_done)
2169		goto out;
2170
2171	ASSERT3P(lwb, ==, zcw->zcw_lwb);
2172
2173	/*
2174	 * We've already checked this above, but since we hadn't
2175	 * acquired the zilog's zl_writer_lock, we have to perform this
2176	 * check a second time while holding the lock. We can't call
2177	 * zil_lwb_write_issue() if the lwb had already been issued.
2178	 */
2179	if (lwb->lwb_state == LWB_STATE_ISSUED ||
2180	    lwb->lwb_state == LWB_STATE_DONE)
2181		goto out;
2182
2183	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
2184
2185	/*
2186	 * As described in the comments above zil_commit_waiter() and
2187	 * zil_process_commit_list(), we need to issue this lwb's zio
2188	 * since we've reached the commit waiter's timeout and it still
2189	 * hasn't been issued.
2190	 */
2191	lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);
2192
2193	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
2194
2195	/*
2196	 * Since the lwb's zio hadn't been issued by the time this thread
2197	 * reached its timeout, we reset the zilog's "zl_cur_used" field
2198	 * to influence the zil block size selection algorithm.
2199	 *
2200	 * By having to issue the lwb's zio here, it means the size of the
2201	 * lwb was too large, given the incoming throughput of itxs.  By
2202	 * setting "zl_cur_used" to zero, we communicate this fact to the
2203	 * block size selection algorithm, so it can take this information
2204	 * into account, and potentially select a smaller size for the
2205	 * next lwb block that is allocated.
2206	 */
2207	zilog->zl_cur_used = 0;
2208
2209	if (nlwb == NULL) {
2210		/*
2211		 * When zil_lwb_write_issue() returns NULL, this
2212		 * indicates zio_alloc_zil() failed to allocate the
2213		 * "next" lwb on-disk. When this occurs, the ZIL write
2214		 * pipeline must be stalled; see the comment within the
2215		 * zil_commit_writer_stall() function for more details.
2216		 *
2217		 * We must drop the commit waiter's lock prior to
2218		 * calling zil_commit_writer_stall() or else we can wind
2219		 * up with the following deadlock:
2220		 *
2221		 * - This thread is waiting for the txg to sync while
2222		 *   holding the waiter's lock; txg_wait_synced() is
2223		 *   used within zil_commit_writer_stall().
2224		 *
2225		 * - The txg can't sync because it is waiting for this
2226		 *   lwb's zio callback to call dmu_tx_commit().
2227		 *
2228		 * - The lwb's zio callback can't call dmu_tx_commit()
2229		 *   because it's blocked trying to acquire the waiter's
2230		 *   lock, which occurs prior to calling dmu_tx_commit().
2231		 */
2232		mutex_exit(&zcw->zcw_lock);
2233		zil_commit_writer_stall(zilog);
2234		mutex_enter(&zcw->zcw_lock);
2235	}
2236
2237out:
2238	mutex_exit(&zilog->zl_writer_lock);
2239	ASSERT(MUTEX_HELD(&zcw->zcw_lock));
2240}
2241
2242/*
2243 * This function is responsible for performing the following two tasks:
2244 *
2245 * 1. its primary responsibility is to block until the given "commit
2246 *    waiter" is considered "done".
2247 *
2248 * 2. its secondary responsibility is to issue the zio for the lwb that
2249 *    the given "commit waiter" is waiting on, if this function has
2250 *    waited "long enough" and the lwb is still in the "open" state.
2251 *
2252 * Given a sufficient amount of itxs being generated and written using
2253 * the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
2254 * function. If this does not occur, this secondary responsibility will
2255 * ensure the lwb is issued even if there is no other synchronous
2256 * activity on the system.
2257 *
2258 * For more details, see zil_process_commit_list(); more specifically,
2259 * the comment at the bottom of that function.
2260 */
2261static void
2262zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
2263{
2264	ASSERT(!MUTEX_HELD(&zilog->zl_lock));
2265	ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock));
2266	ASSERT(spa_writeable(zilog->zl_spa));
2267	ASSERT0(zilog->zl_suspend);
2268
2269	mutex_enter(&zcw->zcw_lock);
2270
2271	/*
2272	 * The timeout is scaled based on the lwb latency to avoid
2273	 * significantly impacting the latency of each individual itx.
2274	 * For more details, see the comment at the bottom of the
2275	 * zil_process_commit_list() function.
2276	 */
2277	int pct = MAX(zfs_commit_timeout_pct, 1);
2278	hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
2279	hrtime_t wakeup = gethrtime() + sleep;
2280	boolean_t timedout = B_FALSE;
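
	/*
	 * A worked example of the scaling above (numbers purely
	 * illustrative): if zfs_commit_timeout_pct were 5 and the
	 * last lwb took 1 ms to complete, "sleep" would be
	 * 1 ms * 5 / 100 = 50 usec, so this waiter would only issue
	 * the lwb itself if nothing else issued it within that window.
	 */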
2281
2282	while (!zcw->zcw_done) {
2283		ASSERT(MUTEX_HELD(&zcw->zcw_lock));
2284
2285		lwb_t *lwb = zcw->zcw_lwb;
2286
2287		/*
2288		 * Usually, the waiter will have a non-NULL lwb field here,
2289		 * but it's possible for it to be NULL as a result of
2290		 * zil_commit() racing with spa_sync().
2291		 *
2292		 * When zil_clean() is called, it's possible for the itxg
2293		 * list (which may be cleaned via a taskq) to contain
2294		 * commit itxs. When this occurs, the commit waiters linked
2295		 * off of these commit itxs will not be committed to an
2296		 * lwb.  Additionally, these commit waiters will not be
2297		 * marked done until zil_commit_waiter_skip() is called via
2298		 * zil_itxg_clean().
2299		 *
2300		 * Thus, it's possible for this commit waiter (i.e. the
2301		 * "zcw" variable) to be found in this "in between" state;
2302		 * where its "zcw_lwb" field is NULL, and it hasn't yet
2303		 * been skipped, so its "zcw_done" field is still B_FALSE.
2304		 */
2305		IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);
2306
2307		if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
2308			ASSERT3B(timedout, ==, B_FALSE);
2309
2310			/*
2311			 * If the lwb hasn't been issued yet, then we
2312			 * need to wait with a timeout, in case this
2313			 * function needs to issue the lwb after the
2314			 * timeout is reached; responsibility (2) from
2315			 * the comment above this function.
2316			 */
2317			clock_t timeleft = cv_timedwait_hires(&zcw->zcw_cv,
2318			    &zcw->zcw_lock, wakeup, USEC2NSEC(1),
2319			    CALLOUT_FLAG_ABSOLUTE);
2320
2321			if (timeleft >= 0 || zcw->zcw_done)
2322				continue;
2323
2324			timedout = B_TRUE;
2325			zil_commit_waiter_timeout(zilog, zcw);
2326
2327			if (!zcw->zcw_done) {
2328				/*
2329				 * If the commit waiter has already been
2330				 * marked "done", it's possible for the
2331				 * waiter's lwb structure to have already
2332				 * been freed.  Thus, we can only reliably
2333				 * make these assertions if the waiter
2334				 * isn't done.
2335				 */
2336				ASSERT3P(lwb, ==, zcw->zcw_lwb);
2337				ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
2338			}
2339		} else {
2340			/*
2341			 * If the lwb isn't open, then it must have already
2342			 * been issued. In that case, there's no need to
2343			 * use a timeout when waiting for the lwb to
2344			 * complete.
2345			 *
2346			 * Additionally, if the lwb is NULL, the waiter
2347			 * will soon be signalled and marked done via
2348			 * zil_clean() and zil_itxg_clean(), so no timeout
2349			 * is required.
2350			 */
2351
2352			IMPLY(lwb != NULL,
2353			    lwb->lwb_state == LWB_STATE_ISSUED ||
2354			    lwb->lwb_state == LWB_STATE_DONE);
2355			cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
2356		}
2357	}
2358
2359	mutex_exit(&zcw->zcw_lock);
2360}
2361
2362static zil_commit_waiter_t *
2363zil_alloc_commit_waiter(void)
2364{
2365	zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
2366
2367	cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
2368	mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
2369	list_link_init(&zcw->zcw_node);
2370	zcw->zcw_lwb = NULL;
2371	zcw->zcw_done = B_FALSE;
2372	zcw->zcw_zio_error = 0;
2373
2374	return (zcw);
2375}
2376
2377static void
2378zil_free_commit_waiter(zil_commit_waiter_t *zcw)
2379{
2380	ASSERT(!list_link_active(&zcw->zcw_node));
2381	ASSERT3P(zcw->zcw_lwb, ==, NULL);
2382	ASSERT3B(zcw->zcw_done, ==, B_TRUE);
2383	mutex_destroy(&zcw->zcw_lock);
2384	cv_destroy(&zcw->zcw_cv);
2385	kmem_cache_free(zil_zcw_cache, zcw);
2386}
2387
2388/*
2389 * This function is used to create a TX_COMMIT itx and assign it. This
2390 * way, it will be linked into the ZIL's list of synchronous itxs, and
2391 * then later committed to an lwb (or skipped) when
2392 * zil_process_commit_list() is called.
2393 */
2394static void
2395zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
2396{
2397	dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
2398	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
2399
2400	itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
2401	itx->itx_sync = B_TRUE;
2402	itx->itx_private = zcw;
2403
2404	zil_itx_assign(zilog, itx, tx);
2405
2406	dmu_tx_commit(tx);
2407}
2408
2409/*
2410 * Commit ZFS Intent Log transactions (itxs) to stable storage.
2411 *
2412 * When writing ZIL transactions to the on-disk representation of the
2413 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
2414 * itxs can be committed to a single lwb. Once a lwb is written and
2415 * committed to stable storage (i.e. the lwb is written, and vdevs have
2416 * been flushed), each itx that was committed to that lwb is also
2417 * considered to be committed to stable storage.
2418 *
2419 * When an itx is committed to an lwb, the log record (lr_t) contained
2420 * by the itx is copied into the lwb's zio buffer, and once this buffer
2421 * is written to disk, it becomes an on-disk ZIL block.
2422 *
2423 * As itxs are generated, they're inserted into the ZIL's queue of
2424 * uncommitted itxs. The semantics of zil_commit() are such that it will
2425 * block until all itxs that were in the queue when it was called, are
2426 * committed to stable storage.
2427 *
2428 * If "foid" is zero, this means all "synchronous" and "asynchronous"
2429 * itxs, for all objects in the dataset, will be committed to stable
2430 * storage prior to zil_commit() returning. If "foid" is non-zero, all
2431 * "synchronous" itxs for all objects, but only "asynchronous" itxs
2432 * that correspond to the foid passed in, will be committed to stable
2433 * storage prior to zil_commit() returning.
2434 *
2435 * Generally speaking, when zil_commit() is called, the consumer doesn't
2436 * actually care about _all_ of the uncommitted itxs. Instead, they're
2437 * simply trying to wait for a specific itx to be committed to disk,
2438 * but the interface(s) for interacting with the ZIL don't allow such
2439 * fine-grained communication. A better interface would allow a consumer
2440 * to create and assign an itx, and then pass a reference to this itx to
2441 * zil_commit(); such that zil_commit() would return as soon as that
2442 * specific itx was committed to disk (instead of waiting for _all_
2443 * itxs to be committed).
2444 *
2445 * When a thread calls zil_commit() a special "commit itx" will be
2446 * generated, along with a corresponding "waiter" for this commit itx.
2447 * zil_commit() will wait on this waiter's CV, such that when the waiter
2448 * is marked done, and signalled, zil_commit() will return.
2449 *
2450 * This commit itx is inserted into the queue of uncommitted itxs. This
2451 * provides an easy mechanism for determining which itxs were in the
2452 * queue prior to zil_commit() having been called, and which itxs were
2453 * added after zil_commit() was called.
2454 *
2455 * The commit itx is special; it doesn't have any on-disk representation.
2456 * When a commit itx is "committed" to an lwb, the waiter associated
2457 * with it is linked onto the lwb's list of waiters. Then, when that lwb
2458 * completes, each waiter on the lwb's list is marked done and signalled
2459 * -- allowing the thread waiting on the waiter to return from zil_commit().
2460 *
2461 * It's important to point out a few critical factors that allow us
2462 * to make use of the commit itxs, commit waiters, per-lwb lists of
2463 * commit waiters, and zio completion callbacks like we're doing:
2464 *
2465 *   1. The list of waiters for each lwb is traversed, and each commit
2466 *      waiter is marked "done" and signalled, in the zio completion
2467 *      callback of the lwb's zio[*].
2468 *
2469 *      * Actually, the waiters are signalled in the zio completion
2470 *        callback of the root zio for the DKIOCFLUSHWRITECACHE commands
2471 *        that are sent to the vdevs upon completion of the lwb zio.
2472 *
2473 *   2. When the itxs are inserted into the ZIL's queue of uncommitted
2474 *      itxs, the order in which they are inserted is preserved[*]; as
2475 *      itxs are added to the queue, they are added to the tail of
2476 *      in-memory linked lists.
2477 *
2478 *      When committing the itxs to lwbs (to be written to disk), they
2479 *      are committed in the same order in which the itxs were added to
2480 *      the uncommitted queue's linked list(s); i.e. the linked list of
2481 *      itxs to commit is traversed from head to tail, and each itx is
2482 *      committed to an lwb in that order.
2483 *
2484 *      * To clarify:
2485 *
2486 *        - the order of "sync" itxs is preserved w.r.t. other
2487 *          "sync" itxs, regardless of the corresponding objects.
2488 *        - the order of "async" itxs is preserved w.r.t. other
2489 *          "async" itxs corresponding to the same object.
2490 *        - the order of "async" itxs is *not* preserved w.r.t. other
2491 *          "async" itxs corresponding to different objects.
2492 *        - the order of "sync" itxs w.r.t. "async" itxs (or vice
2493 *          versa) is *not* preserved, even for itxs that correspond
2494 *          to the same object.
2495 *
2496 *      For more details, see: zil_itx_assign(), zil_async_to_sync(),
2497 *      zil_get_commit_list(), and zil_process_commit_list().
2498 *
2499 *   3. The lwbs represent a linked list of blocks on disk. Thus, any
2500 *      lwb cannot be considered committed to stable storage, until its
2501 *      "previous" lwb is also committed to stable storage. This fact,
2502 *      coupled with the fact described above, means that itxs are
2503 *      committed in (roughly) the order in which they were generated.
2504 *      This is essential because itxs are dependent on prior itxs.
2505 *      Thus, we *must not* deem an itx as being committed to stable
2506 *      storage, until *all* prior itxs have also been committed to
2507 *      stable storage.
2508 *
2509 *      To enforce this ordering of lwb zio's, while still leveraging as
2510 *      much of the underlying storage performance as possible, we rely
2511 *      on two fundamental concepts:
2512 *
2513 *          1. The creation and issuance of lwb zio's is protected by
2514 *             the zilog's "zl_writer_lock", which ensures only a single
2515 *             thread is creating and/or issuing lwb's at a time
2516 *          2. The "previous" lwb is a child of the "current" lwb
2517 *             (leveraging the zio parent-child dependency graph)
2518 *
2519 *      By relying on this parent-child zio relationship, we can have
2520 *      many lwb zio's concurrently issued to the underlying storage,
2521 *      but the order in which they complete will be the same order in
2522 *      which they were created.
2523 */
2524void
2525zil_commit(zilog_t *zilog, uint64_t foid)
2526{
2527	/*
2528	 * We should never attempt to call zil_commit on a snapshot for
2529	 * a couple of reasons:
2530	 *
2531	 * 1. A snapshot may never be modified, thus it cannot have any
2532	 *    in-flight itxs that would have modified the dataset.
2533	 *
2534	 * 2. By design, when zil_commit() is called, a commit itx will
2535	 *    be assigned to this zilog; as a result, the zilog will be
2536	 *    dirtied. We must not dirty the zilog of a snapshot; there's
2537	 *    dirtied. We must not dirty the zilog of a snapshot; there are
2538	 *    cause a panic if it's not upheld.
2539	 */
2540	ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
2541
2542	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2543		return;
2544
2545	if (!spa_writeable(zilog->zl_spa)) {
2546		/*
2547		 * If the SPA is not writable, there should never be any
2548		 * pending itxs waiting to be committed to disk. If that
2549		 * weren't true, we'd skip writing those itxs out, and
2550		 * would break the semantics of zil_commit(); thus, we're
2551		 * verifying that truth before we return to the caller.
2552		 */
2553		ASSERT(list_is_empty(&zilog->zl_lwb_list));
2554		ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
2555		for (int i = 0; i < TXG_SIZE; i++)
2556			ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
2557		return;
2558	}
2559
2560	/*
2561	 * If the ZIL is suspended, we don't want to dirty it by calling
2562	 * zil_commit_itx_assign() below, nor can we write out
2563	 * lwbs as would be done in zil_process_commit_list(). Thus, we
2564	 * simply rely on txg_wait_synced() to maintain the necessary
2565	 * semantics, and avoid calling those functions altogether.
2566	 */
2567	if (zilog->zl_suspend > 0) {
2568		txg_wait_synced(zilog->zl_dmu_pool, 0);
2569		return;
2570	}
2571
2572	/*
2573	 * Move the "async" itxs for the specified foid to the "sync"
2574	 * queues, such that they will be later committed (or skipped)
2575	 * to an lwb when zil_process_commit_list() is called.
2576	 *
2577	 * Since these "async" itxs must be committed prior to this
2578	 * call to zil_commit returning, we must perform this operation
2579	 * before we call zil_commit_itx_assign().
2580	 */
2581	zil_async_to_sync(zilog, foid);
2582
2583	/*
2584	 * We allocate a new "waiter" structure which will initially be
2585	 * linked to the commit itx using the itx's "itx_private" field.
2586	 * Since the commit itx doesn't represent any on-disk state,
2587	 * when it's committed to an lwb, rather than copying its
2588	 * lr_t into the lwb's buffer, the commit itx's "waiter" will be
2589	 * added to the lwb's list of waiters. Then, when the lwb is
2590	 * committed to stable storage, each waiter in the lwb's list of
2591	 * waiters will be marked "done", and signalled.
2592	 *
2593	 * We must create the waiter and assign the commit itx prior to
2594	 * calling zil_commit_writer(), or else our specific commit itx
2595	 * is not guaranteed to be committed to an lwb prior to calling
2596	 * zil_commit_waiter().
2597	 */
2598	zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
2599	zil_commit_itx_assign(zilog, zcw);
2600
2601	zil_commit_writer(zilog, zcw);
2602	zil_commit_waiter(zilog, zcw);
2603
2604	if (zcw->zcw_zio_error != 0) {
2605		/*
2606		 * If there was an error writing out the ZIL blocks that
2607		 * this thread is waiting on, then we fallback to
2608		 * relying on spa_sync() to write out the data this
2609		 * thread is waiting on. Obviously this has performance
2610		 * implications, but the expectation is for this to be
2611		 * an exceptional case, and shouldn't occur often.
2612		 */
2613		DTRACE_PROBE2(zil__commit__io__error,
2614		    zilog_t *, zilog, zil_commit_waiter_t *, zcw);
2615		txg_wait_synced(zilog->zl_dmu_pool, 0);
2616	}
2617
2618	zil_free_commit_waiter(zcw);
2619}
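
/*
 * Illustrative consumer-side usage (a sketch; the real callers live in
 * the ZPL and ZVOL code, not in this file): a synchronous operation
 * such as an fsync typically ends with something like
 *
 *	zil_commit(zilog, object_id);
 *
 * where "object_id" names the object being synced (or is 0 to also
 * commit the async itxs of every object), and the call returns only
 * once the relevant itxs are on stable storage, as described above.
 */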
2620
2621/*
2622 * Called in syncing context to free committed log blocks and update log header.
2623 */
2624void
2625zil_sync(zilog_t *zilog, dmu_tx_t *tx)
2626{
2627	zil_header_t *zh = zil_header_in_syncing_context(zilog);
2628	uint64_t txg = dmu_tx_get_txg(tx);
2629	spa_t *spa = zilog->zl_spa;
2630	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
2631	lwb_t *lwb;
2632
2633	/*
2634	 * We don't zero out zl_destroy_txg, so make sure we don't try
2635	 * to destroy it twice.
2636	 */
2637	if (spa_sync_pass(spa) != 1)
2638		return;
2639
2640	mutex_enter(&zilog->zl_lock);
2641
2642	ASSERT(zilog->zl_stop_sync == 0);
2643
2644	if (*replayed_seq != 0) {
2645		ASSERT(zh->zh_replay_seq < *replayed_seq);
2646		zh->zh_replay_seq = *replayed_seq;
2647		*replayed_seq = 0;
2648	}
2649
2650	if (zilog->zl_destroy_txg == txg) {
2651		blkptr_t blk = zh->zh_log;
2652
2653		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
2654
2655		bzero(zh, sizeof (zil_header_t));
2656		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
2657
2658		if (zilog->zl_keep_first) {
2659			/*
2660			 * If this block was part of log chain that couldn't
2661			 * be claimed because a device was missing during
2662			 * zil_claim(), but that device later returns,
2663			 * then this block could erroneously appear valid.
2664			 * To guard against this, assign a new GUID to the new
2665			 * log chain so it doesn't matter what blk points to.
2666			 */
2667			zil_init_log_chain(zilog, &blk);
2668			zh->zh_log = blk;
2669		}
2670	}
2671
2672	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
2673		zh->zh_log = lwb->lwb_blk;
2674		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
2675			break;
2676		list_remove(&zilog->zl_lwb_list, lwb);
2677		zio_free(spa, txg, &lwb->lwb_blk);
2678		zil_free_lwb(zilog, lwb);
2679
2680		/*
2681		 * If we don't have anything left in the lwb list then
2682		 * we've had an allocation failure and we need to zero
2683		 * out the zil_header blkptr so that we don't end
2684		 * up freeing the same block twice.
2685		 */
2686		if (list_head(&zilog->zl_lwb_list) == NULL)
2687			BP_ZERO(&zh->zh_log);
2688	}
2689	mutex_exit(&zilog->zl_lock);
2690}
2691
2692/* ARGSUSED */
2693static int
2694zil_lwb_cons(void *vbuf, void *unused, int kmflag)
2695{
2696	lwb_t *lwb = vbuf;
2697	list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
2698	    offsetof(zil_commit_waiter_t, zcw_node));
2699	avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
2700	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
2701	mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
2702	return (0);
2703}
2704
2705/* ARGSUSED */
2706static void
2707zil_lwb_dest(void *vbuf, void *unused)
2708{
2709	lwb_t *lwb = vbuf;
2710	mutex_destroy(&lwb->lwb_vdev_lock);
2711	avl_destroy(&lwb->lwb_vdev_tree);
2712	list_destroy(&lwb->lwb_waiters);
2713}
2714
2715void
2716zil_init(void)
2717{
2718	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
2719	    sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
2720
2721	zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
2722	    sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
2723}
2724
2725void
2726zil_fini(void)
2727{
2728	kmem_cache_destroy(zil_zcw_cache);
2729	kmem_cache_destroy(zil_lwb_cache);
2730}
2731
2732void
2733zil_set_sync(zilog_t *zilog, uint64_t sync)
2734{
2735	zilog->zl_sync = sync;
2736}
2737
2738void
2739zil_set_logbias(zilog_t *zilog, uint64_t logbias)
2740{
2741	zilog->zl_logbias = logbias;
2742}
2743
2744zilog_t *
2745zil_alloc(objset_t *os, zil_header_t *zh_phys)
2746{
2747	zilog_t *zilog;
2748
2749	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
2750
2751	zilog->zl_header = zh_phys;
2752	zilog->zl_os = os;
2753	zilog->zl_spa = dmu_objset_spa(os);
2754	zilog->zl_dmu_pool = dmu_objset_pool(os);
2755	zilog->zl_destroy_txg = TXG_INITIAL - 1;
2756	zilog->zl_logbias = dmu_objset_logbias(os);
2757	zilog->zl_sync = dmu_objset_syncprop(os);
2758	zilog->zl_dirty_max_txg = 0;
2759	zilog->zl_last_lwb_opened = NULL;
2760	zilog->zl_last_lwb_latency = 0;
2761
2762	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
2763	mutex_init(&zilog->zl_writer_lock, NULL, MUTEX_DEFAULT, NULL);
2764
2765	for (int i = 0; i < TXG_SIZE; i++) {
2766		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
2767		    MUTEX_DEFAULT, NULL);
2768	}
2769
2770	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
2771	    offsetof(lwb_t, lwb_node));
2772
2773	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
2774	    offsetof(itx_t, itx_node));
2775
2776	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
2777
2778	return (zilog);
2779}
2780
2781void
2782zil_free(zilog_t *zilog)
2783{
2784	zilog->zl_stop_sync = 1;
2785
2786	ASSERT0(zilog->zl_suspend);
2787	ASSERT0(zilog->zl_suspending);
2788
2789	ASSERT(list_is_empty(&zilog->zl_lwb_list));
2790	list_destroy(&zilog->zl_lwb_list);
2791
2792	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
2793	list_destroy(&zilog->zl_itx_commit_list);
2794
2795	for (int i = 0; i < TXG_SIZE; i++) {
2796		/*
2797		 * It's possible for an itx to be generated that doesn't dirty
2798		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
2799		 * callback to remove the entry. We remove those here.
2800		 *
2801		 * Also free up the ziltest itxs.
2802		 */
2803		if (zilog->zl_itxg[i].itxg_itxs)
2804			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
2805		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
2806	}
2807
2808	mutex_destroy(&zilog->zl_writer_lock);
2809	mutex_destroy(&zilog->zl_lock);
2810
2811	cv_destroy(&zilog->zl_cv_suspend);
2812
2813	kmem_free(zilog, sizeof (zilog_t));
2814}
2815
2816/*
2817 * Open an intent log.
2818 */
2819zilog_t *
2820zil_open(objset_t *os, zil_get_data_t *get_data)
2821{
2822	zilog_t *zilog = dmu_objset_zil(os);
2823
2824	ASSERT3P(zilog->zl_get_data, ==, NULL);
2825	ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
2826	ASSERT(list_is_empty(&zilog->zl_lwb_list));
2827
2828	zilog->zl_get_data = get_data;
2829
2830	return (zilog);
2831}
2832
2833/*
2834 * Close an intent log.
2835 */
2836void
2837zil_close(zilog_t *zilog)
2838{
2839	lwb_t *lwb;
2840	uint64_t txg;
2841
2842	if (!dmu_objset_is_snapshot(zilog->zl_os)) {
2843		zil_commit(zilog, 0);
2844	} else {
2845		ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
2846		ASSERT0(zilog->zl_dirty_max_txg);
2847		ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
2848	}
2849
2850	mutex_enter(&zilog->zl_lock);
2851	lwb = list_tail(&zilog->zl_lwb_list);
2852	if (lwb == NULL)
2853		txg = zilog->zl_dirty_max_txg;
2854	else
2855		txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg);
2856	mutex_exit(&zilog->zl_lock);
2857
2858	/*
2859	 * We need to use txg_wait_synced() to wait long enough for the
2860	 * ZIL to be clean, and to wait for all pending lwbs to be
2861	 * written out.
2862	 */
2863	if (txg != 0)
2864		txg_wait_synced(zilog->zl_dmu_pool, txg);
2865
2866	if (zilog_is_dirty(zilog))
2867		zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg);
2868	VERIFY(!zilog_is_dirty(zilog));
2869
2870	zilog->zl_get_data = NULL;
2871
2872	/*
2873	 * We should have only one lwb left on the list; remove it now.
2874	 */
2875	mutex_enter(&zilog->zl_lock);
2876	lwb = list_head(&zilog->zl_lwb_list);
2877	if (lwb != NULL) {
2878		ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list));
2879		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
2880		list_remove(&zilog->zl_lwb_list, lwb);
2881		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
2882		zil_free_lwb(zilog, lwb);
2883	}
2884	mutex_exit(&zilog->zl_lock);
2885}
2886
2887static char *suspend_tag = "zil suspending";
2888
2889/*
2890 * Suspend an intent log.  While in suspended mode, we still honor
2891 * synchronous semantics, but we rely on txg_wait_synced() to do it.
2892 * On old version pools, we suspend the log briefly when taking a
2893 * snapshot so that it will have an empty intent log.
2894 *
2895 * Long holds are not really intended to be used the way we do here --
2896 * held for such a short time.  A concurrent caller of dsl_dataset_long_held()
2897 * could fail.  Therefore we take pains to only put a long hold if it is
2898 * actually necessary.  Fortunately, it will only be necessary if the
2899 * objset is currently mounted (or the ZVOL equivalent).  In that case it
2900 * will already have a long hold, so we are not really making things any worse.
2901 *
2902 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
2903 * zvol_state_t), and use their mechanism to prevent their hold from being
2904 * dropped (e.g. VFS_HOLD()).  However, that would be even more pain for
2905 * very little gain.
2906 *
2907 * if cookiep == NULL, this does both the suspend & resume.
2908 * Otherwise, it returns with the dataset "long held", and the cookie
2909 * should be passed into zil_resume().
2910 */
2911int
2912zil_suspend(const char *osname, void **cookiep)
2913{
2914	objset_t *os;
2915	zilog_t *zilog;
2916	const zil_header_t *zh;
2917	int error;
2918
2919	error = dmu_objset_hold(osname, suspend_tag, &os);
2920	if (error != 0)
2921		return (error);
2922	zilog = dmu_objset_zil(os);
2923
2924	mutex_enter(&zilog->zl_lock);
2925	zh = zilog->zl_header;
2926
2927	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
2928		mutex_exit(&zilog->zl_lock);
2929		dmu_objset_rele(os, suspend_tag);
2930		return (SET_ERROR(EBUSY));
2931	}
2932
2933	/*
2934	 * Don't put a long hold in the cases where we can avoid it.  This
2935	 * is when there is no cookie so we are doing a suspend & resume
2936	 * (i.e. called from zil_vdev_offline()), and there's nothing to do
2937	 * for the suspend because it's already suspended, or there's no ZIL.
2938	 */
2939	if (cookiep == NULL && !zilog->zl_suspending &&
2940	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
2941		mutex_exit(&zilog->zl_lock);
2942		dmu_objset_rele(os, suspend_tag);
2943		return (0);
2944	}
2945
2946	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
2947	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
2948
2949	zilog->zl_suspend++;
2950
2951	if (zilog->zl_suspend > 1) {
2952		/*
2953		 * Someone else is already suspending it.
2954		 * Just wait for them to finish.
2955		 */
2956
2957		while (zilog->zl_suspending)
2958			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
2959		mutex_exit(&zilog->zl_lock);
2960
2961		if (cookiep == NULL)
2962			zil_resume(os);
2963		else
2964			*cookiep = os;
2965		return (0);
2966	}
2967
2968	/*
2969	 * If there is no pointer to an on-disk block, this ZIL must not
2970	 * be active (e.g. filesystem not mounted), so there's nothing
2971	 * to clean up.
2972	 */
2973	if (BP_IS_HOLE(&zh->zh_log)) {
2974		ASSERT(cookiep != NULL); /* fast path already handled */
2975
2976		*cookiep = os;
2977		mutex_exit(&zilog->zl_lock);
2978		return (0);
2979	}
2980
2981	zilog->zl_suspending = B_TRUE;
2982	mutex_exit(&zilog->zl_lock);
2983
2984	zil_commit(zilog, 0);
2985
2986	zil_destroy(zilog, B_FALSE);
2987
2988	mutex_enter(&zilog->zl_lock);
2989	zilog->zl_suspending = B_FALSE;
2990	cv_broadcast(&zilog->zl_cv_suspend);
2991	mutex_exit(&zilog->zl_lock);
2992
2993	if (cookiep == NULL)
2994		zil_resume(os);
2995	else
2996		*cookiep = os;
2997	return (0);
2998}
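
/*
 * Illustrative usage of the cookie-based mode described above (a
 * sketch, not an actual caller in this file):
 *
 *	void *cookie;
 *	int error = zil_suspend(osname, &cookie);
 *	if (error == 0) {
 *		...	(operate with the ZIL suspended)
 *		zil_resume(cookie);
 *	}
 *
 * whereas passing a NULL cookiep, as zil_vdev_offline() does below,
 * performs the suspend and the resume in a single call.
 */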
2999
3000void
3001zil_resume(void *cookie)
3002{
3003	objset_t *os = cookie;
3004	zilog_t *zilog = dmu_objset_zil(os);
3005
3006	mutex_enter(&zilog->zl_lock);
3007	ASSERT(zilog->zl_suspend != 0);
3008	zilog->zl_suspend--;
3009	mutex_exit(&zilog->zl_lock);
3010	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
3011	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
3012}
3013
3014typedef struct zil_replay_arg {
3015	zil_replay_func_t **zr_replay;
3016	void		*zr_arg;
3017	boolean_t	zr_byteswap;
3018	char		*zr_lr;
3019} zil_replay_arg_t;
3020
3021static int
3022zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
3023{
3024	char name[ZFS_MAX_DATASET_NAME_LEN];
3025
3026	zilog->zl_replaying_seq--;	/* didn't actually replay this one */
3027
3028	dmu_objset_name(zilog->zl_os, name);
3029
3030	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
3031	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
3032	    (u_longlong_t)lr->lrc_seq,
3033	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
3034	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
3035
3036	return (error);
3037}
3038
3039static int
3040zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
3041{
3042	zil_replay_arg_t *zr = zra;
3043	const zil_header_t *zh = zilog->zl_header;
3044	uint64_t reclen = lr->lrc_reclen;
3045	uint64_t txtype = lr->lrc_txtype;
3046	int error = 0;
3047
3048	zilog->zl_replaying_seq = lr->lrc_seq;
3049
3050	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
3051		return (0);
3052
3053	if (lr->lrc_txg < claim_txg)		/* already committed */
3054		return (0);
3055
3056	/* Strip case-insensitive bit, still present in log record */
3057	txtype &= ~TX_CI;
3058
3059	if (txtype == 0 || txtype >= TX_MAX_TYPE)
3060		return (zil_replay_error(zilog, lr, EINVAL));
3061
3062	/*
3063	 * If this record type can be logged out of order, the object
3064	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
3065	 */
3066	if (TX_OOO(txtype)) {
3067		error = dmu_object_info(zilog->zl_os,
3068		    ((lr_ooo_t *)lr)->lr_foid, NULL);
3069		if (error == ENOENT || error == EEXIST)
3070			return (0);
3071	}
3072
3073	/*
3074	 * Make a copy of the data so we can revise and extend it.
3075	 */
3076	bcopy(lr, zr->zr_lr, reclen);
3077
3078	/*
3079	 * If this is a TX_WRITE with a blkptr, suck in the data.
3080	 */
3081	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
3082		error = zil_read_log_data(zilog, (lr_write_t *)lr,
3083		    zr->zr_lr + reclen);
3084		if (error != 0)
3085			return (zil_replay_error(zilog, lr, error));
3086	}
3087
3088	/*
3089	 * The log block containing this lr may have been byteswapped
3090	 * so that we can easily examine common fields like lrc_txtype.
3091	 * However, the log is a mix of different record types, and only the
3092	 * replay vectors know how to byteswap their records.  Therefore, if
3093	 * the lr was byteswapped, undo it before invoking the replay vector.
3094	 */
3095	if (zr->zr_byteswap)
3096		byteswap_uint64_array(zr->zr_lr, reclen);
3097
3098	/*
3099	 * We must now do two things atomically: replay this log record,
3100	 * and update the log header sequence number to reflect the fact that
3101	 * we did so. At the end of each replay function the sequence number
3102	 * is updated if we are in replay mode.
3103	 */
3104	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
3105	if (error != 0) {
3106		/*
3107		 * The DMU's dnode layer doesn't see removes until the txg
3108		 * commits, so a subsequent claim can spuriously fail with
3109		 * EEXIST. So if we receive any error we try syncing out
3110		 * any removes then retry the transaction.  Note that we
3111		 * specify B_FALSE for byteswap now, so we don't do it twice.
3112		 */
3113		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
3114		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
3115		if (error != 0)
3116			return (zil_replay_error(zilog, lr, error));
3117	}
3118	return (0);
3119}
3120
3121/* ARGSUSED */
3122static int
3123zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
3124{
3125	zilog->zl_replay_blks++;
3126
3127	return (0);
3128}
3129
3130/*
3131 * If this dataset has a non-empty intent log, replay it and destroy it.
3132 */
3133void
3134zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
3135{
3136	zilog_t *zilog = dmu_objset_zil(os);
3137	const zil_header_t *zh = zilog->zl_header;
3138	zil_replay_arg_t zr;
3139
3140	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
3141		zil_destroy(zilog, B_TRUE);
3142		return;
3143	}
3144
3145	zr.zr_replay = replay_func;
3146	zr.zr_arg = arg;
3147	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
3148	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
3149
3150	/*
3151	 * Wait for in-progress removes to sync before starting replay.
3152	 */
3153	txg_wait_synced(zilog->zl_dmu_pool, 0);
3154
3155	zilog->zl_replay = B_TRUE;
3156	zilog->zl_replay_time = ddi_get_lbolt();
3157	ASSERT(zilog->zl_replay_blks == 0);
3158	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
3159	    zh->zh_claim_txg);
3160	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
3161
3162	zil_destroy(zilog, B_FALSE);
3163	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
3164	zilog->zl_replay = B_FALSE;
3165}
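
/*
 * Illustrative usage (a sketch; the actual call sites are in the
 * ZIL's consumers): when a dataset is first instantiated, its owner
 * supplies a per-txtype table of replay callbacks, e.g.
 *
 *	zil_replay(os, private_arg, my_replay_vector);
 *
 * where "private_arg" and "my_replay_vector" are hypothetical names
 * for the consumer's private state and its zil_replay_func_t * array
 * of TX_MAX_TYPE entries, indexed by lrc_txtype.
 */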
3166
3167boolean_t
3168zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
3169{
3170	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
3171		return (B_TRUE);
3172
3173	if (zilog->zl_replay) {
3174		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
3175		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
3176		    zilog->zl_replaying_seq;
3177		return (B_TRUE);
3178	}
3179
3180	return (B_FALSE);
3181}
3182
3183/* ARGSUSED */
3184int
3185zil_vdev_offline(const char *osname, void *arg)
3186{
3187	int error;
3188
3189	error = zil_suspend(osname, NULL);
3190	if (error != 0)
3191		return (SET_ERROR(EEXIST));
3192	return (0);
3193}
3194