xref: /illumos-gate/usr/src/uts/common/fs/zfs/zil.c (revision c9ba2a43cb76c223d115e021fdabd2c066e020ed)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5fe9cf88cSperrin  * Common Development and Distribution License (the "License").
6fe9cf88cSperrin  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
2255da60b9SMark J Musante  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23*c9ba2a43SEric Schrock  * Copyright (c) 2011 by Delphix. All rights reserved.
24fa9e4066Sahrens  */
25fa9e4066Sahrens 
2655da60b9SMark J Musante /* Portions Copyright 2010 Robert Milkowski */
2755da60b9SMark J Musante 
28fa9e4066Sahrens #include <sys/zfs_context.h>
29fa9e4066Sahrens #include <sys/spa.h>
30fa9e4066Sahrens #include <sys/dmu.h>
31fa9e4066Sahrens #include <sys/zap.h>
32fa9e4066Sahrens #include <sys/arc.h>
33fa9e4066Sahrens #include <sys/stat.h>
34fa9e4066Sahrens #include <sys/resource.h>
35fa9e4066Sahrens #include <sys/zil.h>
36fa9e4066Sahrens #include <sys/zil_impl.h>
37fa9e4066Sahrens #include <sys/dsl_dataset.h>
384b964adaSGeorge Wilson #include <sys/vdev_impl.h>
39d63d470bSgw #include <sys/dmu_tx.h>
403f9d6ad7SLin Ling #include <sys/dsl_pool.h>
41fa9e4066Sahrens 
42fa9e4066Sahrens /*
43fa9e4066Sahrens  * The ZFS intent log (ZIL) saves transaction records of system calls
44fa9e4066Sahrens  * that change the file system, with enough information to be able
45fa9e4066Sahrens  * to replay them. These records are held in memory until
46fa9e4066Sahrens  * either the DMU transaction group (txg) commits them to the stable pool
47fa9e4066Sahrens  * and they can be discarded, or they are flushed to the stable log
48fa9e4066Sahrens  * (also in the pool) due to an fsync, O_DSYNC, or other synchronous
49fa9e4066Sahrens  * requirement. In the event of a panic or power failure, those log
50fa9e4066Sahrens  * records (transactions) are replayed.
51fa9e4066Sahrens  *
52fa9e4066Sahrens  * There is one ZIL per file system. Its on-disk (pool) format consists
53fa9e4066Sahrens  * of 3 parts:
54fa9e4066Sahrens  *
55fa9e4066Sahrens  * 	- ZIL header
56fa9e4066Sahrens  * 	- ZIL blocks
57fa9e4066Sahrens  * 	- ZIL records
58fa9e4066Sahrens  *
59fa9e4066Sahrens  * A log record holds a system call transaction. Log blocks can
60fa9e4066Sahrens  * hold many log records and the blocks are chained together.
61fa9e4066Sahrens  * Each ZIL block contains a block pointer (blkptr_t) to the next
62fa9e4066Sahrens  * ZIL block in the chain. The ZIL header points to the first
63fa9e4066Sahrens  * block in the chain. Note there is not a fixed place in the pool
64fa9e4066Sahrens  * to hold blocks. They are dynamically allocated and freed as
65fa9e4066Sahrens  * needed from the blocks available. The sketch below shows the ZIL structure:
66fa9e4066Sahrens  */
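An illustrative sketch of that layout (not part of the original comment; zh_log,
zil_chain_t and zc_next_blk are the names used later in this file):

	ZIL header (zil_header_t)
	    zh_log
	      |
	      v
	 +--------------+      +--------------+      +--------------+
	 | ZIL block    | ---> | ZIL block    | ---> | ZIL block    | ---> (hole = end of chain)
	 | lr, lr, ...  |      | lr, lr, ...  |      | lr, lr, ...  |
	 +--------------+      +--------------+      +--------------+

Each arrow is the zc_next_blk blkptr_t carried in a block's zil_chain_t; the
header's zh_log points at the first block in the chain.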
67fa9e4066Sahrens 
68fa9e4066Sahrens /*
69416e0cd8Sek  * This global ZIL switch affects all pools
70fa9e4066Sahrens  */
7155da60b9SMark J Musante int zil_replay_disable = 0;    /* disable intent logging replay */
72416e0cd8Sek 
73416e0cd8Sek /*
74416e0cd8Sek  * Tunable parameter for debugging or performance analysis.  Setting
75416e0cd8Sek  * zfs_nocacheflush will cause corruption on power loss if a volatile
76416e0cd8Sek  * out-of-order write cache is enabled.
77416e0cd8Sek  */
78416e0cd8Sek boolean_t zfs_nocacheflush = B_FALSE;
79fa9e4066Sahrens 
80fa9e4066Sahrens static kmem_cache_t *zil_lwb_cache;
81fa9e4066Sahrens 
8291de656bSNeil Perrin static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
838f18d1faSGeorge Wilson 
846e1f5caaSNeil Perrin #define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
856e1f5caaSNeil Perrin     sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
866e1f5caaSNeil Perrin 
876e1f5caaSNeil Perrin 
885002558fSNeil Perrin /*
895002558fSNeil Perrin  * ziltest is by and large an ugly hack, but very useful in
905002558fSNeil Perrin  * checking replay without tedious work.
915002558fSNeil Perrin  * When running ziltest we want to keep all itxs and so maintain
925002558fSNeil Perrin  * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG.
935002558fSNeil Perrin  * We subtract TXG_CONCURRENT_STATES to allow for common code.
945002558fSNeil Perrin  */
955002558fSNeil Perrin #define	ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)
965002558fSNeil Perrin 
97fa9e4066Sahrens static int
98b24ab676SJeff Bonwick zil_bp_compare(const void *x1, const void *x2)
99fa9e4066Sahrens {
100b24ab676SJeff Bonwick 	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
101b24ab676SJeff Bonwick 	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
102fa9e4066Sahrens 
103fa9e4066Sahrens 	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
104fa9e4066Sahrens 		return (-1);
105fa9e4066Sahrens 	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
106fa9e4066Sahrens 		return (1);
107fa9e4066Sahrens 
108fa9e4066Sahrens 	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
109fa9e4066Sahrens 		return (-1);
110fa9e4066Sahrens 	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
111fa9e4066Sahrens 		return (1);
112fa9e4066Sahrens 
113fa9e4066Sahrens 	return (0);
114fa9e4066Sahrens }
115fa9e4066Sahrens 
116fa9e4066Sahrens static void
117b24ab676SJeff Bonwick zil_bp_tree_init(zilog_t *zilog)
118fa9e4066Sahrens {
119b24ab676SJeff Bonwick 	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
120b24ab676SJeff Bonwick 	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
121fa9e4066Sahrens }
122fa9e4066Sahrens 
123fa9e4066Sahrens static void
124b24ab676SJeff Bonwick zil_bp_tree_fini(zilog_t *zilog)
125fa9e4066Sahrens {
126b24ab676SJeff Bonwick 	avl_tree_t *t = &zilog->zl_bp_tree;
127b24ab676SJeff Bonwick 	zil_bp_node_t *zn;
128fa9e4066Sahrens 	void *cookie = NULL;
129fa9e4066Sahrens 
130fa9e4066Sahrens 	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
131b24ab676SJeff Bonwick 		kmem_free(zn, sizeof (zil_bp_node_t));
132fa9e4066Sahrens 
133fa9e4066Sahrens 	avl_destroy(t);
134fa9e4066Sahrens }
135fa9e4066Sahrens 
136b24ab676SJeff Bonwick int
137b24ab676SJeff Bonwick zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
138fa9e4066Sahrens {
139b24ab676SJeff Bonwick 	avl_tree_t *t = &zilog->zl_bp_tree;
140b24ab676SJeff Bonwick 	const dva_t *dva = BP_IDENTITY(bp);
141b24ab676SJeff Bonwick 	zil_bp_node_t *zn;
142fa9e4066Sahrens 	avl_index_t where;
143fa9e4066Sahrens 
144fa9e4066Sahrens 	if (avl_find(t, dva, &where) != NULL)
145fa9e4066Sahrens 		return (EEXIST);
146fa9e4066Sahrens 
147b24ab676SJeff Bonwick 	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
148fa9e4066Sahrens 	zn->zn_dva = *dva;
149fa9e4066Sahrens 	avl_insert(t, zn, where);
150fa9e4066Sahrens 
151fa9e4066Sahrens 	return (0);
152fa9e4066Sahrens }
153fa9e4066Sahrens 
154d80c45e0Sbonwick static zil_header_t *
155d80c45e0Sbonwick zil_header_in_syncing_context(zilog_t *zilog)
156d80c45e0Sbonwick {
157d80c45e0Sbonwick 	return ((zil_header_t *)zilog->zl_header);
158d80c45e0Sbonwick }
159d80c45e0Sbonwick 
160d80c45e0Sbonwick static void
161d80c45e0Sbonwick zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
162d80c45e0Sbonwick {
163d80c45e0Sbonwick 	zio_cksum_t *zc = &bp->blk_cksum;
164d80c45e0Sbonwick 
165d80c45e0Sbonwick 	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
166d80c45e0Sbonwick 	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
167d80c45e0Sbonwick 	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
168d80c45e0Sbonwick 	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
169d80c45e0Sbonwick }
170d80c45e0Sbonwick 
171fa9e4066Sahrens /*
172b24ab676SJeff Bonwick  * Read a log block and make sure it's valid.
173fa9e4066Sahrens  */
174fa9e4066Sahrens static int
1756e1f5caaSNeil Perrin zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
1766e1f5caaSNeil Perrin     char **end)
177fa9e4066Sahrens {
178b24ab676SJeff Bonwick 	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
17913506d1eSmaybee 	uint32_t aflags = ARC_WAIT;
180b24ab676SJeff Bonwick 	arc_buf_t *abuf = NULL;
181b24ab676SJeff Bonwick 	zbookmark_t zb;
182fa9e4066Sahrens 	int error;
183fa9e4066Sahrens 
184b24ab676SJeff Bonwick 	if (zilog->zl_header->zh_claim_txg == 0)
185b24ab676SJeff Bonwick 		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
186ea8dc4b6Seschrock 
187b24ab676SJeff Bonwick 	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
188b24ab676SJeff Bonwick 		zio_flags |= ZIO_FLAG_SPECULATIVE;
189fa9e4066Sahrens 
190b24ab676SJeff Bonwick 	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
191b24ab676SJeff Bonwick 	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
192b24ab676SJeff Bonwick 
1933f9d6ad7SLin Ling 	error = dsl_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
194b24ab676SJeff Bonwick 	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
195fa9e4066Sahrens 
196d80c45e0Sbonwick 	if (error == 0) {
197d80c45e0Sbonwick 		zio_cksum_t cksum = bp->blk_cksum;
198fa9e4066Sahrens 
199d80c45e0Sbonwick 		/*
200f5e6e722SNeil Perrin 		 * Validate the checksummed log block.
201f5e6e722SNeil Perrin 		 *
202d80c45e0Sbonwick 		 * Sequence numbers should be... sequential.  The checksum
203d80c45e0Sbonwick 		 * verifier for the next block should be bp's checksum plus 1.
204f5e6e722SNeil Perrin 		 *
205f5e6e722SNeil Perrin 		 * Also check the log chain linkage and size used.
206d80c45e0Sbonwick 		 */
207d80c45e0Sbonwick 		cksum.zc_word[ZIL_ZC_SEQ]++;
208d80c45e0Sbonwick 
2096e1f5caaSNeil Perrin 		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
2106e1f5caaSNeil Perrin 			zil_chain_t *zilc = abuf->b_data;
2116e1f5caaSNeil Perrin 			char *lr = (char *)(zilc + 1);
2126e1f5caaSNeil Perrin 			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
2136e1f5caaSNeil Perrin 
2146e1f5caaSNeil Perrin 			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
2156e1f5caaSNeil Perrin 			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
2166e1f5caaSNeil Perrin 				error = ECKSUM;
2176e1f5caaSNeil Perrin 			} else {
2186e1f5caaSNeil Perrin 				bcopy(lr, dst, len);
2196e1f5caaSNeil Perrin 				*end = (char *)dst + len;
2206e1f5caaSNeil Perrin 				*nbp = zilc->zc_next_blk;
2216e1f5caaSNeil Perrin 			}
2226e1f5caaSNeil Perrin 		} else {
2236e1f5caaSNeil Perrin 			char *lr = abuf->b_data;
2246e1f5caaSNeil Perrin 			uint64_t size = BP_GET_LSIZE(bp);
2256e1f5caaSNeil Perrin 			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
2266e1f5caaSNeil Perrin 
2276e1f5caaSNeil Perrin 			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
2286e1f5caaSNeil Perrin 			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
2296e1f5caaSNeil Perrin 			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
2306e1f5caaSNeil Perrin 				error = ECKSUM;
2316e1f5caaSNeil Perrin 			} else {
2326e1f5caaSNeil Perrin 				bcopy(lr, dst, zilc->zc_nused);
2336e1f5caaSNeil Perrin 				*end = (char *)dst + zilc->zc_nused;
2346e1f5caaSNeil Perrin 				*nbp = zilc->zc_next_blk;
2356e1f5caaSNeil Perrin 			}
2366e1f5caaSNeil Perrin 		}
237fa9e4066Sahrens 
238b24ab676SJeff Bonwick 		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
239fa9e4066Sahrens 	}
240fa9e4066Sahrens 
241b24ab676SJeff Bonwick 	return (error);
242b24ab676SJeff Bonwick }
243b24ab676SJeff Bonwick 
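A rough sketch of the two block layouts the checks above distinguish (inferred
from those checks; widths not to scale):

	ZIO_CHECKSUM_ZILOG2 ("slim" ZIL):
	    +-------------+------------------------------+---------+
	    | zil_chain_t | log records                  | unused  |
	    +-------------+------------------------------+---------+
	      zc_nused counts the records plus the zil_chain_t header

	older ZIO_CHECKSUM_ZILOG:
	    +------------------------------+---------+-------------+
	    | log records                  | unused  | zil_chain_t |
	    +------------------------------+---------+-------------+
	      zc_nused counts the records only

In both layouts zc_next_blk holds the blkptr_t of the next log block, and its
embedded checksum (this block's checksum with ZIL_ZC_SEQ bumped by one) is what
zil_read_log_block() verifies to detect the end of the chain.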
244b24ab676SJeff Bonwick /*
245b24ab676SJeff Bonwick  * Read a TX_WRITE log data block.
246b24ab676SJeff Bonwick  */
247b24ab676SJeff Bonwick static int
248b24ab676SJeff Bonwick zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
249b24ab676SJeff Bonwick {
250b24ab676SJeff Bonwick 	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
251b24ab676SJeff Bonwick 	const blkptr_t *bp = &lr->lr_blkptr;
252b24ab676SJeff Bonwick 	uint32_t aflags = ARC_WAIT;
253b24ab676SJeff Bonwick 	arc_buf_t *abuf = NULL;
254b24ab676SJeff Bonwick 	zbookmark_t zb;
255b24ab676SJeff Bonwick 	int error;
256b24ab676SJeff Bonwick 
257b24ab676SJeff Bonwick 	if (BP_IS_HOLE(bp)) {
258b24ab676SJeff Bonwick 		if (wbuf != NULL)
259b24ab676SJeff Bonwick 			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
260b24ab676SJeff Bonwick 		return (0);
261b24ab676SJeff Bonwick 	}
262b24ab676SJeff Bonwick 
263b24ab676SJeff Bonwick 	if (zilog->zl_header->zh_claim_txg == 0)
264b24ab676SJeff Bonwick 		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
265b24ab676SJeff Bonwick 
266b24ab676SJeff Bonwick 	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
267b24ab676SJeff Bonwick 	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
268b24ab676SJeff Bonwick 
269b24ab676SJeff Bonwick 	error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
270b24ab676SJeff Bonwick 	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
271b24ab676SJeff Bonwick 
272b24ab676SJeff Bonwick 	if (error == 0) {
273b24ab676SJeff Bonwick 		if (wbuf != NULL)
274b24ab676SJeff Bonwick 			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
275b24ab676SJeff Bonwick 		(void) arc_buf_remove_ref(abuf, &abuf);
276b24ab676SJeff Bonwick 	}
277fa9e4066Sahrens 
278d80c45e0Sbonwick 	return (error);
279fa9e4066Sahrens }
280fa9e4066Sahrens 
281fa9e4066Sahrens /*
282fa9e4066Sahrens  * Parse the intent log, and call parse_func for each valid record within.
283fa9e4066Sahrens  */
284b24ab676SJeff Bonwick int
285fa9e4066Sahrens zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
286fa9e4066Sahrens     zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
287fa9e4066Sahrens {
288d80c45e0Sbonwick 	const zil_header_t *zh = zilog->zl_header;
289b24ab676SJeff Bonwick 	boolean_t claimed = !!zh->zh_claim_txg;
290b24ab676SJeff Bonwick 	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
291b24ab676SJeff Bonwick 	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
292b24ab676SJeff Bonwick 	uint64_t max_blk_seq = 0;
293b24ab676SJeff Bonwick 	uint64_t max_lr_seq = 0;
294b24ab676SJeff Bonwick 	uint64_t blk_count = 0;
295b24ab676SJeff Bonwick 	uint64_t lr_count = 0;
296b24ab676SJeff Bonwick 	blkptr_t blk, next_blk;
297fa9e4066Sahrens 	char *lrbuf, *lrp;
298b24ab676SJeff Bonwick 	int error = 0;
299fa9e4066Sahrens 
300b24ab676SJeff Bonwick 	/*
301b24ab676SJeff Bonwick 	 * Old logs didn't record the maximum zh_claim_lr_seq.
302b24ab676SJeff Bonwick 	 */
303b24ab676SJeff Bonwick 	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
304b24ab676SJeff Bonwick 		claim_lr_seq = UINT64_MAX;
305fa9e4066Sahrens 
306fa9e4066Sahrens 	/*
307fa9e4066Sahrens 	 * Starting at the block pointed to by zh_log we read the log chain.
308fa9e4066Sahrens 	 * For each block in the chain we strongly check that block to
309fa9e4066Sahrens 	 * ensure its validity.  We stop when an invalid block is found.
310fa9e4066Sahrens 	 * For each block pointer in the chain we call parse_blk_func().
311fa9e4066Sahrens 	 * For each record in each valid block we call parse_lr_func().
312d80c45e0Sbonwick 	 * If the log has been claimed, stop if we encounter a sequence
313d80c45e0Sbonwick 	 * number greater than the highest claimed sequence number.
314fa9e4066Sahrens 	 */
315b24ab676SJeff Bonwick 	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
316b24ab676SJeff Bonwick 	zil_bp_tree_init(zilog);
317d80c45e0Sbonwick 
318b24ab676SJeff Bonwick 	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
319b24ab676SJeff Bonwick 		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
320b24ab676SJeff Bonwick 		int reclen;
3216e1f5caaSNeil Perrin 		char *end;
322d80c45e0Sbonwick 
323b24ab676SJeff Bonwick 		if (blk_seq > claim_blk_seq)
324b24ab676SJeff Bonwick 			break;
325b24ab676SJeff Bonwick 		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
326b24ab676SJeff Bonwick 			break;
3276e1f5caaSNeil Perrin 		ASSERT3U(max_blk_seq, <, blk_seq);
328b24ab676SJeff Bonwick 		max_blk_seq = blk_seq;
329b24ab676SJeff Bonwick 		blk_count++;
330fa9e4066Sahrens 
331b24ab676SJeff Bonwick 		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
332b24ab676SJeff Bonwick 			break;
333fa9e4066Sahrens 
3346e1f5caaSNeil Perrin 		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
335fa9e4066Sahrens 		if (error)
336fa9e4066Sahrens 			break;
337fa9e4066Sahrens 
3386e1f5caaSNeil Perrin 		for (lrp = lrbuf; lrp < end; lrp += reclen) {
339fa9e4066Sahrens 			lr_t *lr = (lr_t *)lrp;
340fa9e4066Sahrens 			reclen = lr->lrc_reclen;
341fa9e4066Sahrens 			ASSERT3U(reclen, >=, sizeof (lr_t));
342b24ab676SJeff Bonwick 			if (lr->lrc_seq > claim_lr_seq)
343b24ab676SJeff Bonwick 				goto done;
344b24ab676SJeff Bonwick 			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
345b24ab676SJeff Bonwick 				goto done;
3466e1f5caaSNeil Perrin 			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
347b24ab676SJeff Bonwick 			max_lr_seq = lr->lrc_seq;
348b24ab676SJeff Bonwick 			lr_count++;
349fa9e4066Sahrens 		}
350fa9e4066Sahrens 	}
351b24ab676SJeff Bonwick done:
352b24ab676SJeff Bonwick 	zilog->zl_parse_error = error;
353b24ab676SJeff Bonwick 	zilog->zl_parse_blk_seq = max_blk_seq;
354b24ab676SJeff Bonwick 	zilog->zl_parse_lr_seq = max_lr_seq;
355b24ab676SJeff Bonwick 	zilog->zl_parse_blk_count = blk_count;
356b24ab676SJeff Bonwick 	zilog->zl_parse_lr_count = lr_count;
357b24ab676SJeff Bonwick 
358b24ab676SJeff Bonwick 	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
359b24ab676SJeff Bonwick 	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));
360d80c45e0Sbonwick 
361b24ab676SJeff Bonwick 	zil_bp_tree_fini(zilog);
362b24ab676SJeff Bonwick 	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);
363b24ab676SJeff Bonwick 
364b24ab676SJeff Bonwick 	return (error);
365fa9e4066Sahrens }
366fa9e4066Sahrens 
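As a usage sketch: zil_parse() is driven by a pair of callbacks, one per log
block and one per log record, and a non-zero return from either stops the walk.
The counting callbacks below are hypothetical; the real callers in this file
pass the claim or free pairs defined next.

	/* ARGSUSED */
	static int
	zil_count_blk(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t first_txg)
	{
		(*(uint64_t *)arg)++;	/* one call per log block in the chain */
		return (0);		/* non-zero would stop the walk */
	}

	/* ARGSUSED */
	static int
	zil_count_lr(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t first_txg)
	{
		(*(uint64_t *)arg)++;	/* one call per record in each valid block */
		return (0);
	}

	/* ... and from a caller that already holds the zilog: */
	uint64_t nrecords = 0;
	(void) zil_parse(zilog, zil_count_blk, zil_count_lr, &nrecords,
	    zilog->zl_header->zh_claim_txg);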
367b24ab676SJeff Bonwick static int
368fa9e4066Sahrens zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
369fa9e4066Sahrens {
370fa9e4066Sahrens 	/*
371fa9e4066Sahrens 	 * Claim log block if not already committed and not already claimed.
372b24ab676SJeff Bonwick 	 * If tx == NULL, just verify that the block is claimable.
373fa9e4066Sahrens 	 */
374b24ab676SJeff Bonwick 	if (bp->blk_birth < first_txg || zil_bp_tree_add(zilog, bp) != 0)
375b24ab676SJeff Bonwick 		return (0);
376b24ab676SJeff Bonwick 
377b24ab676SJeff Bonwick 	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
378b24ab676SJeff Bonwick 	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
379b24ab676SJeff Bonwick 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
380fa9e4066Sahrens }
381fa9e4066Sahrens 
382b24ab676SJeff Bonwick static int
383fa9e4066Sahrens zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
384fa9e4066Sahrens {
385b24ab676SJeff Bonwick 	lr_write_t *lr = (lr_write_t *)lrc;
386b24ab676SJeff Bonwick 	int error;
387b24ab676SJeff Bonwick 
388b24ab676SJeff Bonwick 	if (lrc->lrc_txtype != TX_WRITE)
389b24ab676SJeff Bonwick 		return (0);
390b24ab676SJeff Bonwick 
391b24ab676SJeff Bonwick 	/*
392b24ab676SJeff Bonwick 	 * If the block is not readable, don't claim it.  This can happen
393b24ab676SJeff Bonwick 	 * in normal operation when a log block is written to disk before
394b24ab676SJeff Bonwick 	 * some of the dmu_sync() blocks it points to.  In this case, the
395b24ab676SJeff Bonwick 	 * transaction cannot have been committed to anyone (we would have
396b24ab676SJeff Bonwick 	 * waited for all writes to be stable first), so it is semantically
397b24ab676SJeff Bonwick 	 * correct to declare this the end of the log.
398b24ab676SJeff Bonwick 	 */
399b24ab676SJeff Bonwick 	if (lr->lr_blkptr.blk_birth >= first_txg &&
400b24ab676SJeff Bonwick 	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
401b24ab676SJeff Bonwick 		return (error);
402b24ab676SJeff Bonwick 	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
403fa9e4066Sahrens }
404fa9e4066Sahrens 
405fa9e4066Sahrens /* ARGSUSED */
406b24ab676SJeff Bonwick static int
407fa9e4066Sahrens zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
408fa9e4066Sahrens {
409b24ab676SJeff Bonwick 	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
410b24ab676SJeff Bonwick 
411b24ab676SJeff Bonwick 	return (0);
412fa9e4066Sahrens }
413fa9e4066Sahrens 
414b24ab676SJeff Bonwick static int
415fa9e4066Sahrens zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
416fa9e4066Sahrens {
417b24ab676SJeff Bonwick 	lr_write_t *lr = (lr_write_t *)lrc;
418b24ab676SJeff Bonwick 	blkptr_t *bp = &lr->lr_blkptr;
419b24ab676SJeff Bonwick 
420fa9e4066Sahrens 	/*
421fa9e4066Sahrens 	 * If we previously claimed it, we need to free it.
422fa9e4066Sahrens 	 */
423b24ab676SJeff Bonwick 	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
424b24ab676SJeff Bonwick 	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0)
425b24ab676SJeff Bonwick 		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
426b24ab676SJeff Bonwick 
427b24ab676SJeff Bonwick 	return (0);
428fa9e4066Sahrens }
429fa9e4066Sahrens 
4306e1f5caaSNeil Perrin static lwb_t *
4316e1f5caaSNeil Perrin zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
4326e1f5caaSNeil Perrin {
4336e1f5caaSNeil Perrin 	lwb_t *lwb;
4346e1f5caaSNeil Perrin 
4356e1f5caaSNeil Perrin 	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
4366e1f5caaSNeil Perrin 	lwb->lwb_zilog = zilog;
4376e1f5caaSNeil Perrin 	lwb->lwb_blk = *bp;
4386e1f5caaSNeil Perrin 	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
4396e1f5caaSNeil Perrin 	lwb->lwb_max_txg = txg;
4406e1f5caaSNeil Perrin 	lwb->lwb_zio = NULL;
4416e1f5caaSNeil Perrin 	lwb->lwb_tx = NULL;
4426e1f5caaSNeil Perrin 	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
4436e1f5caaSNeil Perrin 		lwb->lwb_nused = sizeof (zil_chain_t);
4446e1f5caaSNeil Perrin 		lwb->lwb_sz = BP_GET_LSIZE(bp);
4456e1f5caaSNeil Perrin 	} else {
4466e1f5caaSNeil Perrin 		lwb->lwb_nused = 0;
4476e1f5caaSNeil Perrin 		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
4486e1f5caaSNeil Perrin 	}
4496e1f5caaSNeil Perrin 
4506e1f5caaSNeil Perrin 	mutex_enter(&zilog->zl_lock);
4516e1f5caaSNeil Perrin 	list_insert_tail(&zilog->zl_lwb_list, lwb);
4526e1f5caaSNeil Perrin 	mutex_exit(&zilog->zl_lock);
4536e1f5caaSNeil Perrin 
4546e1f5caaSNeil Perrin 	return (lwb);
4556e1f5caaSNeil Perrin }
4566e1f5caaSNeil Perrin 
457fa9e4066Sahrens /*
458fa9e4066Sahrens  * Create an on-disk intent log.
459fa9e4066Sahrens  */
4606e1f5caaSNeil Perrin static lwb_t *
461fa9e4066Sahrens zil_create(zilog_t *zilog)
462fa9e4066Sahrens {
463d80c45e0Sbonwick 	const zil_header_t *zh = zilog->zl_header;
4646e1f5caaSNeil Perrin 	lwb_t *lwb = NULL;
465d80c45e0Sbonwick 	uint64_t txg = 0;
466d80c45e0Sbonwick 	dmu_tx_t *tx = NULL;
467fa9e4066Sahrens 	blkptr_t blk;
468d80c45e0Sbonwick 	int error = 0;
469fa9e4066Sahrens 
470fa9e4066Sahrens 	/*
471d80c45e0Sbonwick 	 * Wait for any previous destroy to complete.
472fa9e4066Sahrens 	 */
473d80c45e0Sbonwick 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
474d80c45e0Sbonwick 
475d80c45e0Sbonwick 	ASSERT(zh->zh_claim_txg == 0);
476d80c45e0Sbonwick 	ASSERT(zh->zh_replay_seq == 0);
477d80c45e0Sbonwick 
478d80c45e0Sbonwick 	blk = zh->zh_log;
479fa9e4066Sahrens 
480fa9e4066Sahrens 	/*
4816e1f5caaSNeil Perrin 	 * Allocate an initial log block if:
4826e1f5caaSNeil Perrin 	 *    - there isn't one already
4836e1f5caaSNeil Perrin 	 *    - the existing block is the wrong endianness
484fa9e4066Sahrens 	 */
485899217ddSNeil Perrin 	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
486d80c45e0Sbonwick 		tx = dmu_tx_create(zilog->zl_os);
487b24ab676SJeff Bonwick 		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
488d80c45e0Sbonwick 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
489d80c45e0Sbonwick 		txg = dmu_tx_get_txg(tx);
490d80c45e0Sbonwick 
491899217ddSNeil Perrin 		if (!BP_IS_HOLE(&blk)) {
492b24ab676SJeff Bonwick 			zio_free_zil(zilog->zl_spa, txg, &blk);
493899217ddSNeil Perrin 			BP_ZERO(&blk);
494899217ddSNeil Perrin 		}
495899217ddSNeil Perrin 
496b24ab676SJeff Bonwick 		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
497b24ab676SJeff Bonwick 		    ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
498d80c45e0Sbonwick 
499d80c45e0Sbonwick 		if (error == 0)
500d80c45e0Sbonwick 			zil_init_log_chain(zilog, &blk);
50113f5297eSperrin 	}
502fa9e4066Sahrens 
503d80c45e0Sbonwick 	/*
504d80c45e0Sbonwick 	 * Allocate a log write buffer (lwb) for the first log block.
505d80c45e0Sbonwick 	 */
5066e1f5caaSNeil Perrin 	if (error == 0)
5076e1f5caaSNeil Perrin 		lwb = zil_alloc_lwb(zilog, &blk, txg);
508fa9e4066Sahrens 
509d80c45e0Sbonwick 	/*
510d80c45e0Sbonwick 	 * If we just allocated the first log block, commit our transaction
511d80c45e0Sbonwick 	 * and wait for zil_sync() to stuff the block pointer into zh_log.
512d80c45e0Sbonwick 	 * (zh is part of the MOS, so we cannot modify it in open context.)
513d80c45e0Sbonwick 	 */
514d80c45e0Sbonwick 	if (tx != NULL) {
515d80c45e0Sbonwick 		dmu_tx_commit(tx);
51613f5297eSperrin 		txg_wait_synced(zilog->zl_dmu_pool, txg);
517d80c45e0Sbonwick 	}
518d80c45e0Sbonwick 
519d80c45e0Sbonwick 	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
5206e1f5caaSNeil Perrin 
5216e1f5caaSNeil Perrin 	return (lwb);
522fa9e4066Sahrens }
523fa9e4066Sahrens 
524fa9e4066Sahrens /*
525fa9e4066Sahrens  * In one tx, free all log blocks and clear the log header.
526d80c45e0Sbonwick  * If keep_first is set, then we're replaying a log with no content.
527d80c45e0Sbonwick  * We want to keep the first block, however, so that the first
528d80c45e0Sbonwick  * synchronous transaction doesn't require a txg_wait_synced()
529d80c45e0Sbonwick  * in zil_create().  We don't need to txg_wait_synced() here either
530d80c45e0Sbonwick  * when keep_first is set, because both zil_create() and zil_destroy()
531d80c45e0Sbonwick  * will wait for any in-progress destroys to complete.
532fa9e4066Sahrens  */
533fa9e4066Sahrens void
534d80c45e0Sbonwick zil_destroy(zilog_t *zilog, boolean_t keep_first)
535fa9e4066Sahrens {
536d80c45e0Sbonwick 	const zil_header_t *zh = zilog->zl_header;
537d80c45e0Sbonwick 	lwb_t *lwb;
538fa9e4066Sahrens 	dmu_tx_t *tx;
539fa9e4066Sahrens 	uint64_t txg;
540fa9e4066Sahrens 
541d80c45e0Sbonwick 	/*
542d80c45e0Sbonwick 	 * Wait for any previous destroy to complete.
543d80c45e0Sbonwick 	 */
544d80c45e0Sbonwick 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
545fa9e4066Sahrens 
546b24ab676SJeff Bonwick 	zilog->zl_old_header = *zh;		/* debugging aid */
547b24ab676SJeff Bonwick 
548d80c45e0Sbonwick 	if (BP_IS_HOLE(&zh->zh_log))
549fa9e4066Sahrens 		return;
550fa9e4066Sahrens 
551fa9e4066Sahrens 	tx = dmu_tx_create(zilog->zl_os);
552b24ab676SJeff Bonwick 	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
553fa9e4066Sahrens 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
554fa9e4066Sahrens 	txg = dmu_tx_get_txg(tx);
555fa9e4066Sahrens 
556d80c45e0Sbonwick 	mutex_enter(&zilog->zl_lock);
557d80c45e0Sbonwick 
558d80c45e0Sbonwick 	ASSERT3U(zilog->zl_destroy_txg, <, txg);
559fa9e4066Sahrens 	zilog->zl_destroy_txg = txg;
560b24ab676SJeff Bonwick 	zilog->zl_keep_first = keep_first;
561d80c45e0Sbonwick 
562d80c45e0Sbonwick 	if (!list_is_empty(&zilog->zl_lwb_list)) {
563d80c45e0Sbonwick 		ASSERT(zh->zh_claim_txg == 0);
564*c9ba2a43SEric Schrock 		VERIFY(!keep_first);
565d80c45e0Sbonwick 		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
566d80c45e0Sbonwick 			list_remove(&zilog->zl_lwb_list, lwb);
567d80c45e0Sbonwick 			if (lwb->lwb_buf != NULL)
568d80c45e0Sbonwick 				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
569b24ab676SJeff Bonwick 			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
570d80c45e0Sbonwick 			kmem_cache_free(zil_lwb_cache, lwb);
571d80c45e0Sbonwick 		}
572b24ab676SJeff Bonwick 	} else if (!keep_first) {
573b24ab676SJeff Bonwick 		(void) zil_parse(zilog, zil_free_log_block,
574b24ab676SJeff Bonwick 		    zil_free_log_record, tx, zh->zh_claim_txg);
575d80c45e0Sbonwick 	}
576b19a79ecSperrin 	mutex_exit(&zilog->zl_lock);
577fa9e4066Sahrens 
578fa9e4066Sahrens 	dmu_tx_commit(tx);
579fa9e4066Sahrens }
580fa9e4066Sahrens 
5811d452cf5Sahrens int
582fd136879SMatthew Ahrens zil_claim(const char *osname, void *txarg)
583fa9e4066Sahrens {
584fa9e4066Sahrens 	dmu_tx_t *tx = txarg;
585fa9e4066Sahrens 	uint64_t first_txg = dmu_tx_get_txg(tx);
586fa9e4066Sahrens 	zilog_t *zilog;
587fa9e4066Sahrens 	zil_header_t *zh;
588fa9e4066Sahrens 	objset_t *os;
589fa9e4066Sahrens 	int error;
590fa9e4066Sahrens 
591503ad85cSMatthew Ahrens 	error = dmu_objset_hold(osname, FTAG, &os);
592fa9e4066Sahrens 	if (error) {
593b87f3af3Sperrin 		cmn_err(CE_WARN, "can't open objset for %s", osname);
5941d452cf5Sahrens 		return (0);
595fa9e4066Sahrens 	}
596fa9e4066Sahrens 
597fa9e4066Sahrens 	zilog = dmu_objset_zil(os);
598d80c45e0Sbonwick 	zh = zil_header_in_syncing_context(zilog);
599fa9e4066Sahrens 
600b24ab676SJeff Bonwick 	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
601e6ca193dSGeorge Wilson 		if (!BP_IS_HOLE(&zh->zh_log))
602b24ab676SJeff Bonwick 			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
603e6ca193dSGeorge Wilson 		BP_ZERO(&zh->zh_log);
604e6ca193dSGeorge Wilson 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
605468c413aSTim Haley 		dmu_objset_rele(os, FTAG);
606468c413aSTim Haley 		return (0);
607e6ca193dSGeorge Wilson 	}
608e6ca193dSGeorge Wilson 
609fa9e4066Sahrens 	/*
610d80c45e0Sbonwick 	 * Claim all log blocks if we haven't already done so, and remember
611d80c45e0Sbonwick 	 * the highest claimed sequence number.  This ensures that if we can
612d80c45e0Sbonwick 	 * read only part of the log now (e.g. due to a missing device),
613d80c45e0Sbonwick 	 * but we can read the entire log later, we will not try to replay
614d80c45e0Sbonwick 	 * or destroy beyond the last block we successfully claimed.
615fa9e4066Sahrens 	 */
616fa9e4066Sahrens 	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
617fa9e4066Sahrens 	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
618b24ab676SJeff Bonwick 		(void) zil_parse(zilog, zil_claim_log_block,
619d80c45e0Sbonwick 		    zil_claim_log_record, tx, first_txg);
620b24ab676SJeff Bonwick 		zh->zh_claim_txg = first_txg;
621b24ab676SJeff Bonwick 		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
622b24ab676SJeff Bonwick 		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
623b24ab676SJeff Bonwick 		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
624b24ab676SJeff Bonwick 			zh->zh_flags |= ZIL_REPLAY_NEEDED;
625b24ab676SJeff Bonwick 		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
626fa9e4066Sahrens 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
627fa9e4066Sahrens 	}
628d80c45e0Sbonwick 
629fa9e4066Sahrens 	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
630503ad85cSMatthew Ahrens 	dmu_objset_rele(os, FTAG);
6311d452cf5Sahrens 	return (0);
632b87f3af3Sperrin }
633b87f3af3Sperrin 
634b87f3af3Sperrin /*
635b87f3af3Sperrin  * Check the log by walking the log chain.
636b87f3af3Sperrin  * Checksum errors are ok as they indicate the end of the chain.
637b87f3af3Sperrin  * Any other error (no device or read failure) is returned.
638b87f3af3Sperrin  */
639b87f3af3Sperrin int
640fd136879SMatthew Ahrens zil_check_log_chain(const char *osname, void *tx)
641b87f3af3Sperrin {
642b87f3af3Sperrin 	zilog_t *zilog;
643b87f3af3Sperrin 	objset_t *os;
6444b964adaSGeorge Wilson 	blkptr_t *bp;
645b87f3af3Sperrin 	int error;
646b87f3af3Sperrin 
647b24ab676SJeff Bonwick 	ASSERT(tx == NULL);
648b24ab676SJeff Bonwick 
649503ad85cSMatthew Ahrens 	error = dmu_objset_hold(osname, FTAG, &os);
650b87f3af3Sperrin 	if (error) {
651b87f3af3Sperrin 		cmn_err(CE_WARN, "can't open objset for %s", osname);
652b87f3af3Sperrin 		return (0);
653b87f3af3Sperrin 	}
654b87f3af3Sperrin 
655b87f3af3Sperrin 	zilog = dmu_objset_zil(os);
6564b964adaSGeorge Wilson 	bp = (blkptr_t *)&zilog->zl_header->zh_log;
6574b964adaSGeorge Wilson 
6584b964adaSGeorge Wilson 	/*
6594b964adaSGeorge Wilson 	 * Check the first block and determine if it's on a log device
6604b964adaSGeorge Wilson 	 * which may have been removed or faulted prior to loading this
6614b964adaSGeorge Wilson 	 * pool.  If so, there's no point in checking the rest of the log
6624b964adaSGeorge Wilson 	 * as its content should have already been synced to the pool.
6634b964adaSGeorge Wilson 	 */
6644b964adaSGeorge Wilson 	if (!BP_IS_HOLE(bp)) {
6654b964adaSGeorge Wilson 		vdev_t *vd;
6664b964adaSGeorge Wilson 		boolean_t valid = B_TRUE;
6674b964adaSGeorge Wilson 
6684b964adaSGeorge Wilson 		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
6694b964adaSGeorge Wilson 		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
6704b964adaSGeorge Wilson 		if (vd->vdev_islog && vdev_is_dead(vd))
6714b964adaSGeorge Wilson 			valid = vdev_log_state_valid(vd);
6724b964adaSGeorge Wilson 		spa_config_exit(os->os_spa, SCL_STATE, FTAG);
6734b964adaSGeorge Wilson 
6744b964adaSGeorge Wilson 		if (!valid) {
6754b964adaSGeorge Wilson 			dmu_objset_rele(os, FTAG);
6764b964adaSGeorge Wilson 			return (0);
6774b964adaSGeorge Wilson 		}
6784b964adaSGeorge Wilson 	}
679b87f3af3Sperrin 
680b24ab676SJeff Bonwick 	/*
681b24ab676SJeff Bonwick 	 * Because tx == NULL, zil_claim_log_block() will not actually claim
682b24ab676SJeff Bonwick 	 * any blocks, but just determine whether it is possible to do so.
683b24ab676SJeff Bonwick 	 * In addition to checking the log chain, zil_claim_log_block()
684b24ab676SJeff Bonwick 	 * will invoke zio_claim() with a done func of spa_claim_notify(),
685b24ab676SJeff Bonwick 	 * which will update spa_max_claim_txg.  See spa_load() for details.
686b24ab676SJeff Bonwick 	 */
687b24ab676SJeff Bonwick 	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
688b24ab676SJeff Bonwick 	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
689b24ab676SJeff Bonwick 
690503ad85cSMatthew Ahrens 	dmu_objset_rele(os, FTAG);
691b24ab676SJeff Bonwick 
692b24ab676SJeff Bonwick 	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
693b87f3af3Sperrin }
694b87f3af3Sperrin 
69517f17c2dSbonwick static int
69617f17c2dSbonwick zil_vdev_compare(const void *x1, const void *x2)
69717f17c2dSbonwick {
6985002558fSNeil Perrin 	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
6995002558fSNeil Perrin 	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
70017f17c2dSbonwick 
70117f17c2dSbonwick 	if (v1 < v2)
70217f17c2dSbonwick 		return (-1);
70317f17c2dSbonwick 	if (v1 > v2)
70417f17c2dSbonwick 		return (1);
70517f17c2dSbonwick 
70617f17c2dSbonwick 	return (0);
70717f17c2dSbonwick }
70817f17c2dSbonwick 
709fa9e4066Sahrens void
710b24ab676SJeff Bonwick zil_add_block(zilog_t *zilog, const blkptr_t *bp)
711fa9e4066Sahrens {
71217f17c2dSbonwick 	avl_tree_t *t = &zilog->zl_vdev_tree;
71317f17c2dSbonwick 	avl_index_t where;
71417f17c2dSbonwick 	zil_vdev_node_t *zv, zvsearch;
71517f17c2dSbonwick 	int ndvas = BP_GET_NDVAS(bp);
71617f17c2dSbonwick 	int i;
717fa9e4066Sahrens 
718416e0cd8Sek 	if (zfs_nocacheflush)
719fa9e4066Sahrens 		return;
720fa9e4066Sahrens 
72117f17c2dSbonwick 	ASSERT(zilog->zl_writer);
72217f17c2dSbonwick 
72317f17c2dSbonwick 	/*
72417f17c2dSbonwick 	 * Even though we're zl_writer, we still need a lock because the
72517f17c2dSbonwick 	 * zl_get_data() callbacks may have dmu_sync() done callbacks
72617f17c2dSbonwick 	 * that will run concurrently.
72717f17c2dSbonwick 	 */
72817f17c2dSbonwick 	mutex_enter(&zilog->zl_vdev_lock);
72917f17c2dSbonwick 	for (i = 0; i < ndvas; i++) {
73017f17c2dSbonwick 		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
73117f17c2dSbonwick 		if (avl_find(t, &zvsearch, &where) == NULL) {
73217f17c2dSbonwick 			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
73317f17c2dSbonwick 			zv->zv_vdev = zvsearch.zv_vdev;
73417f17c2dSbonwick 			avl_insert(t, zv, where);
73567bd71c6Sperrin 		}
73667bd71c6Sperrin 	}
73717f17c2dSbonwick 	mutex_exit(&zilog->zl_vdev_lock);
738fa9e4066Sahrens }
739fa9e4066Sahrens 
74091de656bSNeil Perrin static void
74167bd71c6Sperrin zil_flush_vdevs(zilog_t *zilog)
74267bd71c6Sperrin {
74367bd71c6Sperrin 	spa_t *spa = zilog->zl_spa;
74417f17c2dSbonwick 	avl_tree_t *t = &zilog->zl_vdev_tree;
74517f17c2dSbonwick 	void *cookie = NULL;
74617f17c2dSbonwick 	zil_vdev_node_t *zv;
74717f17c2dSbonwick 	zio_t *zio;
748fa9e4066Sahrens 
74967bd71c6Sperrin 	ASSERT(zilog->zl_writer);
75067bd71c6Sperrin 
75117f17c2dSbonwick 	/*
75217f17c2dSbonwick 	 * We don't need zl_vdev_lock here because we're the zl_writer,
75317f17c2dSbonwick 	 * and all zl_get_data() callbacks are done.
75417f17c2dSbonwick 	 */
75517f17c2dSbonwick 	if (avl_numnodes(t) == 0)
75617f17c2dSbonwick 		return;
75717f17c2dSbonwick 
758e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
75917f17c2dSbonwick 
760e14bb325SJeff Bonwick 	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
761fa9e4066Sahrens 
76217f17c2dSbonwick 	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
76317f17c2dSbonwick 		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
76417f17c2dSbonwick 		if (vd != NULL)
76517f17c2dSbonwick 			zio_flush(zio, vd);
76617f17c2dSbonwick 		kmem_free(zv, sizeof (*zv));
76767bd71c6Sperrin 	}
76817f17c2dSbonwick 
769fa9e4066Sahrens 	/*
770fa9e4066Sahrens 	 * Wait for all the flushes to complete.  Not all devices actually
771fa9e4066Sahrens 	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
772fa9e4066Sahrens 	 */
77317f17c2dSbonwick 	(void) zio_wait(zio);
77417f17c2dSbonwick 
775e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_STATE, FTAG);
776fa9e4066Sahrens }
777fa9e4066Sahrens 
778fa9e4066Sahrens /*
779fa9e4066Sahrens  * Function called when a log block write completes
780fa9e4066Sahrens  */
781fa9e4066Sahrens static void
782fa9e4066Sahrens zil_lwb_write_done(zio_t *zio)
783fa9e4066Sahrens {
784fa9e4066Sahrens 	lwb_t *lwb = zio->io_private;
785fa9e4066Sahrens 	zilog_t *zilog = lwb->lwb_zilog;
786b24ab676SJeff Bonwick 	dmu_tx_t *tx = lwb->lwb_tx;
787fa9e4066Sahrens 
788e14bb325SJeff Bonwick 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
789e14bb325SJeff Bonwick 	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
790e14bb325SJeff Bonwick 	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
791e14bb325SJeff Bonwick 	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
792e14bb325SJeff Bonwick 	ASSERT(!BP_IS_GANG(zio->io_bp));
793e14bb325SJeff Bonwick 	ASSERT(!BP_IS_HOLE(zio->io_bp));
794e14bb325SJeff Bonwick 	ASSERT(zio->io_bp->blk_fill == 0);
795e14bb325SJeff Bonwick 
796fa9e4066Sahrens 	/*
797ef0d8e11SNeil Perrin 	 * Ensure the lwb buffer pointer is cleared before releasing
798ef0d8e11SNeil Perrin 	 * the txg. If we have had an allocation failure and
799ef0d8e11SNeil Perrin 	 * the txg is waiting to sync then we want zil_sync()
800ef0d8e11SNeil Perrin 	 * to remove the lwb so that it's not picked up as the next new
801ef0d8e11SNeil Perrin 	 * one in zil_commit_writer(). zil_sync() will only remove
802ef0d8e11SNeil Perrin 	 * the lwb if lwb_buf is null.
803fa9e4066Sahrens 	 */
804fa9e4066Sahrens 	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
805fa9e4066Sahrens 	mutex_enter(&zilog->zl_lock);
806fa9e4066Sahrens 	lwb->lwb_buf = NULL;
807b24ab676SJeff Bonwick 	lwb->lwb_tx = NULL;
808b24ab676SJeff Bonwick 	mutex_exit(&zilog->zl_lock);
809ef0d8e11SNeil Perrin 
810ef0d8e11SNeil Perrin 	/*
811ef0d8e11SNeil Perrin 	 * Now that we've written this log block, we have a stable pointer
812ef0d8e11SNeil Perrin 	 * to the next block in the chain, so it's OK to let the txg in
813b24ab676SJeff Bonwick 	 * which we allocated the next block sync.
814ef0d8e11SNeil Perrin 	 */
815b24ab676SJeff Bonwick 	dmu_tx_commit(tx);
816fa9e4066Sahrens }
817fa9e4066Sahrens 
818c5c6ffa0Smaybee /*
819c5c6ffa0Smaybee  * Initialize the io for a log block.
820c5c6ffa0Smaybee  */
821c5c6ffa0Smaybee static void
822c5c6ffa0Smaybee zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
823c5c6ffa0Smaybee {
824c5c6ffa0Smaybee 	zbookmark_t zb;
825c5c6ffa0Smaybee 
826b24ab676SJeff Bonwick 	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
827b24ab676SJeff Bonwick 	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
828b24ab676SJeff Bonwick 	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
829c5c6ffa0Smaybee 
830b19a79ecSperrin 	if (zilog->zl_root_zio == NULL) {
831b19a79ecSperrin 		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
832b19a79ecSperrin 		    ZIO_FLAG_CANFAIL);
833b19a79ecSperrin 	}
83467bd71c6Sperrin 	if (lwb->lwb_zio == NULL) {
83567bd71c6Sperrin 		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
8366e1f5caaSNeil Perrin 		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
837e6ca193dSGeorge Wilson 		    zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
8388f18d1faSGeorge Wilson 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
83967bd71c6Sperrin 	}
840c5c6ffa0Smaybee }
841c5c6ffa0Smaybee 
8426e1f5caaSNeil Perrin /*
8436e1f5caaSNeil Perrin  * Define a limited set of intent log block sizes.
8446e1f5caaSNeil Perrin  * Each must be a multiple of 4KB. Note only the amount used (again
8456e1f5caaSNeil Perrin  * aligned to 4KB) actually gets written. However, we can't always just
8466e1f5caaSNeil Perrin  * allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.
8476e1f5caaSNeil Perrin  */
8486e1f5caaSNeil Perrin uint64_t zil_block_buckets[] = {
8496e1f5caaSNeil Perrin     4096,		/* non TX_WRITE */
8506e1f5caaSNeil Perrin     8192+4096,		/* data base */
8516e1f5caaSNeil Perrin     32*1024 + 4096, 	/* NFS writes */
8526e1f5caaSNeil Perrin     UINT64_MAX
8536e1f5caaSNeil Perrin };
8546e1f5caaSNeil Perrin 
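A sketch of the selection policy (the helper name is hypothetical; the real
loop is written inline in zil_lwb_write_start() below): choose the smallest
bucket that fits the needed size, and clamp to SPA_MAXBLOCKSIZE when even the
largest fixed bucket is too small.

	static uint64_t
	zil_pick_block_bucket(uint64_t need)	/* hypothetical helper */
	{
		int i;

		/* smallest bucket >= need; e.g. need = 20K lands on 32K + 4K */
		for (i = 0; need > zil_block_buckets[i]; i++)
			continue;

		return (zil_block_buckets[i] == UINT64_MAX ?
		    SPA_MAXBLOCKSIZE : zil_block_buckets[i]);
	}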
855d48e086fSNeil Perrin /*
856d48e086fSNeil Perrin  * Use the slog as long as the logbias is 'latency' and the current commit size
857d48e086fSNeil Perrin  * is less than the limit or the total list size is less than 2X the limit.
858d48e086fSNeil Perrin  * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
859d48e086fSNeil Perrin  */
860d48e086fSNeil Perrin uint64_t zil_slog_limit = 1024 * 1024;
861d48e086fSNeil Perrin #define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
862d48e086fSNeil Perrin 	(((zilog)->zl_cur_used < zil_slog_limit) || \
863d48e086fSNeil Perrin 	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
864d48e086fSNeil Perrin 
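A worked illustration of the macro with the default 1MB limit, assuming
zl_logbias is ZFS_LOGBIAS_LATENCY (the figures are made up); the macro's result
is the final argument to zio_alloc_zil() in zil_lwb_write_start() below:

	zl_cur_used = 256K,  zl_itx_list_sz = 3M    -->  slog (commit size under the limit)
	zl_cur_used = 1.5M,  zl_itx_list_sz = 1M    -->  slog (list size under 2MB)
	zl_cur_used = 1.5M,  zl_itx_list_sz = 2.5M  -->  main pool class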
865fa9e4066Sahrens /*
866fa9e4066Sahrens  * Start a log block write and advance to the next log block.
867fa9e4066Sahrens  * Calls are serialized.
868fa9e4066Sahrens  */
869fa9e4066Sahrens static lwb_t *
870fa9e4066Sahrens zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
871fa9e4066Sahrens {
8726e1f5caaSNeil Perrin 	lwb_t *nlwb = NULL;
8736e1f5caaSNeil Perrin 	zil_chain_t *zilc;
874d80c45e0Sbonwick 	spa_t *spa = zilog->zl_spa;
8756e1f5caaSNeil Perrin 	blkptr_t *bp;
876b24ab676SJeff Bonwick 	dmu_tx_t *tx;
877fa9e4066Sahrens 	uint64_t txg;
878ada693c4SNeil Perrin 	uint64_t zil_blksz, wsz;
8796e1f5caaSNeil Perrin 	int i, error;
8806e1f5caaSNeil Perrin 
8816e1f5caaSNeil Perrin 	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
8826e1f5caaSNeil Perrin 		zilc = (zil_chain_t *)lwb->lwb_buf;
8836e1f5caaSNeil Perrin 		bp = &zilc->zc_next_blk;
8846e1f5caaSNeil Perrin 	} else {
8856e1f5caaSNeil Perrin 		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
8866e1f5caaSNeil Perrin 		bp = &zilc->zc_next_blk;
8876e1f5caaSNeil Perrin 	}
888fa9e4066Sahrens 
8896e1f5caaSNeil Perrin 	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
890fa9e4066Sahrens 
891fa9e4066Sahrens 	/*
892fa9e4066Sahrens 	 * Allocate the next block and save its address in this block
893fa9e4066Sahrens 	 * before writing it in order to establish the log chain.
894fa9e4066Sahrens 	 * Note that if the allocation of nlwb synced before we wrote
895fa9e4066Sahrens 	 * the block that points at it (lwb), we'd leak it if we crashed.
896b24ab676SJeff Bonwick 	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
897b24ab676SJeff Bonwick 	 * We dirty the dataset to ensure that zil_sync() will be called
898b24ab676SJeff Bonwick 	 * to clean up in the event of allocation failure or I/O failure.
899fa9e4066Sahrens 	 */
900b24ab676SJeff Bonwick 	tx = dmu_tx_create(zilog->zl_os);
901b24ab676SJeff Bonwick 	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
902b24ab676SJeff Bonwick 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
903b24ab676SJeff Bonwick 	txg = dmu_tx_get_txg(tx);
904b24ab676SJeff Bonwick 
905b24ab676SJeff Bonwick 	lwb->lwb_tx = tx;
906fa9e4066Sahrens 
907fa9e4066Sahrens 	/*
9086e1f5caaSNeil Perrin 	 * Log blocks are pre-allocated. Here we select the size of the next
9096e1f5caaSNeil Perrin 	 * block, based on size used in the last block.
9106e1f5caaSNeil Perrin 	 * - first find the smallest bucket that will fit the block from a
9116e1f5caaSNeil Perrin 	 *   limited set of block sizes. This is because it's faster to write
9126e1f5caaSNeil Perrin 	 *   blocks allocated from the same metaslab as they are adjacent or
9136e1f5caaSNeil Perrin 	 *   close.
9146e1f5caaSNeil Perrin 	 * - next find the maximum from the new suggested size and an array of
9156e1f5caaSNeil Perrin 	 *   previous sizes. This lessens a picket fence effect of wrongly
9166e1f5caaSNeil Perrin 	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
9176e1f5caaSNeil Perrin 	 *   requests.
9186e1f5caaSNeil Perrin 	 *
9196e1f5caaSNeil Perrin 	 * Note we only write what is used, but we can't just allocate
9206e1f5caaSNeil Perrin 	 * the maximum block size because we can exhaust the available
9216e1f5caaSNeil Perrin 	 * pool log space.
922fa9e4066Sahrens 	 */
9236e1f5caaSNeil Perrin 	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
9246e1f5caaSNeil Perrin 	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
9256e1f5caaSNeil Perrin 		continue;
9266e1f5caaSNeil Perrin 	zil_blksz = zil_block_buckets[i];
9276e1f5caaSNeil Perrin 	if (zil_blksz == UINT64_MAX)
9286e1f5caaSNeil Perrin 		zil_blksz = SPA_MAXBLOCKSIZE;
9296e1f5caaSNeil Perrin 	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
9306e1f5caaSNeil Perrin 	for (i = 0; i < ZIL_PREV_BLKS; i++)
9316e1f5caaSNeil Perrin 		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
9326e1f5caaSNeil Perrin 	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
933fa9e4066Sahrens 
93467bd71c6Sperrin 	BP_ZERO(bp);
93567bd71c6Sperrin 	/* pass the old blkptr in order to spread log blocks across devs */
936b24ab676SJeff Bonwick 	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
937d48e086fSNeil Perrin 	    USE_SLOG(zilog));
9386e1f5caaSNeil Perrin 	if (!error) {
9396e1f5caaSNeil Perrin 		ASSERT3U(bp->blk_birth, ==, txg);
9406e1f5caaSNeil Perrin 		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
9416e1f5caaSNeil Perrin 		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
942d63d470bSgw 
943ea8dc4b6Seschrock 		/*
9446e1f5caaSNeil Perrin 		 * Allocate a new log write buffer (lwb).
945ea8dc4b6Seschrock 		 */
9466e1f5caaSNeil Perrin 		nlwb = zil_alloc_lwb(zilog, bp, txg);
9476e1f5caaSNeil Perrin 
9486e1f5caaSNeil Perrin 		/* Record the block for later vdev flushing */
9496e1f5caaSNeil Perrin 		zil_add_block(zilog, &lwb->lwb_blk);
950fa9e4066Sahrens 	}
951fa9e4066Sahrens 
9526e1f5caaSNeil Perrin 	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
9536e1f5caaSNeil Perrin 		/* For Slim ZIL only write what is used. */
954ada693c4SNeil Perrin 		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
955ada693c4SNeil Perrin 		ASSERT3U(wsz, <=, lwb->lwb_sz);
956ada693c4SNeil Perrin 		zio_shrink(lwb->lwb_zio, wsz);
957fa9e4066Sahrens 
958ada693c4SNeil Perrin 	} else {
959ada693c4SNeil Perrin 		wsz = lwb->lwb_sz;
9606e1f5caaSNeil Perrin 	}
961ada693c4SNeil Perrin 
9626e1f5caaSNeil Perrin 	zilc->zc_pad = 0;
9636e1f5caaSNeil Perrin 	zilc->zc_nused = lwb->lwb_nused;
9646e1f5caaSNeil Perrin 	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
965fa9e4066Sahrens 
966ada693c4SNeil Perrin 	/*
967ada693c4SNeil Perrin 	 * clear unused data for security
968ada693c4SNeil Perrin 	 */
969ada693c4SNeil Perrin 	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
970ada693c4SNeil Perrin 
9716e1f5caaSNeil Perrin 	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */
97267bd71c6Sperrin 
973fa9e4066Sahrens 	/*
9746e1f5caaSNeil Perrin 	 * If there was an allocation failure then nlwb will be null which
9756e1f5caaSNeil Perrin 	 * forces a txg_wait_synced().
976fa9e4066Sahrens 	 */
977fa9e4066Sahrens 	return (nlwb);
978fa9e4066Sahrens }
979fa9e4066Sahrens 
980fa9e4066Sahrens static lwb_t *
981fa9e4066Sahrens zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
982fa9e4066Sahrens {
983fa9e4066Sahrens 	lr_t *lrc = &itx->itx_lr; /* common log record */
984b24ab676SJeff Bonwick 	lr_write_t *lrw = (lr_write_t *)lrc;
985b24ab676SJeff Bonwick 	char *lr_buf;
986fa9e4066Sahrens 	uint64_t txg = lrc->lrc_txg;
987fa9e4066Sahrens 	uint64_t reclen = lrc->lrc_reclen;
988b24ab676SJeff Bonwick 	uint64_t dlen = 0;
989fa9e4066Sahrens 
990fa9e4066Sahrens 	if (lwb == NULL)
991fa9e4066Sahrens 		return (NULL);
992b24ab676SJeff Bonwick 
993fa9e4066Sahrens 	ASSERT(lwb->lwb_buf != NULL);
994fa9e4066Sahrens 
995c5c6ffa0Smaybee 	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
996c5c6ffa0Smaybee 		dlen = P2ROUNDUP_TYPED(
997b24ab676SJeff Bonwick 		    lrw->lr_length, sizeof (uint64_t), uint64_t);
998fa9e4066Sahrens 
999104e2ed7Sperrin 	zilog->zl_cur_used += (reclen + dlen);
100022ac5be4Sperrin 
100167bd71c6Sperrin 	zil_lwb_write_init(zilog, lwb);
100267bd71c6Sperrin 
1003fa9e4066Sahrens 	/*
1004fa9e4066Sahrens 	 * If this record won't fit in the current log block, start a new one.
1005fa9e4066Sahrens 	 */
10066e1f5caaSNeil Perrin 	if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
1007fa9e4066Sahrens 		lwb = zil_lwb_write_start(zilog, lwb);
1008c5c6ffa0Smaybee 		if (lwb == NULL)
1009fa9e4066Sahrens 			return (NULL);
101067bd71c6Sperrin 		zil_lwb_write_init(zilog, lwb);
10116e1f5caaSNeil Perrin 		ASSERT(LWB_EMPTY(lwb));
10126e1f5caaSNeil Perrin 		if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
1013fa9e4066Sahrens 			txg_wait_synced(zilog->zl_dmu_pool, txg);
1014fa9e4066Sahrens 			return (lwb);
1015fa9e4066Sahrens 		}
1016fa9e4066Sahrens 	}
1017fa9e4066Sahrens 
1018b24ab676SJeff Bonwick 	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
1019b24ab676SJeff Bonwick 	bcopy(lrc, lr_buf, reclen);
1020b24ab676SJeff Bonwick 	lrc = (lr_t *)lr_buf;
1021b24ab676SJeff Bonwick 	lrw = (lr_write_t *)lrc;
1022c5c6ffa0Smaybee 
1023c5c6ffa0Smaybee 	/*
1024c5c6ffa0Smaybee 	 * If it's a write, fetch the data or get its blkptr as appropriate.
1025c5c6ffa0Smaybee 	 */
1026c5c6ffa0Smaybee 	if (lrc->lrc_txtype == TX_WRITE) {
1027c5c6ffa0Smaybee 		if (txg > spa_freeze_txg(zilog->zl_spa))
1028c5c6ffa0Smaybee 			txg_wait_synced(zilog->zl_dmu_pool, txg);
1029c5c6ffa0Smaybee 		if (itx->itx_wr_state != WR_COPIED) {
1030c5c6ffa0Smaybee 			char *dbuf;
1031c5c6ffa0Smaybee 			int error;
1032c5c6ffa0Smaybee 
1033c5c6ffa0Smaybee 			if (dlen) {
1034c5c6ffa0Smaybee 				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
1035b24ab676SJeff Bonwick 				dbuf = lr_buf + reclen;
1036b24ab676SJeff Bonwick 				lrw->lr_common.lrc_reclen += dlen;
1037c5c6ffa0Smaybee 			} else {
1038c5c6ffa0Smaybee 				ASSERT(itx->itx_wr_state == WR_INDIRECT);
1039c5c6ffa0Smaybee 				dbuf = NULL;
1040c5c6ffa0Smaybee 			}
1041c5c6ffa0Smaybee 			error = zilog->zl_get_data(
1042b24ab676SJeff Bonwick 			    itx->itx_private, lrw, dbuf, lwb->lwb_zio);
1043c87b8fc5SMark J Musante 			if (error == EIO) {
1044c87b8fc5SMark J Musante 				txg_wait_synced(zilog->zl_dmu_pool, txg);
1045c87b8fc5SMark J Musante 				return (lwb);
1046c87b8fc5SMark J Musante 			}
1047c5c6ffa0Smaybee 			if (error) {
1048c5c6ffa0Smaybee 				ASSERT(error == ENOENT || error == EEXIST ||
1049c5c6ffa0Smaybee 				    error == EALREADY);
1050c5c6ffa0Smaybee 				return (lwb);
1051c5c6ffa0Smaybee 			}
1052c5c6ffa0Smaybee 		}
1053104e2ed7Sperrin 	}
1054c5c6ffa0Smaybee 
1055b24ab676SJeff Bonwick 	/*
1056b24ab676SJeff Bonwick 	 * We're actually making an entry, so update lrc_seq to be the
1057b24ab676SJeff Bonwick 	 * log record sequence number.  Note that this is generally not
1058b24ab676SJeff Bonwick 	 * equal to the itx sequence number because not all transactions
1059b24ab676SJeff Bonwick 	 * are synchronous, and sometimes spa_sync() gets there first.
1060b24ab676SJeff Bonwick 	 */
1061b24ab676SJeff Bonwick 	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
1062c5c6ffa0Smaybee 	lwb->lwb_nused += reclen + dlen;
1063fa9e4066Sahrens 	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
10646e1f5caaSNeil Perrin 	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
1065fa9e4066Sahrens 	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);
1066fa9e4066Sahrens 
1067fa9e4066Sahrens 	return (lwb);
1068fa9e4066Sahrens }
1069fa9e4066Sahrens 
1070fa9e4066Sahrens itx_t *
1071da6c28aaSamw zil_itx_create(uint64_t txtype, size_t lrsize)
1072fa9e4066Sahrens {
1073fa9e4066Sahrens 	itx_t *itx;
1074fa9e4066Sahrens 
1075b4d654b0Sperrin 	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
1076fa9e4066Sahrens 
1077fa9e4066Sahrens 	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
1078fa9e4066Sahrens 	itx->itx_lr.lrc_txtype = txtype;
1079fa9e4066Sahrens 	itx->itx_lr.lrc_reclen = lrsize;
1080abf76b6eSperrin 	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
1081fa9e4066Sahrens 	itx->itx_lr.lrc_seq = 0;	/* defensive */
10825002558fSNeil Perrin 	itx->itx_sync = B_TRUE;		/* default is synchronous */
1083fa9e4066Sahrens 
1084fa9e4066Sahrens 	return (itx);
1085fa9e4066Sahrens }
1086fa9e4066Sahrens 
1087b24ab676SJeff Bonwick void
1088b24ab676SJeff Bonwick zil_itx_destroy(itx_t *itx)
1089b24ab676SJeff Bonwick {
1090b24ab676SJeff Bonwick 	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
1091b24ab676SJeff Bonwick }
1092b24ab676SJeff Bonwick 
10935002558fSNeil Perrin /*
10945002558fSNeil Perrin  * Free up the sync and async itxs. The itxs_t has already been detached
10955002558fSNeil Perrin  * so no locks are needed.
10965002558fSNeil Perrin  */
10975002558fSNeil Perrin static void
10985002558fSNeil Perrin zil_itxg_clean(itxs_t *itxs)
1099fa9e4066Sahrens {
11005002558fSNeil Perrin 	itx_t *itx;
11015002558fSNeil Perrin 	list_t *list;
11025002558fSNeil Perrin 	avl_tree_t *t;
11035002558fSNeil Perrin 	void *cookie;
11045002558fSNeil Perrin 	itx_async_node_t *ian;
11055002558fSNeil Perrin 
11065002558fSNeil Perrin 	list = &itxs->i_sync_list;
11075002558fSNeil Perrin 	while ((itx = list_head(list)) != NULL) {
11085002558fSNeil Perrin 		list_remove(list, itx);
11095002558fSNeil Perrin 		kmem_free(itx, offsetof(itx_t, itx_lr) +
11105002558fSNeil Perrin 		    itx->itx_lr.lrc_reclen);
11115002558fSNeil Perrin 	}
1112fa9e4066Sahrens 
11135002558fSNeil Perrin 	cookie = NULL;
11145002558fSNeil Perrin 	t = &itxs->i_async_tree;
11155002558fSNeil Perrin 	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
11165002558fSNeil Perrin 		list = &ian->ia_list;
11175002558fSNeil Perrin 		while ((itx = list_head(list)) != NULL) {
11185002558fSNeil Perrin 			list_remove(list, itx);
11195002558fSNeil Perrin 			kmem_free(itx, offsetof(itx_t, itx_lr) +
11205002558fSNeil Perrin 			    itx->itx_lr.lrc_reclen);
11215002558fSNeil Perrin 		}
11225002558fSNeil Perrin 		list_destroy(list);
11235002558fSNeil Perrin 		kmem_free(ian, sizeof (itx_async_node_t));
11245002558fSNeil Perrin 	}
11255002558fSNeil Perrin 	avl_destroy(t);
1126fa9e4066Sahrens 
11275002558fSNeil Perrin 	kmem_free(itxs, sizeof (itxs_t));
11285002558fSNeil Perrin }
11295002558fSNeil Perrin 
11305002558fSNeil Perrin static int
11315002558fSNeil Perrin zil_aitx_compare(const void *x1, const void *x2)
11325002558fSNeil Perrin {
11335002558fSNeil Perrin 	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
11345002558fSNeil Perrin 	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
1135fa9e4066Sahrens 
11365002558fSNeil Perrin 	if (o1 < o2)
11375002558fSNeil Perrin 		return (-1);
11385002558fSNeil Perrin 	if (o1 > o2)
11395002558fSNeil Perrin 		return (1);
11405002558fSNeil Perrin 
11415002558fSNeil Perrin 	return (0);
1142fa9e4066Sahrens }
1143fa9e4066Sahrens 
1144fa9e4066Sahrens /*
11455002558fSNeil Perrin  * Remove all async itx with the given oid.
1146fa9e4066Sahrens  */
114791de656bSNeil Perrin static void
11485002558fSNeil Perrin zil_remove_async(zilog_t *zilog, uint64_t oid)
1149fa9e4066Sahrens {
11505002558fSNeil Perrin 	uint64_t otxg, txg;
11515002558fSNeil Perrin 	itx_async_node_t *ian;
11525002558fSNeil Perrin 	avl_tree_t *t;
11535002558fSNeil Perrin 	avl_index_t where;
1154a584ef65Sjohansen 	list_t clean_list;
1155fa9e4066Sahrens 	itx_t *itx;
1156fa9e4066Sahrens 
11575002558fSNeil Perrin 	ASSERT(oid != 0);
1158a584ef65Sjohansen 	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
1159a584ef65Sjohansen 
11605002558fSNeil Perrin 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
11615002558fSNeil Perrin 		otxg = ZILTEST_TXG;
11625002558fSNeil Perrin 	else
11635002558fSNeil Perrin 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1164a584ef65Sjohansen 
11655002558fSNeil Perrin 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
11665002558fSNeil Perrin 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
11675002558fSNeil Perrin 
11685002558fSNeil Perrin 		mutex_enter(&itxg->itxg_lock);
11695002558fSNeil Perrin 		if (itxg->itxg_txg != txg) {
11705002558fSNeil Perrin 			mutex_exit(&itxg->itxg_lock);
11715002558fSNeil Perrin 			continue;
11725002558fSNeil Perrin 		}
1173a584ef65Sjohansen 
11745002558fSNeil Perrin 		/*
11755002558fSNeil Perrin 		 * Locate the object node and move its itx list to the clean list.
11765002558fSNeil Perrin 		 */
11775002558fSNeil Perrin 		t = &itxg->itxg_itxs->i_async_tree;
11785002558fSNeil Perrin 		ian = avl_find(t, &oid, &where);
11795002558fSNeil Perrin 		if (ian != NULL)
11805002558fSNeil Perrin 			list_move_tail(&clean_list, &ian->ia_list);
11815002558fSNeil Perrin 		mutex_exit(&itxg->itxg_lock);
11825002558fSNeil Perrin 	}
1183a584ef65Sjohansen 	while ((itx = list_head(&clean_list)) != NULL) {
1184a584ef65Sjohansen 		list_remove(&clean_list, itx);
11855002558fSNeil Perrin 		kmem_free(itx, offsetof(itx_t, itx_lr) +
11865002558fSNeil Perrin 		    itx->itx_lr.lrc_reclen);
1187a584ef65Sjohansen 	}
1188a584ef65Sjohansen 	list_destroy(&clean_list);
1189fa9e4066Sahrens }
1190fa9e4066Sahrens 
11915002558fSNeil Perrin void
11925002558fSNeil Perrin zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
11935002558fSNeil Perrin {
11945002558fSNeil Perrin 	uint64_t txg;
11955002558fSNeil Perrin 	itxg_t *itxg;
11965002558fSNeil Perrin 	itxs_t *itxs, *clean = NULL;
11975002558fSNeil Perrin 
11985002558fSNeil Perrin 	/*
119991de656bSNeil Perrin 	 * Object ids can be re-instantiated in the next txg so
12005002558fSNeil Perrin 	 * remove any async transactions to avoid future leaks.
12015002558fSNeil Perrin 	 * This can happen if an fsync occurs on the re-instantiated
12025002558fSNeil Perrin 	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
12035002558fSNeil Perrin 	 * the new file data and flushes a write record for the old object.
12045002558fSNeil Perrin 	 */
12055002558fSNeil Perrin 	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
120651bd2f97SNeil Perrin 		zil_remove_async(zilog, itx->itx_oid);
12075002558fSNeil Perrin 
120891de656bSNeil Perrin 	/*
120991de656bSNeil Perrin 	 * Ensure the data of a renamed file is committed before the rename.
121091de656bSNeil Perrin 	 */
121191de656bSNeil Perrin 	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
121291de656bSNeil Perrin 		zil_async_to_sync(zilog, itx->itx_oid);
121391de656bSNeil Perrin 
12145002558fSNeil Perrin 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
12155002558fSNeil Perrin 		txg = ZILTEST_TXG;
12165002558fSNeil Perrin 	else
12175002558fSNeil Perrin 		txg = dmu_tx_get_txg(tx);
12185002558fSNeil Perrin 
12195002558fSNeil Perrin 	itxg = &zilog->zl_itxg[txg & TXG_MASK];
12205002558fSNeil Perrin 	mutex_enter(&itxg->itxg_lock);
12215002558fSNeil Perrin 	itxs = itxg->itxg_itxs;
12225002558fSNeil Perrin 	if (itxg->itxg_txg != txg) {
12235002558fSNeil Perrin 		if (itxs != NULL) {
12245002558fSNeil Perrin 			/*
12255002558fSNeil Perrin 			 * The zil_clean callback hasn't got around to cleaning
12265002558fSNeil Perrin 			 * this itxg. Save the itxs for release below.
12275002558fSNeil Perrin 			 * This should be rare.
12285002558fSNeil Perrin 			 */
12295002558fSNeil Perrin 			atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
12305002558fSNeil Perrin 			itxg->itxg_sod = 0;
12315002558fSNeil Perrin 			clean = itxg->itxg_itxs;
12325002558fSNeil Perrin 		}
12335002558fSNeil Perrin 		ASSERT(itxg->itxg_sod == 0);
12345002558fSNeil Perrin 		itxg->itxg_txg = txg;
12355002558fSNeil Perrin 		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);
12365002558fSNeil Perrin 
12375002558fSNeil Perrin 		list_create(&itxs->i_sync_list, sizeof (itx_t),
12385002558fSNeil Perrin 		    offsetof(itx_t, itx_node));
12395002558fSNeil Perrin 		avl_create(&itxs->i_async_tree, zil_aitx_compare,
12405002558fSNeil Perrin 		    sizeof (itx_async_node_t),
12415002558fSNeil Perrin 		    offsetof(itx_async_node_t, ia_node));
12425002558fSNeil Perrin 	}
12435002558fSNeil Perrin 	if (itx->itx_sync) {
12445002558fSNeil Perrin 		list_insert_tail(&itxs->i_sync_list, itx);
12455002558fSNeil Perrin 		atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
12465002558fSNeil Perrin 		itxg->itxg_sod += itx->itx_sod;
12475002558fSNeil Perrin 	} else {
12485002558fSNeil Perrin 		avl_tree_t *t = &itxs->i_async_tree;
12495002558fSNeil Perrin 		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
12505002558fSNeil Perrin 		itx_async_node_t *ian;
12515002558fSNeil Perrin 		avl_index_t where;
12525002558fSNeil Perrin 
12535002558fSNeil Perrin 		ian = avl_find(t, &foid, &where);
12545002558fSNeil Perrin 		if (ian == NULL) {
12555002558fSNeil Perrin 			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
12565002558fSNeil Perrin 			list_create(&ian->ia_list, sizeof (itx_t),
12575002558fSNeil Perrin 			    offsetof(itx_t, itx_node));
12585002558fSNeil Perrin 			ian->ia_foid = foid;
12595002558fSNeil Perrin 			avl_insert(t, ian, where);
12605002558fSNeil Perrin 		}
12615002558fSNeil Perrin 		list_insert_tail(&ian->ia_list, itx);
12625002558fSNeil Perrin 	}
12635002558fSNeil Perrin 
12645002558fSNeil Perrin 	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
12655002558fSNeil Perrin 	mutex_exit(&itxg->itxg_lock);
12665002558fSNeil Perrin 
12675002558fSNeil Perrin 	/* Release the old itxs now that we've dropped the lock */
12685002558fSNeil Perrin 	if (clean != NULL)
12695002558fSNeil Perrin 		zil_itxg_clean(clean);
12705002558fSNeil Perrin }
12715002558fSNeil Perrin 
1272b19a79ecSperrin /*
127367bd71c6Sperrin  * If there are any in-memory intent log transactions which have now been
127467bd71c6Sperrin  * synced, then start up a taskq to free them.
1275b19a79ecSperrin  */
1276fa9e4066Sahrens void
12775002558fSNeil Perrin zil_clean(zilog_t *zilog, uint64_t synced_txg)
1278fa9e4066Sahrens {
12795002558fSNeil Perrin 	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
12805002558fSNeil Perrin 	itxs_t *clean_me;
128167bd71c6Sperrin 
12825002558fSNeil Perrin 	mutex_enter(&itxg->itxg_lock);
12835002558fSNeil Perrin 	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
12845002558fSNeil Perrin 		mutex_exit(&itxg->itxg_lock);
12855002558fSNeil Perrin 		return;
12865002558fSNeil Perrin 	}
12875002558fSNeil Perrin 	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
12885002558fSNeil Perrin 	ASSERT(itxg->itxg_txg != 0);
12895002558fSNeil Perrin 	ASSERT(zilog->zl_clean_taskq != NULL);
12905002558fSNeil Perrin 	atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
12915002558fSNeil Perrin 	itxg->itxg_sod = 0;
12925002558fSNeil Perrin 	clean_me = itxg->itxg_itxs;
12935002558fSNeil Perrin 	itxg->itxg_itxs = NULL;
12945002558fSNeil Perrin 	itxg->itxg_txg = 0;
12955002558fSNeil Perrin 	mutex_exit(&itxg->itxg_lock);
12965002558fSNeil Perrin 	/*
12975002558fSNeil Perrin 	 * Preferably start a task queue to free up the old itxs, but
12985002558fSNeil Perrin 	 * if taskq_dispatch can't allocate resources to do that then
12995002558fSNeil Perrin 	 * free them in-line. This should be rare. Note that using
13005002558fSNeil Perrin 	 * TQ_SLEEP here created a bad performance problem.
13015002558fSNeil Perrin 	 */
13025002558fSNeil Perrin 	if (taskq_dispatch(zilog->zl_clean_taskq,
13035002558fSNeil Perrin 	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL)
13045002558fSNeil Perrin 		zil_itxg_clean(clean_me);
13055002558fSNeil Perrin }
13065002558fSNeil Perrin 
13075002558fSNeil Perrin /*
13085002558fSNeil Perrin  * Get the list of itxs to commit into zl_itx_commit_list.
13095002558fSNeil Perrin  */
131091de656bSNeil Perrin static void
13115002558fSNeil Perrin zil_get_commit_list(zilog_t *zilog)
13125002558fSNeil Perrin {
13135002558fSNeil Perrin 	uint64_t otxg, txg;
13145002558fSNeil Perrin 	list_t *commit_list = &zilog->zl_itx_commit_list;
13155002558fSNeil Perrin 	uint64_t push_sod = 0;
13165002558fSNeil Perrin 
13175002558fSNeil Perrin 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
13185002558fSNeil Perrin 		otxg = ZILTEST_TXG;
13195002558fSNeil Perrin 	else
13205002558fSNeil Perrin 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
13215002558fSNeil Perrin 
13225002558fSNeil Perrin 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
13235002558fSNeil Perrin 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
13245002558fSNeil Perrin 
13255002558fSNeil Perrin 		mutex_enter(&itxg->itxg_lock);
13265002558fSNeil Perrin 		if (itxg->itxg_txg != txg) {
13275002558fSNeil Perrin 			mutex_exit(&itxg->itxg_lock);
13285002558fSNeil Perrin 			continue;
13295002558fSNeil Perrin 		}
13305002558fSNeil Perrin 
13315002558fSNeil Perrin 		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
13325002558fSNeil Perrin 		push_sod += itxg->itxg_sod;
13335002558fSNeil Perrin 		itxg->itxg_sod = 0;
13345002558fSNeil Perrin 
13355002558fSNeil Perrin 		mutex_exit(&itxg->itxg_lock);
13365002558fSNeil Perrin 	}
13375002558fSNeil Perrin 	atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
13385002558fSNeil Perrin }
13395002558fSNeil Perrin 
13405002558fSNeil Perrin /*
13415002558fSNeil Perrin  * Move the async itxs for a specified object to commit into sync lists.
13425002558fSNeil Perrin  */
134391de656bSNeil Perrin static void
13445002558fSNeil Perrin zil_async_to_sync(zilog_t *zilog, uint64_t foid)
13455002558fSNeil Perrin {
13465002558fSNeil Perrin 	uint64_t otxg, txg;
13475002558fSNeil Perrin 	itx_async_node_t *ian;
13485002558fSNeil Perrin 	avl_tree_t *t;
13495002558fSNeil Perrin 	avl_index_t where;
13505002558fSNeil Perrin 
13515002558fSNeil Perrin 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
13525002558fSNeil Perrin 		otxg = ZILTEST_TXG;
13535002558fSNeil Perrin 	else
13545002558fSNeil Perrin 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
13555002558fSNeil Perrin 
13565002558fSNeil Perrin 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
13575002558fSNeil Perrin 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
13585002558fSNeil Perrin 
13595002558fSNeil Perrin 		mutex_enter(&itxg->itxg_lock);
13605002558fSNeil Perrin 		if (itxg->itxg_txg != txg) {
13615002558fSNeil Perrin 			mutex_exit(&itxg->itxg_lock);
13625002558fSNeil Perrin 			continue;
13635002558fSNeil Perrin 		}
13645002558fSNeil Perrin 
13655002558fSNeil Perrin 		/*
13665002558fSNeil Perrin 		 * If a foid is specified then find that node and append its
13675002558fSNeil Perrin 		 * list. Otherwise walk the tree appending all the lists
13685002558fSNeil Perrin 		 * to the sync list. We add to the end rather than the
13695002558fSNeil Perrin 		 * beginning so that the object's create is committed first.
13705002558fSNeil Perrin 		 */
13715002558fSNeil Perrin 		t = &itxg->itxg_itxs->i_async_tree;
13725002558fSNeil Perrin 		if (foid != 0) {
13735002558fSNeil Perrin 			ian = avl_find(t, &foid, &where);
13745002558fSNeil Perrin 			if (ian != NULL) {
13755002558fSNeil Perrin 				list_move_tail(&itxg->itxg_itxs->i_sync_list,
13765002558fSNeil Perrin 				    &ian->ia_list);
13775002558fSNeil Perrin 			}
13785002558fSNeil Perrin 		} else {
13795002558fSNeil Perrin 			void *cookie = NULL;
13805002558fSNeil Perrin 
13815002558fSNeil Perrin 			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
13825002558fSNeil Perrin 				list_move_tail(&itxg->itxg_itxs->i_sync_list,
13835002558fSNeil Perrin 				    &ian->ia_list);
13845002558fSNeil Perrin 				list_destroy(&ian->ia_list);
13855002558fSNeil Perrin 				kmem_free(ian, sizeof (itx_async_node_t));
13865002558fSNeil Perrin 			}
13875002558fSNeil Perrin 		}
13885002558fSNeil Perrin 		mutex_exit(&itxg->itxg_lock);
138967bd71c6Sperrin 	}
1390fa9e4066Sahrens }
1391fa9e4066Sahrens 
1392e14bb325SJeff Bonwick static void
13935002558fSNeil Perrin zil_commit_writer(zilog_t *zilog)
1394fa9e4066Sahrens {
1395fa9e4066Sahrens 	uint64_t txg;
13965002558fSNeil Perrin 	itx_t *itx;
1397fa9e4066Sahrens 	lwb_t *lwb;
13985002558fSNeil Perrin 	spa_t *spa = zilog->zl_spa;
1399b24ab676SJeff Bonwick 	int error = 0;
1400fa9e4066Sahrens 
1401e14bb325SJeff Bonwick 	ASSERT(zilog->zl_root_zio == NULL);
14025002558fSNeil Perrin 
14035002558fSNeil Perrin 	mutex_exit(&zilog->zl_lock);
14045002558fSNeil Perrin 
14055002558fSNeil Perrin 	zil_get_commit_list(zilog);
14065002558fSNeil Perrin 
14075002558fSNeil Perrin 	/*
14085002558fSNeil Perrin 	 * Return if there's nothing to commit before we dirty the fs by
14095002558fSNeil Perrin 	 * calling zil_create().
14105002558fSNeil Perrin 	 */
14115002558fSNeil Perrin 	if (list_head(&zilog->zl_itx_commit_list) == NULL) {
14125002558fSNeil Perrin 		mutex_enter(&zilog->zl_lock);
14135002558fSNeil Perrin 		return;
14145002558fSNeil Perrin 	}
1415fa9e4066Sahrens 
1416fa9e4066Sahrens 	if (zilog->zl_suspend) {
1417fa9e4066Sahrens 		lwb = NULL;
1418fa9e4066Sahrens 	} else {
1419fa9e4066Sahrens 		lwb = list_tail(&zilog->zl_lwb_list);
14205002558fSNeil Perrin 		if (lwb == NULL)
14216e1f5caaSNeil Perrin 			lwb = zil_create(zilog);
1422fa9e4066Sahrens 	}
1423fa9e4066Sahrens 
1424b19a79ecSperrin 	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
14255002558fSNeil Perrin 	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
1426fa9e4066Sahrens 		txg = itx->itx_lr.lrc_txg;
1427fa9e4066Sahrens 		ASSERT(txg);
1428fa9e4066Sahrens 
14295002558fSNeil Perrin 		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
1430fa9e4066Sahrens 			lwb = zil_lwb_commit(zilog, itx, lwb);
14315002558fSNeil Perrin 		list_remove(&zilog->zl_itx_commit_list, itx);
14325002558fSNeil Perrin 		kmem_free(itx, offsetof(itx_t, itx_lr) +
14335002558fSNeil Perrin 		    itx->itx_lr.lrc_reclen);
1434fa9e4066Sahrens 	}
1435b19a79ecSperrin 	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1436fa9e4066Sahrens 
1437fa9e4066Sahrens 	/* write the last block out */
143867bd71c6Sperrin 	if (lwb != NULL && lwb->lwb_zio != NULL)
1439fa9e4066Sahrens 		lwb = zil_lwb_write_start(zilog, lwb);
1440fa9e4066Sahrens 
144122ac5be4Sperrin 	zilog->zl_cur_used = 0;
1442fa9e4066Sahrens 
1443fa9e4066Sahrens 	/*
1444b19a79ecSperrin 	 * Wait if necessary for the log blocks to be on stable storage.
1445fa9e4066Sahrens 	 */
1446b19a79ecSperrin 	if (zilog->zl_root_zio) {
1447b24ab676SJeff Bonwick 		error = zio_wait(zilog->zl_root_zio);
1448e14bb325SJeff Bonwick 		zilog->zl_root_zio = NULL;
144917f17c2dSbonwick 		zil_flush_vdevs(zilog);
1450fa9e4066Sahrens 	}
145122ac5be4Sperrin 
1452b24ab676SJeff Bonwick 	if (error || lwb == NULL)
1453fa9e4066Sahrens 		txg_wait_synced(zilog->zl_dmu_pool, 0);
145467bd71c6Sperrin 
145567bd71c6Sperrin 	mutex_enter(&zilog->zl_lock);
1456b24ab676SJeff Bonwick 
1457b24ab676SJeff Bonwick 	/*
1458b24ab676SJeff Bonwick 	 * Remember the highest committed log sequence number for ztest.
1459b24ab676SJeff Bonwick 	 * We only update this value when all the log writes succeeded,
1460b24ab676SJeff Bonwick 	 * because ztest wants to ASSERT that it got the whole log chain.
1461b24ab676SJeff Bonwick 	 */
1462b24ab676SJeff Bonwick 	if (error == 0 && lwb != NULL)
1463b24ab676SJeff Bonwick 		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1464b19a79ecSperrin }
1465b19a79ecSperrin 
1466b19a79ecSperrin /*
14675002558fSNeil Perrin  * Commit zfs transactions to stable storage.
1468b19a79ecSperrin  * If foid is 0 push out all transactions, otherwise push only those
14695002558fSNeil Perrin  * for that object or that might reference that object.
14705002558fSNeil Perrin  *
14715002558fSNeil Perrin  * itxs are committed in batches. In a heavily stressed zil there will be
14725002558fSNeil Perrin  * a commit writer thread that is writing out a bunch of itxs to the log
14735002558fSNeil Perrin  * for a set of committing threads (cthreads) in the same batch as the writer.
14745002558fSNeil Perrin  * Those cthreads are all waiting on the same cv for that batch.
14755002558fSNeil Perrin  *
14765002558fSNeil Perrin  * There will also be a different and growing batch of threads that are
14775002558fSNeil Perrin  * waiting to commit (qthreads). When the committing batch completes
14785002558fSNeil Perrin  * a transition occurs such that the cthreads exit and the qthreads become
14795002558fSNeil Perrin  * cthreads. One of the new cthreads becomes the writer thread for the
14805002558fSNeil Perrin  * batch. Any new threads arriving become new qthreads.
14815002558fSNeil Perrin  *
14825002558fSNeil Perrin  * Only two condition variables are needed, and no hand-off between
14835002558fSNeil Perrin  * them is required: they simply flip-flop between serving the
14845002558fSNeil Perrin  * qthreads and the cthreads.
14855002558fSNeil Perrin  *
14865002558fSNeil Perrin  * Using this scheme we can efficiently wake up only those threads
14875002558fSNeil Perrin  * whose transactions have been committed.
1488b19a79ecSperrin  */
1489b19a79ecSperrin void
14905002558fSNeil Perrin zil_commit(zilog_t *zilog, uint64_t foid)
1491b19a79ecSperrin {
14925002558fSNeil Perrin 	uint64_t mybatch;
1493b19a79ecSperrin 
14945002558fSNeil Perrin 	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
14955002558fSNeil Perrin 		return;
1496b19a79ecSperrin 
14975002558fSNeil Perrin 	/* move the async itxs for the foid to the sync queues */
14985002558fSNeil Perrin 	zil_async_to_sync(zilog, foid);
1499b19a79ecSperrin 
15005002558fSNeil Perrin 	mutex_enter(&zilog->zl_lock);
15015002558fSNeil Perrin 	mybatch = zilog->zl_next_batch;
150267bd71c6Sperrin 	while (zilog->zl_writer) {
15035002558fSNeil Perrin 		cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
15045002558fSNeil Perrin 		if (mybatch <= zilog->zl_com_batch) {
150567bd71c6Sperrin 			mutex_exit(&zilog->zl_lock);
150667bd71c6Sperrin 			return;
150767bd71c6Sperrin 		}
150867bd71c6Sperrin 	}
1509b24ab676SJeff Bonwick 
15105002558fSNeil Perrin 	zilog->zl_next_batch++;
15115002558fSNeil Perrin 	zilog->zl_writer = B_TRUE;
15125002558fSNeil Perrin 	zil_commit_writer(zilog);
15135002558fSNeil Perrin 	zilog->zl_com_batch = mybatch;
15145002558fSNeil Perrin 	zilog->zl_writer = B_FALSE;
15155002558fSNeil Perrin 	mutex_exit(&zilog->zl_lock);
1516b24ab676SJeff Bonwick 
15175002558fSNeil Perrin 	/* wake up one thread to become the next writer */
15185002558fSNeil Perrin 	cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
1519b24ab676SJeff Bonwick 
15205002558fSNeil Perrin 	/* wake up all threads waiting for this batch to be committed */
15215002558fSNeil Perrin 	cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
1522b24ab676SJeff Bonwick }
1523b24ab676SJeff Bonwick 
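/*
 * A minimal usage sketch, assuming an fsync(2)-style caller: commit only
 * the itxs for the object being synced; a foid of 0 would instead push out
 * every pending itx.  The wrapper is hypothetical; zil_commit() is the
 * interface defined above.
 */
#ifdef ZIL_USAGE_EXAMPLE
static void
zil_example_fsync(zilog_t *zilog, uint64_t foid)
{
	/* returns once the object's itxs are on the stable log or pool */
	zil_commit(zilog, foid);
}
#endif
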
1524fa9e4066Sahrens /*
1525fa9e4066Sahrens  * Called in syncing context to free committed log blocks and update log header.
1526fa9e4066Sahrens  */
1527fa9e4066Sahrens void
1528fa9e4066Sahrens zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1529fa9e4066Sahrens {
1530d80c45e0Sbonwick 	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1531fa9e4066Sahrens 	uint64_t txg = dmu_tx_get_txg(tx);
1532fa9e4066Sahrens 	spa_t *spa = zilog->zl_spa;
1533b24ab676SJeff Bonwick 	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1534fa9e4066Sahrens 	lwb_t *lwb;
1535fa9e4066Sahrens 
153614843421SMatthew Ahrens 	/*
153714843421SMatthew Ahrens 	 * We don't zero out zl_destroy_txg, so make sure we don't try
153814843421SMatthew Ahrens 	 * to destroy it twice.
153914843421SMatthew Ahrens 	 */
154014843421SMatthew Ahrens 	if (spa_sync_pass(spa) != 1)
154114843421SMatthew Ahrens 		return;
154214843421SMatthew Ahrens 
1543d80c45e0Sbonwick 	mutex_enter(&zilog->zl_lock);
1544d80c45e0Sbonwick 
1545fa9e4066Sahrens 	ASSERT(zilog->zl_stop_sync == 0);
1546fa9e4066Sahrens 
1547b24ab676SJeff Bonwick 	if (*replayed_seq != 0) {
1548b24ab676SJeff Bonwick 		ASSERT(zh->zh_replay_seq < *replayed_seq);
1549b24ab676SJeff Bonwick 		zh->zh_replay_seq = *replayed_seq;
1550b24ab676SJeff Bonwick 		*replayed_seq = 0;
1551b24ab676SJeff Bonwick 	}
1552fa9e4066Sahrens 
1553fa9e4066Sahrens 	if (zilog->zl_destroy_txg == txg) {
1554d80c45e0Sbonwick 		blkptr_t blk = zh->zh_log;
1555d80c45e0Sbonwick 
1556d80c45e0Sbonwick 		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1557d80c45e0Sbonwick 
1558d80c45e0Sbonwick 		bzero(zh, sizeof (zil_header_t));
15591209a471SNeil Perrin 		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1560d80c45e0Sbonwick 
1561d80c45e0Sbonwick 		if (zilog->zl_keep_first) {
1562d80c45e0Sbonwick 			/*
1563d80c45e0Sbonwick 			 * If this block was part of log chain that couldn't
1564d80c45e0Sbonwick 			 * be claimed because a device was missing during
1565d80c45e0Sbonwick 			 * zil_claim(), but that device later returns,
1566d80c45e0Sbonwick 			 * then this block could erroneously appear valid.
1567d80c45e0Sbonwick 			 * To guard against this, assign a new GUID to the new
1568d80c45e0Sbonwick 			 * log chain so it doesn't matter what blk points to.
1569d80c45e0Sbonwick 			 */
1570d80c45e0Sbonwick 			zil_init_log_chain(zilog, &blk);
1571d80c45e0Sbonwick 			zh->zh_log = blk;
1572d80c45e0Sbonwick 		}
1573fa9e4066Sahrens 	}
1574fa9e4066Sahrens 
1575e6ca193dSGeorge Wilson 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1576b19a79ecSperrin 		zh->zh_log = lwb->lwb_blk;
1577fa9e4066Sahrens 		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1578fa9e4066Sahrens 			break;
1579fa9e4066Sahrens 		list_remove(&zilog->zl_lwb_list, lwb);
1580b24ab676SJeff Bonwick 		zio_free_zil(spa, txg, &lwb->lwb_blk);
1581fa9e4066Sahrens 		kmem_cache_free(zil_lwb_cache, lwb);
1582d63d470bSgw 
1583d63d470bSgw 		/*
1584d63d470bSgw 		 * If we don't have anything left in the lwb list then
1585d63d470bSgw 		 * we've had an allocation failure and we need to zero
1586d63d470bSgw 		 * out the zil_header blkptr so that we don't end
1587d63d470bSgw 		 * up freeing the same block twice.
1588d63d470bSgw 		 */
1589d63d470bSgw 		if (list_head(&zilog->zl_lwb_list) == NULL)
1590d63d470bSgw 			BP_ZERO(&zh->zh_log);
1591fa9e4066Sahrens 	}
1592fa9e4066Sahrens 	mutex_exit(&zilog->zl_lock);
1593fa9e4066Sahrens }
1594fa9e4066Sahrens 
1595fa9e4066Sahrens void
1596fa9e4066Sahrens zil_init(void)
1597fa9e4066Sahrens {
1598fa9e4066Sahrens 	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
15995ad82045Snd 	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1600fa9e4066Sahrens }
1601fa9e4066Sahrens 
1602fa9e4066Sahrens void
1603fa9e4066Sahrens zil_fini(void)
1604fa9e4066Sahrens {
1605fa9e4066Sahrens 	kmem_cache_destroy(zil_lwb_cache);
1606fa9e4066Sahrens }
1607fa9e4066Sahrens 
160855da60b9SMark J Musante void
160955da60b9SMark J Musante zil_set_sync(zilog_t *zilog, uint64_t sync)
161055da60b9SMark J Musante {
161155da60b9SMark J Musante 	zilog->zl_sync = sync;
161255da60b9SMark J Musante }
161355da60b9SMark J Musante 
1614e09fa4daSNeil Perrin void
1615e09fa4daSNeil Perrin zil_set_logbias(zilog_t *zilog, uint64_t logbias)
1616e09fa4daSNeil Perrin {
1617e09fa4daSNeil Perrin 	zilog->zl_logbias = logbias;
1618e09fa4daSNeil Perrin }
1619e09fa4daSNeil Perrin 
1620fa9e4066Sahrens zilog_t *
1621fa9e4066Sahrens zil_alloc(objset_t *os, zil_header_t *zh_phys)
1622fa9e4066Sahrens {
1623fa9e4066Sahrens 	zilog_t *zilog;
1624fa9e4066Sahrens 
1625fa9e4066Sahrens 	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1626fa9e4066Sahrens 
1627fa9e4066Sahrens 	zilog->zl_header = zh_phys;
1628fa9e4066Sahrens 	zilog->zl_os = os;
1629fa9e4066Sahrens 	zilog->zl_spa = dmu_objset_spa(os);
1630fa9e4066Sahrens 	zilog->zl_dmu_pool = dmu_objset_pool(os);
1631d80c45e0Sbonwick 	zilog->zl_destroy_txg = TXG_INITIAL - 1;
1632e09fa4daSNeil Perrin 	zilog->zl_logbias = dmu_objset_logbias(os);
163355da60b9SMark J Musante 	zilog->zl_sync = dmu_objset_syncprop(os);
16345002558fSNeil Perrin 	zilog->zl_next_batch = 1;
1635fa9e4066Sahrens 
16365ad82045Snd 	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
16375ad82045Snd 
16385002558fSNeil Perrin 	for (int i = 0; i < TXG_SIZE; i++) {
16395002558fSNeil Perrin 		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
16405002558fSNeil Perrin 		    MUTEX_DEFAULT, NULL);
16415002558fSNeil Perrin 	}
1642fa9e4066Sahrens 
1643fa9e4066Sahrens 	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1644fa9e4066Sahrens 	    offsetof(lwb_t, lwb_node));
1645fa9e4066Sahrens 
16465002558fSNeil Perrin 	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
16475002558fSNeil Perrin 	    offsetof(itx_t, itx_node));
16485002558fSNeil Perrin 
164917f17c2dSbonwick 	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
165017f17c2dSbonwick 
165117f17c2dSbonwick 	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
165217f17c2dSbonwick 	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1653fa9e4066Sahrens 
1654b7b97454Sperrin 	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1655b7b97454Sperrin 	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
16565002558fSNeil Perrin 	cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
16575002558fSNeil Perrin 	cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
1658b7b97454Sperrin 
1659fa9e4066Sahrens 	return (zilog);
1660fa9e4066Sahrens }
1661fa9e4066Sahrens 
1662fa9e4066Sahrens void
1663fa9e4066Sahrens zil_free(zilog_t *zilog)
1664fa9e4066Sahrens {
1665fa9e4066Sahrens 	zilog->zl_stop_sync = 1;
1666fa9e4066Sahrens 
1667*c9ba2a43SEric Schrock 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1668fa9e4066Sahrens 	list_destroy(&zilog->zl_lwb_list);
1669fa9e4066Sahrens 
167017f17c2dSbonwick 	avl_destroy(&zilog->zl_vdev_tree);
167117f17c2dSbonwick 	mutex_destroy(&zilog->zl_vdev_lock);
1672fa9e4066Sahrens 
16735002558fSNeil Perrin 	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
16745002558fSNeil Perrin 	list_destroy(&zilog->zl_itx_commit_list);
16755002558fSNeil Perrin 
16765002558fSNeil Perrin 	for (int i = 0; i < TXG_SIZE; i++) {
16775002558fSNeil Perrin 		/*
16785002558fSNeil Perrin 		 * It's possible for an itx to be generated that doesn't dirty
16795002558fSNeil Perrin 		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
16805002558fSNeil Perrin 		 * callback to remove the entry. We remove those here.
16815002558fSNeil Perrin 		 *
16825002558fSNeil Perrin 		 * Also free up the ziltest itxs.
16835002558fSNeil Perrin 		 */
16845002558fSNeil Perrin 		if (zilog->zl_itxg[i].itxg_itxs)
16855002558fSNeil Perrin 			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
16865002558fSNeil Perrin 		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
16875002558fSNeil Perrin 	}
16885002558fSNeil Perrin 
16895ad82045Snd 	mutex_destroy(&zilog->zl_lock);
1690fa9e4066Sahrens 
1691b7b97454Sperrin 	cv_destroy(&zilog->zl_cv_writer);
1692b7b97454Sperrin 	cv_destroy(&zilog->zl_cv_suspend);
16935002558fSNeil Perrin 	cv_destroy(&zilog->zl_cv_batch[0]);
16945002558fSNeil Perrin 	cv_destroy(&zilog->zl_cv_batch[1]);
1695b7b97454Sperrin 
1696fa9e4066Sahrens 	kmem_free(zilog, sizeof (zilog_t));
1697fa9e4066Sahrens }
1698fa9e4066Sahrens 
1699fa9e4066Sahrens /*
1700fa9e4066Sahrens  * Open an intent log.
1701fa9e4066Sahrens  */
1702fa9e4066Sahrens zilog_t *
1703fa9e4066Sahrens zil_open(objset_t *os, zil_get_data_t *get_data)
1704fa9e4066Sahrens {
1705fa9e4066Sahrens 	zilog_t *zilog = dmu_objset_zil(os);
1706fa9e4066Sahrens 
1707*c9ba2a43SEric Schrock 	ASSERT(zilog->zl_clean_taskq == NULL);
1708*c9ba2a43SEric Schrock 	ASSERT(zilog->zl_get_data == NULL);
1709*c9ba2a43SEric Schrock 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1710*c9ba2a43SEric Schrock 
1711fa9e4066Sahrens 	zilog->zl_get_data = get_data;
1712fa9e4066Sahrens 	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1713fa9e4066Sahrens 	    2, 2, TASKQ_PREPOPULATE);
1714fa9e4066Sahrens 
1715fa9e4066Sahrens 	return (zilog);
1716fa9e4066Sahrens }
1717fa9e4066Sahrens 
1718fa9e4066Sahrens /*
1719fa9e4066Sahrens  * Close an intent log.
1720fa9e4066Sahrens  */
1721fa9e4066Sahrens void
1722fa9e4066Sahrens zil_close(zilog_t *zilog)
1723fa9e4066Sahrens {
1724*c9ba2a43SEric Schrock 	lwb_t *lwb;
17255002558fSNeil Perrin 	uint64_t txg = 0;
17265002558fSNeil Perrin 
17275002558fSNeil Perrin 	zil_commit(zilog, 0); /* commit all itx */
17285002558fSNeil Perrin 
1729d80c45e0Sbonwick 	/*
17305002558fSNeil Perrin 	 * The lwb_max_txg for the stubby lwb will reflect the last activity
17315002558fSNeil Perrin 	 * for the zil.  After a txg_wait_synced() on the txg we know all the
17325002558fSNeil Perrin 	 * callbacks have occurred that may clean the zil.  Only then can we
17335002558fSNeil Perrin 	 * destroy the zl_clean_taskq.
1734d80c45e0Sbonwick 	 */
17355002558fSNeil Perrin 	mutex_enter(&zilog->zl_lock);
1736*c9ba2a43SEric Schrock 	lwb = list_tail(&zilog->zl_lwb_list);
1737*c9ba2a43SEric Schrock 	if (lwb != NULL)
1738*c9ba2a43SEric Schrock 		txg = lwb->lwb_max_txg;
17395002558fSNeil Perrin 	mutex_exit(&zilog->zl_lock);
17405002558fSNeil Perrin 	if (txg)
1741d80c45e0Sbonwick 		txg_wait_synced(zilog->zl_dmu_pool, txg);
1742d80c45e0Sbonwick 
1743fa9e4066Sahrens 	taskq_destroy(zilog->zl_clean_taskq);
1744fa9e4066Sahrens 	zilog->zl_clean_taskq = NULL;
1745fa9e4066Sahrens 	zilog->zl_get_data = NULL;
1746*c9ba2a43SEric Schrock 
1747*c9ba2a43SEric Schrock 	/*
1748*c9ba2a43SEric Schrock 	 * We should have only one LWB left on the list; remove it now.
1749*c9ba2a43SEric Schrock 	 */
1750*c9ba2a43SEric Schrock 	mutex_enter(&zilog->zl_lock);
1751*c9ba2a43SEric Schrock 	lwb = list_head(&zilog->zl_lwb_list);
1752*c9ba2a43SEric Schrock 	if (lwb != NULL) {
1753*c9ba2a43SEric Schrock 		ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
1754*c9ba2a43SEric Schrock 		list_remove(&zilog->zl_lwb_list, lwb);
1755*c9ba2a43SEric Schrock 		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1756*c9ba2a43SEric Schrock 		kmem_cache_free(zil_lwb_cache, lwb);
1757*c9ba2a43SEric Schrock 	}
1758*c9ba2a43SEric Schrock 	mutex_exit(&zilog->zl_lock);
1759fa9e4066Sahrens }
1760fa9e4066Sahrens 
1761fa9e4066Sahrens /*
1762fa9e4066Sahrens  * Suspend an intent log.  While in suspended mode, we still honor
1763fa9e4066Sahrens  * synchronous semantics, but we rely on txg_wait_synced() to do it.
1764fa9e4066Sahrens  * We suspend the log briefly when taking a snapshot so that the snapshot
1765fa9e4066Sahrens  * contains all the data it's supposed to, and has an empty intent log.
1766fa9e4066Sahrens  */
1767fa9e4066Sahrens int
1768fa9e4066Sahrens zil_suspend(zilog_t *zilog)
1769fa9e4066Sahrens {
1770d80c45e0Sbonwick 	const zil_header_t *zh = zilog->zl_header;
1771fa9e4066Sahrens 
1772fa9e4066Sahrens 	mutex_enter(&zilog->zl_lock);
17733589c4f0SNeil Perrin 	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
1774fa9e4066Sahrens 		mutex_exit(&zilog->zl_lock);
1775fa9e4066Sahrens 		return (EBUSY);
1776fa9e4066Sahrens 	}
1777d80c45e0Sbonwick 	if (zilog->zl_suspend++ != 0) {
1778d80c45e0Sbonwick 		/*
1779d80c45e0Sbonwick 		 * Someone else already began a suspend.
1780d80c45e0Sbonwick 		 * Just wait for them to finish.
1781d80c45e0Sbonwick 		 */
1782d80c45e0Sbonwick 		while (zilog->zl_suspending)
1783d80c45e0Sbonwick 			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1784d80c45e0Sbonwick 		mutex_exit(&zilog->zl_lock);
1785d80c45e0Sbonwick 		return (0);
1786d80c45e0Sbonwick 	}
1787d80c45e0Sbonwick 	zilog->zl_suspending = B_TRUE;
1788fa9e4066Sahrens 	mutex_exit(&zilog->zl_lock);
1789fa9e4066Sahrens 
17905002558fSNeil Perrin 	zil_commit(zilog, 0);
1791fa9e4066Sahrens 
1792d80c45e0Sbonwick 	zil_destroy(zilog, B_FALSE);
1793d80c45e0Sbonwick 
1794d80c45e0Sbonwick 	mutex_enter(&zilog->zl_lock);
1795d80c45e0Sbonwick 	zilog->zl_suspending = B_FALSE;
1796d80c45e0Sbonwick 	cv_broadcast(&zilog->zl_cv_suspend);
1797d80c45e0Sbonwick 	mutex_exit(&zilog->zl_lock);
1798fa9e4066Sahrens 
1799fa9e4066Sahrens 	return (0);
1800fa9e4066Sahrens }
1801fa9e4066Sahrens 
1802fa9e4066Sahrens void
1803fa9e4066Sahrens zil_resume(zilog_t *zilog)
1804fa9e4066Sahrens {
1805fa9e4066Sahrens 	mutex_enter(&zilog->zl_lock);
1806fa9e4066Sahrens 	ASSERT(zilog->zl_suspend != 0);
1807fa9e4066Sahrens 	zilog->zl_suspend--;
1808fa9e4066Sahrens 	mutex_exit(&zilog->zl_lock);
1809fa9e4066Sahrens }
1810fa9e4066Sahrens 
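/*
 * A minimal usage sketch, assuming a snapshot-style caller: bracket the
 * operation with zil_suspend()/zil_resume() so that it sees an empty
 * intent log.  The wrapper is hypothetical; zil_suspend() and zil_resume()
 * are the interfaces defined above.
 */
#ifdef ZIL_USAGE_EXAMPLE
static int
zil_example_quiesce(zilog_t *zilog)
{
	int error = zil_suspend(zilog);

	if (error == 0) {
		/* ... perform the work that requires an empty log ... */
		zil_resume(zilog);
	}
	return (error);
}
#endif
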
1811fa9e4066Sahrens typedef struct zil_replay_arg {
1812fa9e4066Sahrens 	zil_replay_func_t **zr_replay;
1813fa9e4066Sahrens 	void		*zr_arg;
1814fa9e4066Sahrens 	boolean_t	zr_byteswap;
1815b24ab676SJeff Bonwick 	char		*zr_lr;
1816fa9e4066Sahrens } zil_replay_arg_t;
1817fa9e4066Sahrens 
1818b24ab676SJeff Bonwick static int
1819b24ab676SJeff Bonwick zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
1820b24ab676SJeff Bonwick {
1821b24ab676SJeff Bonwick 	char name[MAXNAMELEN];
1822b24ab676SJeff Bonwick 
1823b24ab676SJeff Bonwick 	zilog->zl_replaying_seq--;	/* didn't actually replay this one */
1824b24ab676SJeff Bonwick 
1825b24ab676SJeff Bonwick 	dmu_objset_name(zilog->zl_os, name);
1826b24ab676SJeff Bonwick 
1827b24ab676SJeff Bonwick 	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1828b24ab676SJeff Bonwick 	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
1829b24ab676SJeff Bonwick 	    (u_longlong_t)lr->lrc_seq,
1830b24ab676SJeff Bonwick 	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
1831b24ab676SJeff Bonwick 	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
1832b24ab676SJeff Bonwick 
1833b24ab676SJeff Bonwick 	return (error);
1834b24ab676SJeff Bonwick }
1835b24ab676SJeff Bonwick 
1836b24ab676SJeff Bonwick static int
1837fa9e4066Sahrens zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1838fa9e4066Sahrens {
1839fa9e4066Sahrens 	zil_replay_arg_t *zr = zra;
1840d80c45e0Sbonwick 	const zil_header_t *zh = zilog->zl_header;
1841fa9e4066Sahrens 	uint64_t reclen = lr->lrc_reclen;
1842fa9e4066Sahrens 	uint64_t txtype = lr->lrc_txtype;
1843b24ab676SJeff Bonwick 	int error = 0;
1844fa9e4066Sahrens 
1845b24ab676SJeff Bonwick 	zilog->zl_replaying_seq = lr->lrc_seq;
1846fa9e4066Sahrens 
1847fa9e4066Sahrens 	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
1848b24ab676SJeff Bonwick 		return (0);
1849b24ab676SJeff Bonwick 
1850b24ab676SJeff Bonwick 	if (lr->lrc_txg < claim_txg)		/* already committed */
1851b24ab676SJeff Bonwick 		return (0);
1852fa9e4066Sahrens 
1853da6c28aaSamw 	/* Strip case-insensitive bit, still present in log record */
1854da6c28aaSamw 	txtype &= ~TX_CI;
1855da6c28aaSamw 
1856b24ab676SJeff Bonwick 	if (txtype == 0 || txtype >= TX_MAX_TYPE)
1857b24ab676SJeff Bonwick 		return (zil_replay_error(zilog, lr, EINVAL));
1858b24ab676SJeff Bonwick 
1859b24ab676SJeff Bonwick 	/*
1860b24ab676SJeff Bonwick 	 * If this record type can be logged out of order, the object
1861b24ab676SJeff Bonwick 	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
1862b24ab676SJeff Bonwick 	 */
1863b24ab676SJeff Bonwick 	if (TX_OOO(txtype)) {
1864b24ab676SJeff Bonwick 		error = dmu_object_info(zilog->zl_os,
1865b24ab676SJeff Bonwick 		    ((lr_ooo_t *)lr)->lr_foid, NULL);
1866b24ab676SJeff Bonwick 		if (error == ENOENT || error == EEXIST)
1867b24ab676SJeff Bonwick 			return (0);
18681209a471SNeil Perrin 	}
18691209a471SNeil Perrin 
1870fa9e4066Sahrens 	/*
1871fa9e4066Sahrens 	 * Make a copy of the data so we can revise and extend it.
1872fa9e4066Sahrens 	 */
1873b24ab676SJeff Bonwick 	bcopy(lr, zr->zr_lr, reclen);
1874b24ab676SJeff Bonwick 
1875b24ab676SJeff Bonwick 	/*
1876b24ab676SJeff Bonwick 	 * If this is a TX_WRITE with a blkptr, suck in the data.
1877b24ab676SJeff Bonwick 	 */
1878b24ab676SJeff Bonwick 	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
1879b24ab676SJeff Bonwick 		error = zil_read_log_data(zilog, (lr_write_t *)lr,
1880b24ab676SJeff Bonwick 		    zr->zr_lr + reclen);
1881b24ab676SJeff Bonwick 		if (error)
1882b24ab676SJeff Bonwick 			return (zil_replay_error(zilog, lr, error));
1883b24ab676SJeff Bonwick 	}
1884fa9e4066Sahrens 
1885fa9e4066Sahrens 	/*
1886fa9e4066Sahrens 	 * The log block containing this lr may have been byteswapped
1887fa9e4066Sahrens 	 * so that we can easily examine common fields like lrc_txtype.
1888b24ab676SJeff Bonwick 	 * However, the log is a mix of different record types, and only the
1889fa9e4066Sahrens 	 * replay vectors know how to byteswap their records.  Therefore, if
1890fa9e4066Sahrens 	 * the lr was byteswapped, undo it before invoking the replay vector.
1891fa9e4066Sahrens 	 */
1892fa9e4066Sahrens 	if (zr->zr_byteswap)
1893b24ab676SJeff Bonwick 		byteswap_uint64_array(zr->zr_lr, reclen);
1894fa9e4066Sahrens 
1895fa9e4066Sahrens 	/*
1896fa9e4066Sahrens 	 * We must now do two things atomically: replay this log record,
18971209a471SNeil Perrin 	 * and update the log header sequence number to reflect the fact that
18981209a471SNeil Perrin 	 * we did so. At the end of each replay function the sequence number
18991209a471SNeil Perrin 	 * is updated if we are in replay mode.
1900fa9e4066Sahrens 	 */
1901b24ab676SJeff Bonwick 	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
1902b24ab676SJeff Bonwick 	if (error) {
190367bd71c6Sperrin 		/*
190467bd71c6Sperrin 		 * The DMU's dnode layer doesn't see removes until the txg
190567bd71c6Sperrin 		 * commits, so a subsequent claim can spuriously fail with
19061209a471SNeil Perrin 		 * EEXIST. So if we receive any error we try syncing out
1907b24ab676SJeff Bonwick 		 * any removes and then retry the transaction.  Note that we
1908b24ab676SJeff Bonwick 		 * specify B_FALSE for byteswap now, so we don't do it twice.
190967bd71c6Sperrin 		 */
1910b24ab676SJeff Bonwick 		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
1911b24ab676SJeff Bonwick 		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
1912b24ab676SJeff Bonwick 		if (error)
1913b24ab676SJeff Bonwick 			return (zil_replay_error(zilog, lr, error));
1914fa9e4066Sahrens 	}
1915b24ab676SJeff Bonwick 	return (0);
191667bd71c6Sperrin }
1917fa9e4066Sahrens 
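/*
 * zil_parse() block callback used by zil_replay() below to count the log
 * blocks visited during replay.
 */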
191867bd71c6Sperrin /* ARGSUSED */
1919b24ab676SJeff Bonwick static int
192067bd71c6Sperrin zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
192167bd71c6Sperrin {
192267bd71c6Sperrin 	zilog->zl_replay_blks++;
1923b24ab676SJeff Bonwick 
1924b24ab676SJeff Bonwick 	return (0);
1925fa9e4066Sahrens }
1926fa9e4066Sahrens 
1927fa9e4066Sahrens /*
192813f5297eSperrin  * If this dataset has a non-empty intent log, replay it and destroy it.
1929fa9e4066Sahrens  */
1930fa9e4066Sahrens void
19311209a471SNeil Perrin zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
1932fa9e4066Sahrens {
1933fa9e4066Sahrens 	zilog_t *zilog = dmu_objset_zil(os);
1934d80c45e0Sbonwick 	const zil_header_t *zh = zilog->zl_header;
1935d80c45e0Sbonwick 	zil_replay_arg_t zr;
193613f5297eSperrin 
19373589c4f0SNeil Perrin 	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
1938d80c45e0Sbonwick 		zil_destroy(zilog, B_TRUE);
193913f5297eSperrin 		return;
194013f5297eSperrin 	}
1941fa9e4066Sahrens 
1942fa9e4066Sahrens 	zr.zr_replay = replay_func;
1943fa9e4066Sahrens 	zr.zr_arg = arg;
1944d80c45e0Sbonwick 	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
1945b24ab676SJeff Bonwick 	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
1946fa9e4066Sahrens 
1947fa9e4066Sahrens 	/*
1948fa9e4066Sahrens 	 * Wait for in-progress removes to sync before starting replay.
1949fa9e4066Sahrens 	 */
1950fa9e4066Sahrens 	txg_wait_synced(zilog->zl_dmu_pool, 0);
1951fa9e4066Sahrens 
19521209a471SNeil Perrin 	zilog->zl_replay = B_TRUE;
1953d3d50737SRafael Vanoni 	zilog->zl_replay_time = ddi_get_lbolt();
195467bd71c6Sperrin 	ASSERT(zilog->zl_replay_blks == 0);
195567bd71c6Sperrin 	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
1956d80c45e0Sbonwick 	    zh->zh_claim_txg);
1957b24ab676SJeff Bonwick 	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
1958fa9e4066Sahrens 
1959d80c45e0Sbonwick 	zil_destroy(zilog, B_FALSE);
1960a4611edeSahrens 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
19611209a471SNeil Perrin 	zilog->zl_replay = B_FALSE;
1962fa9e4066Sahrens }
1963436b2950Sperrin 
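/*
 * Decide whether intent logging should be skipped for this transaction:
 * returns B_TRUE when sync is disabled for the dataset, or when the log is
 * being replayed (in which case the sequence number being replayed is
 * recorded so zil_sync() can update the log header).
 */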
1964b24ab676SJeff Bonwick boolean_t
1965b24ab676SJeff Bonwick zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
1966436b2950Sperrin {
196755da60b9SMark J Musante 	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1968b24ab676SJeff Bonwick 		return (B_TRUE);
1969436b2950Sperrin 
1970b24ab676SJeff Bonwick 	if (zilog->zl_replay) {
1971b24ab676SJeff Bonwick 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1972b24ab676SJeff Bonwick 		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
1973b24ab676SJeff Bonwick 		    zilog->zl_replaying_seq;
1974b24ab676SJeff Bonwick 		return (B_TRUE);
1975b19a79ecSperrin 	}
1976b19a79ecSperrin 
1977b24ab676SJeff Bonwick 	return (B_FALSE);
1978436b2950Sperrin }
1979e6ca193dSGeorge Wilson 
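/*
 * Check that the intent log for the dataset 'osname' can be suspended
 * (committed and emptied).  Returns EEXIST if the log cannot currently be
 * suspended, e.g. because it still needs to be replayed.
 */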
1980e6ca193dSGeorge Wilson /* ARGSUSED */
1981e6ca193dSGeorge Wilson int
1982fd136879SMatthew Ahrens zil_vdev_offline(const char *osname, void *arg)
1983e6ca193dSGeorge Wilson {
1984e6ca193dSGeorge Wilson 	objset_t *os;
1985e6ca193dSGeorge Wilson 	zilog_t *zilog;
1986e6ca193dSGeorge Wilson 	int error;
1987e6ca193dSGeorge Wilson 
1988503ad85cSMatthew Ahrens 	error = dmu_objset_hold(osname, FTAG, &os);
1989e6ca193dSGeorge Wilson 	if (error)
1990e6ca193dSGeorge Wilson 		return (error);
1991e6ca193dSGeorge Wilson 
1992e6ca193dSGeorge Wilson 	zilog = dmu_objset_zil(os);
1993e6ca193dSGeorge Wilson 	if (zil_suspend(zilog) != 0)
1994e6ca193dSGeorge Wilson 		error = EEXIST;
1995e6ca193dSGeorge Wilson 	else
1996e6ca193dSGeorge Wilson 		zil_resume(zilog);
1997503ad85cSMatthew Ahrens 	dmu_objset_rele(os, FTAG);
1998e6ca193dSGeorge Wilson 	return (error);
1999e6ca193dSGeorge Wilson }
2000