/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */

#ifndef	_SYS_ZIL_IMPL_H
#define	_SYS_ZIL_IMPL_H

#include <sys/zil.h>
#include <sys/dmu_objset.h>

#ifdef	__cplusplus
extern "C" {
#endif
/*
 * Possible states for a given lwb structure.
 *
 * An lwb will start out in the "closed" state, and then transition to
 * the "opened" state via a call to zil_lwb_write_open(). When
 * transitioning from "closed" to "opened" the zilog's "zl_issuer_lock"
 * must be held.
 *
 * After the lwb is "opened", it can transition into the "issued" state
 * via zil_lwb_write_issue(). Again, the zilog's "zl_issuer_lock" must
 * be held when making this transition.
 *
 * After the lwb's write zio completes, it transitions into the "write
 * done" state via zil_lwb_write_done(); and then into the "flush done"
 * state via zil_lwb_flush_vdevs_done(). When transitioning from
 * "issued" to "write done", and then from "write done" to "flush done",
 * the zilog's "zl_lock" must be held, *not* the "zl_issuer_lock".
 *
 * The zilog's "zl_issuer_lock" can become heavily contended in certain
 * workloads, so we specifically avoid acquiring that lock when
 * transitioning an lwb from "issued" to "write done", or from "write
 * done" to "flush done". This allows us to avoid having to acquire the
 * "zl_issuer_lock" for each lwb ZIO completion, which would have added
 * more lock contention on an already heavily contended lock.
 *
 * Additionally, correctness when reading an lwb's state is often
 * achieved by exploiting the fact that these state transitions occur in
 * this specific order; i.e. "closed" to "opened" to "issued" to "write
 * done" to "flush done".
 *
 * Thus, if an lwb is in the "closed" or "opened" state, holding the
 * "zl_issuer_lock" will prevent a concurrent thread from transitioning
 * that lwb to the "issued" state. Likewise, if an lwb is already in the
 * "issued" state, holding the "zl_lock" will prevent a concurrent
 * thread from transitioning that lwb to the "write done" state.
 */
typedef enum {
	LWB_STATE_CLOSED,
	LWB_STATE_OPENED,
	LWB_STATE_ISSUED,
	LWB_STATE_WRITE_DONE,
	LWB_STATE_FLUSH_DONE,
	LWB_NUM_STATES
} lwb_state_t;

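/*
 * Illustrative sketch, not part of this header: because the states
 * above are declared in transition order, code can often answer "has
 * this lwb progressed at least this far?" with a single comparison,
 * provided the lock appropriate to the lwb's phase is held (see the
 * comment above). The helper name below is hypothetical.
 */
static inline boolean_t
lwb_state_reached(lwb_state_t state, lwb_state_t milestone)
{
	/* States only ever advance, so ">=" is a stable progress test. */
	return (state >= milestone);
}
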
/*
 * Log write block (lwb)
 *
 * Prior to an lwb being issued to disk via zil_lwb_write_issue(), it
 * will be protected by the zilog's "zl_issuer_lock". Basically, prior
 * to it being issued, it will only be accessed by the thread that's
 * holding the "zl_issuer_lock". After the lwb is issued, the zilog's
 * "zl_lock" is used to protect the lwb against concurrent access.
 */
typedef struct lwb {
	zilog_t		*lwb_zilog;	/* back pointer to log struct */
	blkptr_t	lwb_blk;	/* on disk address of this log blk */
	boolean_t	lwb_slog;	/* lwb_blk is on SLOG device */
	int		lwb_nused;	/* # used bytes in buffer */
	int		lwb_sz;		/* size of block and buffer */
	lwb_state_t	lwb_state;	/* the state of this lwb */
	char		*lwb_buf;	/* log write buffer */
	zio_t		*lwb_write_zio;	/* zio for the lwb buffer */
	zio_t		*lwb_root_zio;	/* root zio for lwb write and flushes */
	dmu_tx_t	*lwb_tx;	/* tx for log block allocation */
	uint64_t	lwb_max_txg;	/* highest txg in this lwb */
	list_node_t	lwb_node;	/* zilog->zl_lwb_list linkage */
	list_t		lwb_waiters;	/* list of zil_commit_waiter's */
	avl_tree_t	lwb_vdev_tree;	/* vdevs to flush after lwb write */
	kmutex_t	lwb_vdev_lock;	/* protects lwb_vdev_tree */
	hrtime_t	lwb_issued_timestamp; /* when was the lwb issued? */
} lwb_t;

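/*
 * Illustrative sketch, not part of this header: which lock guards an
 * lwb depends on whether it has been issued yet. A hypothetical helper
 * making that rule explicit might look like the following (to compile,
 * it would have to follow the struct zilog definition later in this
 * file, since it dereferences zilog_t).
 */
static inline kmutex_t *
lwb_guard_lock(zilog_t *zilog, lwb_t *lwb)
{
	/* Before issue: issuer's lock. After issue: the general lock. */
	return (lwb->lwb_state < LWB_STATE_ISSUED ?
	    &zilog->zl_issuer_lock : &zilog->zl_lock);
}
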
/*
 * ZIL commit waiter.
 *
 * This structure is allocated each time zil_commit() is called, and is
 * used by zil_commit() to communicate with other parts of the ZIL, such
 * that zil_commit() can know when it is safe for it to return. For more
 * details, see the comment above zil_commit().
 *
 * The "zcw_lock" field is used to protect the commit waiter against
 * concurrent access. This lock is often acquired while already holding
 * the zilog's "zl_issuer_lock" or "zl_lock"; see the functions
 * zil_process_commit_list() and zil_lwb_flush_vdevs_done() as examples
 * of this. Thus, one must be careful not to acquire the
 * "zl_issuer_lock" or "zl_lock" when already holding the "zcw_lock";
 * e.g. see the zil_commit_waiter_timeout() function.
 */
typedef struct zil_commit_waiter {
	kcondvar_t	zcw_cv;		/* signalled when "done" */
	kmutex_t	zcw_lock;	/* protects fields of this struct */
	list_node_t	zcw_node;	/* linkage in lwb_t:lwb_waiter list */
	lwb_t		*zcw_lwb;	/* back pointer to lwb when linked */
	boolean_t	zcw_done;	/* B_TRUE when "done", else B_FALSE */
	int		zcw_zio_error;	/* contains the zio io_error value */
} zil_commit_waiter_t;

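/*
 * Illustrative sketch, not part of this header: the basic wait loop a
 * commit waiter runs, simplified from the waiter logic in zil.c. Note
 * the lock-ordering rule above: since "zcw_lock" can be acquired while
 * "zl_lock" is held, this function must not take "zl_lock" or
 * "zl_issuer_lock" while holding "zcw_lock". The real code also has a
 * timeout path, which is omitted here; the helper name is hypothetical.
 */
static inline void
zcw_wait_done(zil_commit_waiter_t *zcw)
{
	mutex_enter(&zcw->zcw_lock);
	while (!zcw->zcw_done)
		cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
	mutex_exit(&zcw->zcw_lock);
}
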
/*
 * Intent log transaction lists
 */
typedef struct itxs {
	list_t		i_sync_list;	/* list of synchronous itxs */
	avl_tree_t	i_async_tree;	/* tree of foids for async itxs */
} itxs_t;

typedef struct itxg {
	kmutex_t	itxg_lock;	/* lock for this structure */
	uint64_t	itxg_txg;	/* txg for this chain */
	itxs_t		*itxg_itxs;	/* sync and async itxs */
} itxg_t;

/* for async nodes we build up an AVL tree of lists of async itxs per file */
typedef struct itx_async_node {
	uint64_t	ia_foid;	/* file object id */
	list_t		ia_list;	/* list of async itxs for this foid */
	avl_node_t	ia_node;	/* AVL tree linkage */
} itx_async_node_t;

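/*
 * Illustrative sketch, not part of this header: finding (or creating)
 * the per-foid node in the async tree, using the standard illumos
 * avl_find()/avl_insert() pattern. The helper name is hypothetical,
 * and locking is omitted; a real caller would hold the relevant
 * itxg_lock.
 */
static inline itx_async_node_t *
itx_async_node_lookup(avl_tree_t *tree, uint64_t foid)
{
	itx_async_node_t search, *ian;
	avl_index_t where;

	search.ia_foid = foid;
	if ((ian = avl_find(tree, &search, &where)) == NULL) {
		ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
		ian->ia_foid = foid;
		list_create(&ian->ia_list, sizeof (itx_t),
		    offsetof(itx_t, itx_node));
		avl_insert(tree, ian, where);
	}
	return (ian);
}
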
/*
 * Vdev flushing: during a zil_commit(), we build up an AVL tree of the vdevs
 * we've touched so we know which ones need a write cache flush at the end.
 */
typedef struct zil_vdev_node {
	uint64_t	zv_vdev;	/* vdev to be flushed */
	avl_node_t	zv_node;	/* AVL tree linkage */
} zil_vdev_node_t;

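/*
 * Illustrative sketch, not part of this header: recording a vdev id in
 * a flush tree. Duplicates are skipped, since each vdev needs at most
 * one write cache flush per commit. The helper name is hypothetical;
 * locking is omitted.
 */
static inline void
zil_vdev_record(avl_tree_t *tree, uint64_t vdev_id)
{
	zil_vdev_node_t search, *zv;
	avl_index_t where;

	search.zv_vdev = vdev_id;
	if (avl_find(tree, &search, &where) == NULL) {
		zv = kmem_alloc(sizeof (zil_vdev_node_t), KM_SLEEP);
		zv->zv_vdev = vdev_id;
		avl_insert(tree, zv, where);
	}
}
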
#define	ZIL_PREV_BLKS 16

/*
 * Stable storage intent log management structure.  One per dataset.
 */
struct zilog {
	kmutex_t	zl_lock;	/* protects most zilog_t fields */
	struct dsl_pool	*zl_dmu_pool;	/* DSL pool */
	spa_t		*zl_spa;	/* handle for read/write log */
	const zil_header_t *zl_header;	/* log header buffer */
	objset_t	*zl_os;		/* object set we're logging */
	zil_get_data_t	*zl_get_data;	/* callback to get object content */
	lwb_t		*zl_last_lwb_opened; /* most recent lwb opened */
	hrtime_t	zl_last_lwb_latency; /* zio latency of last lwb done */
	uint64_t	zl_lr_seq;	/* on-disk log record sequence number */
	uint64_t	zl_commit_lr_seq; /* last committed on-disk lr seq */
	uint64_t	zl_destroy_txg;	/* txg of last zil_destroy() */
	uint64_t	zl_replayed_seq[TXG_SIZE]; /* last replayed rec seq */
	uint64_t	zl_replaying_seq; /* current replay seq number */
	uint32_t	zl_suspend;	/* log suspend count */
	kcondvar_t	zl_cv_suspend;	/* log suspend completion */
	uint8_t		zl_suspending;	/* log is currently suspending */
	uint8_t		zl_keep_first;	/* keep first log block in destroy */
	uint8_t		zl_replay;	/* replaying records while set */
	uint8_t		zl_stop_sync;	/* for debugging */
	kmutex_t	zl_issuer_lock;	/* single writer, per ZIL, at a time */
	uint8_t		zl_logbias;	/* latency or throughput */
	uint8_t		zl_sync;	/* synchronous or asynchronous */
	int		zl_parse_error;	/* last zil_parse() error */
	uint64_t	zl_parse_blk_seq; /* highest blk seq on last parse */
	uint64_t	zl_parse_lr_seq; /* highest lr seq on last parse */
	uint64_t	zl_parse_blk_count; /* number of blocks parsed */
	uint64_t	zl_parse_lr_count; /* number of log records parsed */
	itxg_t		zl_itxg[TXG_SIZE]; /* intent log txg chains */
	list_t		zl_itx_commit_list; /* itx list to be committed */
	uint64_t	zl_cur_used;	/* current commit log size used */
	list_t		zl_lwb_list;	/* in-flight log write list */
	avl_tree_t	zl_bp_tree;	/* track bps during log parse */
	clock_t		zl_replay_time;	/* lbolt of when replay started */
	uint64_t	zl_replay_blks;	/* number of log blocks replayed */
	zil_header_t	zl_old_header;	/* debugging aid */
	uint_t		zl_prev_blks[ZIL_PREV_BLKS]; /* size - sector rounded */
	uint_t		zl_prev_rotor;	/* rotor for zl_prev[] */
	txg_node_t	zl_dirty_link;	/* protected by dp_dirty_zilogs list */
	uint64_t	zl_dirty_max_txg; /* highest txg used to dirty zilog */
};

typedef struct zil_bp_node {
	dva_t		zn_dva;
	avl_node_t	zn_node;
} zil_bp_node_t;

/*
 * Maximum amount of write data that can be put into a single log block.
 */
#define	ZIL_MAX_LOG_DATA (SPA_OLD_MAXBLOCKSIZE - sizeof (zil_chain_t) - \
    sizeof (lr_write_t))

/*
 * Maximum amount of log space we agree to waste in order to reduce the
 * number of WR_NEED_COPY chunks, and thus the zl_get_data() overhead
 * (~12%).
 */
#define	ZIL_MAX_WASTE_SPACE (ZIL_MAX_LOG_DATA / 8)

/*
 * Maximum amount of write data for WR_COPIED.  Fall back to the more
 * space-efficient WR_NEED_COPY if we can't fit at least two log records
 * into a maximum-sized log block.
 */
#define	ZIL_MAX_COPIED_DATA ((SPA_OLD_MAXBLOCKSIZE - \
    sizeof (zil_chain_t)) / 2 - sizeof (lr_write_t))

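/*
 * Worked example (illustrative assumptions, not definitions from this
 * header): with SPA_OLD_MAXBLOCKSIZE at 128K (131072 bytes), and taking
 * sizeof (zil_chain_t) as 184 and sizeof (lr_write_t) as 192 for
 * concreteness (the actual sizes depend on the build), the macros above
 * evaluate roughly as follows:
 *
 *	ZIL_MAX_LOG_DATA    = 131072 - 184 - 192       = 130696 bytes
 *	ZIL_MAX_WASTE_SPACE = 130696 / 8               = 16337 bytes
 *	ZIL_MAX_COPIED_DATA = (131072 - 184) / 2 - 192 = 65252 bytes
 */
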
#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_ZIL_IMPL_H */