spa_log_spacemap.c revision 814dcd43c3de9925fd6226c256e4d4327841a0e1
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2018, 2019 by Delphix. All rights reserved.
 */

#include <sys/dmu_objset.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev_impl.h>
#include <sys/zap.h>

/*
 * Log Space Maps
 *
 * Log space maps are an optimization in ZFS metadata allocations for pools
 * whose workloads are primarily random-writes. Random-write workloads are also
 * typically random-free, meaning that they are freeing from locations
 * scattered throughout the pool. This means that each TXG we will have to
 * append some FREE records to almost every metaslab. With log space maps, we
 * hold the metaslabs' changes in memory and log them all together in one
 * pool-wide space map on-disk for persistence. As more blocks are accumulated
 * in the log space maps and more unflushed changes are accounted in memory,
 * we flush a selected group of metaslabs every TXG to relieve memory pressure
 * and potential overheads when loading the pool. Flushing a metaslab to disk
 * relieves memory as we flush any unflushed changes from memory to disk (i.e.
 * the metaslab's space map) and saves import time by making old log space
 * maps obsolete and eventually destroying them. [A log space map is said to
 * be obsolete when all its entries have made it to their corresponding
 * metaslab space maps].
 *
 * == On disk data structures used ==
 *
 * - The pool has a new feature flag and a new entry in the MOS. The feature
 *   is activated when we create the first log space map and remains active
 *   for the lifetime of the pool. The new entry in the MOS Directory [refer
 *   to DMU_POOL_LOG_SPACEMAP_ZAP] is populated with a ZAP whose key-value
 *   pairs are of the form <key: txg, value: log space map object for that
 *   txg>. This entry is our on-disk reference of the log space maps that
 *   exist in the pool for each TXG and it is used during import to load all
 *   the metaslab unflushed changes in memory. To see how this structure is
 *   first created and later populated refer to spa_generate_syncing_log_sm().
 *   To see how it is used during import time refer to
 *   spa_ld_log_sm_metadata().
 *
 * - Each vdev has a new entry in its vdev_top_zap (see field
 *   VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS) which holds the msp_unflushed_txg of
 *   each metaslab in this vdev. This field is the on-disk counterpart of the
 *   in-memory field ms_unflushed_txg which tells us from which TXG and onwards
 *   the metaslab hasn't had its changes flushed. During import, we use this
 *   to ignore any entries in the space map log that are for this metaslab but
 *   from a TXG before msp_unflushed_txg. At that point, we also populate its
 *   in-memory counterpart and from there both fields are updated every time
 *   we flush that metaslab.
 *
 * - A space map is created every TXG and, during that TXG, it is used to log
 *   all incoming changes (the log space map). When created, the log space map
 *   is referenced in memory by spa_syncing_log_sm and its object ID is
 *   inserted to the space map ZAP mentioned above. The log space map is
 *   closed at the end of the TXG and will be destroyed when it becomes fully
 *   obsolete. We know when a log space map has become obsolete by looking at
 *   the oldest (and smallest) ms_unflushed_txg in the pool. If that value is
 *   bigger than the log space map's TXG, then every metaslab has already
 *   flushed the changes recorded in that log and we can therefore destroy it
 *   [see spa_cleanup_old_sm_logs()].
 *
 * == Important in-memory structures ==
 *
 * - The per-spa field spa_metaslabs_by_flushed sorts all the metaslabs in
 *   the pool by their ms_unflushed_txg field. It is primarily used for three
 *   reasons. First of all, it is used during flushing where we try to flush
 *   metaslabs in-order from the oldest-flushed to the most recently flushed
 *   every TXG. Secondly, it helps us to look up the ms_unflushed_txg of the
 *   oldest flushed metaslab to distinguish which log space maps have become
 *   obsolete and which ones are still relevant. Finally, it tells us which
 *   metaslabs have unflushed changes in a pool where this feature was just
 *   enabled, as we don't immediately add all of the pool's metaslabs but we
 *   add them over time as they go through metaslab_sync(). The reason that
 *   we do that is to ease these pools into the behavior of the flushing
 *   algorithm (described later on).
 *
 * - The per-spa field spa_sm_logs_by_txg can be thought of as the in-memory
 *   counterpart of the space map ZAP mentioned above. It's an AVL tree whose
 *   nodes represent the log space maps in the pool. This in-memory
 *   representation of log space maps in the pool sorts the log space maps by
 *   the TXG in which they were created (which is also the TXG of their
 *   unflushed changes). It also contains the following extra information for
 *   each space map:
 *   [1] The number of metaslabs that were last flushed in that TXG. This is
 *       important because if that counter is zero and this is the oldest
 *       log then it means that it is also obsolete.
 *   [2] The number of blocks of that space map. This field is used by the
 *       block heuristic of our flushing algorithm (described later on).
 *       It represents how many blocks of metadata changes ZFS had to write
 *       to disk for that TXG.
 *
 * - The per-spa field spa_log_summary is a list of entries that summarizes
 *   the metaslab and block counts of all the nodes of the spa_sm_logs_by_txg
 *   AVL tree mentioned above. The reason this exists is that our flushing
 *   algorithm (described later) tries to estimate how many metaslabs to flush
 *   in each TXG by iterating over all the log space maps and looking at their
 *   block counts. Summarizing that information means that we don't have to
 *   iterate through each space map, minimizing the runtime overhead that the
 *   flushing algorithm would otherwise induce in syncing context. In terms of
 *   implementation the log summary is used as a queue:
 *   * we modify or pop entries from its head when we flush metaslabs
 *   * we modify or append entries to its tail when we sync changes.
 *
 * - Each metaslab has two new range trees that hold its unflushed changes,
 *   ms_unflushed_allocs and ms_unflushed_frees. These are always disjoint.
 *
 * == Flushing algorithm ==
 *
 * The decision of how many metaslabs to flush on a given TXG is guided by
 * two heuristics:
 *
 * [1] The memory heuristic -
 * We keep track of the memory used by the unflushed trees from all the
 * metaslabs [see sus_memused of spa_unflushed_stats] and we ensure that it
 * stays below a certain threshold which is determined by an arbitrary hard
 * limit and an arbitrary percentage of the system's memory [see
 * spa_log_exceeds_memlimit()]. When we see that the memory usage of the
 * unflushed changes exceeds that threshold, we flush metaslabs, which
 * empties their unflushed range trees, reducing the memory used.
 *
 * [2] The block heuristic -
 * We try to keep the total number of blocks in the log space maps in check
 * so the log doesn't grow indefinitely and we don't induce a lot of overhead
 * when loading the pool. At the same time we don't want to flush a lot of
 * metaslabs too often as this would defeat the purpose of the log space map.
 * As a result we set a limit on the number of blocks that we think is
 * acceptable for the log space maps to have and try not to cross it
 * [see sus_blocklimit from spa_unflushed_stats].
 *
 * In order to stay below the block limit every TXG we have to estimate how
 * many metaslabs we need to flush based on the current rate of incoming blocks
 * and our history of log space map blocks. The main idea here is to answer
 * the question of how many metaslabs we need to flush in order to get rid
 * of at least X log space map blocks. We can answer this question by
 * iterating backwards from the oldest log space map to the newest one
 * and looking at their metaslab and block counts. At this point the log
 * summary mentioned above comes in handy as it reduces the number of entries
 * that we have to iterate over (even though it may reduce the precision of
 * our estimates due to its aggregation of data). So with that in mind, we
 * project the incoming rate of the current TXG into the future and attempt
 * to approximate how many metaslabs we would need to flush from now in order
 * to avoid exceeding our block limit at different points in the future
 * (granted that we would keep flushing the same number of metaslabs every
 * TXG). Then we take the maximum number from all these estimates to be on
 * the safe side. For the exact implementation details of the algorithm refer
 * to spa_estimate_metaslabs_to_flush().
 */
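
/*
 * Illustrative sketch of the on-disk structures described above. The TXGs
 * and object IDs below are hypothetical, chosen only for illustration:
 *
 *	DMU_POOL_LOG_SPACEMAP_ZAP:
 *		<key: 1520, value: space map object 741>	<- oldest log
 *		<key: 1521, value: space map object 755>
 *		...
 *		<key: 1534, value: space map object 802>	<- syncing log
 *
 * If the smallest ms_unflushed_txg across all metaslabs is 1523, then the
 * logs for TXGs 1520 through 1522 are obsolete (every metaslab already has
 * their changes in its own space map) and can be destroyed by
 * spa_cleanup_old_sm_logs().
 */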

/*
 * This is used as the block size for the space maps used for the
 * log space map feature. These space maps benefit from a bigger
 * block size as we expect to be writing a lot of data to them at
 * once.
 */
unsigned long zfs_log_sm_blksz = 1ULL << 17;

/*
 * Percentage of the overall system's memory that ZFS allows to be
 * used for unflushed changes (e.g. the sum of the sizes of all the
 * nodes in the unflushed trees).
 *
 * Note that this value is calculated over 1000000 for finer granularity
 * (thus the _ppm suffix; reads as "parts per million"). As an example,
 * the default of 1000 allows 0.1% of memory to be used.
 */
unsigned long zfs_unflushed_max_mem_ppm = 1000;

/*
 * Hard limit on the amount of memory that ZFS allows to be used for
 * unflushed changes.
 */
unsigned long zfs_unflushed_max_mem_amt = 1ULL << 30;

/*
 * The following tunable determines the number of blocks that can be used for
 * the log space maps. It is expressed as a percentage of the total number of
 * metaslabs in the pool (i.e. the default of 400 means that the number of log
 * blocks is capped at 4 times the number of metaslabs).
 *
 * This value exists to tune our flushing algorithm, with higher values
 * flushing metaslabs less often (doing fewer I/Os) per TXG versus lower
 * values flushing metaslabs more aggressively with the upside of saving
 * overheads when loading the pool. Another factor in this tradeoff is that
 * flushing less often can potentially lead to better utilization of the
 * metaslab space map's block size as we accumulate more changes per flush.
 *
 * Given that this tunable indirectly controls the flush rate (metaslabs
 * flushed per txg), making it a percentage of the number of metaslabs in
 * the pool makes sense here.
 *
 * As a rule of thumb we default this tunable to 400% based on the following:
 *
 * 1] Assuming a constant flush rate and a constant incoming rate of log blocks
 *    it is reasonable to expect that the amount of obsolete entries changes
 *    linearly from txg to txg (e.g. the oldest log should have the most
 *    obsolete entries, and the most recent one the least). With this we could
 *    say that, at any given time, about half of the entries in the whole space
 *    map log are obsolete. Thus for every two entries for a metaslab in the
 *    log space map, only one of them is valid and actually makes it to the
 *    metaslab's space map.
 *    [factor of 2]
 * 2] Each entry in the log space map is guaranteed to be two words while
 *    entries in metaslab space maps are generally single-word.
 *    [an extra factor of 2 - 400% overall]
 *
 * Thus we could say that even in the worst case of [1] and [2], the factor
 * should end up being 4.
 *
 * 3] Even if [1] and [2] are slightly less than 2 each, we haven't taken into
 *    account any consolidation of segments from the log space map to the
 *    unflushed range trees nor their history (e.g. a segment being allocated,
 *    then freed, then allocated again means 3 log space map entries but 0
 *    metaslab space map entries). Depending on the workload, we've seen ~1.8
 *    non-obsolete log space map entries per metaslab entry, for a total of
 *    ~600%. Since most of these estimates are workload dependent, though, we
 *    default to 400% to be conservative.
 *
 * That said, regardless of the number of metaslabs in the pool we need to
 * provide upper and lower bounds for the log block limit.
 * [see zfs_unflushed_log_block_{min,max}]
 */
unsigned long zfs_unflushed_log_block_pct = 400;

/*
 * If the number of metaslabs is small and our incoming rate is high, we could
 * get into a situation where we are flushing all our metaslabs every TXG. Thus
 * we always allow at least this many log blocks.
 */
unsigned long zfs_unflushed_log_block_min = 1000;

/*
 * If the log becomes too big, the import time of the pool can take a hit in
 * terms of performance. Thus we have a hard limit on the size of the log,
 * expressed in blocks.
 */
unsigned long zfs_unflushed_log_block_max = (1ULL << 18);

/*
 * Max # of rows allowed for the log_summary. The tradeoff here is between the
 * accuracy and stability of the flushing algorithm (longer summary) and its
 * runtime overhead (a smaller summary is faster to traverse).
 */
unsigned long zfs_max_logsm_summary_length = 10;

/*
 * Tunable that sets the lower bound on the metaslabs to flush every TXG.
 *
 * Setting this to 0 has no effect since if the pool is idle we won't even be
 * creating log space maps and therefore we won't be flushing. On the other
 * hand if the pool has any incoming workload our block heuristic will start
 * flushing metaslabs anyway.
 *
 * The point of this tunable is to be used in extreme cases where we really
 * want to flush more metaslabs than our adaptable heuristic plans to flush.
 */
unsigned long zfs_min_metaslabs_to_flush = 1;

/*
 * Tunable that specifies how far in the past we want to look when trying to
 * estimate the incoming log blocks for the current TXG.
 *
 * Setting this too high may not only increase runtime but also dilute the
 * effect of the incoming rates from the most recent TXGs as we take the
 * average over all the blocks that we walk
 * [see spa_estimate_incoming_log_blocks()].
 */
unsigned long zfs_max_log_walking = 5;

/*
 * This tunable exists solely for testing purposes. It ensures that the log
 * spacemaps are not flushed and destroyed during export in order for the
 * relevant log spacemap import code paths to be tested (effectively simulating
 * a crash).
 */
int zfs_keep_log_spacemaps_at_export = 0;

static uint64_t
spa_estimate_incoming_log_blocks(spa_t *spa)
{
	ASSERT3U(spa_sync_pass(spa), ==, 1);
	uint64_t steps = 0, sum = 0;

	for (spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
	    sls != NULL && steps < zfs_max_log_walking;
	    sls = AVL_PREV(&spa->spa_sm_logs_by_txg, sls)) {
		if (sls->sls_txg == spa_syncing_txg(spa)) {
			/*
			 * Skip the log created in this TXG as this would
			 * make our estimations inaccurate.
			 */
			continue;
		}
		sum += sls->sls_nblocks;
		steps++;
	}
	return ((steps > 0) ? DIV_ROUND_UP(sum, steps) : 0);
}
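
/*
 * Illustrative example of the estimate above (hypothetical block counts,
 * for illustration only): with zfs_max_log_walking at its default of 5 and
 * the five most recent closed logs holding 12, 9, 14, 10, and 11 blocks,
 * the estimated incoming rate is
 *
 *	DIV_ROUND_UP(12 + 9 + 14 + 10 + 11, 5) = DIV_ROUND_UP(56, 5) = 12
 *
 * blocks for the upcoming TXG. The log of the currently-syncing TXG is
 * skipped because its block count is still growing.
 */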

uint64_t
spa_log_sm_blocklimit(spa_t *spa)
{
	return (spa->spa_unflushed_stats.sus_blocklimit);
}

void
spa_log_sm_set_blocklimit(spa_t *spa)
{
	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
		ASSERT0(spa_log_sm_blocklimit(spa));
		return;
	}

	uint64_t calculated_limit =
	    (spa_total_metaslabs(spa) * zfs_unflushed_log_block_pct) / 100;
	spa->spa_unflushed_stats.sus_blocklimit = MIN(MAX(calculated_limit,
	    zfs_unflushed_log_block_min), zfs_unflushed_log_block_max);
}
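
/*
 * Worked example of the clamping above (hypothetical pool, for illustration
 * only): a pool with 500 metaslabs and the default zfs_unflushed_log_block_pct
 * of 400 yields a calculated limit of (500 * 400) / 100 = 2000 blocks, which
 * falls within [zfs_unflushed_log_block_min, zfs_unflushed_log_block_max] =
 * [1000, 262144] and is used as-is. A tiny pool with 100 metaslabs would
 * calculate 400 and be raised to the floor of 1000.
 */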

uint64_t
spa_log_sm_nblocks(spa_t *spa)
{
	return (spa->spa_unflushed_stats.sus_nblocks);
}

/*
 * Ensure that the in-memory log space map structures and the summary
 * have the same block and metaslab counts.
 */
static void
spa_log_summary_verify_counts(spa_t *spa)
{
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));

	if ((zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) == 0)
		return;

	uint64_t ms_in_avl = avl_numnodes(&spa->spa_metaslabs_by_flushed);

	uint64_t ms_in_summary = 0, blk_in_summary = 0;
	for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
	    e; e = list_next(&spa->spa_log_summary, e)) {
		ms_in_summary += e->lse_mscount;
		blk_in_summary += e->lse_blkcount;
	}

	uint64_t ms_in_logs = 0, blk_in_logs = 0;
	for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
	    sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
		ms_in_logs += sls->sls_mscount;
		blk_in_logs += sls->sls_nblocks;
	}

	VERIFY3U(ms_in_logs, ==, ms_in_summary);
	VERIFY3U(ms_in_logs, ==, ms_in_avl);
	VERIFY3U(blk_in_logs, ==, blk_in_summary);
	VERIFY3U(blk_in_logs, ==, spa_log_sm_nblocks(spa));
}

static boolean_t
summary_entry_is_full(spa_t *spa, log_summary_entry_t *e)
{
	uint64_t blocks_per_row = MAX(1,
	    DIV_ROUND_UP(spa_log_sm_blocklimit(spa),
	    zfs_max_logsm_summary_length));

	return (blocks_per_row <= e->lse_blkcount);
}
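
/*
 * Illustrative example (hypothetical numbers): with a block limit of 2000
 * and the default zfs_max_logsm_summary_length of 10, a summary row is
 * considered full once it accounts for DIV_ROUND_UP(2000, 10) = 200 blocks,
 * at which point summary_add_data() starts a new row at the tail of the
 * summary.
 */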

/*
 * Update the log summary information to reflect the fact that a metaslab
 * was flushed or destroyed (e.g. due to device removal or pool
 * export/destroy).
 *
 * We typically flush the oldest flushed metaslab so the first (and oldest)
 * entry of the summary is updated. However if that metaslab is getting loaded
 * we may flush the second oldest one which may be part of an entry later in
 * the summary. Moreover, if we call into this function from metaslab_fini()
 * the metaslabs probably won't be ordered by ms_unflushed_txg. Thus we ask
 * for a txg as an argument so we can locate the appropriate summary entry for
 * the metaslab.
 */
void
spa_log_summary_decrement_mscount(spa_t *spa, uint64_t txg)
{
	/*
	 * We don't track summary data for read-only pools and this function
	 * can be called from metaslab_fini(). In that case return immediately.
	 */
	if (!spa_writeable(spa))
		return;

	log_summary_entry_t *target = NULL;
	for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
	    e != NULL; e = list_next(&spa->spa_log_summary, e)) {
		if (e->lse_start > txg)
			break;
		target = e;
	}

	if (target == NULL || target->lse_mscount == 0) {
		/*
		 * We didn't find a summary entry for this metaslab. We must be
		 * at the teardown of a spa_load() attempt that got an error
		 * while reading the log space maps.
		 */
		VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
		return;
	}

	target->lse_mscount--;
}

/*
 * Update the log summary information to reflect the fact that we destroyed
 * old log space maps. Since we can only destroy the oldest log space maps,
 * we decrement the block count of the oldest summary entry and potentially
 * destroy it when that count hits 0.
 *
 * This function is called after a metaslab is flushed and typically that
 * metaslab is the oldest flushed, which means that this function will
 * typically decrement the block count of the first entry of the summary and
 * potentially free it if the block count gets to zero (its metaslab count
 * should be zero too at that point).
 *
 * There are certain scenarios though that don't work exactly like that so we
 * need to account for them:
 *
 * Scenario [1]: It is possible that after we flushed the oldest flushed
 * metaslab and we destroyed the oldest log space map, more recent logs had 0
 * metaslabs pointing to them so we got rid of them too. This can happen due
 * to metaslabs being destroyed through device removal, or because the oldest
 * flushed metaslab was loading but we kept flushing more recently flushed
 * metaslabs due to the memory pressure of unflushed changes. Because of that,
 * we always iterate from the beginning of the summary; if blocks_gone is at
 * least the block count of the current entry, we free that entry (we expect
 * its metaslab count to be zero), decrement blocks_gone by that entry's block
 * count, and move on to the next entry, repeating this procedure until
 * blocks_gone gets decremented to 0. Doing this also works for the typical
 * case mentioned above.
 *
 * Scenario [2]: The oldest flushed metaslab isn't necessarily accounted by
 * the first (and oldest) entry in the summary. If the first few entries of
 * the summary were only accounting metaslabs from a device that was just
 * removed, then the current oldest flushed metaslab could be accounted by an
 * entry somewhere in the middle of the summary. Moreover flushing that
 * metaslab will destroy all the log space maps older than its ms_unflushed_txg
 * because they became obsolete after the removal. Thus, iterating as we did
 * for scenario [1] works out for this case too.
 *
 * Scenario [3]: At times we decide to flush all the metaslabs in the pool
 * in one TXG (either because we are exporting the pool or because our flushing
 * heuristics decided to do so). When that happens all the log space maps get
 * destroyed except the one created for the current TXG which doesn't have
 * any log blocks yet. As log space maps get destroyed with every metaslab that
 * we flush, entries in the summary are also destroyed. This brings a weird
 * corner-case when we flush the last metaslab and the log space map of the
 * current TXG is in the same summary entry with other log space maps that
 * are older. When that happens we are eventually left with this one last
 * summary entry whose blocks are gone (blocks_gone equals the entry's block
 * count) but its metaslab count is non-zero (because it accounts all the
 * metaslabs in the pool as they all got flushed). Under this scenario we can't
 * free this last summary entry as it's referencing all the metaslabs in the
 * pool and its block count will get incremented at the end of this sync (when
 * we close the syncing log space map). Thus we just decrement its current
 * block count and leave it alone. In the case that the pool gets exported,
 * its metaslab count will be decremented over time as we call metaslab_fini()
 * for all the metaslabs in the pool and the entry will be freed at
 * spa_unload_log_sm_metadata().
 */
void
spa_log_summary_decrement_blkcount(spa_t *spa, uint64_t blocks_gone)
{
	for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
	    e != NULL; e = list_head(&spa->spa_log_summary)) {
		if (e->lse_blkcount > blocks_gone) {
			/*
			 * Assert that we stopped at an entry that is not
			 * obsolete.
			 */
			ASSERT(e->lse_mscount != 0);

			e->lse_blkcount -= blocks_gone;
			blocks_gone = 0;
			break;
		} else if (e->lse_mscount == 0) {
			/* remove obsolete entry */
			blocks_gone -= e->lse_blkcount;
			list_remove(&spa->spa_log_summary, e);
			kmem_free(e, sizeof (log_summary_entry_t));
		} else {
			/* Verify that this is scenario [3] mentioned above. */
			VERIFY3U(blocks_gone, ==, e->lse_blkcount);

			/*
			 * Assert that this is scenario [3] further by ensuring
			 * that this is the only entry in the summary.
			 */
			VERIFY3P(e, ==, list_tail(&spa->spa_log_summary));
			ASSERT3P(e, ==, list_head(&spa->spa_log_summary));

			blocks_gone = e->lse_blkcount = 0;
			break;
		}
	}

	/*
	 * Ensure that there is no way we are trying to remove more blocks
	 * than the # of blocks in the summary.
	 */
	ASSERT0(blocks_gone);
}
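
/*
 * Walked example of the loop above (hypothetical summary, for illustration
 * only): suppose the summary holds three entries with block counts
 * [150, 200, 180], the first two of which have a metaslab count of 0, and
 * that blocks_gone is 350. The first entry's block count (150) does not
 * exceed 350 and the entry is obsolete, so it is freed and blocks_gone drops
 * to 200; the same happens for the second entry, dropping blocks_gone to 0.
 * The third entry's block count (180) exceeds 0, so the loop stops there,
 * leaving the non-obsolete entry intact.
 */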

void
spa_log_sm_decrement_mscount(spa_t *spa, uint64_t txg)
{
	spa_log_sm_t target = { .sls_txg = txg };
	spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
	    &target, NULL);

	if (sls == NULL) {
		/*
		 * We must be at the teardown of a spa_load() attempt that
		 * got an error while reading the log space maps.
		 */
		VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
		return;
	}

	ASSERT(sls->sls_mscount > 0);
	sls->sls_mscount--;
}

void
spa_log_sm_increment_current_mscount(spa_t *spa)
{
	spa_log_sm_t *last_sls = avl_last(&spa->spa_sm_logs_by_txg);

	ASSERT3U(last_sls->sls_txg, ==, spa_syncing_txg(spa));
	last_sls->sls_mscount++;
}

static void
summary_add_data(spa_t *spa, uint64_t txg, uint64_t metaslabs_flushed,
    uint64_t nblocks)
{
	log_summary_entry_t *e = list_tail(&spa->spa_log_summary);

	if (e == NULL || summary_entry_is_full(spa, e)) {
		e = kmem_zalloc(sizeof (log_summary_entry_t), KM_SLEEP);
		e->lse_start = txg;
		list_insert_tail(&spa->spa_log_summary, e);
	}

	ASSERT3U(e->lse_start, <=, txg);
	e->lse_mscount += metaslabs_flushed;
	e->lse_blkcount += nblocks;
}
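
/*
 * Illustrative sketch of the tail behavior above (hypothetical numbers,
 * assuming 200 blocks per row): three syncing TXGs that add 80, 90, and 60
 * incoming blocks all land in the same tail row, growing it to 230 blocks.
 * Since 230 >= 200 that row is now considered full, so the next call starts
 * a fresh row at the tail with lse_start set to that TXG.
 */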

static void
spa_log_summary_add_incoming_blocks(spa_t *spa, uint64_t nblocks)
{
	summary_add_data(spa, spa_syncing_txg(spa), 0, nblocks);
}

void
spa_log_summary_add_flushed_metaslab(spa_t *spa)
{
	summary_add_data(spa, spa_syncing_txg(spa), 1, 0);
}

/*
 * This function attempts to estimate how many metaslabs we should
 * flush to satisfy our block heuristic for the log spacemap
 * for the upcoming TXGs.
 *
 * Specifically, it first tries to estimate the number of incoming
 * blocks in this TXG. Then by projecting that incoming rate to
 * future TXGs and using the log summary, it figures out how many
 * flushes we would need to do for future TXGs individually to
 * stay below our block limit and returns the maximum number of
 * flushes from those estimates.
 */
static uint64_t
spa_estimate_metaslabs_to_flush(spa_t *spa)
{
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
	ASSERT3U(spa_sync_pass(spa), ==, 1);
	ASSERT(spa_log_sm_blocklimit(spa) != 0);

	/*
	 * This variable contains the incoming rate that will be projected
	 * and used for our flushing estimates in the future.
	 */
	uint64_t incoming = spa_estimate_incoming_log_blocks(spa);

	/*
	 * At any point in time this variable tells us how many
	 * TXGs in the future we are, so we can make our estimations.
	 */
	uint64_t txgs_in_future = 1;

	/*
	 * This variable tells us how much room we have until we hit
	 * our limit. When it goes negative, it means that we've exceeded
	 * our limit and we need to flush.
	 *
	 * Note that since we start at the first TXG in the future (i.e.
	 * txgs_in_future starts from 1) we already decrement this
	 * variable by the incoming rate.
	 */
	int64_t available_blocks =
	    spa_log_sm_blocklimit(spa) - spa_log_sm_nblocks(spa) - incoming;

	/*
	 * This variable tells us the total number of flushes needed to
	 * keep the log size within the limit when we reach txgs_in_future.
	 */
	uint64_t total_flushes = 0;

	/* Holds the current maximum of our estimates so far. */
	uint64_t max_flushes_pertxg =
	    MIN(avl_numnodes(&spa->spa_metaslabs_by_flushed),
	    zfs_min_metaslabs_to_flush);

	/*
	 * For our estimations we only look as far into the future
	 * as the summary allows us.
	 */
	for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
	    e; e = list_next(&spa->spa_log_summary, e)) {

		/*
		 * If there is still room before we exceed our limit
		 * then keep skipping TXGs, accumulating more blocks
		 * based on the incoming rate, until we exceed it.
		 */
		if (available_blocks >= 0) {
			uint64_t skip_txgs = (available_blocks / incoming) + 1;
			available_blocks -= (skip_txgs * incoming);
			txgs_in_future += skip_txgs;
			ASSERT3S(available_blocks, >=, -incoming);
		}

		/*
		 * At this point we're far enough into the future where
		 * the limit was just exceeded and we flush metaslabs
		 * based on the current entry in the summary, updating
		 * our available_blocks.
		 */
		ASSERT3S(available_blocks, <, 0);
		available_blocks += e->lse_blkcount;
		total_flushes += e->lse_mscount;

		/*
		 * Keep the running maximum of the total_flushes that
		 * we've done so far over the number of TXGs in the
		 * future that we are. The idea here is to estimate
		 * the average number of flushes that we should do
		 * every TXG so that when we are that many TXGs in the
		 * future we stay under the limit.
		 */
		max_flushes_pertxg = MAX(max_flushes_pertxg,
		    DIV_ROUND_UP(total_flushes, txgs_in_future));
		ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=,
		    max_flushes_pertxg);
	}
	return (max_flushes_pertxg);
}
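
/*
 * Worked example of the estimation loop above (hypothetical numbers, for
 * illustration only): assume a block limit of 1000, a current log size of
 * 980 blocks, an estimated incoming rate of 30 blocks per TXG, and a summary
 * of two entries: {20 metaslabs, 100 blocks} and {25 metaslabs, 200 blocks}.
 *
 * Starting one TXG in the future, available_blocks is 1000 - 980 - 30 = -10,
 * so the limit is already exceeded: flushing the first entry gives
 * total_flushes = 20 and an estimate of ceil(20 / 1) = 20 flushes per TXG.
 * That entry's 100 blocks buy us room for 90 more blocks, i.e. 4 more TXGs
 * of incoming blocks at rate 30, so the second entry is charged at 5 TXGs in
 * the future: total_flushes = 45 and ceil(45 / 5) = 9. The maximum of the
 * two estimates, 20, is returned.
 */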

uint64_t
spa_log_sm_memused(spa_t *spa)
{
	return (spa->spa_unflushed_stats.sus_memused);
}

static boolean_t
spa_log_exceeds_memlimit(spa_t *spa)
{
	if (spa_log_sm_memused(spa) > zfs_unflushed_max_mem_amt)
		return (B_TRUE);

	uint64_t system_mem_allowed = ((physmem * PAGESIZE) *
	    zfs_unflushed_max_mem_ppm) / 1000000;
	if (spa_log_sm_memused(spa) > system_mem_allowed)
		return (B_TRUE);

	return (B_FALSE);
}
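
/*
 * Worked example of the two thresholds above (hypothetical machine, for
 * illustration only): on a system with 16 GiB of physical memory and the
 * default zfs_unflushed_max_mem_ppm of 1000, the ppm-based threshold is
 * (17179869184 * 1000) / 1000000, or about 17 MB (0.1% of RAM). That is far
 * below the 1 GiB default of zfs_unflushed_max_mem_amt, so on such a machine
 * the ppm limit is the one that ends up triggering flushing.
 */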

boolean_t
spa_flush_all_logs_requested(spa_t *spa)
{
	return (spa->spa_log_flushall_txg != 0);
}

void
spa_flush_metaslabs(spa_t *spa, dmu_tx_t *tx)
{
	uint64_t txg = dmu_tx_get_txg(tx);

	if (spa_sync_pass(spa) != 1)
		return;

	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
		return;

	/*
	 * If we don't have any metaslabs with unflushed changes
	 * return immediately.
	 */
	if (avl_numnodes(&spa->spa_metaslabs_by_flushed) == 0)
		return;

	/*
	 * During SPA export we leave a few empty TXGs to go by [see
	 * spa_final_dirty_txg() to understand why]. For this specific
	 * case, it is important to not flush any metaslabs as that
	 * would dirty this TXG.
	 *
	 * That said, during one of these dirty TXGs that is less than
	 * or equal to spa_final_dirty_txg(), spa_unload() will request
	 * that we try to flush all the metaslabs for that TXG before
	 * exporting the pool, thus we ensure that we didn't get a
	 * request of flushing everything before we attempt to return
	 * immediately.
	 */
	if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
	    !dmu_objset_is_dirty(spa_meta_objset(spa), txg) &&
	    !spa_flush_all_logs_requested(spa))
		return;

	/*
	 * We need to generate a log space map before flushing because this
	 * will set up the in-memory data (i.e. node in spa_sm_logs_by_txg)
	 * for this TXG's flushed metaslab count (aka sls_mscount which is
	 * manipulated in many ways down the metaslab_flush() codepath).
	 *
	 * That is not to say that we may generate a log space map when we
	 * don't need it. If we are flushing metaslabs, that means that we
	 * were going to write changes to disk anyway, so even if we were
	 * not flushing, a log space map would have been created anyway in
	 * metaslab_sync().
	 */
	spa_generate_syncing_log_sm(spa, tx);

	/*
	 * This variable tells us how many metaslabs we want to flush based
	 * on the block-heuristic of our flushing algorithm (see block comment
	 * of log space map feature). We also decrement this as we flush
	 * metaslabs and attempt to destroy old log space maps.
	 */
	uint64_t want_to_flush;
	if (spa_flush_all_logs_requested(spa)) {
		ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
		want_to_flush = avl_numnodes(&spa->spa_metaslabs_by_flushed);
	} else {
		want_to_flush = spa_estimate_metaslabs_to_flush(spa);
	}

	ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=,
	    want_to_flush);

	/* Used purely for verification purposes */
	uint64_t visited = 0;

	/*
	 * Ideally we would only iterate through spa_metaslabs_by_flushed
	 * using only one variable (curr). We can't do that because
	 * metaslab_flush() mutates the position of curr in the AVL when
	 * it flushes that metaslab by moving it to the end of the tree.
	 * Thus we always keep track of the original next node of the
	 * current node (curr) in another variable (next).
	 */
	metaslab_t *next = NULL;
	for (metaslab_t *curr = avl_first(&spa->spa_metaslabs_by_flushed);
	    curr != NULL; curr = next) {
		next = AVL_NEXT(&spa->spa_metaslabs_by_flushed, curr);

		/*
		 * If this metaslab has been flushed this txg then we've done
		 * a full circle over the metaslabs.
		 */
		if (metaslab_unflushed_txg(curr) == txg)
			break;

		/*
		 * If we are done flushing for the block heuristic and the
		 * unflushed changes don't exceed the memory limit just stop.
		 */
		if (want_to_flush == 0 && !spa_log_exceeds_memlimit(spa))
			break;

		mutex_enter(&curr->ms_sync_lock);
		mutex_enter(&curr->ms_lock);
		boolean_t flushed = metaslab_flush(curr, tx);
		mutex_exit(&curr->ms_lock);
		mutex_exit(&curr->ms_sync_lock);

		/*
		 * If we failed to flush a metaslab (because it was loading),
		 * then we are done with the block heuristic as it's not
		 * possible to destroy any log space maps once you've skipped
		 * a metaslab. In that case we just set our counter to 0 but
		 * we continue looping in case there is still memory pressure
		 * due to unflushed changes. Note that flushing a metaslab
		 * that is not the oldest flushed in the pool will never
		 * destroy any log space maps [see spa_cleanup_old_sm_logs()].
		 */
		if (!flushed) {
			want_to_flush = 0;
		} else if (want_to_flush > 0) {
			want_to_flush--;
		}

		visited++;
	}
	ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=, visited);
}

/*
 * Close the log space map for this TXG and update the block counts
 * for the log's in-memory structure and the summary.
 */
void
spa_sync_close_syncing_log_sm(spa_t *spa)
{
	if (spa_syncing_log_sm(spa) == NULL)
		return;
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));

	spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
	ASSERT3U(sls->sls_txg, ==, spa_syncing_txg(spa));

	sls->sls_nblocks = space_map_nblocks(spa_syncing_log_sm(spa));
	spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;

	/*
	 * Note that we can't assert that sls_mscount is not 0,
	 * because there is the case where the first metaslab
	 * in spa_metaslabs_by_flushed is loading and we were
	 * not able to flush any metaslabs in the current TXG.
	 */
	ASSERT(sls->sls_nblocks != 0);

	spa_log_summary_add_incoming_blocks(spa, sls->sls_nblocks);
	spa_log_summary_verify_counts(spa);

	space_map_close(spa->spa_syncing_log_sm);
	spa->spa_syncing_log_sm = NULL;

	/*
	 * At this point we tried to flush as many metaslabs as we
	 * can as the pool is getting exported. Reset the "flush all"
	 * request so the last few TXGs before closing the pool can
	 * be empty (e.g. not dirty).
	 */
	if (spa_flush_all_logs_requested(spa)) {
		ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
		spa->spa_log_flushall_txg = 0;
	}
}

void
spa_cleanup_old_sm_logs(spa_t *spa, dmu_tx_t *tx)
{
	objset_t *mos = spa_meta_objset(spa);

	uint64_t spacemap_zap;
	int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
	if (error == ENOENT) {
		ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
		return;
	}
	VERIFY0(error);

	metaslab_t *oldest = avl_first(&spa->spa_metaslabs_by_flushed);
	uint64_t oldest_flushed_txg = metaslab_unflushed_txg(oldest);

	/* Free all log space maps older than the oldest_flushed_txg. */
	for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
	    sls && sls->sls_txg < oldest_flushed_txg;
	    sls = avl_first(&spa->spa_sm_logs_by_txg)) {
		ASSERT0(sls->sls_mscount);
		avl_remove(&spa->spa_sm_logs_by_txg, sls);
		space_map_free_obj(mos, sls->sls_sm_obj, tx);
		VERIFY0(zap_remove_int(mos, spacemap_zap, sls->sls_txg, tx));
		spa->spa_unflushed_stats.sus_nblocks -= sls->sls_nblocks;
		kmem_free(sls, sizeof (spa_log_sm_t));
	}
}
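
/*
 * Illustrative example of the cleanup above (hypothetical TXGs): if
 * spa_sm_logs_by_txg holds logs for TXGs 100, 101, 102, and 103, and the
 * oldest ms_unflushed_txg in the pool is 102, then the logs for TXGs 100
 * and 101 are obsolete: every metaslab already has their entries in its own
 * space map, so both space map objects are freed and their ZAP entries
 * removed, leaving only the logs for TXGs 102 and 103.
 */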

static spa_log_sm_t *
spa_log_sm_alloc(uint64_t sm_obj, uint64_t txg)
{
	spa_log_sm_t *sls = kmem_zalloc(sizeof (*sls), KM_SLEEP);

	sls->sls_sm_obj = sm_obj;
	sls->sls_txg = txg;
	return (sls);
}

void
spa_generate_syncing_log_sm(spa_t *spa, dmu_tx_t *tx)
{
	uint64_t txg = dmu_tx_get_txg(tx);
	objset_t *mos = spa_meta_objset(spa);

	if (spa_syncing_log_sm(spa) != NULL)
		return;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP))
		return;

	uint64_t spacemap_zap;
	int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
	if (error == ENOENT) {
		ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));

		error = 0;
		spacemap_zap = zap_create(mos,
		    DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
		VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1,
		    &spacemap_zap, tx));
		spa_feature_incr(spa, SPA_FEATURE_LOG_SPACEMAP, tx);
	}
	VERIFY0(error);

	uint64_t sm_obj;
	ASSERT3U(zap_lookup_int_key(mos, spacemap_zap, txg, &sm_obj),
	    ==, ENOENT);
	sm_obj = space_map_alloc(mos, zfs_log_sm_blksz, tx);
	VERIFY0(zap_add_int_key(mos, spacemap_zap, txg, sm_obj, tx));
	avl_add(&spa->spa_sm_logs_by_txg, spa_log_sm_alloc(sm_obj, txg));

	/*
	 * We pass UINT64_MAX as the space map's representation size
	 * and SPA_MINBLOCKSHIFT as the shift, to make the space map
	 * accept any sort of segment since there's no real advantage
	 * to being more restrictive (given that we're already going
	 * to be using 2-word entries).
	 */
	VERIFY0(space_map_open(&spa->spa_syncing_log_sm, mos, sm_obj,
	    0, UINT64_MAX, SPA_MINBLOCKSHIFT));

	/*
	 * If the log space map feature was just enabled, the blocklimit
	 * has not yet been set.
	 */
	if (spa_log_sm_blocklimit(spa) == 0)
		spa_log_sm_set_blocklimit(spa);
}

/*
 * Find all the log space maps stored in the space map ZAP and sort
 * them by their TXG in spa_sm_logs_by_txg.
 */
static int
spa_ld_log_sm_metadata(spa_t *spa)
{
	int error;
	uint64_t spacemap_zap;

	ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));

	error = zap_lookup(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
	if (error == ENOENT) {
		/* the space map ZAP doesn't exist yet */
		return (0);
	} else if (error != 0) {
		spa_load_failed(spa, "spa_ld_log_sm_metadata(): failed at "
		    "zap_lookup(DMU_POOL_DIRECTORY_OBJECT) [error %d]",
		    error);
		return (error);
	}

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa_meta_objset(spa), spacemap_zap);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t log_txg = zfs_strtonum(za.za_name, NULL);
		spa_log_sm_t *sls =
		    spa_log_sm_alloc(za.za_first_integer, log_txg);
		avl_add(&spa->spa_sm_logs_by_txg, sls);
	}
	zap_cursor_fini(&zc);
	if (error != ENOENT) {
		spa_load_failed(spa, "spa_ld_log_sm_metadata(): failed at "
		    "zap_cursor_retrieve(spacemap_zap) [error %d]",
		    error);
		return (error);
	}

	for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
	    m; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
		spa_log_sm_t target = { .sls_txg = metaslab_unflushed_txg(m) };
		spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
		    &target, NULL);

		/*
		 * At this point if sls is NULL it means that a bug occurred
		 * in ZFS the last time the pool was open or earlier in the
		 * import code path. In general, we would have placed a
		 * VERIFY() here or in this case just let the kernel panic
		 * with NULL pointer dereference when incrementing sls_mscount,
		 * but since this is the import code path we can be a bit more
		 * lenient. Thus, for DEBUG bits we always cause a panic, while
		 * in production we log the error and just fail the import.
		 */
		ASSERT(sls != NULL);
		if (sls == NULL) {
			spa_load_failed(spa, "spa_ld_log_sm_metadata(): bug "
			    "encountered: could not find log spacemap for "
			    "TXG %llu [error %d]",
			    (u_longlong_t)metaslab_unflushed_txg(m), ENOENT);
			return (ENOENT);
		}
		sls->sls_mscount++;
	}

	return (0);
}

typedef struct spa_ld_log_sm_arg {
	spa_t *slls_spa;
	uint64_t slls_txg;
} spa_ld_log_sm_arg_t;

static int
spa_ld_log_sm_cb(space_map_entry_t *sme, void *arg)
{
	uint64_t offset = sme->sme_offset;
	uint64_t size = sme->sme_run;
	uint32_t vdev_id = sme->sme_vdev;
	spa_ld_log_sm_arg_t *slls = arg;
	spa_t *spa = slls->slls_spa;

	vdev_t *vd = vdev_lookup_top(spa, vdev_id);

	/*
	 * If the vdev has been removed (i.e. it is indirect or a hole)
	 * skip this entry. The contents of this vdev have already moved
	 * elsewhere.
	 */
	if (!vdev_is_concrete(vd))
		return (0);

	metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
	ASSERT(!ms->ms_loaded);

	/*
	 * If we have already flushed entries for this TXG to this
	 * metaslab's space map, then ignore it. Note that we flush
	 * before processing any allocations/frees for that TXG, so
	 * the metaslab's space map only has entries from *before*
	 * the unflushed TXG.
	 */
	if (slls->slls_txg < metaslab_unflushed_txg(ms))
		return (0);

	switch (sme->sme_type) {
	case SM_ALLOC:
		range_tree_remove_xor_add_segment(offset, offset + size,
		    ms->ms_unflushed_frees, ms->ms_unflushed_allocs);
		break;
	case SM_FREE:
		range_tree_remove_xor_add_segment(offset, offset + size,
		    ms->ms_unflushed_allocs, ms->ms_unflushed_frees);
		break;
	default:
		panic("invalid maptype_t");
		break;
	}
	return (0);
}
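
/*
 * Illustrative example of the xor-add replay above (hypothetical segment):
 * suppose the logs record ALLOC [0x1000, 0x2000), then FREE [0x1000, 0x2000),
 * then ALLOC [0x1000, 0x2000) again for the same metaslab. Replaying them in
 * order leaves the segment only in ms_unflushed_allocs: the first ALLOC adds
 * it to ms_unflushed_allocs, the FREE moves it over to ms_unflushed_frees,
 * and the final ALLOC moves it back, keeping the two trees disjoint
 * throughout.
 */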

static int
spa_ld_log_sm_data(spa_t *spa)
{
	int error = 0;

	/*
	 * If we are not going to do any writes there is no need
	 * to read the log space maps.
	 */
	if (!spa_writeable(spa))
		return (0);

	ASSERT0(spa->spa_unflushed_stats.sus_nblocks);
	ASSERT0(spa->spa_unflushed_stats.sus_memused);

	hrtime_t read_logs_starttime = gethrtime();
	/* this is a no-op when we don't have space map logs */
	for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
	    sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
		space_map_t *sm = NULL;
		error = space_map_open(&sm, spa_meta_objset(spa),
		    sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT);
		if (error != 0) {
			spa_load_failed(spa, "spa_ld_log_sm_data(): failed at "
			    "space_map_open(obj=%llu) [error %d]",
			    (u_longlong_t)sls->sls_sm_obj, error);
			goto out;
		}

		struct spa_ld_log_sm_arg vla = {
			.slls_spa = spa,
			.slls_txg = sls->sls_txg
		};
		error = space_map_iterate(sm, space_map_length(sm),
		    spa_ld_log_sm_cb, &vla);
		if (error != 0) {
			space_map_close(sm);
			spa_load_failed(spa, "spa_ld_log_sm_data(): failed "
			    "at space_map_iterate(obj=%llu) [error %d]",
			    (u_longlong_t)sls->sls_sm_obj, error);
			goto out;
		}

		ASSERT0(sls->sls_nblocks);
		sls->sls_nblocks = space_map_nblocks(sm);
		spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;
		summary_add_data(spa, sls->sls_txg,
		    sls->sls_mscount, sls->sls_nblocks);

		space_map_close(sm);
	}
	hrtime_t read_logs_endtime = gethrtime();
	spa_load_note(spa,
	    "read %llu log space maps (%llu total blocks - blksz = %llu bytes) "
	    "in %lld ms", (u_longlong_t)avl_numnodes(&spa->spa_sm_logs_by_txg),
	    (u_longlong_t)spa_log_sm_nblocks(spa),
	    (u_longlong_t)zfs_log_sm_blksz,
	    (longlong_t)((read_logs_endtime - read_logs_starttime) / 1000000));

out:
	/*
	 * Now that the metaslabs contain their unflushed changes:
	 * [1] recalculate their actual allocated space
	 * [2] recalculate their weights
	 * [3] sum up the memory usage of their unflushed range trees
	 * [4] optionally load them, if debug_load is set
	 *
	 * Note that even in the case where we get here because of an
	 * error (e.g. error != 0), we still want to update the fields
	 * below in order to have a proper teardown in spa_unload().
	 */
	for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
	    m != NULL; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
		mutex_enter(&m->ms_lock);
		m->ms_allocated_space = space_map_allocated(m->ms_sm) +
		    range_tree_space(m->ms_unflushed_allocs) -
		    range_tree_space(m->ms_unflushed_frees);

		vdev_t *vd = m->ms_group->mg_vd;
		metaslab_space_update(vd, m->ms_group->mg_class,
		    range_tree_space(m->ms_unflushed_allocs), 0, 0);
		metaslab_space_update(vd, m->ms_group->mg_class,
		    -range_tree_space(m->ms_unflushed_frees), 0, 0);

		ASSERT0(m->ms_weight & METASLAB_ACTIVE_MASK);
		metaslab_recalculate_weight_and_sort(m);

		spa->spa_unflushed_stats.sus_memused +=
		    metaslab_unflushed_changes_memused(m);

		if (metaslab_debug_load && m->ms_sm != NULL) {
			VERIFY0(metaslab_load(m));
		}
		mutex_exit(&m->ms_lock);
	}

	return (error);
}

static int
spa_ld_unflushed_txgs(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);

	if (vd->vdev_top_zap == 0)
		return (0);

	uint64_t object = 0;
	int error = zap_lookup(mos, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
	    sizeof (uint64_t), 1, &object);
	if (error == ENOENT)
		return (0);
	else if (error != 0) {
		spa_load_failed(spa, "spa_ld_unflushed_txgs(): failed at "
		    "zap_lookup(vdev_top_zap=%llu) [error %d]",
		    (u_longlong_t)vd->vdev_top_zap, error);
		return (error);
	}

	for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *ms = vd->vdev_ms[m];
		ASSERT(ms != NULL);

		metaslab_unflushed_phys_t entry;
		uint64_t entry_size = sizeof (entry);
		uint64_t entry_offset = ms->ms_id * entry_size;

		error = dmu_read(mos, object,
		    entry_offset, entry_size, &entry, 0);
		if (error != 0) {
			spa_load_failed(spa, "spa_ld_unflushed_txgs(): "
			    "failed at dmu_read(obj=%llu) [error %d]",
			    (u_longlong_t)object, error);
			return (error);
		}

		ms->ms_unflushed_txg = entry.msp_unflushed_txg;
		if (ms->ms_unflushed_txg != 0) {
			mutex_enter(&spa->spa_flushed_ms_lock);
			avl_add(&spa->spa_metaslabs_by_flushed, ms);
			mutex_exit(&spa->spa_flushed_ms_lock);
		}
	}
	return (0);
}
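
/*
 * Illustrative sketch of the on-disk layout read above: the object referenced
 * by VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS is a flat array of
 * metaslab_unflushed_phys_t records indexed by metaslab ID, so the record for
 * metaslab 3 lives at byte offset 3 * sizeof (metaslab_unflushed_phys_t). A
 * msp_unflushed_txg of 0 means the metaslab has never been flushed with the
 * feature active, so it is not added to spa_metaslabs_by_flushed yet.
 */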

/*
 * Read all the log space map entries into their respective
 * metaslab unflushed trees and keep them sorted by TXG in the
 * SPA's metadata. In addition, set up all the metadata for the
 * memory and the block heuristics.
 */
int
spa_ld_log_spacemaps(spa_t *spa)
{
	int error;

	spa_log_sm_set_blocklimit(spa);

	for (uint64_t c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[c];
		error = spa_ld_unflushed_txgs(vd);
		if (error != 0)
			return (error);
	}

	error = spa_ld_log_sm_metadata(spa);
	if (error != 0)
		return (error);

	/*
	 * Note: we don't actually expect anything to change at this point
	 * but we grab the config lock so we don't fail any assertions
	 * when using vdev_lookup_top().
	 */
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	error = spa_ld_log_sm_data(spa);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	return (error);
}