spa_errlog.c revision 088f389458728c464569a5506b58070254fa4f7d
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Routines to manage the on-disk persistent error log.
 *
 * Each pool stores a log of all logical data errors seen during normal
 * operation.  This is actually the union of two distinct logs: the last log,
 * and the current log.  All errors seen are logged to the current log.  When a
 * scrub completes, the last log is thrown out, the current log becomes the
 * last log, and the current log is reinitialized.  This way, if an error is
 * somehow corrected, a new scrub will show that it no longer exists, and it
 * will be deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
 * zbookmark tuple (objset, object, level, blkid), and whose value is an
 * optional 'objset:object' human-readable string describing the data.  When an
 * error is first logged, this string will be empty, indicating that no name is
 * known.  This prevents us from having to issue a potentially large amount of
 * I/O to discover the object name during an error path.  Instead, we do the
 * calculation when the data is requested, storing the result so future queries
 * will be faster.
 *
 * This log is then shipped into an nvlist where the key is the dataset name
 * and the value is the object name.  Userland is then responsible for
 * uniquifying this list and displaying it to the user.
 */
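/*
 * For example, an error at level 0, block 0x1a of object 0x6 in objset 0x15
 * is keyed as "15:6:0:1a" (see bookmark_to_name() below); its value remains
 * an empty string until the human-readable name has been resolved.
 */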

#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
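/*
 * For example, strtonum("15:6", &str) returns 0x15 and leaves str pointing
 * at the ':' delimiter; parsing stops at the first character that is not a
 * lowercase hexadecimal digit.
 */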
#ifdef _KERNEL
static uint64_t
strtonum(char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	*nptr = str;

	return (val);
}
#endif

/*
 * Convert a bookmark to a string.
 */
static void
bookmark_to_name(zbookmark_t *zb, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
	    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);
}

/*
 * Convert a string to a bookmark
 */
#ifdef _KERNEL
static void
name_to_bookmark(char *buf, zbookmark_t *zb)
{
	zb->zb_objset = strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}
#endif

/*
 * Log an uncorrectable error to the persistent error log.  We add it to the
 * spa's list of pending errors.  The changes are actually synced out to disk
 * during spa_errlog_sync().
 */
void
spa_log_error(spa_t *spa, zio_t *zio)
{
	zbookmark_t *zb = &zio->io_logical->io_bookmark;
	spa_error_entry_t search;
	spa_error_entry_t *new;
	avl_tree_t *tree;
	avl_index_t where;

	/*
	 * If we are trying to import a pool, ignore any errors, as we won't be
	 * writing to the pool any time soon.
	 */
	if (spa->spa_load_state == SPA_LOAD_TRYIMPORT)
		return;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * If we have had a request to rotate the log, log it to the next list
	 * instead of the current one.
	 */
	if (spa->spa_scrub_active || spa->spa_scrub_finished)
		tree = &spa->spa_errlist_scrub;
	else
		tree = &spa->spa_errlist_last;

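	/* Each bookmark is logged at most once; skip it if already pending. */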
	search.se_bookmark = *zb;
	if (avl_find(tree, &search, &where) != NULL) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
	new->se_bookmark = *zb;
	avl_insert(tree, new, where);

	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Return the number of errors currently in the error log.  This is actually the
 * sum of both the last log and the current log, since we don't know the union
 * of these logs until we reach userland.
 */
uint64_t
spa_get_errlog_size(spa_t *spa)
{
	uint64_t total = 0, count;

	mutex_enter(&spa->spa_errlog_lock);
	if (spa->spa_errlog_scrub != 0 &&
	    zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
	    &count) == 0)
		total += count;

	if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
	    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
	    &count) == 0)
		total += count;
	mutex_exit(&spa->spa_errlog_lock);

	mutex_enter(&spa->spa_errlist_lock);
	total += avl_numnodes(&spa->spa_errlist_last);
	total += avl_numnodes(&spa->spa_errlist_scrub);
	mutex_exit(&spa->spa_errlist_lock);

	return (total);
}

#ifdef _KERNEL
static int
process_error_log(spa_t *spa, uint64_t obj, void *addr, size_t *count)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_t zb;

	if (obj == 0)
		return (0);

	for (zap_cursor_init(&zc, spa->spa_meta_objset, obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {

		if (*count == 0) {
			zap_cursor_fini(&zc);
			return (ENOMEM);
		}

		name_to_bookmark(za.za_name, &zb);

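		/* The user buffer is filled from the end toward the front. */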
		if (copyout(&zb, (char *)addr +
		    (*count - 1) * sizeof (zbookmark_t),
		    sizeof (zbookmark_t)) != 0) {
			zap_cursor_fini(&zc);
			return (EFAULT);
		}

		*count -= 1;
	}

	zap_cursor_fini(&zc);

	return (0);
}

static int
process_error_list(avl_tree_t *list, void *addr, size_t *count)
{
	spa_error_entry_t *se;

	for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {

		if (*count == 0)
			return (ENOMEM);

		if (copyout(&se->se_bookmark, (char *)addr +
		    (*count - 1) * sizeof (zbookmark_t),
		    sizeof (zbookmark_t)) != 0)
			return (EFAULT);

		*count -= 1;
	}

	return (0);
}
#endif

/*
 * Copy all known errors to userland as an array of bookmarks.  This is
 * actually a union of the on-disk last log and current log, as well as any
 * pending error requests.
 *
 * Because the act of reading the on-disk log could cause errors to be
 * generated, we have two separate locks: one for the error log and one for the
 * in-core error lists.  We only need the error list lock to log an error, so
 * we grab the error log lock while we read the on-disk logs, and only pick up
 * the error list lock when we are finished.
 */
int
spa_get_errlog(spa_t *spa, void *uaddr, size_t *count)
{
	int ret = 0;

#ifdef _KERNEL
	mutex_enter(&spa->spa_errlog_lock);

	ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);

	if (!ret && !spa->spa_scrub_finished)
		ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
		    count);

	mutex_enter(&spa->spa_errlist_lock);
	if (!ret)
		ret = process_error_list(&spa->spa_errlist_scrub, uaddr,
		    count);
	if (!ret)
		ret = process_error_list(&spa->spa_errlist_last, uaddr,
		    count);
	mutex_exit(&spa->spa_errlist_lock);

	mutex_exit(&spa->spa_errlog_lock);
#endif

	return (ret);
}

/*
 * Called when a scrub completes.  This simply sets a bit which tells
 * spa_log_error() which AVL tree to add new errors to.  spa_errlog_sync() is
 * responsible for actually syncing the changes to the underlying objects.
 */
void
spa_errlog_rotate(spa_t *spa)
{
	mutex_enter(&spa->spa_errlist_lock);
	spa->spa_scrub_finished = B_TRUE;
	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Discard any pending errors from the spa_t.  Called when unloading a faulted
 * pool, as the errors encountered during the open cannot be synced to disk.
 */
void
spa_errlog_drain(spa_t *spa)
{
	spa_error_entry_t *se;
	void *cookie;

	mutex_enter(&spa->spa_errlist_lock);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Process a list of errors into the current on-disk log.
 */
static void
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
{
	spa_error_entry_t *se;
	char buf[64];
	void *cookie;

	if (avl_numnodes(t) != 0) {
		/* create log if necessary */
		if (*obj == 0)
			*obj = zap_create(spa->spa_meta_objset,
			    DMU_OT_ERROR_LOG, DMU_OT_NONE,
			    0, tx);

		/* add errors to the current log */
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			char *name = se->se_name ? se->se_name : "";

			bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));

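			/*
			 * The key is the bookmark string; the value is the
			 * name, which stays empty until it has been resolved.
			 */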
			(void) zap_update(spa->spa_meta_objset,
			    *obj, buf, 1, strlen(name) + 1, name, tx);
		}

		/* purge the error list */
		cookie = NULL;
		while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
			kmem_free(se, sizeof (spa_error_entry_t));
	}
}

/*
 * Sync the error log out to disk.  This is a little tricky because the act of
 * writing the error log requires the spa_errlist_lock.  So, we need to lock the
 * error lists, take a copy of the lists, and then reinitialize them.  Then, we
 * drop the error list lock and take the error log lock, at which point we
 * do the errlog processing.  Then, if we encounter an I/O error during this
 * process, we can successfully add the error to the list.  Note that this will
 * result in the perpetual recycling of errors, but it is an unlikely situation
 * and not a performance critical operation.
 */
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	int scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	mutex_exit(&spa->spa_errlist_lock);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			VERIFY(dmu_object_free(spa->spa_meta_objset,
			    spa->spa_errlog_last, tx) == 0);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

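		/* Pending scrub errors now belong to the rotated (last) log. */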
		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors.
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS to reflect the new values.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
}